Issue
I am trying to scrape the texts of all of the episodes of all of the TV Series in a webpage. The entire thing is nested hence it goes through 3 webpages before finding the list of links.It is showing some error which I have pasted below.
import requests
import bs4 as bs
urls='http://dl5.lavinmovie.net/Series/'
url=requests.get(urls).text
soup=bs.BeautifulSoup(url,'lxml')
title=soup.find_all('a')
ur=[""]
names=[""]
season=[""]
quality=[""]
for i in title:
# names.append(i.text)
urlss=urls+i.text+"/"
urla=requests.get(urls).text
soupp=bs.BeautifulSoup(urla,'lxml')
ur=soupp.find_all('a')
for i in ur:
# names.append(i.text)
urls=urls+i.text+"/"
urla=requests.get(urls).text
soupp=bs.BeautifulSoup(urla,'lxml')
ur=soupp.find_all('a')
for i in ur:
# quality.append(i.text)
urls=urls+i.text+"/"
urla=requests.get(urls).text
soupp=bs.BeautifulSoup(urla,'lxml')
ur=soupp.find_all('a')
for i in ur:
print(i.text)
Traceback (most recent call last):
File "C:\Users\Vedant Mamgain\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connectionpool.py", line 603, in urlopen
chunked=chunked)
File "C:\Users\Vedant Mamgain\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connectionpool.py", line 387, in _make_request
six.raise_from(e, None)
File "<string>", line 2, in raise_from
File "C:\Users\Vedant Mamgain\AppData\Local\Programs\Python\Python37\lib\site-packages\urllib3\connectionpool.py", line 383, in _make_request
httplib_response = conn.getresponse()
File "C:\Users\Vedant Mamgain\AppData\Local\Programs\Python\Python37\lib\http\client.py", line 1321, in getresponse
response.begin()
File "C:\Users\Vedant Mamgain\AppData\Local\Programs\Python\Python37\lib\http\client.py", line 296, in begin
version, status, reason = self._read_status()
File "C:\Users\Vedant Mamgain\AppData\Local\Programs\Python\Python37\lib\http\client.py", line 257, in _read_status
line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
File "C:\Users\Vedant Mamgain\AppData\Local\Programs\Python\Python37\lib\socket.py", line 589, in readinto
return self._sock.recv_into(b)
ConnectionResetError: [WinError 10054] An existing connection was forcibly closed by the remote host
During handling of the above exception, another exception occurred:
Solution
Try using this, it worked for me:
import requests
import bs4 as bs
names = list()
name_links = list()
base_url = 'http://dl5.lavinmovie.net/Series/'
final_list = list()
soup = bs.BeautifulSoup(requests.get(base_url).text, 'lxml')
title = soup.find_all('a')
for link in title[1:]:
names.append(link.text)
current_link = link['href']
print(link.text)
name_links.append(str(current_link))
# get seasons
soup = bs.BeautifulSoup(requests.get(base_url + current_link).text, 'lxml')
title = soup.find_all('a')
for link in title[1:]:
season_link = link['href']
# get quality of the seasons
soup = bs.BeautifulSoup(requests.get(base_url + current_link +season_link).text, 'lxml')
title = soup.find_all('a')
for link in title[1:]:
quality_link = link['href']
# get list of episodes
soup = bs.BeautifulSoup(requests.get(base_url + current_link + season_link + quality_link).text, 'lxml')
title = soup.find_all('a')
for link in title[1:]:
episode_link = link['href']
final_list.a
Check if this works for you.
Answered By - Nitin
0 comments:
Post a Comment
Note: Only a member of this blog may post a comment.