I am new to web scraping and am having a hard time figuring out an issue: the website I am scraping cooperates with half my code but not with the other half. I am scraping data from mmadecisions.com with the crawling code below. I pull the first page of links successfully, and I open the pages behind those links successfully, but when I get to the third 'layer' it gives me an error. Is it JavaScript? It's odd, because when I pass the href link directly to the 'get_single_item_data' function, it runs perfectly. Does that mean I should use Selenium? Is the website blocking me? Then why is half the scraping working (for http://mmadecisions.com/decisions-by-event/2013/ and http://mmadecisions.com/decision/4801/John-Maguire-vs-Phil-Mulpeter), as you can see in my output below, where the href links are printed before I reach the third layer:
import requests
from bs4 import BeautifulSoup
import time

my_headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36"}

def ufc_spider(max_pages):
    page = 2013
    while page <= max_pages:
        url = 'http://mmadecisions.com/decisions-by-event/' + str(page) + '/'
        print(url)
        source_code = requests.get(url, headers=my_headers)
        plain_text = source_code.text
        soup = BeautifulSoup(plain_text, "html.parser")
        data = soup.findAll('table', {'width': '100%'})[2]
        for link in data.findAll('a', href=True):
            href = 'http://mmadecisions.com/' + str(link.get('href'))
            source_code = requests.get(href, "html.parser")
            plain_text = source_code.text
            soup2 = BeautifulSoup(plain_text, "html.parser")
            tmp = []
            other = soup2.findAll('table', {'width': '100%'})[1]
            for con in other.findAll('td', {'class': 'list2'}):
                CON = con.a
                ahref = 'http://mmadecisions.com/' + str(CON.get('href'))
                print(ahref)
                time.sleep(5)
                get_single_item_data(ahref)
        page += 1

def get_single_item_data(item_url):
    tmp = []
    source_code = requests.get(item_url, headers=my_headers)
    time.sleep(10)
    plain_text = source_code.text
    soup = BeautifulSoup(plain_text, "html.parser")
    print(soup)

ufc_spider(2017)
This is the output: I can get the website URLs, but it won't give me the data behind the decision URLs:
http://mmadecisions.com/decisions-by-event/2013/
http://mmadecisions.com/decision/4801/John-Maguire-vs-Phil-Mulpeter
<html><head><title>Apache Tomcat/7.0.68 (Ubuntu) - Error report</title><style><!--H1 {font-family:Tahoma,Arial,sans-serif;color:white;background-color:#525D76;font-size:22px;} H2 {font-family:Tahoma,Arial,sans-serif;color:white;background-color:#525D76;font-size:16px;} H3 {font-family:Tahoma,Arial,sans-serif;color:white;background-color:#525D76;font-size:14px;} BODY {font-family:Tahoma,Arial,sans-serif;color:black;background-color:white;} B {font-family:Tahoma,Arial,sans-serif;color:white;background-color:#525D76;} P {font-family:Tahoma,Arial,sans-serif;background:white;color:black;font-size:12px;}A {color : black;}A.name {color : black;}HR {color : #525D76;}--></style> </head><body><h1>HTTP Status 404 - /decision/4801/John-Maguire-vs-Phil-Mulpeter%0D%0A</h1><hr noshade="noshade" size="1"/><p><b>type</b> Status report</p><p><b>message</b> <u>/decision/4801/John-Maguire-vs-Phil-Mulpeter%0D%0A</u></p><p><b>description</b> <u>The requested resource is not available.</u></p><hr noshade="noshade" size="1"/><h3>Apache Tomcat/7.0.68 (Ubuntu)</h3></body></html>
http://mmadecisions.com/decision/4793/Amanda-English-vs-Slavka-Vitaly
<html><head><title>Apache Tomcat/7.0.68 (Ubuntu) - Error report</title><style><!--H1 {font-family:Tahoma,Arial,sans-serif;color:white;background-color:#525D76;font-size:22px;} H2 {font-family:Tahoma,Arial,sans-serif;color:white;background-color:#525D76;font-size:16px;} H3 {font-family:Tahoma,Arial,sans-serif;color:white;background-color:#525D76;font-size:14px;} BODY {font-family:Tahoma,Arial,sans-serif;color:black;background-color:white;} B {font-family:Tahoma,Arial,sans-serif;color:white;background-color:#525D76;} P {font-family:Tahoma,Arial,sans-serif;background:white;color:black;font-size:12px;}A {color : black;}A.name {color : black;}HR {color : #525D76;}--></style> </head><body><h1>HTTP Status 404 - /decision/4793/Amanda-English-vs-Slavka-Vitaly%0D%0A</h1><hr noshade="noshade" size="1"/><p><b>type</b> Status report</p><p><b>message</b> <u>/decision/4793/Amanda-English-vs-Slavka-Vitaly%0D%0A</u></p><p><b>description</b> <u>The requested resource is not available.</u></p><hr noshade="noshade" size="1"/><h3>Apache Tomcat/7.0.68 (Ubuntu)</h3></body></html>
http://mmadecisions.com/decision/4792/Chris-Boujard-vs-Peter-Queally
......
I have tried changing the user-agent header, adding a time delay, and running the code through my VPN. None of these work, and they all give the same output. Please help!
import requests
from bs4 import BeautifulSoup

links = []

for item in range(2013, 2020):
    print(f"{'-'*30}Extracting Year# {item}{'-'*30}")
    r = requests.get(f"http://mmadecisions.com/decisions-by-event/{item}/")
    soup = BeautifulSoup(r.text, 'html.parser')
    for item in soup.findAll('a', {'href': True}):
        item = item.get('href')
        if item.startswith('event'):
            print(f"http://mmadecisions.com/{item}")
            links.append(f"http://mmadecisions.com/{item}")

print("\nNow Fetching all urls inside Years..\n")

for item in links:
    r = requests.get(item)
    soup = BeautifulSoup(r.text, 'html.parser')
    for item in soup.findAll('a', {'href': True}):
        item = item.get('href')
        if item.startswith('decision/'):
            print(f"http://mmadecisions.com/{item}".strip())
Note that you can use the following:
for item in soup.findAll('td', {'class': 'list'}):
    for an in item.findAll('a'):
        print(an.get('href'))
And
for item in soup.findAll('td', {'class': 'list2'}):
    for an in item.findAll('a'):
        print(an.get('href').strip())