I would like to do the processing and loading of items in a function separated from parse_product
which is the prepare_item_download()
function in my case. However, when I run my spider, I get the error message that it needs to return a Request, BaseItem, dict or None and not a generator. It works when I leave it in the parse_product
function.
Here's my code:
from scrapy import Request, Spider
from scrapy.loader import ItemLoader
from firmware.items import FirmwareItem
import re
class AvmSpider(Spider):
name = 'avm'
start_urls = [
'http://download.avm.de/fritzbox/',
'http://download.avm.de/fritzwlan/',
'http://download.avm.de/fritzpowerline/'
]
# parse top-level pages
def parse(self, response):
for product_url in self.link_extractor(response=response, prefix=('beta', 'tools', 'license', '..')):
yield Request(url=product_url, callback=self.parse_product)
# parse each product; call self as long as not in fritz.os directory
def parse_product(self, response):
path = response.request.url.split('/')[:-1]
if path[-1] == 'fritz.os':
yield self.prepare_item_download(response, path)
else:
for sub in self.link_extractor(response=response, prefix=('recover', '..')):
yield Request(url=response.urljoin(sub), callback=self.parse_product)
# get release dates, populate and load item
def prepare_item_download(self, response, path: str):
release_dates = self.date_extractor(response)
for index, file_url in enumerate(self.link_extractor(response=response, prefix='..')):
if file_url.endswith('.image'):
loader = ItemLoader(item=FirmwareItem(), selector=file_url)
loader.add_value('file_urls', file_url)
loader.add_value('vendor', 'avm')
loader.add_value('device_name', path[-3])
loader.add_value('device_class', path[-4])
loader.add_value('release_date', release_dates[index])
yield loader.load_item()
yield None
# return all links which do not start with a certain prefix
@staticmethod
def link_extractor(response, prefix) -> list:
return [response.urljoin(p) for p in response.xpath('//a/@href').extract() if not p.startswith(prefix)]
# return release dates of all images listed on current page
@staticmethod
def date_extractor(response) -> list:
release_dates = list()
for text in response.xpath('//pre/text()').extract():
match = re.search(r'(\d{2}-\w{3}-\d{4} \d{2}:\d{2})', text)
if match:
release_dates.append(match.group(1))
return release_dates
The above code does not work. Below you can see the working code:
def parse_product(self, response):
path = response.request.url.split('/')[:-1]
if path[-1] == 'fritz.os':
release_dates = self.date_extractor(response)
for index, file_url in enumerate(self.link_extractor(response=response, prefix='..')):
if file_url.endswith('.image'):
loader = ItemLoader(item=FirmwareItem(), selector=file_url)
loader.add_value('file_urls', file_url)
loader.add_value('vendor', 'avm')
loader.add_value('device_name', path[-3])
loader.add_value('device_class', path[-4])
loader.add_value('release_date', release_dates[index])
yield loader.load_item()
else:
for sub in self.link_extractor(response=response, prefix=('recover', '..')):
yield Request(url=response.urljoin(sub), callback=self.parse_product)
You parse_product
function generates a generator:
def parse_product(self, response):
path = response.request.url.split('/')[:-1]
if path[-1] == 'fritz.os':
yield self.prepare_item_download(response, path)
^^^^^^^^^^^^^
Instead you should use yield from
statement to unpack the generator:
def parse_product(self, response):
path = response.request.url.split('/')[:-1]
if path[-1] == 'fritz.os':
yield from self.prepare_item_download(response, path)
^^^^
# or for python <3.3
for item in self.prepare_item_download(response, path):
yield item