I'm trying to crawl a forum for ultimately posts in threads that have links posted in them. Right now I'm just trying to scrape the users names of the posts. But I think there is a problem with the urls not being static.
spider.py
from scrapy.spiders import CrawlSpider
from scrapy.selector import Selector
from scrapy.item import Item, Field
class TextPostItem(Item):
title = Field()
url = Field()
submitted = Field()
class RedditCrawler(CrawlSpider):
name = 'post-spider'
allowed_domains = ['flashback.org']
start_urls = ['https://www.flashback.org/t2637903']
def parse(self, response):
s = Selector(response)
next_link = s.xpath('//a[@class="smallfont2"]//@href').extract()[0]
if len(next_link):
yield self.make_requests_from_url(next_link)
posts = Selector(response).xpath('//div[@id="posts"]/div[@class="alignc.p4.post"]')
for post in posts:
i = TextPostItem()
i['title'] = post.xpath('tbody/tr[1]/td/span/text()').extract() [0]
#i['url'] = post.xpath('div[2]/ul/li[1]/a/@href').extract()[0]
yield i
Provide me the following error:
raise ValueError('Missing scheme in request url: %s' % self._url)
ValueError: Missing scheme in request url: /t2637903p2
Any idea?
You need to "join" the response.url
with the relative url you've extracted using urljoin()
:
from urlparse import urljoin
urljoin(response.url, next_link)
Also note that there is no need to instantiate a Selector
object - you can use the response.xpath()
shortcut directly:
def parse(self, response):
next_link = response.xpath('//a[@class="smallfont2"]//@href').extract()[0]
# ...