I am using Scrapy to consume messages (URLs) from RabbitMQ, but when I use yield to create a request with my URL as a parameter, execution never reaches the callback method. Below is the code of my spider:
# -*- coding: utf-8 -*-
import scrapy
import pika
from scrapy import cmdline
import json


class MydeletespiderSpider(scrapy.Spider):
    name = 'Mydeletespider'
    allowed_domains = []
    start_urls = []

    def callback(self, ch, method, properties, body):
        print(" [x] Received %r" % body)
        body = json.loads(body)
        url = body.get('url')
        yield scrapy.Request(url=url, callback=self.parse)

    def start_requests(self):
        cre = pika.PlainCredentials('test', 'test')
        connection = pika.BlockingConnection(
            pika.ConnectionParameters(host='10.0.12.103', port=5672,
                                      credentials=cre, socket_timeout=60))
        channel = connection.channel()
        channel.basic_consume(self.callback,
                              queue='Deletespider_Batch_Test',
                              no_ack=True)
        print(' [*] Waiting for messages. To exit press CTRL+C')
        channel.start_consuming()

    def parse(self, response):
        print(response.url)


cmdline.execute('scrapy crawl Mydeletespider'.split())
My goal is to pass the URL's response to the parse method.
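The problem is twofold. The yield inside callback makes it a generator function: pika calls it, gets back a generator object, and never iterates it, so the scrapy.Request is never created. Meanwhile start_requests blocks forever in channel.start_consuming() and yields nothing itself, so Scrapy never receives a single request and parse is never reached. If you want to stay with plain pika, here is a minimal sketch that pulls messages with basic_get directly inside start_requests and yields requests from there (this assumes pika 0.x, matching your no_ack usage, and it stops once the queue is drained instead of waiting for new messages):

import scrapy
import pika
import json


class MydeletespiderSpider(scrapy.Spider):
    name = 'Mydeletespider'

    def start_requests(self):
        cre = pika.PlainCredentials('test', 'test')
        connection = pika.BlockingConnection(
            pika.ConnectionParameters(host='10.0.12.103', port=5672,
                                      credentials=cre, socket_timeout=60))
        channel = connection.channel()
        # Pull one message at a time; basic_get returns (None, None, None)
        # when the queue is empty.
        while True:
            method, properties, body = channel.basic_get(
                queue='Deletespider_Batch_Test', no_ack=True)
            if method is None:
                break  # queue drained
            url = json.loads(body).get('url')
            if url:
                yield scrapy.Request(url=url, callback=self.parse)
        connection.close()

    def parse(self, response):
        print(response.url)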
For a long-running crawl that keeps consuming URLs from RabbitMQ, you can take a look at the scrapy-rabbitmq package:
Scrapy-rabbitmq lets Scrapy spiders pull URLs to crawl from a RabbitMQ queue and, optionally, push scraped items back into RabbitMQ for post-processing.
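If it is not installed yet, it is available from PyPI (assuming the package name matches the project name):

pip install scrapy-rabbitmq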
To enable it, set these values in your settings.py:
# Enables scheduling storing requests queue in rabbitmq.
SCHEDULER = "scrapy_rabbitmq.scheduler.Scheduler"
# Don't cleanup rabbitmq queues, allows to pause/resume crawls.
SCHEDULER_PERSIST = True
# Schedule requests using a priority queue. (default)
SCHEDULER_QUEUE_CLASS = 'scrapy_rabbitmq.queue.SpiderQueue'
# RabbitMQ Queue to use to store requests
RABBITMQ_QUEUE_NAME = 'scrapy_queue'
# Provide host and port to RabbitMQ daemon
RABBITMQ_CONNECTION_PARAMETERS = {'host': 'localhost', 'port': 6666}
# Bonus:
# Store scraped item in rabbitmq for post-processing.
# ITEM_PIPELINES = {
# 'scrapy_rabbitmq.pipelines.RabbitMQPipeline': 1
# }
And in your spider:
from scrapy import Spider
from scrapy_rabbitmq.spiders import RabbitMQMixin


class RabbitSpider(RabbitMQMixin, Spider):
    name = 'rabbitspider'

    def parse(self, response):
        # mixin will take urls from rabbit queue by itself
        pass
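To feed the spider, publish URL messages to the queue. Here is a minimal sketch with pika, assuming the default SpiderQueue treats each message body as a plain URL and that the queue has already been declared on the broker; the host, port, and queue name mirror the settings above:

import pika

connection = pika.BlockingConnection(
    pika.ConnectionParameters(host='localhost', port=6666))
channel = connection.channel()
# The queue name must match RABBITMQ_QUEUE_NAME in settings.py;
# the spider will pick this URL up and schedule it as a request.
channel.basic_publish(exchange='',
                      routing_key='scrapy_queue',
                      body='http://example.com/page-to-crawl')
connection.close()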