Deferred requests in scrapy
I want to repeatedly crawl the same URL with different delays. After researching the issue, it seemed that the appropriate solution was to use something like
nextreq = scrapy.Request(url, dont_filter=True)
d = defer.Deferred()
delay = 1
reactor.callLater(delay, d.callback, nextreq)
yield d
in parse.
However, I can't get this to work. I get the error message:
ERROR: Spider must return Request, BaseItem, dict or None, got 'Deferred'
I'm not familiar with Twisted, so I hope I'm just missing something obvious.
Is there a better way to achieve my goal without fighting the framework too much?
I finally found the answer in an old PR:
def parse(self, response):
    req = scrapy.Request(...)
    delay = 0
    reactor.callLater(delay, self.crawler.engine.schedule, request=req, spider=self)
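For context, here is a minimal sketch of how that call might sit inside a spider. The spider name, start URL, and the 5-second delay are illustrative assumptions, and the engine.schedule call is the one used by the old PR, so it may not be available in recent Scrapy releases:

import scrapy
from twisted.internet import reactor


class RepeatSpider(scrapy.Spider):
    # Hypothetical spider; the name and start URL are placeholders.
    name = 'repeat'
    start_urls = ['http://example.com']

    def parse(self, response):
        # ... process the response here ...
        # Re-request the same URL after a delay; dont_filter=True keeps the
        # duplicate filter from dropping the repeated request.
        req = scrapy.Request(response.url, dont_filter=True, callback=self.parse)
        delay = 5
        reactor.callLater(delay, self.crawler.engine.schedule,
                          request=req, spider=self)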
However, the spider may exit because it goes idle too early. Based on the outdated middleware https://github.com/ArturGaspar/scrapy-delayed-requests, this can be fixed with:
from scrapy import signals
from scrapy.exceptions import DontCloseSpider


class ImmortalSpiderMiddleware(object):

    @classmethod
    def from_crawler(cls, crawler):
        s = cls()
        crawler.signals.connect(s.spider_idle, signal=signals.spider_idle)
        return s

    @classmethod
    def spider_idle(cls, spider):
        raise DontCloseSpider()
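Since this class only connects to the spider_idle signal, it just needs to be registered in settings.py so that from_crawler gets called. A sketch, assuming the class lives in a hypothetical myproject.middlewares module (the order value is arbitrary; registering it under EXTENSIONS would also work, since it defines no processing methods):

# settings.py -- the module path is an assumption; use wherever you saved the class
SPIDER_MIDDLEWARES = {
    'myproject.middlewares.ImmortalSpiderMiddleware': 100,
}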
The last option, updating the middleware from ArturGaspar, led to:
from weakref import WeakKeyDictionary

from scrapy import signals
from scrapy.exceptions import DontCloseSpider, IgnoreRequest
from twisted.internet import reactor


class DelayedRequestsMiddleware(object):
    requests = WeakKeyDictionary()

    @classmethod
    def from_crawler(cls, crawler):
        ext = cls()
        crawler.signals.connect(ext.spider_idle, signal=signals.spider_idle)
        return ext

    @classmethod
    def spider_idle(cls, spider):
        if cls.requests.get(spider):
            spider.log("delayed requests pending, not closing spider")
            raise DontCloseSpider()

    def process_request(self, request, spider):
        delay = request.meta.pop('delay_request', None)
        if delay:
            self.requests.setdefault(spider, 0)
            self.requests[spider] += 1
            reactor.callLater(delay, self.schedule_request, request.copy(),
                              spider)
            raise IgnoreRequest()

    def schedule_request(self, request, spider):
        spider.crawler.engine.schedule(request, spider)
        self.requests[spider] -= 1
And it can be used in parse like:
yield Request(..., meta={'delay_request': 5})
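Putting it together, here is a hedged end-to-end sketch of the original goal (re-crawling the same URL at different delays). The module path, spider name, URL, and delay values are assumptions; since the middleware implements process_request and raises IgnoreRequest, it is registered as a downloader middleware:

# settings.py -- the module path 'myproject.middlewares' is an assumption
DOWNLOADER_MIDDLEWARES = {
    'myproject.middlewares.DelayedRequestsMiddleware': 543,
}

# spider sketch -- re-requests the same page with a growing delay between attempts
import scrapy


class RecrawlSpider(scrapy.Spider):
    name = 'recrawl'                      # hypothetical name
    start_urls = ['http://example.com']   # placeholder URL
    delays = [5, 30, 120]                 # example delays in seconds

    def parse(self, response):
        # ... extract data from the response here ...
        attempt = response.meta.get('attempt', 0)
        if attempt < len(self.delays):
            yield scrapy.Request(
                response.url,
                dont_filter=True,  # let the repeated request pass the dupe filter
                callback=self.parse,
                meta={'delay_request': self.delays[attempt],
                      'attempt': attempt + 1},
            )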