Run scrapy splash as a script
I am trying to run a Scrapy script that uses Splash, because I want to scrape a JavaScript-based web page, but with no results. When I execute the script with the python command I get this error: crochet._eventloop.TimeoutError. Also, the print statement in the parse method is never printed, so I think something is wrong with the SplashRequest. The code I wrote to achieve this is:
import logging
import scrapy
from scrapy import signals
from scrapy.crawler import CrawlerRunner
from scrapy.item import Item, Field
from scrapy.signalmanager import dispatcher
from scrapy_splash import SplashRequest
from crochet import setup, wait_for

setup()
# logging.getLogger('scrapy').propagate = False


class GooglePatentsSpider(scrapy.spiders.Spider):
    name = "google_patents_spider"
    allowed_domains = ['patents.google.com']

    script = '''
    function main(splash, args)
        splash.private_mode_enabled = false
        assert(splash:go(args.url))
        splash:wait(5)
        return splash:html()
    end
    '''

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        spider = super(GooglePatentsSpider, cls).from_crawler(crawler, *args, **kwargs)
        crawler.signals.connect(spider.item_scraped, signal=signals.item_scraped)
        return spider

    def item_scraped(self, item):
        return item

    def start_requests(self):
        for url in self.start_urls:
            yield SplashRequest(
                url=url,
                callback=self.parse,
                endpoint='execute',
                args={
                    'lua_source': self.script
                }
            )

    def parse(self, response):
        print('from parse')
        item = {}
        item['status'] = 'Hello world'
        return item


@wait_for(timeout=50.0)
async def run_spider():
    """Returns all the scraped items of the provided publication number"""
    results = []

    def crawler_results(signal, sender, item, response, spider):
        results.append(item)

    dispatcher.connect(crawler_results, signal=signals.item_scraped)
    runner = CrawlerRunner(settings={
        'BOT_NAME': 'web_page_crawler',
        'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36',
        'ROBOTSTXT_OBEY': False,
        'SPLASH_URL': 'http://192.168.59.103:8050',
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy_splash.SplashCookiesMiddleware': 723,
            'scrapy_splash.SplashMiddleware': 725,
            'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
        },
        'SPIDER_MIDDLEWARES': {
            'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
        },
        'DUPEFILTER_CLASS': 'scrapy_splash.SplashAwareDupeFilter',
        'HTTPCACHE_STORAGE': 'scrapy_splash.SplashAwareFSCacheStorage'
    })
    await runner.crawl(GooglePatentsSpider, start_urls=['https://patents.google.com/?q=CL%3dgenistein'])
    if results:
        return results[0]
    else:
        return 'This publication number cannot be retrieved'

run_spider()
Full traceback:
Traceback (most recent call last):
  File "hits_scraper.py", line 89, in <module>
    run_spider()
  File "/home/shared/projects/siftlink/scrapers/.scrapers-api/lib/python3.8/site-packages/crochet/_eventloop.py", line 461, in wrapper
    return eventual_result.wait(timeout)
  File "/home/shared/projects/siftlink/scrapers/.scrapers-api/lib/python3.8/site-packages/crochet/_eventloop.py", line 196, in wait
    result = self._result(timeout)
  File "/home/shared/projects/siftlink/scrapers/.scrapers-api/lib/python3.8/site-packages/crochet/_eventloop.py", line 175, in _result
    raise TimeoutError()
crochet._eventloop.TimeoutError
I got the same error when Splash was not started before running the code. The crochet._eventloop.TimeoutError itself is only a symptom: @wait_for(timeout=50.0) aborts the call after 50 seconds, so it fires whenever the crawl hangs, whatever the underlying reason. If I run Splash (as a docker image) I also get this error, because the container has a different IP, but if I use the correct IP in 'SPLASH_URL' then it works.
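(If the container's port is published to the host when it is started, e.g. with docker run -p 8050:8050 scrapinghub/splash, then http://localhost:8050 should also work as 'SPLASH_URL', with no need to look up the container's internal IP.)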
On Linux I got the IP of the running image with:
docker inspect --format '{{ .NetworkSettings.IPAddress }}' $(docker ps -q)
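This prints the container's internal address on the Docker bridge network, typically something like 172.17.0.2.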
But the code also seems to work with the generic IP 0.0.0.0:
'SPLASH_URL': 'http://0.0.0.0:8050'
presumably because, on Linux, connecting to 0.0.0.0 is treated as connecting to the local host, so it reaches a container whose port is published there.
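Since both failure modes (Splash not started, wrong IP in 'SPLASH_URL') surface as the same opaque crochet._eventloop.TimeoutError, a cheap guard is to check that Splash is reachable before starting the crawl. Below is a minimal sketch, not part of the original script: it assumes the requests package is installed, uses Splash's /_ping health endpoint, and replaces the bare run_spider() call at the end of the script.

import requests  # assumed to be installed; not used in the original script

SPLASH_URL = 'http://0.0.0.0:8050'  # same value that goes into the 'SPLASH_URL' setting

def splash_is_up(url, timeout=3):
    """Return True if a Splash instance answers at `url`."""
    try:
        # A healthy Splash instance answers GET /_ping with HTTP 200.
        return requests.get(f'{url}/_ping', timeout=timeout).status_code == 200
    except requests.RequestException:
        return False

if not splash_is_up(SPLASH_URL):
    raise SystemExit(f'Splash is not reachable at {SPLASH_URL} - start the container first')

run_spider()  # safe to block now that Splash is known to be up

This way a misconfigured 'SPLASH_URL' fails immediately with a clear message instead of blocking for the full 50-second crochet timeout.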