Scrapy: how to store url_id along with the crawled data
from scrapy import Spider, Request
from selenium import webdriver

class MySpider(Spider):
    name = "my_spider"

    def __init__(self):
        self.browser = webdriver.Chrome(executable_path='E:/chromedriver')
        self.browser.set_page_load_timeout(100)

    def closed(self, spider):
        print("spider closed")
        self.browser.close()

    def start_requests(self):
        start_urls = []
        with open("target_urls.txt", 'r', encoding='utf-8') as f:
            for line in f:
                url_id, url = line.split('\t\t')
                start_urls.append(url)

        for url in start_urls:
            yield Request(url=url, callback=self.parse)

    def parse(self, response):
        yield {
            'target_url': response.url,
            'comments': response.xpath('//div[@class="comments"]//em//text()').extract()
        }
The above is my scrapy code. I run the crawler with scrapy crawl my_spider -o comments.json.

You may notice that each of my urls has a unique url_id associated with it. How can I match each crawled result with its url_id? Ideally, I would like to store the url_id in the yielded output in comments.json.

Thanks a lot!
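(For reference, the code above assumes target_urls.txt holds one url_id/url pair per line, separated by a double tab; the ids and urls below are made-up placeholders.)

001		http://example.com/page1
002		http://example.com/page2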
Try passing in the meta parameter, for example. I've made some updates to your code:
def start_requests(self):
    with open("target_urls.txt", 'r', encoding='utf-8') as f:
        for line in f:
            url_id, url = line.split('\t\t')
            # carry url_id (and the original url) along with the request
            yield Request(url, self.parse, meta={'url_id': url_id, 'original_url': url})

def parse(self, response):
    yield {
        'target_url': response.meta['original_url'],
        'url_id': response.meta['url_id'],
        'comments': response.xpath('//div[@class="comments"]//em//text()').extract()
    }
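With this change, each item written to comments.json should carry the id, roughly like the following (hypothetical values):

{"target_url": "http://example.com/page1", "url_id": "001", "comments": ["...", "..."]}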
To answer the question and the comment, try something like this:
from scrapy import Spider, Request
from selenium import webdriver

class MySpider(Spider):
    name = "my_spider"

    def __init__(self):
        self.browser = webdriver.Chrome(executable_path='E:/chromedriver')
        self.browser.set_page_load_timeout(100)

    def closed(self, spider):
        print("spider closed")
        self.browser.close()

    def start_requests(self):
        with open("target_urls.txt", 'r', encoding='utf-8') as f:
            for line in f:
                url_id, url = line.split('\t\t')
                # attach url_id and url to the request so parse() can read them back
                yield Request(url=url, callback=self.parse, meta={'url_id': url_id, 'url': url})

    def parse(self, response):
        yield {
            'target_url': response.meta['url'],
            'comments': response.xpath('//div[@class="comments"]//em//text()').extract(),
            'url_id': response.meta['url_id']
        }
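One small caveat with both snippets: each line read from the file keeps its trailing newline, so the parsed url may end with \n. Stripping the line before splitting avoids this:

url_id, url = line.strip().split('\t\t')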
As mentioned in the previous answer, you can use META (http://scrapingauthority.com/scrapy-meta) to pass arguments between the various methods.
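As a side note, if you are on Scrapy 1.7 or newer, cb_kwargs is the recommended way to pass values into callbacks. A minimal sketch of the same idea, assuming the same spider class and file format as above:

def start_requests(self):
    with open("target_urls.txt", 'r', encoding='utf-8') as f:
        for line in f:
            url_id, url = line.strip().split('\t\t')
            # entries in cb_kwargs become keyword arguments of the callback
            yield Request(url=url, callback=self.parse, cb_kwargs={'url_id': url_id})

def parse(self, response, url_id):
    yield {
        'target_url': response.url,
        'url_id': url_id,
        'comments': response.xpath('//div[@class="comments"]//em//text()').extract()
    }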