How To Keep/Export Field Items in Specific Order Per Spider Class Definition, Utilizing The Items Pipeline in Scrapy
I have a spider that exports data to different CSV files, named after the class definitions in the spider class. However, I also want to keep the fields in a specific order as they are processed and exported to their respective CSV files.
For example, this is my items.py:
import scrapy

class first_class_def_Item(scrapy.Item):
    f1 = scrapy.Field()  # f1 is an arbitrary id used by both item class definitions
    f2 = scrapy.Field()
    f3 = scrapy.Field()

class second_class_def_Item(scrapy.Item):
    f1 = scrapy.Field()
    f4 = scrapy.Field()
    f5 = scrapy.Field()
    f6 = scrapy.Field()
This is my pipelines.py:
from scrapy.exporters import CsvItemExporter
from scrapy import signals
from pydispatch import dispatcher

def item_type(item):
    # The CSV file names are used (imported) from the scrapy spider.
    # For this example, I just want to keep "first_class_def.csv" without
    # the "_Item", rather than "first_class_def_Item.csv" as defined in the main scrapy spider.
    return type(item).__name__.replace('_Item', '')

class SomeSitePipeline(object):
    # For simplicity, I'm using the same class def names as found in
    # the main scrapy spider and as defined in items.py
    SaveTypes = ['first_class_def', 'second_class_def']

    def __init__(self):
        dispatcher.connect(self.spider_opened, signal=signals.spider_opened)
        dispatcher.connect(self.spider_closed, signal=signals.spider_closed)

    def spider_opened(self, spider):
        self.files = dict([(name, open("/somefolder/" + name + '.csv', 'wb')) for name in self.SaveTypes])
        self.exporters = dict([(name, CsvItemExporter(self.files[name])) for name in self.SaveTypes])
        [e.start_exporting() for e in self.exporters.values()]

    def spider_closed(self, spider):
        [e.finish_exporting() for e in self.exporters.values()]
        [f.close() for f in self.files.values()]

    def process_item(self, item, spider):
        typesItem = item_type(item)
        if typesItem in set(self.SaveTypes):
            self.exporters[typesItem].export_item(item)
        return item
And this is my spider.py:
import os
import scrapy
from somesite.items import first_class_def_Item, second_class_def_Item
from csv import DictReader

path = os.path.join(os.path.expanduser('~'), 'user', 'somefolder', 'IDs.csv')

class SomeSiteSpider(scrapy.Spider):
    name = 'somesite'
    allowed_domains = ['somesite.com']
    start_urls = ['https://somesite.com/login.aspx']

    def parse(self, response):
        return scrapy.FormRequest.from_response(response,
            formdata={'txtLogin$txtInput': 'User',
                      'txtPassword$txtInput': 'pass',
                      'btnLogin.x': '53',
                      'btnLogin.y': '33'},
            callback=self.Tables)

    def Tables(self, response):
        with open(path) as rows:
            for row in DictReader(rows):
                id = row["id"]
                yield scrapy.Request("https://somesite.com/page1.aspx",
                    meta={'id': id,
                          'form_control': some_form_control},  # some_form_control is a placeholder
                    dont_filter=True,
                    callback=self.first_class_def)
                yield scrapy.Request("https://somesite.com/page2.aspx",
                    meta={'id': id,
                          'form_control': some_form_control},
                    dont_filter=True,
                    callback=self.second_class_def)

    def first_class_def(self, response):
        return scrapy.FormRequest.from_response(response,
            formdata={'id': response.meta['id'],
                      'form_control': response.meta['form_control'],
                      'SearchControl$btnCreateReport': 'Create Report'},
            meta={'id': response.meta['id']},
            callback=self.scrap_page_1)

    def scrap_page_1(self, response):
        field_1 = response.xpath('//*[@class="formatText"][1]/text()').extract()
        field_2 = response.xpath('//*[@class="formatCurrency"][1]/text()').extract()
        for a, b in zip(field_1, field_2):
            items = first_class_def_Item()
            items['f1'] = response.meta['id']
            items['f2'] = a
            items['f3'] = b
            yield items

    def second_class_def(self, response):
        return scrapy.FormRequest.from_response(response,
            formdata={'id': response.meta['id'],
                      'form_control': response.meta['form_control'],
                      'form_control_two': 'some_form_control_two',
                      'SearchControl$btnCreateReport': 'Create Report'},
            meta={'id': response.meta['id']},
            callback=self.scrap_page_2)

    def scrap_page_2(self, response):
        field_1 = response.xpath('//*[@class="formatText"][1]/text()').extract()
        field_2 = response.xpath('//*[@class="formatCurrency"][1]/text()').extract()
        field_3 = response.xpath('//*[@class="formatText"][3]/text()').extract()
        for a, b, c in zip(field_1, field_2, field_3):
            items = second_class_def_Item()
            items['f1'] = response.meta['id']
            items['f4'] = a
            items['f5'] = b
            items['f6'] = c
            yield items
As the spider was processing and exporting the data, I was looking for a way to keep the fields in the generated CSV files "first_class_def.csv" and "second_class_def.csv" exported in the same order as they are defined in items.py:
f1,f2,f3
and
f1,f4,f5,f6
However, whenever I ran the spider, the fields were exported to the CSV files in a random order:
f2,f1,f3 and f5,f1,f4,f6
The solution is posted below!
Unfortunately, because of the way scrapy's Item is implemented, the information about the order in which the fields were defined is not preserved.
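A quick way to see this (a minimal illustration, not from the original thread; Demo is a made-up item class) is to inspect the fields dict that the Item metaclass builds, which is what CsvItemExporter falls back to when no fields_to_export is given:

import scrapy

class Demo(scrapy.Item):
    f2 = scrapy.Field()  # deliberately declared before f1
    f1 = scrapy.Field()
    f3 = scrapy.Field()

# The metaclass collects the Field objects into a plain dict, so the
# declaration order above is not what you get back; depending on the
# Scrapy/Python version this prints an alphabetical or arbitrary order.
print(list(Demo.fields.keys()))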
If the order matters, the best you can do is define the order you want as a separate class variable and use that in your pipeline. Passing the fields_to_export argument to CsvItemExporter is probably the simplest way.
Here's the basic idea you could try:
# items.py
class Item1(scrapy.Item):
    fields_to_export = ['f1', 'f2']
    f1 = scrapy.Field()
    f2 = scrapy.Field()

# pipelines.py
from project.items import Item1

class SomeSitePipeline(object):
    save_types = {'item1': Item1}

    def spider_opened(self, spider):
        # (...)
        self.exporters = dict(
            (name, CsvItemExporter(self.files[name], fields_to_export=item_type.fields_to_export))
            for name, item_type in self.save_types.items()
        )
        # (...)
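Note that fields_to_export is a regular attribute of Scrapy's BaseItemExporter, so it can either be passed to the CsvItemExporter constructor as above or assigned on the exporter instance after it has been created; the accepted solution further below uses the latter approach.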
Also, I just noticed you're using list comprehensions for their side effects, which is a bad idea; you should use regular loops instead.
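As a concrete illustration of that advice, here is a sketch of the same cleanup logic as the question's spider_closed, written with plain loops:

def spider_closed(self, spider):
    # plain loops instead of list comprehensions built only for their side effects
    for exporter in self.exporters.values():
        exporter.finish_exporting()
    for csv_file in self.files.values():
        csv_file.close()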
This is the solution to my specific problem: having the exported fields organized according to the item class definitions in the scrapy project's items.py.
So, after tinkering with the problem and implementing @stranac's suggestion to get rid of the list comprehensions, I came up with the following solution, which gets all fields exported in order to their respective csv files:
from scrapy.exporters import CsvItemExporter
from scrapy import signals
from pydispatch import dispatcher

def item_type(item):
    # just want "first_class_def.csv", not "first_class_def_Item.csv"
    return type(item).__name__.replace('_Item', '')

class SomeSitePipeline(object):
    fileNamesCsv = ['first_class_def', 'second_class_def']

    def __init__(self):
        self.files = {}
        self.exporters = {}
        dispatcher.connect(self.spider_opened, signal=signals.spider_opened)
        dispatcher.connect(self.spider_closed, signal=signals.spider_closed)

    def spider_opened(self, spider):
        self.files = dict([(name, open("/somefolder/" + name + '.csv', 'wb')) for name in self.fileNamesCsv])
        for name in self.fileNamesCsv:
            self.exporters[name] = CsvItemExporter(self.files[name])
            if name == 'first_class_def':
                self.exporters[name].fields_to_export = ['f1', 'f2', 'f3']
            if name == 'second_class_def':
                self.exporters[name].fields_to_export = ['f1', 'f4', 'f5', 'f6']
            self.exporters[name].start_exporting()

    def spider_closed(self, spider):
        for exporter in self.exporters.values():
            exporter.finish_exporting()
        for csv_file in self.files.values():
            csv_file.close()

    def process_item(self, item, spider):
        typesItem = item_type(item)
        if typesItem in set(self.fileNamesCsv):
            self.exporters[typesItem].export_item(item)
        return item
Now everything works as I originally intended.
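As a possible further cleanup (my own sketch, not part of the original answers): since fields_to_export can also be passed straight to the CsvItemExporter constructor, the per-name if blocks in spider_opened can be replaced by a single dict that maps each file name to its field order:

from scrapy.exporters import CsvItemExporter

class SomeSitePipeline(object):
    # hypothetical refactoring: field orders kept as data instead of if branches
    field_orders = {
        'first_class_def': ['f1', 'f2', 'f3'],
        'second_class_def': ['f1', 'f4', 'f5', 'f6'],
    }

    def spider_opened(self, spider):
        self.files = {}
        self.exporters = {}
        for name, fields in self.field_orders.items():
            self.files[name] = open("/somefolder/" + name + '.csv', 'wb')
            self.exporters[name] = CsvItemExporter(self.files[name], fields_to_export=fields)
            self.exporters[name].start_exporting()

Adding a third item type then only requires a new entry in field_orders.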