Crawled 0 pages scraped 0 items
I just started learning Python and Scrapy.
My first project scrapes listings from a real-estate site. But when I run it from cmd, it says

Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min)

and nothing comes out. I would be grateful if anyone could solve my problem.
Here are my files:

Items:
import scrapy


class ReporteinmobiliarioItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    titulo = scrapy.Field()
    precioAlquiler = scrapy.Field()
    ubicacion = scrapy.Field()
    descripcion = scrapy.Field()
    superficieTotal = scrapy.Field()
    superficieCubierta = scrapy.Field()
    antiguedad = scrapy.Field()
Spider:
import scrapy
from scrapy.spider import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.exceptions import CloseSpider
from reporteInmobiliario.items import ReporteinmobiliarioItem


class reporteInmobiliario(CrawlSpider):
    name = 'reporteInmobiliario'
    allowed_domains = ['zonaprop.com.ar/']
    item_count = 0
    start_urls = ['https://www.zonaprop.com.ar/terrenos-alquiler-capital-federal.html']

    rules = {
        # For each item
        Rule(LinkExtractor(allow=(), restrict_xpaths=('//li[@class="pagination-action-next"]/a'))),
        Rule(LinkExtractor(allow=(), restrict_xpaths=('//h4[@class="aviso-data-title"]')),
             callback='parse_item', follow=False)
    }

    def parse_item(self, response):
        rp_item = ReporteinmobiliarioItem()
        rp_item['titulo'] = response.xpath('//div[@class="card-title"]/text()').extract()
        rp_item['precioAlquiler'] = response.xpath('normalize-space(//*[@id="layout-content"]/div[1]/div[1]/div[2]/div[2]/div[1]/div[2]/p/strong)').extract()
        rp_item['ubicacion'] = response.xpath('normalize-space(//*[@id="map"]/div[1]/div/ul/li)').extract()
        rp_item['descripcion'] = response.xpath('normalize-space(//*[@id="id-descipcion-aviso"]').extract()
        rp_item['superficieTotal'] = response.xpath('//*[@id="layout-content"]/div[1]/div[1]/div[2]/div[1]/div[4]/div[1]/div[1]/div/ul/li[4]/span)').extract()
        rp_item['superficieCubierta'] = response.xpath('normalize-space(//*[@id="layout-content"]/div[1]/div[1]/div[2]/div[1]/div[4]/div[1]/div[1]/div/ul/li[5]/span)').extract()
        rp_item['antiguedad'] = response.xpath('normalize-space(//*[@id="layout-content"]/div[1]/div[1]/div[2]/div[1]/div[4]/div[1]/div[1]/div/ul/li[6]/span)').extract()
        self.item_count += 1
        if self.item_count > 5:
            raise CloseSpider('item_exceeded')
        yield rp_item
You always need to check the logs first:

2018-09-09 09:19:21 [scrapy.spidermiddlewares.offsite] DEBUG: Filtered offsite request to 'www.zonaprop.com.ar': <GET https://www.zonaprop.com.ar/propiedades/galpon-de-337-m2-7-79-x-43-30-m-a-metros-de-av-43096244.html>

Every request is being dropped by the offsite middleware: allowed_domains contains 'zonaprop.com.ar/', and the trailing slash can never match the request host www.zonaprop.com.ar, so it must be just 'zonaprop.com.ar'. Your first rule also has an error: the correct class name is "pagination-action-next " (note the trailing space), so an exact @class comparison never matches. Also, don't forget to fix your XPath errors in parse_item (the unbalanced parentheses in descripcion and superficieTotal)!
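To see the allowed_domains problem in isolation, you can reproduce the host check with Scrapy's own URL helper (a minimal sketch; url_is_from_any_domain applies essentially the same host-matching rule that the offsite middleware enforces):

from scrapy.utils.url import url_is_from_any_domain

url = 'https://www.zonaprop.com.ar/terrenos-alquiler-capital-federal.html'

# With the trailing slash, the host never matches and every request is filtered:
print(url_is_from_any_domain(url, ['zonaprop.com.ar/']))  # False

# Without it, the host matches and the requests go through:
print(url_is_from_any_domain(url, ['zonaprop.com.ar']))   # True

With those fixes applied, the spider looks like this: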
class reporteInmobiliario(CrawlSpider):
    name = 'reporteInmobiliario'
    allowed_domains = ['zonaprop.com.ar']
    item_count = 0
    start_urls = ['https://www.zonaprop.com.ar/terrenos-alquiler-capital-federal.html']

    # rules is an ordered tuple rather than a set: when several rules match
    # the same link, the first one listed wins.
    rules = (
        # Follow pagination; contains() copes with the trailing space in the class
        Rule(LinkExtractor(allow=(), restrict_xpaths=('//li[contains(@class, "pagination-action-next")]/a'))),
        # For each item
        Rule(LinkExtractor(allow=(), restrict_xpaths=('//h4[@class="aviso-data-title"]')),
             callback='parse_item'),
    )

    def parse_item(self, response):
        rp_item = ReporteinmobiliarioItem()
        rp_item['titulo'] = response.xpath('//div[@class="card-title"]/text()').extract()
        rp_item['precioAlquiler'] = response.xpath('normalize-space(//*[@id="layout-content"]/div[1]/div[1]/div[2]/div[2]/div[1]/div[2]/p/strong)').extract()
        rp_item['ubicacion'] = response.xpath('normalize-space(//*[@id="map"]/div[1]/div/ul/li)').extract()
        # missing closing parenthesis added
        rp_item['descripcion'] = response.xpath('normalize-space(//*[@id="id-descipcion-aviso"])').extract()
        # missing normalize-space( added to balance the stray closing parenthesis
        rp_item['superficieTotal'] = response.xpath('normalize-space(//*[@id="layout-content"]/div[1]/div[1]/div[2]/div[1]/div[4]/div[1]/div[1]/div/ul/li[4]/span)').extract()
        rp_item['superficieCubierta'] = response.xpath('normalize-space(//*[@id="layout-content"]/div[1]/div[1]/div[2]/div[1]/div[4]/div[1]/div[1]/div/ul/li[5]/span)').extract()
        rp_item['antiguedad'] = response.xpath('normalize-space(//*[@id="layout-content"]/div[1]/div[1]/div[2]/div[1]/div[4]/div[1]/div[1]/div/ul/li[6]/span)').extract()
        self.item_count += 1
        if self.item_count > 5:
            raise CloseSpider('item_exceeded')
        yield rp_item
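A quick way to sanity-check the selectors before a full crawl is Scrapy's interactive shell. The two XPath expressions below are the ones from the rules above; the /@href tails assume the anchors sit where the LinkExtractor expects them, so treat this as a sketch, not a guarantee about the page markup:

# Launch with: scrapy shell "https://www.zonaprop.com.ar/terrenos-alquiler-capital-federal.html"
# Inside the shell, `response` is already bound to the downloaded page.

# Pagination link the first rule should follow:
response.xpath('//li[contains(@class, "pagination-action-next")]/a/@href').extract_first()

# Listing links the second rule should extract:
response.xpath('//h4[@class="aviso-data-title"]//a/@href').extract()

If both return URLs, run scrapy crawl reporteInmobiliario again and the "Crawled 0 pages" stats line should be gone.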