Scrapy / Pipeline not inserting data to MySQL database

I'm building a pipeline with Scrapy to store the scraped data in a MySQL database. When the spider is run from the terminal it works perfectly, and the pipeline even opens, but no data is ever sent to the database. Any help is appreciated! :)

Here is the pipeline code:

import sys
import MySQLdb
import hashlib
from scrapy.exceptions import DropItem
from scrapy.http import Request
from tutorial.items import TutorialItem

class MySQLTest(object):
    def __init__(self):
        # Keep the connection and cursor on self so process_item can reach them;
        # as local variables they were discarded as soon as __init__ returned.
        self.db = MySQLdb.connect(user='root', passwd='', host='localhost', db='python')
        self.cursor = self.db.cursor()

    def process_item(self, item, spider):
        # Scrapy calls this hook as process_item(item, spider); the original
        # signature had the two arguments swapped.
        try:
            self.cursor.execute("INSERT INTO info (venue, datez) VALUES (%s, %s)", (item['artist'], item['date']))
            self.db.commit()
        except MySQLdb.Error, e:
            print "Error %d: %s" % (e.args[0], e.args[1])
        return item
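
If the pipeline opens but nothing lands in the table, it helps to rule MySQL out by running the same insert outside Scrapy. Below is a minimal standalone sketch, assuming the same `python` database and an `info` table with `venue` and `datez` columns as in the code above. (Also worth noting: `.extract()` in the spider returns a *list* of strings, so you may want to join or take the first element before inserting.)

import MySQLdb

# Same credentials as the pipeline above.
db = MySQLdb.connect(user='root', passwd='', host='localhost', db='python')
cursor = db.cursor()

# Insert one dummy row; if this fails, the problem is on the MySQL
# side (schema, privileges, column names), not in Scrapy.
cursor.execute("INSERT INTO info (venue, datez) VALUES (%s, %s)",
               ('test artist', 'test date'))
db.commit()

cursor.execute("SELECT venue, datez FROM info")
print cursor.fetchall()
db.close()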

Here is the spider code:

import scrapy # Import required libraries.
from scrapy.selector import HtmlXPathSelector # Allows for path detection in a websites code.
from scrapy.spider import BaseSpider # Used to create a simple spider to extract data.
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor # Needed for the extraction of href links in HTML to crawl further pages.
from scrapy.contrib.spiders import CrawlSpider # Needed to make the crawl spider.
from scrapy.contrib.spiders import Rule # Allows specified rules to affect which links the extractor follows.
import spotipy
import soundcloud
import mysql.connector

from tutorial.items import TutorialItem

class AllGigsSpider(CrawlSpider):
    name = "allGigs" # Name of the Spider. In command promt, when in the correct folder, enter "scrapy crawl Allgigs".
    allowed_domains = ["www.allgigs.co.uk"] # Allowed domains is a String NOT a URL. 
    start_urls = [
        "http://www.allgigs.co.uk/whats_on/London/clubbing-1.html",
        "http://www.allgigs.co.uk/whats_on/London/festivals-1.html",
        "http://www.allgigs.co.uk/whats_on/London/comedy-1.html",
        "http://www.allgigs.co.uk/whats_on/London/theatre_and_opera-1.html",
        "http://www.allgigs.co.uk/whats_on/London/dance_and_ballet-1.html"
    ] # Specify the starting points for the web crawler.  

    rules = [
        Rule(SgmlLinkExtractor(restrict_xpaths='//div[@class="more"]'), # Follow the "more" links found on each page.
        callback="parse_me", 
        follow=True),
    ]

    def parse_me(self, response):
        for info in response.xpath('//div[@class="entry vevent"]|//div[@class="resultbox"]'):
            item = TutorialItem() # Create a new item (defined in items.py).
            item['artist'] = info.xpath('.//span[@class="summary"]//text()').extract() # Extract artist information.
            item['date'] = info.xpath('.//span[@class="dates"]//text()').extract() # Extract date information.
            #item['endDate'] = info.xpath('.//abbr[@class="dtend"]//text()').extract() # Extract end date information.
            #item['startDate'] = info.xpath('.//abbr[@class="dtstart"]//text()').extract() # Extract start date information.
            item['genre'] = info.xpath('.//div[@class="header"]//text()').extract()
            yield item # Hand the item to the enabled pipelines.
            client = soundcloud.Client(client_id='401c04a7271e93baee8633483510e263')
            tracks = client.get('/tracks', limit=1, license='cc-by-sa', q=item['artist'])
            for track in tracks:
                print(track)
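
For reference, the spider and pipeline together assume a `TutorialItem` with `artist`, `date`, and `genre` fields (plus the commented-out `startDate`/`endDate`). The question doesn't show items.py, but a minimal sketch consistent with the fields used above would be:

from scrapy.item import Item, Field

class TutorialItem(Item):
    artist = Field()    # span[@class="summary"] text
    date = Field()      # span[@class="dates"] text
    genre = Field()     # div[@class="header"] text
    startDate = Field() # currently unused (commented out in the spider)
    endDate = Field()   # currently unused (commented out in the spider)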

It turns out the problem was in my settings.py file, where I had missed a comma... yawn.

ITEM_PIPELINES = {
    'tutorial.pipelines.MySQLTest': 300,
}
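
You can tell whether this dict was picked up at all from the startup log, where Scrapy prints an "Enabled item pipelines" line listing every active pipeline class. As a side note, item pipelines also support optional open_spider/close_spider hooks, which are a tidier place for connection setup and teardown than __init__. A sketch of the same pipeline restructured that way, with the same credentials and table assumed:

import MySQLdb

class MySQLTest(object):
    def open_spider(self, spider):
        # Called once when the spider starts.
        self.db = MySQLdb.connect(user='root', passwd='', host='localhost', db='python')
        self.cursor = self.db.cursor()

    def close_spider(self, spider):
        # Called once when the spider finishes; close the connection cleanly.
        self.db.close()

    def process_item(self, item, spider):
        self.cursor.execute("INSERT INTO info (venue, datez) VALUES (%s, %s)",
                            (item['artist'], item['date']))
        self.db.commit()
        return item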