mysql.connector.errors.InterfaceError: 2003: Can't connect to MySQL server on '127.0.0.1:3306' on Scrapinghub
mysql.connector.errors.InterfaceError: 2003: Can't connect to MySQL server on '127.0.0.1:3306' on Scrapinghub
我尝试在 Scrapinghub 上运行我的蜘蛛,但运行时出现了以下错误:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks
result = g.send(result)
File "/usr/local/lib/python3.6/site-packages/scrapy/crawler.py", line 80, in crawl
self.engine = self._create_engine()
File "/usr/local/lib/python3.6/site-packages/scrapy/crawler.py", line 105, in _create_engine
return ExecutionEngine(self, lambda _: self.stop())
File "/usr/local/lib/python3.6/site-packages/scrapy/core/engine.py", line 70, in __init__
self.scraper = Scraper(crawler)
File "/usr/local/lib/python3.6/site-packages/scrapy/core/scraper.py", line 71, in __init__
self.itemproc = itemproc_cls.from_crawler(crawler)
File "/usr/local/lib/python3.6/site-packages/scrapy/middleware.py", line 53, in from_crawler
return cls.from_settings(crawler.settings, crawler)
File "/usr/local/lib/python3.6/site-packages/scrapy/middleware.py", line 35, in from_settings
mw = create_instance(mwcls, settings, crawler)
File "/usr/local/lib/python3.6/site-packages/scrapy/utils/misc.py", line 144, in create_instance
return objcls(*args, **kwargs)
File "/app/__main__.egg/skripsi/pipelines.py", line 19, in __init__
File "/app/__main__.egg/skripsi/pipelines.py", line 29, in create_connection
File "/app/python/lib/python3.6/site-packages/mysql/connector/__init__.py", line 173, in connect
return MySQLConnection(*args, **kwargs)
File "/app/python/lib/python3.6/site-packages/mysql/connector/connection.py", line 104, in __init__
self.connect(**kwargs)
File "/app/python/lib/python3.6/site-packages/mysql/connector/abstracts.py", line 780, in connect
self._open_connection()
File "/app/python/lib/python3.6/site-packages/mysql/connector/connection.py", line 284, in _open_connection
self._socket.open_connection()
File "/app/python/lib/python3.6/site-packages/mysql/connector/network.py", line 532, in open_connection
errno=2003, values=(self.get_address(), _strioerror(err)))
mysql.connector.errors.InterfaceError: 2003: Can't connect to MySQL server on '127.0.0.1:3306' (111 Connection refused)
我尝试在 requirements.txt 中添加 mysql-connector-python,并像下面这样在 scrapinghub.yml 中配置依赖:
我的requirements.txt
mysql-connector-python
我的scrapinghub.yml
projects:
default: 396892
stacks:
default: scrapy:1.6-py3
requirements:
file: requirements.txt
我的pipelines.py
import mysql.connector
class SkripsiPipeline(object):
    """Scrapy item pipeline that stores scraped news items in the MySQL table `news_tb`.

    NOTE(review): the original hard-coded host='127.0.0.1', which cannot work on
    Scrapy Cloud (no MySQL server runs there — this is the cause of the 2003
    "Connection refused" error above).  The connection parameters are now
    constructor arguments whose defaults are the original values, so existing
    callers are unaffected while a remote host can be injected.
    """

    def __init__(self, host='127.0.0.1', user='root', password='', database='news'):
        # Remember the connection parameters so create_connection() can be
        # called again (e.g. after a dropped connection) without re-hardcoding.
        self.host = host
        self.user = user
        self.password = password
        self.database = database
        self.create_connection()
        # dispatcher.connect(self.close_spider, signals.close_spider)
        # self.create_table()

    def create_connection(self):
        """Open the MySQL connection and the cursor used by store_db()."""
        self.conn = mysql.connector.connect(
            host=self.host,
            password=self.password,
            user=self.user,
            database=self.database,
        )
        self.curr = self.conn.cursor()

    def close_spider(self, spider):
        """Release cursor and connection (fixes the original resource leak;
        matches the dispatcher hookup sketched in the commented-out line above)."""
        try:
            self.curr.close()
        finally:
            self.conn.close()

    def process_item(self, item, spider):
        """Persist the scraped item, then pass it along the pipeline chain."""
        self.store_db(item)
        return item

    def store_db(self, item):
        """Insert one row per item using a parameterized query (SQL-injection safe).

        Takes element [0] of each field — assumes every item field is a list
        (typical of extractors that return lists); TODO confirm against the spider.
        """
        self.curr.execute(
            "INSERT INTO news_tb (url, title, author, time, crawl_time, imagelink, content) values (%s,%s,%s,%s,%s,%s,%s)",
            (
                item['url'][0],
                item['title'][0],
                item['author'][0],
                item['time'][0],
                item['crawl_time'][0],
                item['imagelink'][0],
                item['content'][0],
            ),
        )
        self.conn.commit()
这是我在 Scrapinghub 上运行爬虫时遇到的错误。有熟悉这个问题的朋友请告诉我。
谢谢。
根本不可能,因为 Scrapy Cloud 不提供任何 SQL 支持。你正在尝试连接 127.0.0.1——也就是本地主机,这意味着 MySQL 必须安装并运行在 Scrapy Cloud 的机器上,而这是做不到的。
我的建议是:把 MySQL 运行在网络上可访问的某台服务器上,然后通过域名或公网 IP 地址进行连接。
我尝试在 Scrapinghub 上运行我的蜘蛛,但运行时出现了以下错误:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks
result = g.send(result)
File "/usr/local/lib/python3.6/site-packages/scrapy/crawler.py", line 80, in crawl
self.engine = self._create_engine()
File "/usr/local/lib/python3.6/site-packages/scrapy/crawler.py", line 105, in _create_engine
return ExecutionEngine(self, lambda _: self.stop())
File "/usr/local/lib/python3.6/site-packages/scrapy/core/engine.py", line 70, in __init__
self.scraper = Scraper(crawler)
File "/usr/local/lib/python3.6/site-packages/scrapy/core/scraper.py", line 71, in __init__
self.itemproc = itemproc_cls.from_crawler(crawler)
File "/usr/local/lib/python3.6/site-packages/scrapy/middleware.py", line 53, in from_crawler
return cls.from_settings(crawler.settings, crawler)
File "/usr/local/lib/python3.6/site-packages/scrapy/middleware.py", line 35, in from_settings
mw = create_instance(mwcls, settings, crawler)
File "/usr/local/lib/python3.6/site-packages/scrapy/utils/misc.py", line 144, in create_instance
return objcls(*args, **kwargs)
File "/app/__main__.egg/skripsi/pipelines.py", line 19, in __init__
File "/app/__main__.egg/skripsi/pipelines.py", line 29, in create_connection
File "/app/python/lib/python3.6/site-packages/mysql/connector/__init__.py", line 173, in connect
return MySQLConnection(*args, **kwargs)
File "/app/python/lib/python3.6/site-packages/mysql/connector/connection.py", line 104, in __init__
self.connect(**kwargs)
File "/app/python/lib/python3.6/site-packages/mysql/connector/abstracts.py", line 780, in connect
self._open_connection()
File "/app/python/lib/python3.6/site-packages/mysql/connector/connection.py", line 284, in _open_connection
self._socket.open_connection()
File "/app/python/lib/python3.6/site-packages/mysql/connector/network.py", line 532, in open_connection
errno=2003, values=(self.get_address(), _strioerror(err)))
mysql.connector.errors.InterfaceError: 2003: Can't connect to MySQL server on '127.0.0.1:3306' (111 Connection refused)
我尝试在 requirements.txt 中添加 mysql-connector-python,并像下面这样在 scrapinghub.yml 中配置依赖:
我的requirements.txt
mysql-connector-python
我的scrapinghub.yml
projects:
default: 396892
stacks:
default: scrapy:1.6-py3
requirements:
file: requirements.txt
我的pipelines.py
import mysql.connector
class SkripsiPipeline(object):
    """Pipeline that writes every scraped news item into the `news_tb` MySQL table."""

    # Column order must match the %s placeholders in the INSERT statement below.
    _FIELDS = ('url', 'title', 'author', 'time', 'crawl_time', 'imagelink', 'content')

    def __init__(self):
        # Open the database connection as soon as Scrapy constructs the pipeline.
        self.create_connection()
        # dispatcher.connect(self.close_spider, signals.close_spider)
        # self.create_table()

    def create_connection(self):
        """Connect to the local MySQL server and keep a cursor for inserts."""
        self.conn = mysql.connector.connect(
            host='127.0.0.1',
            password='',
            user='root',
            database='news'
        )
        self.curr = self.conn.cursor()

    def process_item(self, item, spider):
        """Store the item in the database, then hand it to the next pipeline stage."""
        self.store_db(item)
        return item

    def store_db(self, item):
        """Insert the first value of each item field as one row, then commit."""
        row = tuple(item[field][0] for field in self._FIELDS)
        self.curr.execute(
            "INSERT INTO news_tb (url, title, author, time, crawl_time, imagelink, content) values (%s,%s,%s,%s,%s,%s,%s)",
            row
        )
        self.conn.commit()
这是我在 Scrapinghub 上运行爬虫时遇到的错误。有熟悉这个问题的朋友请告诉我。
谢谢。
根本不可能,因为 Scrapy Cloud 不提供任何 SQL 支持。你正在尝试连接 127.0.0.1——也就是本地主机,这意味着 MySQL 必须安装并运行在 Scrapy Cloud 的机器上,而这是做不到的。我的建议是:把 MySQL 运行在网络上可访问的某台服务器上,然后通过域名或公网 IP 地址进行连接。