Multithreaded crawler in Python

I am trying to implement a multithreaded crawler that takes an initial URL, searches that page for links, displays each link, and at the same time searches each of those links for further links.

Here is my code:

import urllib.request, re, threading, csv
from queue import Queue
from bs4 import BeautifulSoup
from sys import exit

class a3_6:

    __url_q = Queue(100)
    __html_q = Queue()
    __data_q = Queue()
    __visited_urls = []

    def __init__(self, start_url, max_threads):
        self.__url_q.put(start_url)
        self.max_threads = max_threads

    def gethtml(self,url):
        try:
            req=urllib.request.Request(url)
            html=urllib.request.urlopen(req).read()
            self.__html_q.put(html)
        except urllib.error.URLError as e:
            print(e.reason)
        except:
            print("invalid: " + url)
        self.__visited_urls.append(url)

    def mine_thread(self):
        while True:
            if not self.__html_q.empty():
                soup = BeautifulSoup(self.__html_q.get(),"html.parser")
                for a in soup.find_all('a', href=True):
                    if a not in self.__visited_urls:
                        link='https://en.wikipedia.org'+a.get('href')
                        self.__url_q.put(link)
                        self.__data_q.put(link)
            else:
                break

    def store(self):
        while True:
            if not self.__data_q.empty():
                print (self.__data_q.get())

    def download_thread(self):
        while True:
            if not self.__url_q.empty():
                self.gethtml(self.__url_q.get())
            else:
                break

    def run(self):
        self.download_thread()
        self.mine_thread()
        self.store()

    def op(self):
        for x in range(self.max_threads):
            t = threading.Thread(target=self.run)
            t.daemon = True
            t.start()
        self.store()


if __name__ == '__main__':
    a=a3_6('https://en.wikipedia.org/wiki/Main_Page', 5)
    a.op()

EDIT: I edited the code and I now get the right results, but it still never terminates.
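For context: the store() loop in the code above never breaks once __data_q is empty, and nothing ever tells it that the producers have finished, so the program hangs. A common way to shut down a consumer like this is a sentinel value; below is a minimal sketch of that idea (the None sentinel and the single-producer setup are assumptions for illustration, not part of my code above):

import threading
from queue import Queue

data_q = Queue()
SENTINEL = None  # assumed marker meaning "no more items will arrive"

def store():
    while True:
        item = data_q.get()
        if item is SENTINEL:
            break  # producer signalled completion; exit cleanly
        print(item)

t = threading.Thread(target=store)
t.start()
for link in ['https://en.wikipedia.org/wiki/Main_Page']:
    data_q.put(link)
data_q.put(SENTINEL)  # unblock the consumer and let it stop
t.join()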

I found the solution. I took James Harrison's help; I don't know why he deleted his original solution, but here it is:

import urllib.request, threading
from urllib.parse import urlparse
from queue import Queue
from bs4 import BeautifulSoup
from sys import exit
from a3_3 import store_to_db

class a3_5:

    __url_q = Queue(100)
    __html_q = Queue()
    __data_q = Queue()
    __visited_urls=[]

    def gethtml(self,url):
        try:
            req=urllib.request.Request(url)
            html=urllib.request.urlopen(req).read()
            self.__html_q.put(html)
            pars = urlparse(url)  # parse the fetched URL (result is currently unused)
        except urllib.error.URLError as e:
            print(str(e.reason) + ': ' + url)
        except:
            print("invalid: " + url)

    def mine_thread(self):
        while True:
            if not self.__html_q.empty():
                soup = BeautifulSoup(self.__html_q.get(),"html.parser")
                for a in soup.find_all('a', href=True):
                    link=a.get('href')
                    """if not link.startswith('www'):
                        link=self.__prfx+link"""
                    if link not in self.__visited_urls:
                        self.__url_q.put(link)
                        self.__data_q.put(link)
            else:
                break

    def store(self):
        while True:
            if not self.__data_q.empty():
                cont=self.__data_q.get()
                print (cont)
            else:
                break

    def download_thread(self):
        while True:
            if not self.__url_q.empty():
                self.gethtml(self.__url_q.get())
                self.__url_q.task_done()

    def op(self,*urls):
        for x in range(25):
            d = threading.Thread(target=self.download_thread)
            d.daemon = True
            d.start()
        for url in urls:
            self.__url_q.put(url)
        self.__url_q.join()
        self.mine_thread()
        self.store()

if __name__ == '__main__':
    urls=['https://en.wikipedia.org/wiki/Bajirao']#,'https://en.wikipedia.org/wiki/Malharrao_Holkar','https://en.wikipedia.org/wiki/Ranoji_Scindia']
    a=a3_5()
    a.op(*urls)

Basically, I had to arrange for another queue with workers set up to drive the threads. Also, the mine_thread and store methods needed to start only after the download_thread workers had finished, because otherwise the values would not get stored.
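For reference, here is a minimal self-contained sketch of that pattern (the worker function name, thread count, and example URL are illustrative, not my exact code): daemon workers are started first, the queue is filled afterwards, and url_q.join() blocks until every queued item has been marked with task_done(), after which the single-threaded processing stage can safely run.

import threading
import urllib.request
from queue import Queue

url_q = Queue()
html_q = Queue()

def download_worker():
    # Daemon worker: keeps fetching pages until the process exits.
    while True:
        url = url_q.get()
        try:
            html_q.put(urllib.request.urlopen(url).read())
        except Exception as e:
            print('failed:', url, e)
        finally:
            url_q.task_done()  # exactly one task_done() per get()

# Start a fixed pool of workers before queuing any work.
for _ in range(5):
    threading.Thread(target=download_worker, daemon=True).start()

url_q.put('https://en.wikipedia.org/wiki/Bajirao')  # example URL
url_q.join()  # blocks until every queued URL has been task_done()

# Only now does the single-threaded processing stage run:
while not html_q.empty():
    print(len(html_q.get()), 'bytes downloaded')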