Web scraping can't get data from all links on a page at the same time

For some days now I have been trying to scrape all the vessel data from vesselfinder, including each vessel's description page: from the description page I want details such as ship type, IMO number, etc., in table form. I have tried different approaches, but I keep running into errors. First I worked out how to follow those links to a vessel's description page, how to collect all those links from all the listing pages, and how to pull particular table data from a description page (still incomplete, but I got something). Today I tried to combine the code and fetch the data from all the links and their description pages in one go, and it throws a lot of errors that confuse me. I have attached my code; it is not great, but it works up to #print(len(vessellist)) and fails after that... errors..

import requests
from bs4 import BeautifulSoup
import pandas as pd

headers = {
    'user-agent': 'Mozilla/5.0',
    'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8',
}

# listing hrefs already start with /vessels/..., so join them to the site root
baseurl = 'https://www.vesselfinder.com'

vessellist = []
for x in range(1,6):
    response = requests.get(
        f'https://www.vesselfinder.com/vessels?page={x}', 
        headers=headers)

    soup = BeautifulSoup(response.content, 'html.parser')
    contents = soup.find_all('td', class_='v2')

    for cell in contents:  # renamed from 'property', which shadows the built-in
        for item in cell.find_all('a', href=True):
            vessellist.append(baseurl + item['href'])

 
for link in vessellist:
    response = requests.get(link, headers=headers)

    soup = BeautifulSoup(response.content, 'html.parser')
    table = soup.find('table', class_='tparams')

    head = []
    for i in table.find_all('td', class_='n3'):
        head.append(i.text)

    values = []
    for row in table.find_all('td', class_='v3'):
        values.append(row.text)

    # pair each label with its value so the frame reads like the page's table
    df = pd.DataFrame({'property': head, 'value': values})
    print(df)
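
For reference, here is a minimal sketch of the combined two-step flow with basic error handling and a polite delay between requests. The function names (collect_links, scrape_vessel_details) are mine, and it assumes the same CSS classes as the code above:

import time

import requests
from bs4 import BeautifulSoup
import pandas as pd

headers = {'user-agent': 'Mozilla/5.0'}

def collect_links(pages=5):
    # step 1: gather description-page links from the listing pages
    links = []
    for page in range(1, pages + 1):
        resp = requests.get(f'https://www.vesselfinder.com/vessels?page={page}',
                            headers=headers)
        soup = BeautifulSoup(resp.content, 'html.parser')
        for td in soup.find_all('td', class_='v2'):
            for a in td.find_all('a', href=True):
                links.append('https://www.vesselfinder.com' + a['href'])
    return links

def scrape_vessel_details(link):
    # step 2: read the label/value pairs from one description page
    resp = requests.get(link, headers=headers)
    soup = BeautifulSoup(resp.content, 'html.parser')
    table = soup.find('table', class_='tparams')
    if table is None:  # page may be blocked or laid out differently
        return None
    labels = [td.text for td in table.find_all('td', class_='n3')]
    values = [td.text for td in table.find_all('td', class_='v3')]
    return dict(zip(labels, values))

rows = []
for link in collect_links():
    details = scrape_vessel_details(link)
    if details:
        rows.append(details)
    time.sleep(1)  # be polite to the server

print(pd.DataFrame(rows))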

Two steps: get the summary data (href included), then get the detailed data. The two steps are implemented in two functions. Here I fetch the first 10 pages; about 200 pages are available.

import requests as rq
from bs4 import BeautifulSoup as bs

headers = {"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0"}


def getSummaryData():
    data = []
    url = "https://www.vesselfinder.com/vessels"
    for page in range(1, 10 + 1):  # only the first 200 pages authorized?
        print("Page : %d/10" % page)
        resp = rq.get(url + "?page=%s" % page, headers=headers)
        soup = bs(resp.content, "lxml")
        section = soup.find_all('section', {'class': 'listing'})[0]
        tbody = section.find_all('tbody')[0]
        trs = tbody.find_all('tr')
        for tr in trs:
            tds = tr.find_all('td')
            # column 1 data
            sub = tds[1].find('a')
            href = sub['href']
            divs = sub.find_all('div')
            country = divs[0]['title']
            sub_divs = divs[1].find_all('div')
            vessel_name = sub_divs[0].text
            vessel_type = sub_divs[1].text
            # column 2 data
            build_year = tds[2].text
            # column 3 data
            gt = tds[3].text
            # column 4 data
            dwt = tds[4].text
            # column 5 data
            size = tds[5].text
            # save data
            tr_data = {'country': country, 
                       'vessel_name': vessel_name,
                       'vessel_type': vessel_type,
                       'build_year': build_year,
                       'gt': gt,
                       'dwt': dwt,
                       'size': size,
                       'href': href}
            data.append(tr_data)
    return data

def getDetailledData(data):
    for (iel, el) in enumerate(data):
        print("%d/%d" % (iel+1, len(data)))
        url = "https://www.vesselfinder.com" + el['href']
        # make get call
        resp = rq.get(url, headers=headers)
        soup = bs(resp.content, "lxml")
        
        # position and voyage data
        table = soup.find_all('table', {'class': 'aparams'})[0]
        trs = table.find_all('tr')
        labels = ["course_speed", "current_draught","navigation_status",
        "position_received", "IMO_MMSI", "callsign", "flag", "length_beam"]
        for (i, tr) in enumerate(trs):
            # rows are assumed to come in exactly the order of the labels above
            td = tr.find_all('td')[1]
            el.update({labels[i]: td.text})

        # vessel particulars
        table = soup.find_all('table', {'class': 'tparams'})[0]
        trs = table.find_all('tr')
        labels = ["IMO_number", "vessel_name", "ship_type", "flag", 
        "homeport", "gross_tonnage", "summer_deadweight_t", 
        "length_overall_m", "beam_m", "draught_m", "year_of_built", 
        "builder", "place_of_built", "yard", "TEU", "crude", "grain", 
        "bale", "classification_society", "registered_owner", "manager"]

        for (i, tr) in enumerate(trs):
            # same assumption: one tr per label, in this exact order
            td = tr.find_all('td')[1]
            el.update({labels[i]: td.text})

        #break
    return data

Call these functions:

data = getSummaryData() # href include
data = getDetailledData(data)
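
To keep the result, the list of dicts can go straight into a DataFrame and out to CSV (a usage sketch, assuming pandas is installed; the filename is my own):

import pandas as pd

df = pd.DataFrame(data)  # one row per vessel, one column per label
df.to_csv('vessels.csv', index=False)
print(df.head())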

Don't rely on 'class' attributes to locate the data. In general, you want to walk table -> tbody and then take the trs and tds positionally to be sure you have the right ones.
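
For example, a minimal sketch of that idea on an inline HTML snippet (the sample rows are made up; in practice you would parse resp.content):

from bs4 import BeautifulSoup

html = """
<table class="tparams">
  <tbody>
    <tr><td>IMO number</td><td>9321483</td></tr>
    <tr><td>Ship type</td><td>Bulk Carrier</td></tr>
  </tbody>
</table>
"""

soup = BeautifulSoup(html, 'lxml')
# walk table -> tbody -> tr, then take the tds by position
# instead of matching on the 'n3'/'v3' class attributes
table = soup.find_all('table')[0]
tbody = table.find_all('tbody')[0]
details = {}
for tr in tbody.find_all('tr'):
    tds = tr.find_all('td')
    if len(tds) >= 2:  # skip rows without a label/value pair
        details[tds[0].text.strip()] = tds[1].text.strip()
print(details)  # {'IMO number': '9321483', 'Ship type': 'Bulk Carrier'}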