webscraping 公交车站 beautifulsoup

webscraping bus stops with beautifulsoup

我正在尝试通过网络抓取给定线路的公交车站名称,这是第 212 行 https://www.m2.rozkladzik.pl/warszawa/rozklad_jazdy.html?l=212 的示例页面。我想输出两个列表,一个是一个方向的公交车站名称,另一个是另一个方向的列表。 (在网页上看得很清楚)。我设法用

将所有名字都放在一个列表中
import requests
from bs4 import BeautifulSoup


def download_bus_schedule(bus_number):
    """Scrape the timetable page for *bus_number* and return every stop name.

    Returns a single ordered list of unique stop names taken from all <a>
    elements on the page (both directions mixed together).

    Args:
        bus_number: line number as a string, e.g. '212'.
    """
    URL = "http://www.m2.rozkladzik.pl/warszawa/rozklad_jazdy.html?l=" + bus_number
    r = requests.get(URL)
    soup = BeautifulSoup(r.content,
                         'html5lib')
    # Removed leftover debug output (print(soup.prettify())).
    all_bus_stops = []
    for link in soup.find_all('a'):
        # Extract the text once and keep only first occurrences, preserving
        # page order.
        name = link.get_text()
        if name not in all_bus_stops:
            all_bus_stops.append(name)
    return all_bus_stops

print(download_bus_schedule('212'))

我想解决办法是以某种方式将汤分成两部分。

我可能误解了,因为我不懂波兰语,但看看这是否有帮助。

from bs4 import BeautifulSoup
import requests

url = 'https://www.m2.rozkladzik.pl/warszawa/rozklad_jazdy.html?l=212'

resp = requests.get(url)
soup = BeautifulSoup(resp.content, "html.parser")

# Map each direction header (<h2 class="holo-divider">) to the ordered,
# de-duplicated list of stop names in the <ul> that follows it.
d = {}
for h2 in soup.select('h2.holo-divider'):
    d[h2.text] = []
    # BUG FIX: with html.parser, .next_sibling is often a whitespace text
    # node rather than the <ul>, which would make ul.select raise
    # AttributeError. find_next_sibling('ul') skips ahead to the real list.
    ul = h2.find_next_sibling('ul')
    for li in ul.select('li'):
        if li.a.text not in d[h2.text]:
            d[h2.text].append(li.a.text)

from pprint import pprint

pprint(d)
import requests
from bs4 import BeautifulSoup


def download_bus_schedule(bus_number):
    """Scrape the timetable page for *bus_number* and return the stops per direction.

    Returns a tuple ``(bus_stops_1, bus_stops_2)`` — one ordered,
    de-duplicated list of stop names for each travel direction (each
    direction is a separate <ul class="holo-list"> on the page).

    Args:
        bus_number: line number as a string, e.g. '212'.
    """
    URL = "http://www.m2.rozkladzik.pl/warszawa/rozklad_jazdy.html?l=" + bus_number
    r = requests.get(URL)
    soup = BeautifulSoup(r.content,
                         'html5lib')

    bus_stops_1 = []
    bus_stops_2 = []

    directions = soup.find_all("ul", {"class":"holo-list"})

    # BUG FIX: the original tested `stop not in bus_stops_N`, comparing a Tag
    # object against a list of strings — always True, so duplicates were never
    # filtered. Compare the extracted, stripped text instead.
    for stop in directions[0].find_all("a"):
        name = stop.text.strip()
        if name not in bus_stops_1:
            bus_stops_1.append(name)

    for stop in directions[1].find_all("a"):
        name = stop.text.strip()
        if name not in bus_stops_2:
            bus_stops_2.append(name)

    all_bus_stops = (bus_stops_1, bus_stops_2)

    return all_bus_stops

# Fetch once and reuse the result — the original called the function (and hit
# the network) twice, once per printed direction.
schedule = download_bus_schedule('212')
print(schedule[0])
print(schedule[1])

您可以使用bs4.element.Tag.findAll方法:

import requests
from bs4 import BeautifulSoup


def download_bus_schedule(bus_number):
    """Scrape the timetable page for *bus_number* and return stops per direction.

    Returns a list with one entry per <ul class="holo-list"> on the page
    (one per travel direction); each entry is an ordered list of unique
    stop names.

    Args:
        bus_number: line number as a string, e.g. '212'.
    """
    all_bus_stops = []
    URL = "http://www.m2.rozkladzik.pl/warszawa/rozklad_jazdy.html?l=" + bus_number
    r = requests.get(URL)
    soup = BeautifulSoup(r.content, 'html.parser')
    for direction_list in soup.select(".holo-list"):
        # Collect stop names in page order, skipping repeats.
        seen_stops = []
        for item in direction_list.findAll("li"):
            stop_name = item.text
            if stop_name not in seen_stops:
                seen_stops.append(stop_name)
        all_bus_stops.append(seen_stops)
    return all_bus_stops

print(download_bus_schedule('212'))

输出:

[['Pl.Hallera', 'Pl.Hallera', 'Darwina', 'Namysłowska', 'Rondo Żaba', 'Rogowska', 'Kołowa', 'Dks Targówek', 'Metro Targówek Mieszkaniowy', 'Myszkowska', 'Handlowa', 'Metro Trocka', 'Bieżuńska', 'Jórskiego', 'Łokietka', 'Samarytanka', 'Rolanda', 'Żuromińska', 'Targówek-Ratusz', 'Św.Wincentego', 'Malborska', 'Ch Targówek'], 
 ['Ch Targówek', 'Ch Targówek', 'Malborska', 'Św.Wincentego', 'Targówek-Ratusz', 'Żuromińska', 'Gilarska', 'Rolanda', 'Samarytanka', 'Łokietka', 'Jórskiego', 'Bieżuńska', 'Metro Trocka', 'Metro Trocka', 'Metro Trocka', 'Handlowa', 'Myszkowska', 'Metro Targówek Mieszkaniowy', 'Dks Targówek', 'Kołowa', 'Rogowska', 'Rondo Żaba', '11 Listopada', 'Bródnowska', 'Szymanowskiego', 'Pl.Hallera', 'Pl.Hallera']]

由于所有站点都封装在下一个无序列表中,您可以使用bs4的find_next功能。 例如

URL = f"http://www.m2.rozkladzik.pl/warszawa/rozklad_jazdy.html?l={bus_number}"
r = requests.get(URL)
soup = BeautifulSoup(r.content,
                  'html5lib')
directions = ["Ch Targówek","Pl.Hallera"]
result = {}
for direction in directions:
  header = soup.find(text=direction)
  list = header.find_next("ul")
  stops_names = [stop.get_text() for stop in list]
  result[direction] = stops_names

return result 

此外，您可能希望使用 f-string 来格式化您的字符串，因为它可以提高可读性并且不易出错。