How to find the last page number of a website when web scraping with BeautifulSoup?
I am scraping data from Flipkart and want to collect the name, price, and rating of every product, so I need to scrape the required information from all of the pages.
The link has 11 pages in total:
https://www.flipkart.com/mobiles/mi~brand/pr?sid=tyy%2C4io&otracker=nmenu_sub_Electronics_0_Mi
So how can I loop until I reach the end of the listing, i.e. until page number 11?
from bs4 import BeautifulSoup
import requests
from itertools import zip_longest


def mxnum():
    # Read the pagination text (e.g. "Page 1 of 11") from the first page and
    # return the last page number + 1, so it can be used directly with range().
    r = requests.get(
        "https://www.flipkart.com/mobiles/mi~brand/pr?sid=tyy%2C4io&otracker=nmenu_sub_Electronics_0_Mi")
    soup = BeautifulSoup(r.text, 'html.parser')
    for item in soup.findAll("div", {'class': '_2zg3yZ'}):
        mxnum = list(item.strings)[0].split(" ")[-1]
        return int(mxnum) + 1


mxnum = mxnum()


def Parse():
    with requests.Session() as req:
        names = []
        prices = []
        rating = []
        for num in range(1, mxnum):
            print(f"Extracting Page# {num}")
            r = req.get(
                f"https://www.flipkart.com/mobiles/mi~brand/pr?sid=tyy%2C4io&otracker=nmenu_sub_Electronics_0_Mi&page={num}")
            soup = BeautifulSoup(r.text, 'html.parser')
            # Product name, price and rating are located via Flipkart's CSS classes.
            for name in soup.find_all("div", {'class': '_3wU53n'}):
                names.append(name.text)
            for price in soup.find_all("div", {'class': '_1vC4OE _2rQ-NK'}):
                prices.append(price.text[1:])  # drop the leading currency symbol
            for rate in soup.find_all("div", {'class': 'hGSR34'}):
                rating.append(rate.text)
        for a, b, c in zip_longest(names, prices, rating):
            print("Name: {}, Price: {}, Rate: {}".format(a, b, c))


Parse()
The URLs for pages 1 through 11 follow the pattern:
https://www.flipkart.com/mobiles/mi~brand/pr?sid=tyy%2C4io&otracker=nmenu_sub_Electronics_0_Mi&page={n}
where n is from 1 to 11
So you can create a loop with n from 1 to 11 and substitute the current value of n into the URL on each iteration, as sketched below.
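A minimal sketch of that loop, assuming the 11 pages stated in the question; the CSS class names _3wU53n and _1vC4OE _2rQ-NK are taken from the first answer and will break if Flipkart changes its markup:

import requests
from bs4 import BeautifulSoup

# Page URL pattern from this answer; {n} is the page number.
BASE_URL = ("https://www.flipkart.com/mobiles/mi~brand/pr"
            "?sid=tyy%2C4io&otracker=nmenu_sub_Electronics_0_Mi&page={n}")

for n in range(1, 12):  # pages 1 to 11 inclusive
    r = requests.get(BASE_URL.format(n=n))
    soup = BeautifulSoup(r.text, "html.parser")
    # Same selectors as the first answer; adjust them if the site's markup changes.
    names = [d.text for d in soup.find_all("div", {"class": "_3wU53n"})]
    prices = [d.text[1:] for d in soup.find_all("div", {"class": "_1vC4OE _2rQ-NK"})]
    print(f"Page {n}: {len(names)} names, {len(prices)} prices")

If you would rather not hard-code 11, the mxnum() approach in the first answer reads the page count from the pagination text instead.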