使用 xpath 和 LXML 或 selenium 从 HTML table 抓取数据
Scraping data from an HTML table using XPath and lxml or Selenium
我需要从这个网站的 HTML table 中提取数据:
https://1x2.lucksport.com/result_en.shtml?dt=2019-04-12&cid=156
我使用 Python、selenium 和 lxml 以及 xpath
我想提取每场比赛的赔率
问题是每场比赛都在 2 行
先是一行 tr class="dtd2",随后是一行 tr class="dtd1"
我需要允许提取第一行及其后续行的 xpath
# Navigate the (already-created) Selenium driver to the target URL.
driver.get(u)
# Parse the JavaScript-rendered page source with lxml so XPath can be used.
t = html.fromstring(driver.page_source)
# This XPath selects only the first <td> of rows with class "dtd2" (the first
# of each match's two rows); it does not capture the following "dtd1" row.
# NOTE(review): the loop body is missing from this excerpt of the question.
for i in t.xpath('//*[@id="odds_tb"]/table/tbody/tr[@class="dtd2"]/td[1]/text()'):
您可以同时使用 selenium
和 pandas
来获取 table 信息。
"""Load the results page in Chrome and let pandas extract every HTML table."""
from selenium import webdriver
import time
import pandas as pd

driver = webdriver.Chrome()
try:
    driver.get("https://1x2.lucksport.com/result_en.shtml?dt=2019-04-12&cid=156")
    # Give the page's JavaScript time to render the odds table.
    time.sleep(3)
    htmlcontent = driver.page_source
    # read_html returns one DataFrame per <table> element on the page.
    tables = pd.read_html(htmlcontent)
    # Index 14 is the odds table in this particular page layout —
    # TODO confirm if the site's markup changes.
    print(tables[14])
finally:
    # Always release the browser process, even if scraping fails
    # (the original snippet leaked the Chrome instance).
    driver.quit()
您似乎想要迭代奇数 tr,然后包含 "next" tr。
在 css 中看起来像:
.dtd1:nth-child(odd),.dtd2:nth-child(odd)
你也可以用 xpath 获得赔率,只需添加:
[position() mod 2 = 1]
更详细的方法
"""Scrape each match's two odds rows (early + live) and write them to CSV.

Each match occupies two consecutive <tr> elements inside #odds_tb: the first
carries competition/date/match/result plus the early odds, the second carries
only the live odds. The parsing logic is factored into a pure helper so it
can be reasoned about (and tested) independently of the browser session.
"""
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup as bs
import pandas as pd
import copy

headers = ['Competition', 'Date', 'Match', 'OddsType', 'Home Win', 'Draw', 'Away Win', 'Result']


def _pair_records(rows_cols):
    """Turn the per-row cell lists into one record per table row.

    rows_cols: list of lists of cell strings, already stripped of the header
    row. Odd-positioned rows (1st, 3rd, ...) start a new match record with
    the average early odds; even-positioned rows overwrite only the odds
    fields with the average live odds. Returns a list of dicts.
    """
    results = []
    record = None
    for i, cols in enumerate(rows_cols, start=1):
        if i % 2 == 1:
            record = {'Competition': cols[0],
                      'Date': cols[1],
                      'Match': ' v '.join([cols[2], cols[6]]),
                      'OddsType': 'average early odds',
                      'Home Win': cols[3],
                      'Draw': cols[4],
                      'Away Win': cols[5],
                      'Result': cols[7]}
        elif record is None:
            # Defensive: a stray even row before any odd row would have
            # raised NameError in the original code — skip it instead.
            continue
        else:
            record['OddsType'] = 'average live odds'
            record['Home Win'] = cols[0]
            record['Draw'] = cols[1]
            record['Away Win'] = cols[2]
        # Shallow copy is sufficient: every value is a plain string
        # (deepcopy in the original did the same job at higher cost).
        results.append(dict(record))
    return results


d = webdriver.Chrome()
d.get('https://1x2.lucksport.com/result_en.shtml?dt=2019-04-12&cid=156')
# Wait (up to 20 s) until the classed rows of the odds table are present.
WebDriverWait(d, 20).until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, "#odds_tb tr[class]")))
soup = bs(d.page_source, 'lxml')
rows = soup.select('#odds_tb tr[class]')
# rows[0] is the header row; everything after is match data.
results = _pair_records([[td.text for td in row.select('td')] for row in rows[1:]])
df = pd.DataFrame(results, columns=headers)
df.to_csv(r'C:\Users\User\Desktop\data.csv', sep=',', encoding='utf-8-sig', index=False)
d.quit()
我需要从这个网站的 HTML table 中提取数据: https://1x2.lucksport.com/result_en.shtml?dt=2019-04-12&cid=156
我使用 Python、selenium 和 lxml 以及 xpath
我想提取每场比赛的赔率 问题是每场比赛都在 2 行 先是一行 tr class="dtd2",随后是一行 tr class="dtd1"
我需要允许提取第一行及其后续行的 xpath
# Load the target page in the existing Selenium driver.
driver.get(u)
# Build an lxml tree from the rendered HTML for XPath querying.
t = html.fromstring(driver.page_source)
# Only the "dtd2" rows (first row of each match pair) are matched here;
# the subsequent "dtd1" row of each match is not selected.
# NOTE(review): the loop body is not shown in this excerpt.
for i in t.xpath('//*[@id="odds_tb"]/table/tbody/tr[@class="dtd2"]/td[1]/text()'):
您可以同时使用 selenium
和 pandas
来获取 table 信息。
"""Render the page with Chrome, then harvest its tables with pandas."""
from selenium import webdriver
import time
import pandas as pd

URL = "https://1x2.lucksport.com/result_en.shtml?dt=2019-04-12&cid=156"

browser = webdriver.Chrome()
browser.get(URL)
# Allow the JavaScript-rendered odds table to finish loading.
time.sleep(3)
rendered_html = browser.page_source
# One DataFrame is produced per <table> element found in the markup.
all_tables = pd.read_html(rendered_html)
print(all_tables[14])
您似乎想要迭代奇数 tr,然后包含 "next" tr。 在 css 中看起来像:
.dtd1:nth-child(odd),.dtd2:nth-child(odd)
你也可以用 xpath 获得赔率,只需添加:
[position() mod 2 = 1]
更详细的方法
"""Collect early and live odds for every match and export them as CSV.

Each match spans two consecutive table rows: the first holds the match
details plus the average early odds, the second holds the average live odds.
"""
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup as bs
import pandas as pd
import copy

chrome = webdriver.Chrome()
chrome.get('https://1x2.lucksport.com/result_en.shtml?dt=2019-04-12&cid=156')
# Block (up to 20 s) until the classed table rows exist in the DOM.
WebDriverWait(chrome, 20).until(
    EC.presence_of_all_elements_located((By.CSS_SELECTOR, "#odds_tb tr[class]"))
)
page = bs(chrome.page_source, 'lxml')
data_rows = page.select('#odds_tb tr[class]')

columns = ['Competition', 'Date', 'Match', 'OddsType', 'Home Win', 'Draw', 'Away Win', 'Result']
records = []
# Skip the header row; pair each odd row with the even row that follows it.
for position, tr in enumerate(data_rows[1:], start=1):
    cells = [td.text for td in tr.select('td')]
    if position % 2 == 1:
        # First row of a match: details plus the average early odds.
        current = {
            'Competition': cells[0],
            'Date': cells[1],
            'Match': ' v '.join([cells[2], cells[6]]),
            'OddsType': 'average early odds',
            'Home Win': cells[3],
            'Draw': cells[4],
            'Away Win': cells[5],
            'Result': cells[7],
        }
    else:
        # Second row of the match: only the average live odds change.
        current['OddsType'] = 'average live odds'
        current['Home Win'] = cells[0]
        current['Draw'] = cells[1]
        current['Away Win'] = cells[2]
    # Snapshot the record so later mutations do not alter stored rows.
    records.append(copy.deepcopy(current))

frame = pd.DataFrame(records, columns=columns)
frame.to_csv(r'C:\Users\User\Desktop\data.csv', sep=',', encoding='utf-8-sig', index=False)
chrome.quit()