How to scrape data from each product page from Aliexpress using python selenium

I am trying to scrape every product page from this website: https://www.aliexpress.com/wholesale?catId=0&initiative_id=SB_20220315022920&SearchText=bluetooth+earphones

In particular, I want to get the reviews and the customer countries that I pointed out in the screenshot.

The main problem is that my code is not locating the right elements, which is what I have been struggling with. To start, I tried scraping this product: https://www.aliexpress.com/item/1005003801507855.html?spm=a2g0o.productlist.0.0.1e951bc72xISfE&algo_pvid=6d3ed61e-f378-43d0-a429-5f6cddf3d6ad&algo_exp_id=6d3ed61e-f378-43d0-a429-5f6cddf3d6ad-8&pdp_ext_f=%7B%22sku_id%22%3A%2212000027213624098%22%7D&pdp_pi=-1%3B40.81%3B-1%3B-1%40salePrice%3BMAD%3Bsearch-mainSearch

Here is my code:

from selenium import webdriver
from selenium.webdriver.common.by import By  
from lxml import html 
import cssselect
from time import sleep
from itertools import zip_longest
import csv

driver = webdriver.Edge(executable_path=r"C:/Users/OUISSAL/Desktop/wscraping/XEW/scraping/codes/msedgedriver")
url = "https://www.aliexpress.com/item/1005003801507855.html?spm=a2g0o.productlist.0.0.1e951bc72xISfE&algo_pvid=6d3ed61e-f378-43d0-a429-5f6cddf3d6ad&algo_exp_id=6d3ed61e-f378-43d0-a429-5f6cddf3d6ad-8&pdp_ext_f=%7B%22sku_id%22%3A%2212000027213624098%22%7D&pdp_pi=-1%3B40.81%3B-1%3B-1%40salePrice%3BMAD%3Bsearch-mainSearch"

with open ("data.csv", "w", encoding="utf-8") as csvfile:
    wr = csv.writer(csvfile)
    wr.writerow(["Comment","Customer country"])

driver.get(url)
driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')


review_buttom = driver.find_element_by_xpath('//li[@ae_button_type="tab_feedback"]')
review_buttom.click()


html_source = driver.find_element_by_xpath('//div[@id="transction-feedback"]')
tree = html.fromstring(html_source)

#tree = html.fromstring(driver.page_source)
for rvw in tree.xpath('//div[@class="feedback-item clearfix"]'):
    country = rvw.xpath('//div[@class="user-country"]//b/text()')
    if country:
        country = country[0]
    else:
        country = ''
    print('country:', country)
    
    comment = rvw.xpath('//dt[@id="buyer-feedback"]//span/text()')
    if comment:
        comment = comment[0]
    else:
        comment = ''
    print('comment:', comment)
    
driver.close()

Thank you!!

What happens?

There is one main issue: the feedback you are looking for lives inside an iframe, so you cannot get the information by locating the elements directly on the product page.
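You can verify this yourself with a quick check. The snippet below is only a minimal sketch; it assumes a driver that is already on the product page and that the review iframe keeps the #product-evaluation id used in the example further down:

from selenium.webdriver.common.by import By

# Sketch: locate the review iframe on the product page (assumes the
# "#product-evaluation" id) and print the URL its content is loaded from.
iframe = driver.find_element(By.CSS_SELECTOR, '#product-evaluation')
print(iframe.tag_name)                 # "iframe"
print(iframe.get_attribute('src'))     # the separate document that holds the reviews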

How to fix?

Scroll the element that contains the iframe into view, navigate to the iframe's source URL, and interact with its pagination to collect all of the feedback.

Example

from selenium import webdriver
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import pandas as pd

url = 'https://www.aliexpress.com/item/1005003801507855.html?spm=a2g0o.productlist.0.0.1e951bc72xISfE&algo_pvid=6d3ed61e-f378-43d0-a429-5f6cddf3d6ad&algo_exp_id=6d3ed61e-f378-43d0-a429-5f6cddf3d6ad-8&pdp_ext_f=%7B%22sku_id%22%3A%2212000027213624098%22%7D&pdp_pi=-1%3B40.81%3B-1%3B-1%40salePrice%3BMAD%3Bsearch-mainSearch'

driver = webdriver.Chrome(ChromeDriverManager().install())
driver.maximize_window()
driver.get(url)

wait = WebDriverWait(driver, 10)

# Scroll the tab area into view so the lazy-loaded feedback iframe is rendered,
# then navigate straight to the iframe's source document, which contains the reviews
driver.execute_script("arguments[0].scrollIntoView();", wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, '.tab-content'))))
driver.get(wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, '#product-evaluation'))).get_attribute('src'))

data=[]

# Iterate over the feedback pages until the pagination runs out
while True:

    for e in driver.find_elements(By.CSS_SELECTOR, 'div.feedback-item'):

        # Country and comment can be missing on some reviews, so fall back to None
        try:
            country = e.find_element(By.CSS_SELECTOR, '.user-country > b').text
        except:
            country = None

        try:
            comment = e.find_element(By.CSS_SELECTOR, '.buyer-feedback span').text
        except:
            comment = None

        data.append({
            'country':country,
            'comment':comment
        })
    # Click the "next page" button inside the iframe's pagination; stop once it is no longer present
    try:
        wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, '#complex-pager a.ui-pagination-next'))).click()
    except:
        break

# Save all collected reviews to a CSV file
pd.DataFrame(data).to_csv('filename.csv', index=False)
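A note on the design choice: loading the iframe's src directly makes the feedback document behave like a standalone page, which keeps the pagination handling simple. An alternative is to stay on the product page and switch into the frame with Selenium's switch_to API. A minimal sketch, assuming the same driver, wait, and #product-evaluation selector as in the example above:

# Alternative sketch: switch into the review iframe instead of navigating to its src.
frame = wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, '#product-evaluation')))
driver.switch_to.frame(frame)
# ...scrape the div.feedback-item elements here, exactly as in the loop above...
driver.switch_to.default_content()   # switch back to the main product page when done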