Writing text to csv after web-scraping
I am extracting real-estate data in Python by web-scraping, and I want that data in a CSV file.
When I write the data to CSV, if the first scraped item is missing a value I need, that value is simply skipped for all the other items that do have it: it comes out null, and no column is created for it at all, not even with empty cells.
My web-scraping code block:
from selenium import webdriver
from bs4 import BeautifulSoup
import re
import csv
import time

PATH = r'C:\Program Files (x86)\chromedriver.exe'  # raw string so the backslashes are kept literally
driver = webdriver.Chrome(PATH)
data = []

def get_dl(soup):
    d_list = {}
    for dl in soup.findAll("dl", {"class": "obj-details"}):
        for el in dl.find_all(["dt", "dd"]):
            if el.name == 'dt':
                key = el.get_text(strip=True)
            elif key in ['Plotas:', 'Buto numeris:', 'Metai:', 'Namo numeris:', 'Kambarių sk.:', 'Aukštas:', 'Aukštų sk.:', 'Pastato tipas:', 'Šildymas:', 'Įrengimas:', 'Pastato energijos suvartojimo klasė:', 'Ypatybės:', 'Papildomos patalpos:', 'Papildoma įranga:', 'Apsauga:']:
                d_list[key] = ' '.join(el.text.strip().replace("\n", ", ").split('NAUDINGA')[0].split('m²')[0].split())
    return d_list
for puslapis in range(1, 2):
    driver.get(f'https://www.aruodas.lt/butai/kaune/puslapis/{puslapis}')
    response = driver.page_source
    soup = BeautifulSoup(response, 'html.parser')
    blocks = soup.find_all('tr', class_='list-row')
    stored_urls = []
    for url in blocks:
        try:
            stored_urls.append(url.a['href'])
        except:
            pass
    for link in stored_urls:
        driver.get(link)
        response = driver.page_source
        soup = BeautifulSoup(response, 'html.parser')
        h1 = soup.find('h1', 'obj-header-text')
        price = soup.find('div', class_='price-left')
        try:
            address1 = h1.get_text(strip=True)
            address2 = re.findall(r'(.*),[^,]*$', address1)
            address = ''.join(address2)
            city, district, street = address.split(',')
        except:
            # assign each name separately; unpacking the string 'NaN' would give 'N', 'a', 'N'
            city = district = street = 'NaN'
        try:
            full_price = price.find('span', class_='price-eur').text.strip()
            full_price1 = full_price.replace('€', '').replace(' ', '').strip()
        except:
            full_price1 = 'NaN'
        try:
            price_sq_m = price.find('span', class_='price-per').text.strip()
            price_sq_m1 = price_sq_m.replace('€/m²)', '').replace('(domina keitimas)', '').replace('(', '').replace(' ', '').strip()
        except:
            price_sq_m1 = 'NaN'
        try:
            price_change = price.find('div', class_='price-change').text.strip()
            price_change1 = price_change.replace('%', '').strip()
        except:
            price_change1 = 'NaN'
        data.append({'city': city, 'district': district, 'street': street,
                     'full_price': full_price1, 'price_sq_m': price_sq_m1,
                     'price_change': price_change1, **get_dl(soup)})
For example, the key list contains the value:
['Ypatybės:']:
but the page for the first apartment I scrape doesn't have that value, so the column for it is never created at all, which is not what I need.
The code block that writes to csv:
with open('output_kaunas.csv', 'w', encoding='utf-8', newline='') as f_output:
    csv_output = csv.DictWriter(f_output, fieldnames=data[0].keys(), extrasaction='ignore')
    csv_output.writeheader()
    csv_output.writerows(data)
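A minimal example of what I see (the dicts here are made up for illustration):

import csv

rows = [
    {'city': 'Kaunas'},                           # first item lacks 'Ypatybės:'
    {'city': 'Kaunas', 'Ypatybės:': 'Balkonas'},  # a later item has it
]
with open('demo.csv', 'w', encoding='utf-8', newline='') as f:
    w = csv.DictWriter(f, fieldnames=rows[0].keys(), extrasaction='ignore')
    w.writeheader()
    w.writerows(rows)
# demo.csv ends up with only the 'city' column: the fieldnames come from
# rows[0] alone, and extrasaction='ignore' silently drops 'Ypatybės:'.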
So my question is: how do I create a column for a feature I need even when that feature is missing from the first scraped item?
To store the data in a csv file, you can use a pandas DataFrame:
pd.DataFrame(data).to_csv('output_kaunas.csv', index=False)
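This works because pd.DataFrame builds its column set from the union of keys across all the dicts and fills the gaps with NaN, so the column appears even when the first item lacks it. A quick sketch with made-up rows:

import pandas as pd

rows = [
    {'city': 'Kaunas', 'full_price': '95000'},                            # no 'Ypatybės:' key
    {'city': 'Kaunas', 'full_price': '120000', 'Ypatybės:': 'Balkonas'},  # has it
]
df = pd.DataFrame(rows)
print(list(df.columns))         # ['city', 'full_price', 'Ypatybės:'] -- the column exists for every row
print(df.iloc[0]['Ypatybės:'])  # NaN for the first row
df.to_csv('demo.csv', index=False)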
Based on your full code:
from selenium import webdriver
from bs4 import BeautifulSoup
import re
import pandas as pd
import time

PATH = r'C:\Program Files (x86)\chromedriver.exe'  # raw string so the backslashes are kept literally
driver = webdriver.Chrome(PATH)
data = []

def get_dl(soup):
    d_list = {}
    for dl in soup.findAll("dl", {"class": "obj-details"}):
        for el in dl.find_all(["dt", "dd"]):
            if el.name == 'dt':
                key = el.get_text(strip=True)
            elif key in ['Plotas:', 'Buto numeris:', 'Metai:', 'Namo numeris:', 'Kambarių sk.:', 'Aukštas:', 'Aukštų sk.:', 'Pastato tipas:', 'Šildymas:', 'Įrengimas:', 'Pastato energijos suvartojimo klasė:', 'Ypatybės:', 'Papildomos patalpos:', 'Papildoma įranga:', 'Apsauga:']:
                d_list[key] = ' '.join(el.text.strip().replace("\n", ", ").split('NAUDINGA')[0].split('m²')[0].split())
    return d_list
for puslapis in range(1, 2):
    driver.get(f'https://www.aruodas.lt/butai/kaune/puslapis/{puslapis}')
    response = driver.page_source
    soup = BeautifulSoup(response, 'html.parser')
    blocks = soup.find_all('tr', class_='list-row')
    stored_urls = []
    for url in blocks:
        try:
            stored_urls.append(url.a['href'])
        except:
            pass
    for link in stored_urls:
        driver.get(link)
        response = driver.page_source
        soup = BeautifulSoup(response, 'html.parser')
        h1 = soup.find('h1', 'obj-header-text')
        price = soup.find('div', class_='price-left')
        try:
            address1 = h1.get_text(strip=True)
            address2 = re.findall(r'(.*),[^,]*$', address1)
            address = ''.join(address2)
            city, district, street = address.split(',')
        except:
            # assign each name separately; unpacking the string 'NaN' would give 'N', 'a', 'N'
            city = district = street = 'NaN'
        try:
            full_price = price.find('span', class_='price-eur').text.strip()
            full_price1 = full_price.replace('€', '').replace(' ', '').strip()
        except:
            full_price1 = 'NaN'
        try:
            price_sq_m = price.find('span', class_='price-per').text.strip()
            price_sq_m1 = price_sq_m.replace('€/m²)', '').replace('(domina keitimas)', '').replace('(', '').replace(' ', '').strip()
        except:
            price_sq_m1 = 'NaN'
        try:
            price_change = price.find('div', class_='price-change').text.strip()
            price_change1 = price_change.replace('%', '').strip()
        except:
            price_change1 = 'NaN'
        data.append({'city': city, 'district': district, 'street': street,
                     'full_price': full_price1, 'price_sq_m': price_sq_m1,
                     'price_change': price_change1, **get_dl(soup)})
pd.DataFrame(data).to_csv('output_kaunas.csv', index=False)
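If you would rather stay with the csv module instead of pandas, the same idea works there: build the fieldnames from every scraped dict, not just data[0], so the header covers every field regardless of which item was scraped first. A sketch (the column order here is simply first-seen order, and restval fills the gaps):

import csv

fieldnames = []
for row in data:
    for key in row:
        if key not in fieldnames:   # collect the union of keys across all rows
            fieldnames.append(key)

with open('output_kaunas.csv', 'w', encoding='utf-8', newline='') as f_output:
    # restval='' writes an empty cell wherever a row has no value for a field
    csv_output = csv.DictWriter(f_output, fieldnames=fieldnames, restval='')
    csv_output.writeheader()
    csv_output.writerows(data)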