不是从新闻列表中抓取时间、标题和内容,而是从封面和专栏封面新闻和侧边栏中抓取
Scrape time, title and content not from a news list, but from cover and column cover news and sidebar
我有一个 window,它有一个按钮可以将网站的新闻标题抓取到列表框中,然后我有另一个按钮可以抓取与所选标题相关的新闻内容并将它们显示在文本框中
目前在页面上我抓取了所有标题、时间表和内容,但我还想添加页面封面上的标题(以及相关内容)(this), and also all the titles (with the relative contents) that are on the right column of the page (this)。
好处是右栏的标题总是使用相同且唯一的 HTML class 名称;但时间并不直接可见,必须打开链接才能获取时间。
我试过使用这些,但没有成功
title_cover = " ".join(span.text for span in div.select("title absolute serif"))
title_sidebar = " ".join(span.text for span in div.select("bold"))
time_cover_and_sidebar = div.find('span', attrs={'class': 'upper'}).text
news_cover = f" {time_cover_and_sidebar} {place.upper()}, {title_cover} (TMW)"
news_sidebar = f" {time_cover_and_sidebar} {place.upper()}, {title_sidebar} (TMW)"
results_cover.append( [number, time_cover_and_sidebar, place, title_cover, news, link] )
results_sidebar.append( [number, time_cover_and_sidebar, place, title_sidebar, news, link])
我想要得到的输出与我使用已经提供的代码得到的输出相同(时间,place/name 团队,标题),不同之处在于我还想插入时间,封面和侧边栏新闻的标题和内容
完整的可执行代码:
import tkinter as tk # PEP8: `import *` is not preferred
from tkinter import ttk
from tkinter.scrolledtext import ScrolledText
import requests
import requests_cache
from bs4 import BeautifulSoup
import pandas as pd
# PEP8: all imports at the beginning
# --- functions --- # PEP8: all functions directly after imports
def get_data_for(place):
    """Scrape the plain news list for one team page on tuttomercatoweb.com.

    :param place: team page slug, e.g. 'atalanta' or 'bologna'
    :return: list of ``[number, time, place, title, news, link]`` rows
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
    }

    results = []

    response = requests.get(f'https://www.tuttomercatoweb.com/{place}/', headers=headers)
    print('url:', response.url)
    print('status:', response.status_code)

    soup = BeautifulSoup(response.content, 'html.parser')

    # keep a distinct name for the container list - the old code rebound
    # `news` inside the loop, shadowing this result of `find_all`
    news_lists = soup.find_all('div', attrs={"class": "tcc-list-news"})
    for number, each in enumerate(news_lists):
        for div in each.find_all("div"):
            time_span = div.find('span', attrs={'class': 'hh serif'})
            anchor = div.find('a')
            # skip wrapper/decoration divs that are not actual news entries
            # (they have no time span or no link and used to raise AttributeError)
            if time_span is None or anchor is None:
                continue
            time = time_span.text
            title = " ".join(span.text for span in div.select("a > span"))
            news_line = f" {time} {place.upper()}, {title} (TMW)"
            link = anchor.get('href')
            results.append([number, time, place, title, news_line, link])

    return results
def all_titles():
    """Download titles for every configured team and refresh the listbox.

    Stores the collected rows in the module-level DataFrame `df` so that
    `content()` can resolve a listbox selection back to its article link.
    """
    global df

    collected = []
    for place in ('atalanta', 'bologna'):
        print('search:', place)
        rows = get_data_for(place)
        print('found:', len(rows))
        collected.extend(rows)
        text_download.insert('end', f"search: {place}\nfound: {len(rows)}\n")

    df = (
        pd.DataFrame(collected, columns=['number', 'time', 'place', 'title', 'news', 'link'])
        .sort_values(by=['number', 'time', 'place', 'title'], ascending=[True, False, True, True])
        .reset_index()
    )

    listbox_title.delete('0', 'end')
    for _, row in df.iterrows():
        listbox_title.insert('end', row['news'])
def content(event=None):
    """Download and display the article body for the listbox selection.

    `event` defaults to None because `command=` invokes this without an
    event while `bind` invokes it with one.
    """
    selection = listbox_title.curselection()  # tuple of selected indexes
    print('selection:', selection)
    if not selection:
        return

    item = df.iloc[selection[-1]]
    url = item['link']

    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
    }

    # cache fetched pages in a local SQLite database
    # https://github.com/reclosedev/requests-cache
    # https://sqlite.org/index.html
    session = requests_cache.CachedSession('titles')
    response = session.get(url, headers=headers)

    soup = BeautifulSoup(response.content, 'html.parser')
    article_text = "\n".join(part.get_text() for part in soup.select("div.text.mbottom"))

    text_download.delete('1.0', 'end')  # drop the previously shown article
    text_download.insert('end', article_text)
# --- main ---
# Global DataFrame shared between `all_titles()` (writer) and `content()` (reader).
df = None
window = tk.Tk()
window.geometry("800x800")
# ---
# [Tkinter: How to display Listbox with Scrollbar — furas.pl](https://blog.furas.pl/python-tkitner-how-to-display-listbox-with-scrollbar-gb.html)
# Listbox (with attached vertical scrollbar) that shows the scraped titles.
frame_title = tk.Frame(window)
frame_title.pack(fill='both', expand=True, pady=5, padx=5)
listbox_title = tk.Listbox(frame_title, selectbackground="#960000", selectforeground="white", bg="white")
listbox_title.pack(side='left', fill='both', expand=True)
scrollbar_title = tk.Scrollbar(frame_title)
scrollbar_title.pack(side='left', fill='y')
# wire listbox and scrollbar to each other (two-way link)
scrollbar_title['command'] = listbox_title.yview
listbox_title.config(yscrollcommand=scrollbar_title.set)
listbox_title.bind('<Double-Button-1>', content) # it executes `content(event)`
# ----
# Scrolled text area that shows the downloaded article content.
text_download = ScrolledText(window, bg="white")
text_download.pack(fill='both', expand=True, pady=0, padx=5)
# ----
buttons_frame = tk.Frame(window)
buttons_frame.pack(fill='x')
button1 = tk.Button(buttons_frame, text="View Titles", command=all_titles) # don't use `[]` to execute functions
button1.pack(side='left', pady=5, padx=5)
button2 = tk.Button(buttons_frame, text="View Content", command=content) # don't use `[]` to execute functions
button2.pack(side='left', pady=5, padx=(0,5))
window.mainloop()
将来,请精简您的帖子/问题,只保留回答问题所需的最少代码。Tkinter 并不是您想在此处修复的部分。
话虽这么说,但我只更新了与获取这些封面新闻和侧面新闻有关的代码。你是对的,你需要去link获取数据。然后您可以从 html 解析,或者他们确实在网站的 <script>
标签中提供了一个很好的 json 格式来获取它。
import tkinter as tk # PEP8: `import *` is not preferred
from tkinter import ttk
from tkinter.scrolledtext import ScrolledText
import requests
import requests_cache
from bs4 import BeautifulSoup
import pandas as pd
import re
import json
from dateutil import parser
import datetime
import locale
# PEP8: all imports at the beginning
# --- functions --- # PEP8: all functions directly after imports
def _article_row(link, place, headers):
    """Fetch one article page and build a result row from the JSON-LD
    metadata the site embeds in the first <script> tag.

    :param link: absolute URL of the article page
    :param place: team page slug, e.g. 'atalanta'
    :param headers: HTTP headers (User-Agent) to send with the request
    :return: ``[date, time, place, title, news, link]`` row
    """
    page = requests.get(link, headers=headers)
    article_soup = BeautifulSoup(page.content, 'html.parser')

    # the first <script> holds a JSON object with 'datePublished' and 'headline'
    json_str = re.search('({.*})', str(article_soup.find('script'))).group(1)
    json_data = json.loads(json_str)

    published = parser.parse(json_data['datePublished'])
    time_published = published.strftime("%H:%M")
    date_published = published.strftime("%Y-%m-%d")
    title = json_data['headline']
    news_line = f" {time_published} {place.upper()}, {title} (TMW)"
    return [date_published, time_published, place, title, news_line, link]

def _list_entry_date(div):
    """Resolve the date for a plain news-list entry.

    The list shows only a time; the date comes from the closest preceding
    date-header div. That header text is 'Ieri' (yesterday) or an Italian
    'weekday day month' string without a year; a missing header means today.
    """
    date_div = div.find_previous('div', {'class': 'tcc-border upper date'})
    today = datetime.datetime.today()
    if date_div is None:
        return today.strftime("%Y-%m-%d")
    if date_div.text == 'Ieri':
        return (today - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
    # Italian weekday/month names need the Italian locale; the locale name
    # is platform-dependent: 'it' works on Windows, 'it_IT.UTF-8' on Linux/macOS.
    try:
        locale.setlocale(locale.LC_ALL, 'it')
    except locale.Error:
        locale.setlocale(locale.LC_ALL, 'it_IT.UTF-8')
    parsed = datetime.datetime.strptime(f"{date_div.text} {today.strftime('%Y')}", '%A %d %B %Y')
    return parsed.strftime("%Y-%m-%d")

def get_data_for(place):
    """Scrape cover news, sidebar news and the regular news list for one
    team page on tuttomercatoweb.com.

    :param place: team page slug, e.g. 'atalanta' or 'bologna'
    :return: list of ``[date, time, place, title, news, link]`` rows
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
    }

    results = []

    response = requests.get(f'https://www.tuttomercatoweb.com/{place}/', headers=headers)
    print('url:', response.url)
    print('status:', response.status_code)

    soup = BeautifulSoup(response.content, 'html.parser')

    # cover story (big box at the top of the page); its time is only on the
    # article page itself, so follow the link (shared helper)
    cover_news = soup.find('div', {'class': 'box pp'})
    link = cover_news.find('a', href=True)['href']
    results.append(_article_row(link, place, headers))

    # sidebar stories (right column) - same structure, same helper
    for box in soup.find_all('div', {'class': 'box small'}):
        link = box.find('a', href=True)['href']
        results.append(_article_row(link, place, headers))

    # regular news list (time is visible inline, date comes from headers)
    for news_list in soup.find_all('div', attrs={"class": "tcc-list-news"}):
        for div in news_list.find_all("div"):
            time_published = div.find('span', attrs={'class': 'hh serif'}).text
            date_published = _list_entry_date(div)
            title = " ".join(span.text for span in div.select("a > span"))
            news_line = f" {time_published} {place.upper()}, {title} (TMW)"
            link = div.find('a')['href']
            results.append([date_published, time_published, place, title, news_line, link])

    return results
def all_titles():
    """Download titles for every configured team and refresh the listbox.

    Stores the collected rows in the module-level DataFrame `df` so that
    `content()` can resolve a listbox selection back to its article link.
    """
    global df

    collected = []
    for place in ('atalanta', 'bologna'):
        print('search:', place)
        rows = get_data_for(place)
        print('found:', len(rows))
        collected.extend(rows)
        text_download.insert('end', f"search: {place}\nfound: {len(rows)}\n")

    df = (
        pd.DataFrame(collected, columns=['date', 'time', 'place', 'title', 'news', 'link'])
        .sort_values(by=['date', 'time', 'place', 'title'], ascending=[False, False, True, True])
        .drop_duplicates(subset=['date', 'time', 'place', 'title'])
        .reset_index(drop=True)
    )

    listbox_title.delete('0', 'end')
    for _, row in df.iterrows():
        listbox_title.insert('end', row['news'])
def content(event=None):
    """Download and display the article body for the listbox selection.

    `event` defaults to None because `command=` invokes this without an
    event while `bind` invokes it with one.
    """
    selection = listbox_title.curselection()  # tuple of selected indexes
    print('selection:', selection)
    if not selection:
        return

    item = df.iloc[selection[-1]]
    url = item['link']

    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
    }

    # cache fetched pages in a local SQLite database
    # https://github.com/reclosedev/requests-cache
    # https://sqlite.org/index.html
    session = requests_cache.CachedSession('titles')
    response = session.get(url, headers=headers)

    soup = BeautifulSoup(response.content, 'html.parser')
    article_text = "\n".join(part.get_text() for part in soup.select("div.text.mbottom"))

    text_download.delete('1.0', 'end')  # drop the previously shown article
    text_download.insert('end', article_text)
# --- main ---
# Global DataFrame shared between `all_titles()` (writer) and `content()` (reader).
df = None
window = tk.Tk()
window.geometry("800x800")
# ---
# [Tkinter: How to display Listbox with Scrollbar — furas.pl](https://blog.furas.pl/python-tkitner-how-to-display-listbox-with-scrollbar-gb.html)
# Listbox (with attached vertical scrollbar) that shows the scraped titles.
frame_title = tk.Frame(window)
frame_title.pack(fill='both', expand=True, pady=5, padx=5)
listbox_title = tk.Listbox(frame_title, selectbackground="#960000", selectforeground="white", bg="white")
listbox_title.pack(side='left', fill='both', expand=True)
scrollbar_title = tk.Scrollbar(frame_title)
scrollbar_title.pack(side='left', fill='y')
# wire listbox and scrollbar to each other (two-way link)
scrollbar_title['command'] = listbox_title.yview
listbox_title.config(yscrollcommand=scrollbar_title.set)
listbox_title.bind('<Double-Button-1>', content) # it executes `content(event)`
# ----
# Scrolled text area that shows the downloaded article content.
text_download = ScrolledText(window, bg="white")
text_download.pack(fill='both', expand=True, pady=0, padx=5)
# ----
buttons_frame = tk.Frame(window)
buttons_frame.pack(fill='x')
button1 = tk.Button(buttons_frame, text="View Titles", command=all_titles) # don't use `[]` to execute functions
button1.pack(side='left', pady=5, padx=5)
button2 = tk.Button(buttons_frame, text="View Content", command=content) # don't use `[]` to execute functions
button2.pack(side='left', pady=5, padx=(0,5))
window.mainloop()
标签:python、python-3.x、beautifulsoup、screen-scraping
我有一个 window,它有一个按钮可以将网站的新闻标题抓取到列表框中,然后我有另一个按钮可以抓取与所选标题相关的新闻内容并将它们显示在文本框中
目前在页面上我抓取了所有标题、时间表和内容,但我还想添加页面封面上的标题(以及相关内容)(this), and also all the titles (with the relative contents) that are on the right column of the page (this)。
好处是右栏的标题总是使用相同且唯一的 HTML class 名称;但时间并不直接可见,必须打开链接才能获取时间。
我试过使用这些,但没有成功
title_cover = " ".join(span.text for span in div.select("title absolute serif"))
title_sidebar = " ".join(span.text for span in div.select("bold"))
time_cover_and_sidebar = div.find('span', attrs={'class': 'upper'}).text
news_cover = f" {time_cover_and_sidebar} {place.upper()}, {title_cover} (TMW)"
news_sidebar = f" {time_cover_and_sidebar} {place.upper()}, {title_sidebar} (TMW)"
results_cover.append( [number, time_cover_and_sidebar, place, title_cover, news, link] )
results_sidebar.append( [number, time_cover_and_sidebar, place, title_sidebar, news, link])
我想要得到的输出与我使用已经提供的代码得到的输出相同(时间,place/name 团队,标题),不同之处在于我还想插入时间,封面和侧边栏新闻的标题和内容
完整的可执行代码:
import tkinter as tk # PEP8: `import *` is not preferred
from tkinter import ttk
from tkinter.scrolledtext import ScrolledText
import requests
import requests_cache
from bs4 import BeautifulSoup
import pandas as pd
# PEP8: all imports at the beginning
# --- functions --- # PEP8: all functions directly after imports
def get_data_for(place):
    """Scrape the plain news list for one team page on tuttomercatoweb.com.

    :param place: team page slug, e.g. 'atalanta' or 'bologna'
    :return: list of ``[number, time, place, title, news, link]`` rows
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
    }

    results = []

    response = requests.get(f'https://www.tuttomercatoweb.com/{place}/', headers=headers)
    print('url:', response.url)
    print('status:', response.status_code)

    soup = BeautifulSoup(response.content, 'html.parser')

    # keep a distinct name for the container list - the old code rebound
    # `news` inside the loop, shadowing this result of `find_all`
    news_lists = soup.find_all('div', attrs={"class": "tcc-list-news"})
    for number, each in enumerate(news_lists):
        for div in each.find_all("div"):
            time_span = div.find('span', attrs={'class': 'hh serif'})
            anchor = div.find('a')
            # skip wrapper/decoration divs that are not actual news entries
            # (they have no time span or no link and used to raise AttributeError)
            if time_span is None or anchor is None:
                continue
            time = time_span.text
            title = " ".join(span.text for span in div.select("a > span"))
            news_line = f" {time} {place.upper()}, {title} (TMW)"
            link = anchor.get('href')
            results.append([number, time, place, title, news_line, link])

    return results
def all_titles():
    """Download titles for every configured team and refresh the listbox.

    Stores the collected rows in the module-level DataFrame `df` so that
    `content()` can resolve a listbox selection back to its article link.
    """
    global df

    collected = []
    for place in ('atalanta', 'bologna'):
        print('search:', place)
        rows = get_data_for(place)
        print('found:', len(rows))
        collected.extend(rows)
        text_download.insert('end', f"search: {place}\nfound: {len(rows)}\n")

    df = (
        pd.DataFrame(collected, columns=['number', 'time', 'place', 'title', 'news', 'link'])
        .sort_values(by=['number', 'time', 'place', 'title'], ascending=[True, False, True, True])
        .reset_index()
    )

    listbox_title.delete('0', 'end')
    for _, row in df.iterrows():
        listbox_title.insert('end', row['news'])
def content(event=None):
    """Download and display the article body for the listbox selection.

    `event` defaults to None because `command=` invokes this without an
    event while `bind` invokes it with one.
    """
    selection = listbox_title.curselection()  # tuple of selected indexes
    print('selection:', selection)
    if not selection:
        return

    item = df.iloc[selection[-1]]
    url = item['link']

    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
    }

    # cache fetched pages in a local SQLite database
    # https://github.com/reclosedev/requests-cache
    # https://sqlite.org/index.html
    session = requests_cache.CachedSession('titles')
    response = session.get(url, headers=headers)

    soup = BeautifulSoup(response.content, 'html.parser')
    article_text = "\n".join(part.get_text() for part in soup.select("div.text.mbottom"))

    text_download.delete('1.0', 'end')  # drop the previously shown article
    text_download.insert('end', article_text)
# --- main ---
# Global DataFrame shared between `all_titles()` (writer) and `content()` (reader).
df = None
window = tk.Tk()
window.geometry("800x800")
# ---
# [Tkinter: How to display Listbox with Scrollbar — furas.pl](https://blog.furas.pl/python-tkitner-how-to-display-listbox-with-scrollbar-gb.html)
# Listbox (with attached vertical scrollbar) that shows the scraped titles.
frame_title = tk.Frame(window)
frame_title.pack(fill='both', expand=True, pady=5, padx=5)
listbox_title = tk.Listbox(frame_title, selectbackground="#960000", selectforeground="white", bg="white")
listbox_title.pack(side='left', fill='both', expand=True)
scrollbar_title = tk.Scrollbar(frame_title)
scrollbar_title.pack(side='left', fill='y')
# wire listbox and scrollbar to each other (two-way link)
scrollbar_title['command'] = listbox_title.yview
listbox_title.config(yscrollcommand=scrollbar_title.set)
listbox_title.bind('<Double-Button-1>', content) # it executes `content(event)`
# ----
# Scrolled text area that shows the downloaded article content.
text_download = ScrolledText(window, bg="white")
text_download.pack(fill='both', expand=True, pady=0, padx=5)
# ----
buttons_frame = tk.Frame(window)
buttons_frame.pack(fill='x')
button1 = tk.Button(buttons_frame, text="View Titles", command=all_titles) # don't use `[]` to execute functions
button1.pack(side='left', pady=5, padx=5)
button2 = tk.Button(buttons_frame, text="View Content", command=content) # don't use `[]` to execute functions
button2.pack(side='left', pady=5, padx=(0,5))
window.mainloop()
将来,请精简您的帖子/问题,只保留回答问题所需的最少代码。Tkinter 并不是您想在此处修复的部分。
话虽这么说,但我只更新了与获取这些封面新闻和侧面新闻有关的代码。你是对的,你需要去link获取数据。然后您可以从 html 解析,或者他们确实在网站的 <script>
标签中提供了一个很好的 json 格式来获取它。
import tkinter as tk # PEP8: `import *` is not preferred
from tkinter import ttk
from tkinter.scrolledtext import ScrolledText
import requests
import requests_cache
from bs4 import BeautifulSoup
import pandas as pd
import re
import json
from dateutil import parser
import datetime
import locale
# PEP8: all imports at the beginning
# --- functions --- # PEP8: all functions directly after imports
def _article_row(link, place, headers):
    """Fetch one article page and build a result row from the JSON-LD
    metadata the site embeds in the first <script> tag.

    :param link: absolute URL of the article page
    :param place: team page slug, e.g. 'atalanta'
    :param headers: HTTP headers (User-Agent) to send with the request
    :return: ``[date, time, place, title, news, link]`` row
    """
    page = requests.get(link, headers=headers)
    article_soup = BeautifulSoup(page.content, 'html.parser')

    # the first <script> holds a JSON object with 'datePublished' and 'headline'
    json_str = re.search('({.*})', str(article_soup.find('script'))).group(1)
    json_data = json.loads(json_str)

    published = parser.parse(json_data['datePublished'])
    time_published = published.strftime("%H:%M")
    date_published = published.strftime("%Y-%m-%d")
    title = json_data['headline']
    news_line = f" {time_published} {place.upper()}, {title} (TMW)"
    return [date_published, time_published, place, title, news_line, link]

def _list_entry_date(div):
    """Resolve the date for a plain news-list entry.

    The list shows only a time; the date comes from the closest preceding
    date-header div. That header text is 'Ieri' (yesterday) or an Italian
    'weekday day month' string without a year; a missing header means today.
    """
    date_div = div.find_previous('div', {'class': 'tcc-border upper date'})
    today = datetime.datetime.today()
    if date_div is None:
        return today.strftime("%Y-%m-%d")
    if date_div.text == 'Ieri':
        return (today - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
    # Italian weekday/month names need the Italian locale; the locale name
    # is platform-dependent: 'it' works on Windows, 'it_IT.UTF-8' on Linux/macOS.
    try:
        locale.setlocale(locale.LC_ALL, 'it')
    except locale.Error:
        locale.setlocale(locale.LC_ALL, 'it_IT.UTF-8')
    parsed = datetime.datetime.strptime(f"{date_div.text} {today.strftime('%Y')}", '%A %d %B %Y')
    return parsed.strftime("%Y-%m-%d")

def get_data_for(place):
    """Scrape cover news, sidebar news and the regular news list for one
    team page on tuttomercatoweb.com.

    :param place: team page slug, e.g. 'atalanta' or 'bologna'
    :return: list of ``[date, time, place, title, news, link]`` rows
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
    }

    results = []

    response = requests.get(f'https://www.tuttomercatoweb.com/{place}/', headers=headers)
    print('url:', response.url)
    print('status:', response.status_code)

    soup = BeautifulSoup(response.content, 'html.parser')

    # cover story (big box at the top of the page); its time is only on the
    # article page itself, so follow the link (shared helper)
    cover_news = soup.find('div', {'class': 'box pp'})
    link = cover_news.find('a', href=True)['href']
    results.append(_article_row(link, place, headers))

    # sidebar stories (right column) - same structure, same helper
    for box in soup.find_all('div', {'class': 'box small'}):
        link = box.find('a', href=True)['href']
        results.append(_article_row(link, place, headers))

    # regular news list (time is visible inline, date comes from headers)
    for news_list in soup.find_all('div', attrs={"class": "tcc-list-news"}):
        for div in news_list.find_all("div"):
            time_published = div.find('span', attrs={'class': 'hh serif'}).text
            date_published = _list_entry_date(div)
            title = " ".join(span.text for span in div.select("a > span"))
            news_line = f" {time_published} {place.upper()}, {title} (TMW)"
            link = div.find('a')['href']
            results.append([date_published, time_published, place, title, news_line, link])

    return results
def all_titles():
    """Download titles for every configured team and refresh the listbox.

    Stores the collected rows in the module-level DataFrame `df` so that
    `content()` can resolve a listbox selection back to its article link.
    """
    global df

    collected = []
    for place in ('atalanta', 'bologna'):
        print('search:', place)
        rows = get_data_for(place)
        print('found:', len(rows))
        collected.extend(rows)
        text_download.insert('end', f"search: {place}\nfound: {len(rows)}\n")

    df = (
        pd.DataFrame(collected, columns=['date', 'time', 'place', 'title', 'news', 'link'])
        .sort_values(by=['date', 'time', 'place', 'title'], ascending=[False, False, True, True])
        .drop_duplicates(subset=['date', 'time', 'place', 'title'])
        .reset_index(drop=True)
    )

    listbox_title.delete('0', 'end')
    for _, row in df.iterrows():
        listbox_title.insert('end', row['news'])
def content(event=None):
    """Download and display the article body for the listbox selection.

    `event` defaults to None because `command=` invokes this without an
    event while `bind` invokes it with one.
    """
    selection = listbox_title.curselection()  # tuple of selected indexes
    print('selection:', selection)
    if not selection:
        return

    item = df.iloc[selection[-1]]
    url = item['link']

    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
    }

    # cache fetched pages in a local SQLite database
    # https://github.com/reclosedev/requests-cache
    # https://sqlite.org/index.html
    session = requests_cache.CachedSession('titles')
    response = session.get(url, headers=headers)

    soup = BeautifulSoup(response.content, 'html.parser')
    article_text = "\n".join(part.get_text() for part in soup.select("div.text.mbottom"))

    text_download.delete('1.0', 'end')  # drop the previously shown article
    text_download.insert('end', article_text)
# --- main ---
# Global DataFrame shared between `all_titles()` (writer) and `content()` (reader).
df = None
window = tk.Tk()
window.geometry("800x800")
# ---
# [Tkinter: How to display Listbox with Scrollbar — furas.pl](https://blog.furas.pl/python-tkitner-how-to-display-listbox-with-scrollbar-gb.html)
# Listbox (with attached vertical scrollbar) that shows the scraped titles.
frame_title = tk.Frame(window)
frame_title.pack(fill='both', expand=True, pady=5, padx=5)
listbox_title = tk.Listbox(frame_title, selectbackground="#960000", selectforeground="white", bg="white")
listbox_title.pack(side='left', fill='both', expand=True)
scrollbar_title = tk.Scrollbar(frame_title)
scrollbar_title.pack(side='left', fill='y')
# wire listbox and scrollbar to each other (two-way link)
scrollbar_title['command'] = listbox_title.yview
listbox_title.config(yscrollcommand=scrollbar_title.set)
listbox_title.bind('<Double-Button-1>', content) # it executes `content(event)`
# ----
# Scrolled text area that shows the downloaded article content.
text_download = ScrolledText(window, bg="white")
text_download.pack(fill='both', expand=True, pady=0, padx=5)
# ----
buttons_frame = tk.Frame(window)
buttons_frame.pack(fill='x')
button1 = tk.Button(buttons_frame, text="View Titles", command=all_titles) # don't use `[]` to execute functions
button1.pack(side='left', pady=5, padx=5)
button2 = tk.Button(buttons_frame, text="View Content", command=content) # don't use `[]` to execute functions
button2.pack(side='left', pady=5, padx=(0,5))
window.mainloop()
标签:python、python-3.x、beautifulsoup、screen-scraping