Python 不写入文件
Python doesn't write to a file
所以这是我的代码:它创建了文件,说明程序拥有写权限,但文件里什么都没有写进去。该函数能正常运行并在屏幕上打印结果,只是不往文件里写任何东西。有谁知道这是为什么吗?
预期行为:创建两个文件并将网站过滤为工作或非工作并说明非工作网站的错误代码
当前行为:创建空文件
import requests
from concurrent.futures import ThreadPoolExecutor

# Read the URL list, prefixing a scheme where one is missing.
websites = []
with open("websites.txt", "r") as f:
    for line in f:
        line = line.strip()
        if not line:  # skip blank lines
            continue
        if not line.startswith("http"):
            line = "http://" + line
        websites.append(line)
        print("appended" + line)

with open("working.txt", "w") as fa, open("not_working.txt", "w") as fe:

    def checker(website):
        """Fetch *website* and record it as working (HTTP 200) or not."""
        response = requests.get(website)
        available = response.status_code == 200
        print(response.status_code)
        if available:
            fa.write(website + "\n")
        else:
            # str() is required here: status_code is an int, and str + int
            # raises TypeError — this was the original silent failure.
            fe.write(website + " error " + str(response.status_code) + "\n")

    with ThreadPoolExecutor() as executor:
        # list() consumes the lazy map iterator so any exception raised in
        # a worker is re-raised here instead of being silently discarded.
        list(executor.map(checker, websites))
代码
executor.map(checker, websites)
创建 generator
但它不执行线程。
至少需要list()
list( executor.map(checker, websites) )
执行生成器。
完整代码:
import requests
from concurrent.futures import ThreadPoolExecutor
# --- functions ---
def read_urls(filename):
    """Read one URL per line from *filename*, skipping blank lines.

    Lines that do not already start with ``http`` are prefixed with
    ``http://``.  Returns the list of normalized URLs in file order.
    """
    urls = []
    with open(filename) as handle:
        for raw in handle:
            url = raw.strip()
            if not url:  # ignore empty lines
                continue
            if not url.startswith("http"):
                url = "http://" + url
            urls.append(url)
            print("appended:", url)
    return urls
def checker(website):
    """GET *website*; log it to fa on HTTP 200, otherwise to fe with the code.

    Relies on the module-level file handles ``fa`` / ``fe`` being open.
    """
    status = requests.get(website).status_code
    print('[checker]:', status, website)
    if status == 200:
        fa.write(f'{website}\n')
    else:
        fe.write(f'{website} error {status}\n')
# --- main ---

#websites = read_urls("websites.txt")
websites = ['https://whosebug.com', 'https://fake.com']

with open("working.txt", "w") as fa, \
     open("not_working.txt", "w") as fe, \
     ThreadPoolExecutor() as executor:
    # list() forces the lazy map iterator: the threads actually run and
    # any worker exception is re-raised here.
    list(executor.map(checker, websites))
但更安全的做法是从函数中 return 结果,并在主线程中写入。这样会按照与原始文件中数据相同的顺序写入结果;如果在线程中写入,由于线程的运行顺序可能不同,结果的顺序也可能不同。
import requests
from concurrent.futures import ThreadPoolExecutor
# --- functions ---
def read_urls(filename):
    """Read one URL per line from *filename*, skipping blank lines.

    Lines that do not already start with ``http`` are prefixed with
    ``http://``.  Returns the list of normalized URLs in file order.
    """
    urls = []
    with open(filename) as handle:
        for raw in handle:
            url = raw.strip()
            if not url:  # ignore empty lines
                continue
            if not url.startswith("http"):
                url = "http://" + url
            urls.append(url)
            print("appended:", url)
    return urls
def checker(website):
    """GET *website* and return the pair ``(website, status_code)``."""
    status = requests.get(website).status_code
    print('[checker]:', status, website)
    return website, status
# --- main ---

#websites = read_urls("websites.txt")
websites = ['https://whosebug.com', 'https://fake.com']

with open("working.txt", "w") as fa, \
     open("not_working.txt", "w") as fe, \
     ThreadPoolExecutor() as executor:
    # Writing from the main thread keeps the output in input order,
    # regardless of which worker finished first.
    for website, status_code in executor.map(checker, websites):
        print('[main]:', status_code, website)
        if status_code == 200:
            fa.write(f'{website}\n')
        else:
            fe.write(f'{website} error {status_code}\n')
所以这是我的代码:它创建了文件,说明程序拥有写权限,但文件里什么都没有写进去。该函数能正常运行并在屏幕上打印结果,只是不往文件里写任何东西。有谁知道这是为什么吗?
预期行为:创建两个文件并将网站过滤为工作或非工作并说明非工作网站的错误代码
当前行为:创建空文件
import requests
from concurrent.futures import ThreadPoolExecutor

# Read the URL list, prefixing a scheme where one is missing.
websites = []
with open("websites.txt", "r") as f:
    for line in f:
        line = line.strip()
        if not line:  # skip blank lines
            continue
        if not line.startswith("http"):
            line = "http://" + line
        websites.append(line)
        print("appended" + line)

with open("working.txt", "w") as fa, open("not_working.txt", "w") as fe:

    def checker(website):
        """Fetch *website* and record it as working (HTTP 200) or not."""
        response = requests.get(website)
        available = response.status_code == 200
        print(response.status_code)
        if available:
            fa.write(website + "\n")
        else:
            # str() is required here: status_code is an int, and str + int
            # raises TypeError — this was the original silent failure.
            fe.write(website + " error " + str(response.status_code) + "\n")

    with ThreadPoolExecutor() as executor:
        # list() consumes the lazy map iterator so any exception raised in
        # a worker is re-raised here instead of being silently discarded.
        list(executor.map(checker, websites))
代码
executor.map(checker, websites)
创建 generator
但它不执行线程。
至少需要list()
list( executor.map(checker, websites) )
执行生成器。
完整代码:
import requests
from concurrent.futures import ThreadPoolExecutor
# --- functions ---
def read_urls(filename):
    """Read one URL per line from *filename*, skipping blank lines.

    Lines that do not already start with ``http`` are prefixed with
    ``http://``.  Returns the list of normalized URLs in file order.
    """
    urls = []
    with open(filename) as handle:
        for raw in handle:
            url = raw.strip()
            if not url:  # ignore empty lines
                continue
            if not url.startswith("http"):
                url = "http://" + url
            urls.append(url)
            print("appended:", url)
    return urls
def checker(website):
    """GET *website*; log it to fa on HTTP 200, otherwise to fe with the code.

    Relies on the module-level file handles ``fa`` / ``fe`` being open.
    """
    status = requests.get(website).status_code
    print('[checker]:', status, website)
    if status == 200:
        fa.write(f'{website}\n')
    else:
        fe.write(f'{website} error {status}\n')
# --- main ---

#websites = read_urls("websites.txt")
websites = ['https://whosebug.com', 'https://fake.com']

with open("working.txt", "w") as fa, \
     open("not_working.txt", "w") as fe, \
     ThreadPoolExecutor() as executor:
    # list() forces the lazy map iterator: the threads actually run and
    # any worker exception is re-raised here.
    list(executor.map(checker, websites))
但更安全的做法是从函数中 return 结果,并在主线程中写入。这样会按照与原始文件中数据相同的顺序写入结果;如果在线程中写入,由于线程的运行顺序可能不同,结果的顺序也可能不同。
import requests
from concurrent.futures import ThreadPoolExecutor
# --- functions ---
def read_urls(filename):
    """Read one URL per line from *filename*, skipping blank lines.

    Lines that do not already start with ``http`` are prefixed with
    ``http://``.  Returns the list of normalized URLs in file order.
    """
    urls = []
    with open(filename) as handle:
        for raw in handle:
            url = raw.strip()
            if not url:  # ignore empty lines
                continue
            if not url.startswith("http"):
                url = "http://" + url
            urls.append(url)
            print("appended:", url)
    return urls
def checker(website):
    """GET *website* and return the pair ``(website, status_code)``."""
    status = requests.get(website).status_code
    print('[checker]:', status, website)
    return website, status
# --- main ---

#websites = read_urls("websites.txt")
websites = ['https://whosebug.com', 'https://fake.com']

with open("working.txt", "w") as fa, \
     open("not_working.txt", "w") as fe, \
     ThreadPoolExecutor() as executor:
    # Writing from the main thread keeps the output in input order,
    # regardless of which worker finished first.
    for website, status_code in executor.map(checker, websites):
        print('[main]:', status_code, website)
        if status_code == 200:
            fa.write(f'{website}\n')
        else:
            fe.write(f'{website} error {status_code}\n')