URL requests with bad URLs in Python
I recently started writing Python for a project I'm working on. I wrote a script that takes a list of image URLs (say, in a txt file) and downloads them. However, some of the URLs in the list are outdated and no longer valid, which causes errors. A link that takes too long to load also causes an error.
Code:

import urllib.request
import random

def downloadImageFromURL(url):
    name = random.randrange(1, 10000)
    full_name = str(name) + ".jpg"
    urllib.request.urlretrieve(url, full_name)

f = open('url.txt', 'r')
for row in range(0, 10):
    line = f.readline()
    try:
        downloadImageFromURL(line)
    except ConnectionError:
        print("Failed to open url.")
        print(line)
f.close()
New code:

import urllib.request
import random
import requests

def sendRequest(url):
    try:
        page = requests.get(url, stream=True, timeout=5)
    except Exception:
        return False
    else:
        if page.status_code == 200:
            return page
        else:
            return False

f = open('url.txt', 'r')
for row in range(0, 10):
    line = f.readline()
    try:
        sendRequest(line)
    except ConnectionError:
        print("Failed to open url.")
        print(line)
f.close()
Thanks!
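One way to handle both problems is to rely on the requests library throughout: requests.get with a timeout gives up on links that take too long, and checking status_code filters out dead URLs, so anything that cannot be fetched simply comes back as False and can be skipped. The streamed response is then copied straight to a file with shutil.copyfileobj: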
import os
import requests
import shutil

outputDirectory = r"C:\Users\Joshua\Documents\Downloaded Media"

def sendRequest(url):
    # Return the streamed response only on HTTP 200; on any exception
    # (dead URL, timeout) or non-200 status, return False instead.
    try:
        page = requests.get(url, stream=True, timeout=5)
    except Exception:
        pass
    else:
        if page.status_code == 200:
            return page
    return False

def downloadImage(imageUrl: str, filePath: str):
    img = sendRequest(imageUrl)
    if img is False:
        return False

    with open(filePath, "wb") as f:
        img.raw.decode_content = True  # decompress the raw stream if it is gzip/deflate encoded
        try:
            shutil.copyfileobj(img.raw, f)
        except Exception:
            return False

    return True

URL = "https://upload.wikimedia.org/wikipedia/commons/b/b6/Image_created_with_a_mobile_phone.png"

imageName = URL.split("/")[-1]  # Image_created_with_a_mobile_phone.png

# C:\Users\Joshua\Documents\Downloaded Media\Image_created_with_a_mobile_phone.png
imagePath = os.path.join(outputDirectory, imageName)

downloadImage(URL, imagePath)
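To tie this back to the original url.txt workflow, a minimal sketch (assuming the file holds one image URL per line and that the functions and outputDirectory above are already defined) could look like this:

# Minimal usage sketch: download every URL listed in url.txt, skipping
# blank lines and anything sendRequest/downloadImage rejects.
with open("url.txt", "r") as urlFile:
    for line in urlFile:
        url = line.strip()  # drop the trailing newline that readline() would keep
        if not url:
            continue
        imageName = url.split("/")[-1]
        imagePath = os.path.join(outputDirectory, imageName)
        if not downloadImage(url, imagePath):
            print("Failed to download:", url)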