Open a PDF, from a URL, with pdfminer.six
Background: Python 3.7 & pdfminer.six
Using the information found here: Exporting Data from PDFs with Python, I have the following code:
import io
from pdfminer.converter import TextConverter
from pdfminer.pdfinterp import PDFPageInterpreter
from pdfminer.pdfinterp import PDFResourceManager
from pdfminer.pdfpage import PDFPage
def extract_text_from_pdf(pdf_path):
    resource_manager = PDFResourceManager()
    fake_file_handle = io.StringIO()
    converter = TextConverter(resource_manager, fake_file_handle)
    page_interpreter = PDFPageInterpreter(resource_manager, converter)

    with open(pdf_path, 'rb') as fh:
        for page in PDFPage.get_pages(fh,
                                      caching=True,
                                      check_extractable=True):
            page_interpreter.process_page(page)

        text = fake_file_handle.getvalue()

    # close open handles
    converter.close()
    fake_file_handle.close()

    if text:
        return text

if __name__ == '__main__':
    path = '../_pdfs/mypdf.pdf'
    print(extract_text_from_pdf(path))
This works (yay!), but what I really want to do is request the PDF directly from a URL rather than open one that has already been saved to my local drive.
I'm not sure how I need to modify the "with open" logic to read from a remote URL, and I'm also not sure which request library is best to use with a recent version of Python (requests, urllib, urllib2, etc.?).
I'm new to Python, so please keep that in mind. (P.S. I found other questions about this but couldn't get anywhere with them, possibly because they tend to be quite old.)
Any help would be much appreciated! Thanks!
You can use PyPDF2 to parse the PDF file.
Try this:
import requests
import PyPDF2

# Fill address with your url
address = "http://example.com/my_pdf.pdf"

try:
    response = requests.get(address)
except requests.RequestException as e:
    print("Error:", e)
    raise

my_raw_data = response.content

# save the downloaded bytes to a local file
with open("my_pdf.pdf", 'wb') as my_data:
    my_data.write(my_raw_data)

open_pdf_file = open("my_pdf.pdf", 'rb')
try:
    read_pdf = PyPDF2.PdfFileReader(open_pdf_file)
except Exception as e:
    print("Failed to read:", e)
    raise

# an empty password handles PDFs that are encrypted without a user password
if read_pdf.isEncrypted:
    read_pdf.decrypt("")

n = read_pdf.getNumPages()
for x in range(n):
    try:
        s = read_pdf.getPage(x).extractText()
    except Exception:
        print("Error in page", x)
        continue
    print(s)
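Note that PdfFileReader and the camelCase methods above belong to the legacy PyPDF2 API, which recent pypdf releases (3.x) replace with PdfReader and snake_case methods. A rough equivalent sketch with the newer API, assuming pypdf 3.x is installed and using a placeholder URL, would be:

import io

import requests
from pypdf import PdfReader

address = "https://example.com/sample.pdf"  # placeholder URL

response = requests.get(address, timeout=30)
response.raise_for_status()

# PdfReader accepts any binary file-like object, so no temp file is needed
reader = PdfReader(io.BytesIO(response.content))
if reader.is_encrypted:
    reader.decrypt("")  # empty password for PDFs encrypted without one

for page in reader.pages:
    print(page.extract_text())

This version also skips the intermediate my_pdf.pdf file by wrapping the downloaded bytes in an in-memory BytesIO buffer.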
This is how I solved it:
from io import StringIO, BytesIO
import urllib.request
from pdfminer.converter import TextConverter
from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager
from pdfminer.pdfpage import PDFPage
def extract_text_from_pdf_url(url, user_agent=None):
    resource_manager = PDFResourceManager()
    fake_file_handle = StringIO()
    converter = TextConverter(resource_manager, fake_file_handle)

    if user_agent is None:
        user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36'
    headers = {'User-Agent': user_agent}

    request = urllib.request.Request(url, data=None, headers=headers)
    response = urllib.request.urlopen(request).read()
    fb = BytesIO(response)

    page_interpreter = PDFPageInterpreter(resource_manager, converter)
    for page in PDFPage.get_pages(fb,
                                  caching=True,
                                  check_extractable=True):
        page_interpreter.process_page(page)
    text = fake_file_handle.getvalue()

    # close open handles
    fb.close()
    converter.close()
    fake_file_handle.close()

    if text:
        # If the document contains \xa0 characters, replace them with spaces.
        # NOTE: \xa0 is the non-breaking space in Latin-1 (ISO 8859-1), chr(160)
        text = text.replace(u'\xa0', u' ')
        return text
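A quick usage sketch (the URL below is only a placeholder):

if __name__ == '__main__':
    # placeholder URL; substitute a link to a real PDF
    url = 'https://example.com/sample.pdf'
    print(extract_text_from_pdf_url(url))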
A simpler way to retrieve a PDF from a URL in a format that works with pdfminer.six is:
import io
import urllib.request

def pdf_getter(url: str) -> io.BytesIO:
    '''
    retrieves the pdf at `url` as an in-memory bytes buffer
    '''
    data = urllib.request.urlopen(url).read()  # avoid shadowing the built-in open()
    return io.BytesIO(data)
Both PDFParser() and the high-level extract_text() function will accept the returned object as input.
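For example, a minimal sketch (placeholder URL) feeding the returned buffer into pdfminer.six's high-level extract_text():

from pdfminer.high_level import extract_text

# extract_text() accepts a binary file-like object, so the BytesIO
# returned by pdf_getter() can be passed in directly
text = extract_text(pdf_getter("https://example.com/sample.pdf"))
print(text)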