如何使用 Python 从 PDF 中提取 table 作为文本?
How to extract table as text from the PDF using Python?
我有一个 PDF,其中包含 Tables、文本和一些图像。我想提取 PDF 中 table 所在位置的 table。
现在正在手动从页面中查找 Table。我从那里捕获该页面并保存到另一个 PDF 中。
import PyPDF2

PDFfilename = "Sammamish.pdf"  # filename of your PDF/directory where your PDF is stored

# Open the source PDF in a context manager so the handle is closed even on
# error -- the original passed an anonymous open() that was never closed.
with open(PDFfilename, "rb") as source_stream:
    pfr = PyPDF2.PdfFileReader(source_stream)  # PdfFileReader object
    pg4 = pfr.getPage(126)  # extract pg 127 (getPage uses a 0-based index)

    writer = PyPDF2.PdfFileWriter()  # create PdfFileWriter object
    # add pages
    writer.addPage(pg4)

    NewPDFfilename = "allTables.pdf"  # filename/directory where the new PDF is written
    # Write while the reader's stream is still open: PyPDF2 pulls page
    # content lazily from the source file.
    with open(NewPDFfilename, "wb") as outputStream:
        writer.write(outputStream)  # write pages to new PDF
我的目标是从整个 PDF 文档中提取 table。
- 我建议您使用 tabula 来提取 table。
- 将您的 pdf 作为参数传递给 tabula 的 api,它会以数据帧(DataFrame)的形式把 table 返回给您。
- 您的 pdf 中的每个 table 都会作为一个数据帧返回。
- 这些 table 会以数据帧列表的形式返回;要使用数据帧,您需要 pandas。
这是我提取pdf的代码。
import pandas as pd
import tabula

# PDF to parse and the directory it lives in.
file = "filename.pdf"
path = 'enter your directory path here' + file

# tabula returns every table found on the requested page(s) as a list of
# pandas DataFrames (one DataFrame per table).
df = tabula.read_pdf(path, pages='1', multiple_tables=True)
print(df)
详情请参考我的repo。
此答案适用于遇到带有图像的 pdf 并需要使用 OCR 的任何人。我找不到可行的现成解决方案;没有什么能给我所需的准确性。
以下是我发现有效的步骤。
使用 https://poppler.freedesktop.org/ 中的 pdfimages
将 pdf 的页面转换为图像。
使用 Tesseract 检测旋转,并使用 ImageMagick 的 mogrify 进行修复。
使用 OpenCV 查找并提取 table。
使用 OpenCV 从 table 中查找并提取每个单元格。
使用 OpenCV 裁剪和清理每个单元格,这样就不会有干扰 OCR 软件的噪音。
使用 Tesseract 对每个单元格进行 OCR。
将每个单元格提取的文本组合成你需要的格式。
我写了一个 python 包,其中包含可以帮助完成这些步骤的模块。
回购:https://github.com/eihli/image-table-ocr
文档和来源:https://eihli.github.io/image-table-ocr/pdf_table_extraction_and_ocr.html
有些步骤不需要代码,它们利用 pdfimages
和 tesseract
等外部工具。我将为一些确实需要代码的步骤提供一些简短示例。
- 查找 tables:
这个 link 在弄清楚如何找到 table 时是一个很好的参考。 https://answers.opencv.org/question/63847/how-to-extract-tables-from-an-image/
import cv2
def find_tables(image):
    """Locate table-like regions in a grayscale page image.

    Blurs and adaptively thresholds the page, isolates long horizontal and
    vertical lines with morphological opening, and treats the external
    contours of the combined line mask as table boundaries.

    Args:
        image: 2-D (grayscale) numpy/OpenCV image of a page.

    Returns:
        A list of sub-images (numpy views into ``image``), one per detected
        table, in the order OpenCV returns their contours.
    """
    BLUR_KERNEL_SIZE = (17, 17)
    STD_DEV_X_DIRECTION = 0
    STD_DEV_Y_DIRECTION = 0
    blurred = cv2.GaussianBlur(image, BLUR_KERNEL_SIZE, STD_DEV_X_DIRECTION, STD_DEV_Y_DIRECTION)

    MAX_COLOR_VAL = 255
    BLOCK_SIZE = 15
    SUBTRACT_FROM_MEAN = -2
    # Invert before thresholding so table lines come out white on black.
    img_bin = cv2.adaptiveThreshold(
        ~blurred,
        MAX_COLOR_VAL,
        cv2.ADAPTIVE_THRESH_MEAN_C,
        cv2.THRESH_BINARY,
        BLOCK_SIZE,
        SUBTRACT_FROM_MEAN,
    )

    # BUGFIX: a 2-D numpy shape is (rows, cols) == (height, width).  The
    # original unpacked it as (width, height), which swapped the kernel
    # sizes on any non-square page.
    image_height, image_width = img_bin.shape
    SCALE = 5
    horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (int(image_width / SCALE), 1))
    horizontally_opened = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, horizontal_kernel)
    vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, int(image_height / SCALE)))
    vertically_opened = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, vertical_kernel)

    # Dilate so slightly broken lines merge into contiguous segments.
    horizontally_dilated = cv2.dilate(horizontally_opened, cv2.getStructuringElement(cv2.MORPH_RECT, (40, 1)))
    vertically_dilated = cv2.dilate(vertically_opened, cv2.getStructuringElement(cv2.MORPH_RECT, (1, 60)))

    mask = horizontally_dilated + vertically_dilated
    contours, hierarchy = cv2.findContours(
        mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE,
    )

    # Discard small contours (noise, logos, small boxes); a table covers a
    # large area of the page.
    MIN_TABLE_AREA = 1e5
    contours = [c for c in contours if cv2.contourArea(c) > MIN_TABLE_AREA]
    perimeter_lengths = [cv2.arcLength(c, True) for c in contours]
    epsilons = [0.1 * p for p in perimeter_lengths]
    approx_polys = [cv2.approxPolyDP(c, e, True) for c, e in zip(contours, epsilons)]
    bounding_rects = [cv2.boundingRect(a) for a in approx_polys]

    # The link where a lot of this code was borrowed from recommends an
    # additional step to check the number of "joints" inside this bounding
    # rectangle.  A table should have a lot of intersections.  We might have
    # a rectangular image here though which would only have 4 intersections,
    # 1 at each corner.  Leaving that step as a future TODO if it is ever
    # necessary.
    images = [image[y:y+h, x:x+w] for x, y, w, h in bounding_rects]
    return images
- 从 table 中提取单元格。
这与 2 非常相似,因此我不会包含所有代码。我将参考的部分是对单元格进行排序。
我们要从左到右、从上到下识别单元格。
我们会找到最左上角的矩形。然后我们将找到中心位于左上角矩形的 top-y 和 bottom-y 值范围内的所有矩形。然后我们将根据它们中心的 x 值对这些矩形进行排序。我们将从列表中删除这些矩形并重复。
def cell_in_same_row(c1, c2):
    """True when the vertical midpoint of cell ``c1`` lies strictly inside
    the vertical span of cell ``c2``.

    Cells are (x, y, w, h) bounding rectangles; only y (index 1) and
    h (index 3) are inspected.
    """
    midpoint = c1[1] + c1[3] - c1[3] / 2  # top + height/2
    top, bottom = c2[1], c2[1] + c2[3]
    return top < midpoint < bottom
# Group cells into rows: repeatedly take the first remaining cell as an
# anchor, collect every cell that shares its row, and order that row
# left-to-right by x coordinate.
orig_cells = list(cells)
rows = []
while cells:
    anchor, remaining = cells[0], cells[1:]
    same_row = [c for c in remaining if cell_in_same_row(c, anchor)]
    rows.append(sorted([anchor] + same_row, key=lambda cell: cell[0]))
    # Everything not in this row is handled on a later pass.
    cells = [c for c in remaining if not cell_in_same_row(c, anchor)]

# Sort rows by average height of their center.
def avg_height_of_center(row):
    """Mean vertical midpoint (y + h/2) of the cells in *row*."""
    centers = [y + h - h / 2 for x, y, w, h in row]
    return sum(centers) / len(centers)

rows.sort(key=avg_height_of_center)
如果您的 pdf 是 text-based 而不是扫描文档(即,如果您可以在 PDF 查看器中单击并拖动以 select table 中的文本),那么您可以使用 camelot-py 模块:
# camelot parses the PDF's embedded text layer, so it only works on
# text-based (selectable-text) PDFs, not scanned images.
import camelot
# Returns a TableList holding one Table per table camelot detected.
tables = camelot.read_pdf('foo.pdf')
然后您可以选择如何保存 tables(作为 csv、json、excel、html、sqlite),以及是否输出应压缩为 ZIP 存档。
tables.export('foo.csv', f='csv', compress=False)
编辑:tabula-py 似乎比 camelot-py 快大约 6 倍,因此应该改用它。
import camelot
import cProfile
import pstats
import tabula

def _profiled_seconds(stmt):
    """Exec *stmt* under cProfile and return the total time it took."""
    profile = cProfile.Profile().run(stmt)
    return pstats.Stats(profile).total_tt

# Profile the same one-page lattice extraction through both libraries.
cmd_tabula = "tabula.read_pdf('table.pdf', pages='1', lattice=True)"
time_tabula = _profiled_seconds(cmd_tabula)
cmd_camelot = "camelot.read_pdf('table.pdf', pages='1', flavor='lattice')"
time_camelot = _profiled_seconds(cmd_camelot)
# Last column is the slowdown factor of camelot relative to tabula.
print(time_tabula, time_camelot, time_camelot/time_tabula)
结果输出为:
1.8495559890000015 11.057014036000016 5.978199147125147
使用 Python pdfminer
从 PDF 中提取 table 作为文本
from pprint import pprint
from io import StringIO
import re
from pdfminer.high_level import extract_text_to_fp
from pdfminer.layout import LAParams
from lxml import html
# Pixel boundaries of the two columns of interest, taken from the CSS
# `left:` offsets pdfminer assigns to DIVs in its HTML output.
ID_LEFT_BORDER = 56
ID_RIGHT_BORDER = 156
QTY_LEFT_BORDER = 355
QTY_RIGHT_BORDER = 455

# Read PDF file and convert it to HTML
output = StringIO()
with open('example.pdf', 'rb') as pdf_file:
    extract_text_to_fp(pdf_file, output, laparams=LAParams(), output_type='html', codec=None)
raw_html = output.getvalue()

# Extract all DIV tags
tree = html.fromstring(raw_html)
divs = tree.xpath('.//div')

# Keep only the text of DIVs whose CSS left offset falls inside one of
# the two column bands.
filtered_divs = {'ID': [], 'Qty': []}
left_pattern = re.compile(r'left:([0-9]+)px')
for div in divs:
    div_style = div.get('style')
    # style looks like:
    # position:absolute; border: textbox 1px solid; writing-mode:lr-tb; left:292px; top:1157px; width:27px; height:12px;
    match = left_pattern.search(div_style)
    if match is None:
        continue  # DIV carries no absolute left position
    left = int(match.group(1))
    if ID_LEFT_BORDER < left < ID_RIGHT_BORDER:
        filtered_divs['ID'].append(div.text_content().strip('\n'))
    if QTY_LEFT_BORDER < left < QTY_RIGHT_BORDER:
        filtered_divs['Qty'].append(div.text_content().strip('\n'))

# Pair IDs with quantities row by row, skipping the header row.
data = []
for id_text, qty_text in zip(filtered_divs['ID'], filtered_divs['Qty']):
    if 'ID' in id_text:
        continue
    data.append({'ID': id_text.split(' ')[0], 'Quantity': qty_text})
pprint(data)
我有一个 PDF,其中包含 Tables、文本和一些图像。我想提取 PDF 中 table 所在位置的 table。
现在正在手动从页面中查找 Table。我从那里捕获该页面并保存到另一个 PDF 中。
import PyPDF2

PDFfilename = "Sammamish.pdf"  # filename of your PDF/directory where your PDF is stored

# Open the source PDF in a context manager so the handle is closed even on
# error -- the original passed an anonymous open() that was never closed.
with open(PDFfilename, "rb") as source_stream:
    pfr = PyPDF2.PdfFileReader(source_stream)  # PdfFileReader object
    pg4 = pfr.getPage(126)  # extract pg 127 (getPage uses a 0-based index)

    writer = PyPDF2.PdfFileWriter()  # create PdfFileWriter object
    # add pages
    writer.addPage(pg4)

    NewPDFfilename = "allTables.pdf"  # filename/directory where the new PDF is written
    # Write while the reader's stream is still open: PyPDF2 pulls page
    # content lazily from the source file.
    with open(NewPDFfilename, "wb") as outputStream:
        writer.write(outputStream)  # write pages to new PDF
我的目标是从整个 PDF 文档中提取 table。
- 我建议您使用 tabula 来提取 table。
- 将您的 pdf 作为参数传递给 tabula 的 api,它会以数据帧(DataFrame)的形式把 table 返回给您。
- 您的 pdf 中的每个 table 都会作为一个数据帧返回。
- 这些 table 会以数据帧列表的形式返回;要使用数据帧,您需要 pandas。
这是我提取pdf的代码。
import pandas as pd
import tabula

# PDF to parse and the directory it lives in.
file = "filename.pdf"
path = 'enter your directory path here' + file

# tabula returns every table found on the requested page(s) as a list of
# pandas DataFrames (one DataFrame per table).
df = tabula.read_pdf(path, pages='1', multiple_tables=True)
print(df)
详情请参考我的repo。
此答案适用于遇到带有图像的 pdf 并需要使用 OCR 的任何人。我找不到可行的现成解决方案;没有什么能给我所需的准确性。
以下是我发现有效的步骤。
使用 https://poppler.freedesktop.org/ 中的 pdfimages 将 pdf 的页面转换为图像。
使用 Tesseract 检测旋转,并使用 ImageMagick 的 mogrify 进行修复。
使用 OpenCV 查找并提取 table。
使用 OpenCV 从 table 中查找并提取每个单元格。
使用 OpenCV 裁剪和清理每个单元格,这样就不会有干扰 OCR 软件的噪音。
使用 Tesseract 对每个单元格进行 OCR。
将每个单元格提取的文本组合成你需要的格式。
我写了一个 python 包,其中包含可以帮助完成这些步骤的模块。
回购:https://github.com/eihli/image-table-ocr
文档和来源:https://eihli.github.io/image-table-ocr/pdf_table_extraction_and_ocr.html
有些步骤不需要代码,它们利用 pdfimages
和 tesseract
等外部工具。我将为一些确实需要代码的步骤提供一些简短示例。
- 查找 tables:
这个 link 在弄清楚如何找到 table 时是一个很好的参考。 https://answers.opencv.org/question/63847/how-to-extract-tables-from-an-image/
import cv2
def find_tables(image):
    """Locate table-like regions in a grayscale page image.

    Blurs and adaptively thresholds the page, isolates long horizontal and
    vertical lines with morphological opening, and treats the external
    contours of the combined line mask as table boundaries.

    Args:
        image: 2-D (grayscale) numpy/OpenCV image of a page.

    Returns:
        A list of sub-images (numpy views into ``image``), one per detected
        table, in the order OpenCV returns their contours.
    """
    BLUR_KERNEL_SIZE = (17, 17)
    STD_DEV_X_DIRECTION = 0
    STD_DEV_Y_DIRECTION = 0
    blurred = cv2.GaussianBlur(image, BLUR_KERNEL_SIZE, STD_DEV_X_DIRECTION, STD_DEV_Y_DIRECTION)

    MAX_COLOR_VAL = 255
    BLOCK_SIZE = 15
    SUBTRACT_FROM_MEAN = -2
    # Invert before thresholding so table lines come out white on black.
    img_bin = cv2.adaptiveThreshold(
        ~blurred,
        MAX_COLOR_VAL,
        cv2.ADAPTIVE_THRESH_MEAN_C,
        cv2.THRESH_BINARY,
        BLOCK_SIZE,
        SUBTRACT_FROM_MEAN,
    )

    # BUGFIX: a 2-D numpy shape is (rows, cols) == (height, width).  The
    # original unpacked it as (width, height), which swapped the kernel
    # sizes on any non-square page.
    image_height, image_width = img_bin.shape
    SCALE = 5
    horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (int(image_width / SCALE), 1))
    horizontally_opened = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, horizontal_kernel)
    vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, int(image_height / SCALE)))
    vertically_opened = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, vertical_kernel)

    # Dilate so slightly broken lines merge into contiguous segments.
    horizontally_dilated = cv2.dilate(horizontally_opened, cv2.getStructuringElement(cv2.MORPH_RECT, (40, 1)))
    vertically_dilated = cv2.dilate(vertically_opened, cv2.getStructuringElement(cv2.MORPH_RECT, (1, 60)))

    mask = horizontally_dilated + vertically_dilated
    contours, hierarchy = cv2.findContours(
        mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE,
    )

    # Discard small contours (noise, logos, small boxes); a table covers a
    # large area of the page.
    MIN_TABLE_AREA = 1e5
    contours = [c for c in contours if cv2.contourArea(c) > MIN_TABLE_AREA]
    perimeter_lengths = [cv2.arcLength(c, True) for c in contours]
    epsilons = [0.1 * p for p in perimeter_lengths]
    approx_polys = [cv2.approxPolyDP(c, e, True) for c, e in zip(contours, epsilons)]
    bounding_rects = [cv2.boundingRect(a) for a in approx_polys]

    # The link where a lot of this code was borrowed from recommends an
    # additional step to check the number of "joints" inside this bounding
    # rectangle.  A table should have a lot of intersections.  We might have
    # a rectangular image here though which would only have 4 intersections,
    # 1 at each corner.  Leaving that step as a future TODO if it is ever
    # necessary.
    images = [image[y:y+h, x:x+w] for x, y, w, h in bounding_rects]
    return images
- 从 table 中提取单元格。
这与 2 非常相似,因此我不会包含所有代码。我将参考的部分是对单元格进行排序。
我们要从左到右、从上到下识别单元格。
我们会找到最左上角的矩形。然后我们将找到中心位于左上角矩形的 top-y 和 bottom-y 值范围内的所有矩形。然后我们将根据它们中心的 x 值对这些矩形进行排序。我们将从列表中删除这些矩形并重复。
def cell_in_same_row(c1, c2):
    """True when the vertical midpoint of cell ``c1`` lies strictly inside
    the vertical span of cell ``c2``.

    Cells are (x, y, w, h) bounding rectangles; only y (index 1) and
    h (index 3) are inspected.
    """
    midpoint = c1[1] + c1[3] - c1[3] / 2  # top + height/2
    top, bottom = c2[1], c2[1] + c2[3]
    return top < midpoint < bottom
# Group cells into rows: repeatedly take the first remaining cell as an
# anchor, collect every cell that shares its row, and order that row
# left-to-right by x coordinate.
orig_cells = list(cells)
rows = []
while cells:
    anchor, remaining = cells[0], cells[1:]
    same_row = [c for c in remaining if cell_in_same_row(c, anchor)]
    rows.append(sorted([anchor] + same_row, key=lambda cell: cell[0]))
    # Everything not in this row is handled on a later pass.
    cells = [c for c in remaining if not cell_in_same_row(c, anchor)]

# Sort rows by average height of their center.
def avg_height_of_center(row):
    """Mean vertical midpoint (y + h/2) of the cells in *row*."""
    centers = [y + h - h / 2 for x, y, w, h in row]
    return sum(centers) / len(centers)

rows.sort(key=avg_height_of_center)
如果您的 pdf 是 text-based 而不是扫描文档(即,如果您可以在 PDF 查看器中单击并拖动以 select table 中的文本),那么您可以使用 camelot-py 模块:
# camelot parses the PDF's embedded text layer, so it only works on
# text-based (selectable-text) PDFs, not scanned images.
import camelot
# Returns a TableList holding one Table per table camelot detected.
tables = camelot.read_pdf('foo.pdf')
然后您可以选择如何保存 tables(作为 csv、json、excel、html、sqlite),以及是否输出应压缩为 ZIP 存档。
tables.export('foo.csv', f='csv', compress=False)
编辑:tabula-py 似乎比 camelot-py 快大约 6 倍,因此应该改用它。
import camelot
import cProfile
import pstats
import tabula

def _profiled_seconds(stmt):
    """Exec *stmt* under cProfile and return the total time it took."""
    profile = cProfile.Profile().run(stmt)
    return pstats.Stats(profile).total_tt

# Profile the same one-page lattice extraction through both libraries.
cmd_tabula = "tabula.read_pdf('table.pdf', pages='1', lattice=True)"
time_tabula = _profiled_seconds(cmd_tabula)
cmd_camelot = "camelot.read_pdf('table.pdf', pages='1', flavor='lattice')"
time_camelot = _profiled_seconds(cmd_camelot)
# Last column is the slowdown factor of camelot relative to tabula.
print(time_tabula, time_camelot, time_camelot/time_tabula)
结果输出为:
1.8495559890000015 11.057014036000016 5.978199147125147
使用 Python pdfminer 从 PDF 中提取 table 作为文本
from pprint import pprint
from io import StringIO
import re
from pdfminer.high_level import extract_text_to_fp
from pdfminer.layout import LAParams
from lxml import html
# Pixel boundaries of the two columns of interest, taken from the CSS
# `left:` offsets pdfminer assigns to DIVs in its HTML output.
ID_LEFT_BORDER = 56
ID_RIGHT_BORDER = 156
QTY_LEFT_BORDER = 355
QTY_RIGHT_BORDER = 455

# Read PDF file and convert it to HTML
output = StringIO()
with open('example.pdf', 'rb') as pdf_file:
    extract_text_to_fp(pdf_file, output, laparams=LAParams(), output_type='html', codec=None)
raw_html = output.getvalue()

# Extract all DIV tags
tree = html.fromstring(raw_html)
divs = tree.xpath('.//div')

# Keep only the text of DIVs whose CSS left offset falls inside one of
# the two column bands.
filtered_divs = {'ID': [], 'Qty': []}
left_pattern = re.compile(r'left:([0-9]+)px')
for div in divs:
    div_style = div.get('style')
    # style looks like:
    # position:absolute; border: textbox 1px solid; writing-mode:lr-tb; left:292px; top:1157px; width:27px; height:12px;
    match = left_pattern.search(div_style)
    if match is None:
        continue  # DIV carries no absolute left position
    left = int(match.group(1))
    if ID_LEFT_BORDER < left < ID_RIGHT_BORDER:
        filtered_divs['ID'].append(div.text_content().strip('\n'))
    if QTY_LEFT_BORDER < left < QTY_RIGHT_BORDER:
        filtered_divs['Qty'].append(div.text_content().strip('\n'))

# Pair IDs with quantities row by row, skipping the header row.
data = []
for id_text, qty_text in zip(filtered_divs['ID'], filtered_divs['Qty']):
    if 'ID' in id_text:
        continue
    data.append({'ID': id_text.split(' ')[0], 'Quantity': qty_text})
pprint(data)