How to filter a list and remove the lowest-probability values, using Python

I have images from which I want to detect text, and I am using easyocr for the detection. The OCR output gives bounding-box values and probabilities, as shown in the output images. I want to remove any detected text whose probability is lower than 0.4. How can I change the code to do that?

[Image 1]

[Image 2]

The results element gives the output probabilities for the first and the second detected text ('AA'), as shown in the images. I want to remove the detected texts with the lowest probabilities.

Output of image1

Output of image2
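For reference, each element returned by EasyOCR's readtext is a (bounding_box, text, confidence) tuple, so the probability the question refers to is the third item of each result. A minimal sketch, assuming results holds the list returned by reader.readtext as in the code below:

# Each EasyOCR detection is a (bounding_box, text, confidence) tuple;
# the probability to filter on is the third element.
bbox, text, confidence = results[0]
print(text, confidence)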

Requirements

pip install pytesseract

pip install easyocr

Run the code with: python main.py -i image1.jpg

# import the necessary packages
from pytesseract import Output
import pytesseract
import argparse
import cv2
from matplotlib import pyplot as plt
import numpy as np
import os
import easyocr
from PIL import ImageDraw, Image



def remove_lines(image):
    result = image.copy()
    gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]

    # Remove horizontal lines
    horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (40,1))
    remove_horizontal = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, horizontal_kernel, iterations=2)
    cnts = cv2.findContours(remove_horizontal, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    for c in cnts:
        cv2.drawContours(result, [c], -1, (255,255,255), 5)


    # Remove vertical lines
    vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1,40))
    remove_vertical = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, vertical_kernel, iterations=2)
    cnts = cv2.findContours(remove_vertical, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    for c in cnts:
        cv2.drawContours(result, [c], -1, (255,255,255), 5)

    plt.imshow(result)
    plt.show()

    return result



# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
    help="path to input image to be OCR'd")
ap.add_argument("-c", "--min-conf", type=int, default=0,
    help="mininum confidence value to filter weak text detection")
args = vars(ap.parse_args())


reader = easyocr.Reader(['ch_sim','en']) # need to run only once to load model into memory



# load the input image, convert it from BGR to RGB channel ordering,
# and use Tesseract to localize each area of text in the input image
image = cv2.imread(args["image"])
# image = remove_lines(image)

results = reader.readtext(image)


print(results)

results=[([[5, 0],[233, 0],[233, 15],[5, 15]],' Ꮎ ]TC T III3 UᎪCU 3', 0.011015821004953916),
 ([[241, 0], [390, 0], [390, 15], [241, 15] ] , 'ᎠA[ [ C 0lᎢ', 0.0023567583563770737),
 ([[2, 16], [46, 16], [46, 42], [2, 42]], 'MM', 0.9965182566504686),
 ([[98, 16], [140, 16], [140, 46], [98, 46]], 'D', 0.9973547096148511),
 ([[182, 16], [220, 16],[220, 44], [182, 44]], 'Y', 0.9971791823074896),
 ([[24, 46], [62, 46], [62, 74], [24, 74]], '62', 0.9999828941291119),
 ([[94, 46], [130, 46], [130, 74], [94, 74]], '26', 0.9997197349619524),
 ([[180, 46], [242, 46], [242, 74], [180, 74]],'1970', 0.999931275844574)]

low_precision = []
for text in results:
    if text[2] < 0.5: # confidence threshold; use 0.4 to match the question
        low_precision.append(text)
for i in low_precision:
    results.remove(i) # drop low-confidence detections
print(results)

Result:

[([[2, 16], [46, 16], [46, 42], [2, 42]], 'MM', 0.9965182566504686),
 ([[98, 16], [140, 16], [140, 46], [98, 46]], 'D', 0.9973547096148511),
 ([[182, 16], [220, 16], [220, 44], [182, 44]], 'Y', 0.9971791823074896),
 ([[24, 46], [62, 46], [62, 74], [24, 74]], '62', 0.9999828941291119),
 ([[94, 46], [130, 46], [130, 74], [94, 74]], '26', 0.9997197349619524),
 ([[180, 46], [242, 46], [242, 74], [180, 74]], '1970', 0.999931275844574)]
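If you prefer to drive the threshold from the command line instead of hard-coding it, a compact alternative is a list comprehension. This is a minimal sketch, assuming --min-conf is parsed as a float (as in the argparse line above) and passed when running the script:

# Keep only detections whose confidence is at least the --min-conf value,
# e.g. python main.py -i image1.jpg -c 0.4
filtered = [r for r in results if r[2] >= args["min_conf"]]
print(filtered)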