Python Image Processing Threading

So I'm working on a robotics project where we have to recognize a pattern on a wall and position our robot accordingly. I developed this image-processing code on my laptop: it grabs an image, converts it to HSV, applies a bitwise mask, runs Canny edge detection, and finds contours. I thought I could just copy and paste the code onto a Raspberry Pi 3; however, because of the decreased processing power, the fps is less than 1. I have been trying to separate the code into threads, so that I can have one thread that captures the images, one thread that converts the images to HSV and filters them, and one thread that does the contour fitting. To get them to communicate with each other, I made queues.

Here is my initial vision code:

import numpy as np
import cv2
import time
import matplotlib.pyplot as plt
import sys

def onmouse(k, x, y, s, p):
    global hsv
    if k == 1:  # left mouse, print pixel at x,y
        print(hsv[y, x])

def distance_to_camera(Kwidth, focalLength, pixelWidth):
    return (Kwidth * focalLength) / pixelWidth

def contourArea(contours):

    # Pair each contour's area with its index so the index survives the sort.
    area = []
    for i in range(0, len(contours)):
        area.append([cv2.contourArea(contours[i]), i])

    area.sort()
    # Only accept the largest contour when it clearly dominates the smallest one.
    if area[-1][0] >= 5 * area[0][0]:
        return area[-1]

    return 0, 0

if __name__ == '__main__':

    cap = cv2.VideoCapture(0)

    """
    cap.set(3, 1920)
    cap.set(4, 1080)
    cap.set(5, 30)
    time.sleep(2)
    cap.set(15, -8.0)
    """

    KNOWN_WIDTH = 18

    # focalLength = (rect[1][1] * 74) / 18

    focalLength = 341.7075686984592

    distance_data = []

    counter1 = 0

    numFrames = 100
    samples = 1
    start_time = time.time()

    while (samples < numFrames):
        # Capture frame-by-frame
        ret, img = cap.read()

        length1, width1, channels = img.shape
        img = cv2.GaussianBlur(img, (5, 5), 0)

        hsv = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2HSV)

        # lower_green = np.array([75, 200, 170])
        # lower_green = np.array([53,180,122])

        #lower_green = np.array([70, 120, 120])

        lower_green = np.array([70, 50, 120])

        upper_green = np.array([120, 200, 255])

        #upper_green = np.array([120, 200, 255])

        mask = cv2.inRange(hsv, lower_green, upper_green)
        res = cv2.bitwise_and(hsv, hsv, mask=mask)

        gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)

        edged = cv2.Canny(res, 35, 125)

        im2, contours, hierarchy = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)

        if (len(contours) > 1):

            area,place = contourArea(contours)

            #print(area)
            if(area != 0):

                # print("Contxours: %d" % contours.size())
                # print("Hierarchy: %d" % hierarchy.size())

                c = contours[place]

                cv2.drawContours(img, c, -1, (0, 0, 255), 3)
                cv2.drawContours(edged,c, -1, (255, 0, 0), 3)
                perimeter = cv2.arcLength(c, True)

                M = cv2.moments(c)

                cx = 0
                cy = 0

                if (M['m00'] != 0):
                    cx = int(M['m10'] / M['m00'])  # Center of MASS Coordinates
                    cy = int(M['m01'] / M['m00'])

                rect = cv2.minAreaRect(c)
                box = cv2.boxPoints(rect)
                box = np.int0(box)
                cv2.drawContours(img, [box], 0, (255, 0, 0), 2)
                cv2.circle(img, (cx, cy), 7, (0, 0, 255), -1)

                cv2.line(img, (int(width1 / 2), int(length1 / 2)), (cx, cy), (255, 0, 0), 2)


                if(rect[1][1] != 0):
                    inches = distance_to_camera(KNOWN_WIDTH, focalLength, rect[1][1])

                    #print(inches)
                    distance_data.append(inches)

                counter1+=1
        samples+=1
        """
        cv2.namedWindow("Image w Contours")
        cv2.setMouseCallback("Image w Contours", onmouse)
        cv2.imshow('Image w Contours', img)

        cv2.namedWindow("HSV")
        cv2.setMouseCallback("HSV", onmouse)
        cv2.imshow('HSV', edged)

        if cv2.waitKey(1) & 0xFF == ord('x'):
            break
        """

    # When everything is done, release the capture

    totTime = time.time() - start_time
    print("--- %s seconds ---" % (totTime))
    print('----%s fps ----' % (numFrames/totTime))
    cap.release()
    cv2.destroyAllWindows()

    """
    Results were:

    --- 13.469419717788696 seconds ---
    ----7.42422480665093 fps ----
    """


    plt.plot(distance_data)
    plt.xlabel('TimeData')
    plt.ylabel('Distance to Target(in) ')
    plt.title('Distance vs Time From Camera')
    plt.show()

Here is my threaded code. It grabs the frames in the main thread and filters them in another thread; I would like another thread for the contour fitting, but even with these two stages the threaded code runs at practically the same FPS as the previous code. These results are also from my laptop, not the Raspberry Pi.

import cv2
import threading
import datetime
import numpy as np
import queue
import time

frame = queue.Queue(0)
canny = queue.Queue(0)
lower_green = np.array([70, 50, 120])
upper_green = np.array([120, 200, 255])

class FilterFrames(threading.Thread):
    def __init__(self,threadID,lock):
        threading.Thread.__init__(self)
        self.lock = lock
        self.name = threadID
        self.setDaemon(True)
        self.start()

    def run(self):

        while(True):
            img1 = frame.get()
            img1 = cv2.GaussianBlur(img1, (5, 5), 0)
            hsv = cv2.cvtColor(img1.copy(), cv2.COLOR_BGR2HSV)
            mask = cv2.inRange(hsv, lower_green, upper_green)
            res = cv2.bitwise_and(hsv, hsv, mask=mask)
            edged = cv2.Canny(res, 35, 125)
            canny.put(edged)

if __name__ == '__main__':

    lock = threading.Lock()
    numframes = 100
    frames = 0

    cap = cv2.VideoCapture(0)

    filter = FilterFrames(lock=lock, threadID='Filter')

    start_time = time.time()
    while(frames < numframes):

        ret,img = cap.read()

        frame.put(img)

        frames+=1

    totTime = time.time() - start_time
    print("--- %s seconds ---" % (totTime))
    print('----%s fps ----' % (numframes/totTime))

    """
    Results were:

    --- 13.590131759643555 seconds ---
    ----7.358280388197121 fps ----

    """
    cap.release()

I was wondering whether I am doing something wrong, whether the access to the queues is slowing the code down, and whether I should be using the multiprocessing module instead of threading for this application.

You can profile the code with the cProfile module. It will tell you which part of the program is the bottleneck.
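
For example, assuming the script is saved as vision.py (a placeholder name), you could run the whole thing under the profiler from the shell, or profile just the main loop from inside the script:

# From the shell, sorted by cumulative time ("vision.py" is a placeholder):
#     python -m cProfile -s cumtime vision.py

# Or from inside the script: wrap the work in a function and profile that.
import cProfile

def main():
    ...  # the capture / filter / contour loop goes here

cProfile.run('main()', sort='cumtime')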

Python, in the CPython implementation, has a Global Interpreter Lock (GIL). This means that even though your application is multi-threaded, it will only use one of your CPUs. You can try the multiprocessing module instead. Jython and IronPython have no GIL, but they have no, or no stable, Python 3 support.
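
As a rough illustration, here is a minimal sketch of moving the filter stage into a separate process with multiprocessing. The bounded queue size, the None sentinel, and the fixed frame count are assumptions for the sketch, not a drop-in replacement for the code above:

import cv2
import numpy as np
import multiprocessing as mp

lower_green = np.array([70, 50, 120])
upper_green = np.array([120, 200, 255])

def filter_worker(frame_q, canny_q):
    # Runs in its own process, so it is not serialized by the GIL.
    while True:
        img = frame_q.get()
        if img is None:          # sentinel value tells the worker to stop
            break
        img = cv2.GaussianBlur(img, (5, 5), 0)
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, lower_green, upper_green)
        res = cv2.bitwise_and(hsv, hsv, mask=mask)
        canny_q.put(cv2.Canny(res, 35, 125))

if __name__ == '__main__':
    frame_q = mp.Queue(maxsize=4)   # bounded so capture cannot run far ahead
    canny_q = mp.Queue()

    worker = mp.Process(target=filter_worker, args=(frame_q, canny_q), daemon=True)
    worker.start()

    cap = cv2.VideoCapture(0)
    numframes = 100
    sent = 0
    for _ in range(numframes):
        ret, img = cap.read()
        if ret:
            frame_q.put(img)
            sent += 1

    frame_q.put(None)               # tell the worker to stop

    # Drain the results; otherwise the worker cannot flush its output queue
    # and worker.join() could block forever.
    edges = [canny_q.get() for _ in range(sent)]

    worker.join()
    cap.release()

Note that every frame gets pickled when it crosses a multiprocessing.Queue, so there is a per-frame copy cost; whether this is a net win depends on how expensive the filtering is compared to that copy.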

self.lock is never used in your code. Use a good IDE with pylint to catch these kinds of errors. A Queue maintains its own lock.

threading.Thread.__init__(self) is outdated syntax from Python 2. Use super().__init__() instead.
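
Applied to the FilterFrames class above, the constructor might look something like this (keeping the same attributes, even though the lock is unused):

class FilterFrames(threading.Thread):
    def __init__(self, threadID, lock):
        super().__init__(name=threadID, daemon=True)   # Python 3 style, replaces setDaemon(True)
        self.lock = lock                               # note: never actually used, as mentioned above
        self.start()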