Converting Code Into an Object-Oriented Class
I have a piece of code that successfully tracks an object and outputs its position. Here's a video of it working. I want to convert this code into a class, because I plan to have multiple cameras that each need to track an object's position. Here is the code that runs successfully:
import cv2
import easygui

conversionFactor = float(easygui.enterbox("Please enter the numerical conversion factor, in micrometers/pixel:"))

cap = cv2.VideoCapture(0)

# Fast frame rate, low accuracy
# tracker = cv2.TrackerMOSSE_create()
# Slow frame rate, high accuracy
tracker = cv2.TrackerCSRT_create()

success, img = cap.read()
bbox = cv2.selectROI("Tracking", img, False)
tracker.init(img, bbox)

def drawBox(img, bbox):
    # Get coordinates.
    # x is the pixel value corresponding to horizontal movement of the object.
    # (i.e. x = 0 is the far left of the screen, bigger number is further to the right)
    # y is the pixel value corresponding to vertical movement of the object.
    x, y, w, h = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])
    cv2.rectangle(img, (x, y), ((x + w), (y + h)), (255, 0, 255), 3, 1)
    cv2.putText(img, "Tracking", (25, 75), cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 255, 255), 2)

while True:
    timer = cv2.getTickCount()
    success, img = cap.read()

    xCoordinate = bbox[0] * conversionFactor
    yCoordinate = bbox[1] * conversionFactor
    xCoordinateString = "X Coordinate (micrometers): " + str("%.2f" % xCoordinate)
    yCoordinateString = "Y Coordinate (micrometers): " + str("%.2f" % yCoordinate)

    success, bbox = tracker.update(img)

    if success:
        drawBox(img, bbox)
    else:
        cv2.putText(img, "Lost", (25, 75), cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 0, 255), 2)

    fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
    cv2.putText(img, str(int(fps)), (25, 50), cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 0, 255), 2)
    cv2.putText(img, xCoordinateString, (200, 50), cv2.FONT_HERSHEY_COMPLEX, 0.7, (255, 0, 0), 2)
    cv2.putText(img, yCoordinateString, (200, 75), cv2.FONT_HERSHEY_COMPLEX, 0.7, (255, 0, 0), 2)

    cv2.imshow("Tracking", img)

    if cv2.waitKey(1) & 0xff == ord('q'):
        break
I tried moving all of the code inside the `while True` block into its own method, and converting the xCoordinate and yCoordinate variables into horizontalCoordinate and verticalCoordinate class attributes (since the programmer will define whether the vertical/horizontal coordinate corresponds to the x, y, or z axis). The first block of code runs, but the video feed afterwards does not keep updating (as shown in this video).
Besides getting some feedback on how to improve this code, I would appreciate help fixing the problem with this class. My object-oriented design skills are minimal at best, so I'm sure I've botched plenty of best practices here.
Code:
import cv2
import easygui

class MachineVisionCamerasAFAM():

    def __init__(self, cameraNumber):
        self.cameraNumber = cameraNumber
        self.horizontalPosition = 0
        self.verticalPosition = 0

        conversionFactor = float(easygui.enterbox("Please enter the numerical conversion factor, in micrometers/pixel:"))
        self.conversionFactor = conversionFactor

        cap = cv2.VideoCapture(cameraNumber)
        self.cap = cap

        # Fast frame rate, low accuracy
        # tracker = cv2.TrackerMOSSE_create()
        # Slow frame rate, high accuracy
        tracker = cv2.TrackerCSRT_create()

        success, img = cap.read()
        bbox = cv2.selectROI("Tracking", img, False)
        tracker.init(img, bbox)

        self.bbox = bbox
        self.tracker = tracker

    def getHorizontalPosition(self):
        horizontalCoordinate = self.bbox[1] * self.conversionFactor
        return self.horizontalPosition

    def getVerticalPosition(self):
        verticalCoordinate = self.bbox[0] * self.conversionFactor
        return self.verticalPosition

    def runCamera(self):

        def drawBox(img, bbox):
            # Get coordinates.
            # x is the pixel value corresponding to horizontal movement of the object.
            # (i.e. x = 0 is the far left of the screen, bigger number is further to the right)
            # y is the pixel value corresponding to vertical movement of the object.
            x, y, w, h = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])
            cv2.rectangle(img, (x, y), ((x + w), (y + h)), (255, 0, 255), 3, 1)
            cv2.putText(img, "Tracking", (25, 75), cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 255, 255), 2)

        timer = cv2.getTickCount()
        success, img = self.cap.read()
        success, bbox = self.tracker.update(img)

        if success:
            drawBox(img, bbox)
        else:
            cv2.putText(img, "Lost", (25, 75), cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 0, 255), 2)

        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
        cv2.putText(img, str(int(fps)), (25, 50), cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 0, 255), 2)
        cv2.imshow("Tracking", img)

        # if cv2.waitKey(1) & 0xff == ord('q'):
        #     break


camera1 = MachineVisionCamerasAFAM(cameraNumber=0)

while True:
    camera1.runCamera()
    print("X Coordinate (micrometers): " + str("%.2f" % camera1.horizontalPosition))
    print(camera1.horizontalPosition)
    print("Y Coordinate (micrometers): " + str("%.2f" % camera1.verticalPosition))
    print(camera1.verticalPosition)
Thanks in advance for your help!
What really matters in your case is the commented-out waitKey, which is required for the display to update. As a first step, you can replace the two commented lines with cv2.waitKey(1), and the continuous updating should start working again.
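To illustrate why that call matters, here is a minimal, self-contained sketch of my own (just a webcam preview loop, no tracker): cv2.imshow only queues the frame, and cv2.waitKey is what runs OpenCV's HighGUI event loop and actually redraws the window.

import cv2

cap = cv2.VideoCapture(0)

while True:
    success, img = cap.read()
    if not success:
        break
    cv2.imshow("Tracking", img)

    # Without this call the window never refreshes: waitKey processes GUI
    # events (including the redraw triggered by imshow) and also reports
    # key presses, so it doubles as a 'q'-to-quit check.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()

Applied to your runCamera method, the change amounts to replacing the two commented-out lines after cv2.imshow("Tracking", img) with a plain cv2.waitKey(1); if you still want the 'q'-to-quit behaviour, the key check can move to the outer while loop that calls runCamera.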