使用 python-opencv 进行背景减除(background subtraction)的质心跟踪
Centroid tracking for background subtraction with python-opencv
我正在使用背景减除(background subtraction)进行对象跟踪,并套用了这个教程 https://www.pyimagesearch.com/2018/07/23/simple-object-tracking-with-opencv/ 。对实时视频流它的输出是正常的,但当我输入一系列图像帧时,所有的 ID(连同旧的 ID)都会被画在新图像上,旧的 ID 没有被移除。
下一帧会得到新的 ID,之前的 ID 也会打印在上面。
我该如何解决这个问题?
def detection():
    """Track objects across a sorted sequence of still-image frames.

    Reads every .jpg under "img location/", applies MOG2 background
    subtraction to isolate moving contours, and feeds each frame's
    bounding boxes to a CentroidTracker so every object keeps a stable
    integer ID across frames. Annotated frames are written to
    "Writelocation/" and shown on screen; press 'q' to stop early.
    """
    # NOTE(review): cv2.THRESH_BINARY (== 0) is passed as MOG2's
    # varThreshold and 1 as detectShadows — confirm these are intentional.
    backsub = cv2.createBackgroundSubtractorMOG2(128, cv2.THRESH_BINARY, 1)
    minarea = 50  # ignore contours smaller than this many pixels
    ct = CentroidTracker()
    filenames = sorted(glob.glob("img location/*.jpg"))
    print("start2")
    for img in filenames:
        frame = cv2.imread(img)
        timestamp = int(round(time.time() * 1000))
        # BUG FIX: rects must be reset for EVERY frame. Initializing it
        # once outside the loop let boxes accumulate across frames, so
        # old object IDs kept being re-drawn on each new image.
        rects = []
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        blur = cv2.medianBlur(frame, 21)
        fgmask = backsub.apply(blur)
        fgmask[fgmask == 127] = 0  # drop MOG2 shadow pixels (value 127)
        thresh = cv2.threshold(fgmask, 25, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=4)
        # Draw the region-of-interest boundary onto the frame.
        cv2.line(frame, (103, 209), (332, 607), (0, 255, 0), 1)
        pts = np.array([[0, 607], [0, 215], [103, 209], [332, 607]], np.int32)
        frame = cv2.polylines(frame, [pts], True, (0, 255, 0))
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        for c in cnts:
            M = cv2.moments(c)
            A = cv2.contourArea(c)
            if A >= minarea:
                (x, y, w, h) = cv2.boundingRect(c)
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                rects.append([x, y, x + w, y + h])
                if M["m00"] != 0:
                    # Contour centroid from image moments.
                    cx = int(M["m10"] / M["m00"])
                    cy = int(M["m01"] / M["m00"])
                    cv2.circle(frame, (cx, cy), 5, (255, 255, 255), -1)
                cv2.imwrite("Writelocation/I%s.jpg" % timestamp, frame)
                print("Area is : ", A)
        objects = ct.update(rects)
        # BUG FIX: the original tested `object` (the builtin class, which
        # is never None) instead of the `objects` dict returned above.
        if objects is not None:
            for (objectID, centroid) in objects.items():
                # Draw both the ID of the object and its centroid.
                text = "ID:{}".format(objectID)
                cv2.putText(frame, text, (centroid[0], centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
        cv2.imshow("fgmask", thresh)
        cv2.imshow("img", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        cv2.imwrite("Writelocation/V%s.jpg" % timestamp, frame)
    print("Done")


detection()
以上是BackgroundSubtraction pgm
下面是质心跟踪 pgm
from scipy.spatial import distance as dist
from collections import OrderedDict
import numpy as np
class CentroidTracker():
    """Assign persistent integer IDs to detected objects across frames.

    Each call to update() receives the current frame's bounding boxes,
    matches them greedily to already-tracked centroids by Euclidean
    distance, registers new objects for unmatched detections, and
    deregisters objects not seen for more than maxDisappeared frames.
    """

    def __init__(self, maxDisappeared=3):
        # Next ID to hand out, plus parallel books of live centroids
        # and per-object "frames missing" counters.
        self.nextObjectID = 0
        self.objects = OrderedDict()
        self.disappeared = OrderedDict()
        self.maxDisappeared = maxDisappeared

    def register(self, centroid):
        """Start tracking a brand-new object at `centroid`."""
        new_id = self.nextObjectID
        self.objects[new_id] = centroid
        self.disappeared[new_id] = 0
        self.nextObjectID = new_id + 1

    def deregister(self, objectID):
        """Forget `objectID` entirely."""
        self.objects.pop(objectID)
        self.disappeared.pop(objectID)

    def update(self, rects):
        """Match this frame's boxes to tracked objects.

        `rects` is a sequence of (startX, startY, endX, endY) boxes.
        Returns the OrderedDict mapping objectID -> centroid.
        """
        # No detections this frame: age every tracked object and drop
        # any that have been missing too long.
        if not rects:
            for oid in list(self.disappeared):
                self.disappeared[oid] += 1
                if self.disappeared[oid] > self.maxDisappeared:
                    self.deregister(oid)
            return self.objects

        # Compute the center point of every input box.
        inputCentroids = np.zeros((len(rects), 2), dtype="int")
        for i, (sx, sy, ex, ey) in enumerate(rects):
            inputCentroids[i] = (int((sx + ex) / 2.0), int((sy + ey) / 2.0))

        if not self.objects:
            # Nothing tracked yet: every detection starts a new track.
            for c in inputCentroids:
                self.register(c)
        else:
            ids = list(self.objects.keys())
            tracked = np.array(list(self.objects.values()))
            # Pairwise distances: rows = tracked objects, cols = detections.
            D = dist.cdist(tracked, inputCentroids)
            # Greedy nearest-neighbour assignment, smallest distances first.
            rows = D.min(axis=1).argsort()
            cols = D.argmin(axis=1)[rows]
            usedRows, usedCols = set(), set()
            for row, col in zip(rows, cols):
                if row in usedRows or col in usedCols:
                    continue
                oid = ids[row]
                self.objects[oid] = inputCentroids[col]
                self.disappeared[oid] = 0
                usedRows.add(row)
                usedCols.add(col)
            leftoverRows = set(range(D.shape[0])) - usedRows
            leftoverCols = set(range(D.shape[1])) - usedCols
            if D.shape[0] >= D.shape[1]:
                # More tracks than detections: age the unmatched tracks.
                for row in leftoverRows:
                    oid = ids[row]
                    self.disappeared[oid] += 1
                    if self.disappeared[oid] > self.maxDisappeared:
                        self.deregister(oid)
            else:
                # More detections than tracks: leftovers become new tracks.
                for col in leftoverCols:
                    self.register(inputCentroids[col])
        return self.objects
解决方法:把 detection() 中的 rects = [] 这一行移动到第一个 for 循环(即 for img in filenames: 这一行)之后的循环体内部。这样每一帧都会重新收集边界框,旧的 ID 就不会在新图像上累积。
我正在使用背景减除(background subtraction)进行对象跟踪,并套用了这个教程 https://www.pyimagesearch.com/2018/07/23/simple-object-tracking-with-opencv/ 。对实时视频流它的输出是正常的,但当我输入一系列图像帧时,所有的 ID(连同旧的 ID)都会被画在新图像上,旧的 ID 没有被移除。
下一帧会得到新的 ID,之前的 ID 也会打印在上面。我该如何解决这个问题?
def detection():
    """Track objects across a sorted sequence of still-image frames.

    Reads every .jpg under "img location/", applies MOG2 background
    subtraction to isolate moving contours, and feeds each frame's
    bounding boxes to a CentroidTracker so every object keeps a stable
    integer ID across frames. Annotated frames are written to
    "Writelocation/" and shown on screen; press 'q' to stop early.
    """
    # NOTE(review): cv2.THRESH_BINARY (== 0) is passed as MOG2's
    # varThreshold and 1 as detectShadows — confirm these are intentional.
    backsub = cv2.createBackgroundSubtractorMOG2(128, cv2.THRESH_BINARY, 1)
    minarea = 50  # ignore contours smaller than this many pixels
    ct = CentroidTracker()
    filenames = sorted(glob.glob("img location/*.jpg"))
    print("start2")
    for img in filenames:
        frame = cv2.imread(img)
        timestamp = int(round(time.time() * 1000))
        # BUG FIX: rects must be reset for EVERY frame. Initializing it
        # once outside the loop let boxes accumulate across frames, so
        # old object IDs kept being re-drawn on each new image.
        rects = []
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        blur = cv2.medianBlur(frame, 21)
        fgmask = backsub.apply(blur)
        fgmask[fgmask == 127] = 0  # drop MOG2 shadow pixels (value 127)
        thresh = cv2.threshold(fgmask, 25, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=4)
        # Draw the region-of-interest boundary onto the frame.
        cv2.line(frame, (103, 209), (332, 607), (0, 255, 0), 1)
        pts = np.array([[0, 607], [0, 215], [103, 209], [332, 607]], np.int32)
        frame = cv2.polylines(frame, [pts], True, (0, 255, 0))
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        for c in cnts:
            M = cv2.moments(c)
            A = cv2.contourArea(c)
            if A >= minarea:
                (x, y, w, h) = cv2.boundingRect(c)
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                rects.append([x, y, x + w, y + h])
                if M["m00"] != 0:
                    # Contour centroid from image moments.
                    cx = int(M["m10"] / M["m00"])
                    cy = int(M["m01"] / M["m00"])
                    cv2.circle(frame, (cx, cy), 5, (255, 255, 255), -1)
                cv2.imwrite("Writelocation/I%s.jpg" % timestamp, frame)
                print("Area is : ", A)
        objects = ct.update(rects)
        # BUG FIX: the original tested `object` (the builtin class, which
        # is never None) instead of the `objects` dict returned above.
        if objects is not None:
            for (objectID, centroid) in objects.items():
                # Draw both the ID of the object and its centroid.
                text = "ID:{}".format(objectID)
                cv2.putText(frame, text, (centroid[0], centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
        cv2.imshow("fgmask", thresh)
        cv2.imshow("img", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        cv2.imwrite("Writelocation/V%s.jpg" % timestamp, frame)
    print("Done")


detection()
以上是BackgroundSubtraction pgm 下面是质心跟踪 pgm
from scipy.spatial import distance as dist
from collections import OrderedDict
import numpy as np
class CentroidTracker():
    """Assign persistent integer IDs to detected objects across frames.

    Each call to update() receives the current frame's bounding boxes,
    matches them greedily to already-tracked centroids by Euclidean
    distance, registers new objects for unmatched detections, and
    deregisters objects not seen for more than maxDisappeared frames.
    """

    def __init__(self, maxDisappeared=3):
        # Next ID to hand out, plus parallel books of live centroids
        # and per-object "frames missing" counters.
        self.nextObjectID = 0
        self.objects = OrderedDict()
        self.disappeared = OrderedDict()
        self.maxDisappeared = maxDisappeared

    def register(self, centroid):
        """Start tracking a brand-new object at `centroid`."""
        new_id = self.nextObjectID
        self.objects[new_id] = centroid
        self.disappeared[new_id] = 0
        self.nextObjectID = new_id + 1

    def deregister(self, objectID):
        """Forget `objectID` entirely."""
        self.objects.pop(objectID)
        self.disappeared.pop(objectID)

    def update(self, rects):
        """Match this frame's boxes to tracked objects.

        `rects` is a sequence of (startX, startY, endX, endY) boxes.
        Returns the OrderedDict mapping objectID -> centroid.
        """
        # No detections this frame: age every tracked object and drop
        # any that have been missing too long.
        if not rects:
            for oid in list(self.disappeared):
                self.disappeared[oid] += 1
                if self.disappeared[oid] > self.maxDisappeared:
                    self.deregister(oid)
            return self.objects

        # Compute the center point of every input box.
        inputCentroids = np.zeros((len(rects), 2), dtype="int")
        for i, (sx, sy, ex, ey) in enumerate(rects):
            inputCentroids[i] = (int((sx + ex) / 2.0), int((sy + ey) / 2.0))

        if not self.objects:
            # Nothing tracked yet: every detection starts a new track.
            for c in inputCentroids:
                self.register(c)
        else:
            ids = list(self.objects.keys())
            tracked = np.array(list(self.objects.values()))
            # Pairwise distances: rows = tracked objects, cols = detections.
            D = dist.cdist(tracked, inputCentroids)
            # Greedy nearest-neighbour assignment, smallest distances first.
            rows = D.min(axis=1).argsort()
            cols = D.argmin(axis=1)[rows]
            usedRows, usedCols = set(), set()
            for row, col in zip(rows, cols):
                if row in usedRows or col in usedCols:
                    continue
                oid = ids[row]
                self.objects[oid] = inputCentroids[col]
                self.disappeared[oid] = 0
                usedRows.add(row)
                usedCols.add(col)
            leftoverRows = set(range(D.shape[0])) - usedRows
            leftoverCols = set(range(D.shape[1])) - usedCols
            if D.shape[0] >= D.shape[1]:
                # More tracks than detections: age the unmatched tracks.
                for row in leftoverRows:
                    oid = ids[row]
                    self.disappeared[oid] += 1
                    if self.disappeared[oid] > self.maxDisappeared:
                        self.deregister(oid)
            else:
                # More detections than tracks: leftovers become new tracks.
                for col in leftoverCols:
                    self.register(inputCentroids[col])
        return self.objects
解决方法:把 detection() 中的 rects = [] 这一行移动到第一个 for 循环(即 for img in filenames: 这一行)之后的循环体内部,让每一帧重新收集边界框。