How would I use the ORB detector with image homography?
I want to use the ORB detector to draw a bounding box around the found object, similar to this example that uses the SIFT detector: SIFT Reference
The linked example uses FlannBasedMatcher. My code uses BFMatcher. I have no preference for which matcher is used.
import numpy as np
import cv2
MIN_MATCH_COUNT = 10
img1 = cv2.imread('box.png',0)
img2 = cv2.imread('box_in_scene.png',0)
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1,None)
kp2, des2 = orb.detectAndCompute(img2,None)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(des1,des2)
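For reference, the same matching step with FlannBasedMatcher (as in the linked example) is sketched below; ORB's binary descriptors need FLANN's LSH index rather than the KD-tree used for SIFT, and the index parameters here are just the values suggested in the OpenCV docs, so treat this as a rough sketch rather than the code I am actually running:
FLANN_INDEX_LSH = 6
index_params = dict(algorithm=FLANN_INDEX_LSH, table_number=6, key_size=12, multi_probe_level=1)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
knn_matches = flann.knnMatch(des1, des2, k=2)
# keep matches that pass Lowe's ratio test; with the LSH index a pair can
# come back with fewer than two neighbours, so check its length first
good = []
for pair in knn_matches:
    if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance:
        good.append(pair[0])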
How do I continue this code so that it uses the homography to draw the bounding box on the box_in_scene image?
Edit: I tried the following, but the output is not as expected.
src_pts = np.float32([ kp1[m.queryIdx].pt for m in matches[:50] ]).reshape(-1,1,2)
dst_pts = np.float32([ kp2[m.trainIdx].pt for m in matches[:50] ]).reshape(-1,1,2)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
matchesMask = mask.ravel().tolist()
h,w = img1.shape
pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
dst = cv2.perspectiveTransform(pts,M)
This is my result.
Code (descriptions written as comments):
#!/usr/bin/python3
# 2017.11.26 23:27:12 CST
## Find an object by ORB feature matching
import numpy as np
import cv2
imgname = "box.png" # query image (small object)
imgname2 = "box_in_scene.png" # train image (large scene)
MIN_MATCH_COUNT = 4
## Create the ORB detector and the BF matcher (using the Hamming norm)
orb = cv2.ORB_create()
img1 = cv2.imread(imgname)
img2 = cv2.imread(imgname2)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
## Find the keypoints and descriptors with ORB
kpts1, descs1 = orb.detectAndCompute(gray1,None)
kpts2, descs2 = orb.detectAndCompute(gray2,None)
## match descriptors and sort them in the order of their distance
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(descs1, descs2)
dmatches = sorted(matches, key = lambda x:x.distance)
## extract the matched keypoints
src_pts = np.float32([kpts1[m.queryIdx].pt for m in dmatches]).reshape(-1,1,2)
dst_pts = np.float32([kpts2[m.trainIdx].pt for m in dmatches]).reshape(-1,1,2)
## find homography matrix and do perspective transform
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
h,w = img1.shape[:2]
pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
dst = cv2.perspectiveTransform(pts,M)
## draw found regions
img2 = cv2.polylines(img2, [np.int32(dst)], True, (0,0,255), 1, cv2.LINE_AA)
cv2.imshow("found", img2)
## draw match lines
res = cv2.drawMatches(img1, kpts1, img2, kpts2, dmatches[:20],None,flags=2)
cv2.imshow("orb_match", res);
cv2.waitKey();cv2.destroyAllWindows()
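Two optional refinements, sketched with the same variable names as the code above: MIN_MATCH_COUNT is defined but never checked, and the RANSAC inlier mask returned by cv2.findHomography can be passed to cv2.drawMatches so that only inlier matches are drawn.
## sketch: guard to run before the homography step above
## (cv2.findHomography needs at least 4 point pairs)
if len(dmatches) < MIN_MATCH_COUNT:
    raise SystemExit("Not enough matches found - %d/%d" % (len(dmatches), MIN_MATCH_COUNT))
## sketch: draw only the RANSAC inliers by passing the mask as matchesMask
matchesMask = mask.ravel().tolist()
res = cv2.drawMatches(img1, kpts1, img2, kpts2, dmatches, None, matchesMask=matchesMask, flags=2)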