SIFT feature_matching point coordinates

I want to print the coordinates of the detected feature keypoints using the FLANN-based matcher from this tutorial: http://docs.opencv.org/trunk/dc/dc3/tutorial_py_matcher.html. The matching works fine and the keypoints are drawn in red (all) and green (good), just like in the tutorial. I only want to print the (x, y) coordinates of the matched keypoints of the second image (the scene), named kp2 here, but it does not work. Here is my code:

import numpy as np
import cv2
from matplotlib import pyplot as plt
img1 = cv2.imread('img1.jpg',0)          # queryImage
img2 = cv2.imread('img2.jpg',0) # trainImage
# Initiate SIFT detector
sift = cv2.xfeatures2d.SIFT_create()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)
# FLANN parameters
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks=50)   # or pass empty dictionary
flann = cv2.FlannBasedMatcher(index_params,search_params)
matches = flann.knnMatch(des1,des2,k=2)
# Need to draw only good matches, so create a mask
matchesMask = [[0,0] for i in range(len(matches))]

# ratio test as per Lowe's paper
for i,(m,n) in enumerate(matches):
    if m.distance < 0.7*n.distance:
        matchesMask[i]=[1,0]
        print(i,kp2[i].pt)

draw_params = dict(matchColor = (0,255,0),
                   singlePointColor = (255,0,0),
                   matchesMask = matchesMask,
                   flags = 0)
img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,matches,None,**draw_params)
plt.imshow(img3,),plt.show()

My results:

77 (67.68722534179688, 92.98455047607422)
82 (14.395119667053223, 93.1697998046875)
86 (127.58460235595703, 98.1304931640625)
109 (66.52041625976562, 111.51738739013672)
110 (66.52041625976562, 111.51738739013672)
146 (69.3978500366211, 11.287369728088379)

The number of matched keypoints looks right, but the coordinates printed by print(i,kp2[i].pt) are wrong; I checked them against the original image. What am I doing wrong, and which line do I have to use to print the coordinates of the matched keypoints? Thanks, everyone.
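
A minimal sketch of the corrected print, assuming the kp1, kp2, matches and matchesMask variables from the code above: the loop index i counts matches, not keypoints, so the keypoint index into kp2 has to come from the match object's trainIdx attribute (the update below arrives at the same fix):

# i indexes the match list, so kp2[i] is an unrelated keypoint;
# the correct index into kp2 is the match's trainIdx.
for i, (m, n) in enumerate(matches):
    if m.distance < 0.7 * n.distance:
        matchesMask[i] = [1, 0]
        print(i, kp2[m.trainIdx].pt)    # scene (train image) coordinates
        # print(i, kp1[m.queryIdx].pt)  # query image coordinates, if needed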

Update:

I found a useful resource: http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_matcher/py_matcher.html


I tested with these two images:

Android, Android_small

The matching result looks like this:

Some of the printed results:

0 (42.05057144165039, 134.98709106445312) (139.18690490722656, 24.550437927246094)
1 (53.74299621582031, 249.95252990722656) (26.700265884399414, 124.75701904296875)
2 (56.41600799560547, 272.58843994140625) (139.18690490722656, 24.550437927246094)
3 (82.96114349365234, 124.731201171875) (41.35136795043945, 62.25730895996094)
4 (82.96114349365234, 124.731201171875) (41.35136795043945, 62.25730895996094)
5 (82.96114349365234, 124.731201171875) (41.35136795043945, 62.25730895996094)
6 (91.90446472167969, 293.59735107421875) (139.18690490722656, 24.550437927246094)
8 (94.516845703125, 296.0242919921875) (139.18690490722656, 24.550437927246094)
9 (98.97846221923828, 134.186767578125) (49.89073944091797, 67.37061309814453)

The code and explanation are as follows:

#!/usr/bin/python3
# 2017.10.06 22:36:44 CST
# 2017.10.06 23:18:25 CST

"""
Environment:
    OpenCV 3.3  + Python 3.5

Aims:
(1) Detect sift keypoints and compute descriptors.
(2) Use flannmatcher to match descriptors.
(3) Apply the ratio test and print the matched pair coordinates; draw some pairs in purple.
(4) Draw matched pairs in blue, single points in red.
"""
import numpy as np
import cv2
from matplotlib import pyplot as plt
imgname = "android.png"          # query image (large scene)
imgname2 = "android_small.png"   # train image (small object)

## Create SIFT object
sift = cv2.xfeatures2d.SIFT_create()

## Create flann matcher
FLANN_INDEX_KDTREE = 1  # bug: flann enums are missing
flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
#matcher = cv2.FlannBasedMatcher_create()
matcher = cv2.FlannBasedMatcher(flann_params, {})

## Detect and compute
img1 = cv2.imread(imgname)
gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
kpts1, descs1 = sift.detectAndCompute(gray1,None)

## Same as above, for the train image
img2 = cv2.imread(imgname2)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
kpts2, descs2 = sift.detectAndCompute(gray2,None)

## Ratio test
matches = matcher.knnMatch(descs1, descs2, 2)
matchesMask = [[0,0] for i in range(len(matches))]
for i, (m1,m2) in enumerate(matches):
    if m1.distance < 0.7 * m2.distance:
        matchesMask[i] = [1,0]
        ## Notice: How to get the index
        pt1 = kpts1[m1.queryIdx].pt
        pt2 = kpts2[m1.trainIdx].pt
        print(i, pt1,pt2 )
        if i % 5 ==0:
            ## Draw pairs in purple, to make sure the result is ok
            cv2.circle(img1, (int(pt1[0]),int(pt1[1])), 5, (255,0,255), -1)
            cv2.circle(img2, (int(pt2[0]),int(pt2[1])), 5, (255,0,255), -1)


## Draw match in blue, error in red
draw_params = dict(matchColor = (255, 0,0),
                   singlePointColor = (0,0,255),
                   matchesMask = matchesMask,
                   flags = 0)

res = cv2.drawMatchesKnn(img1,kpts1,img2,kpts2,matches,None,**draw_params)
cv2.imshow("Result", res);cv2.waitKey();cv2.destroyAllWindows()

I found the problem. I changed:

# ratio test as per Lowe's paper
good = []
for i,(m,n) in enumerate(matches):
    if m.distance < 0.7*n.distance:
        matchesMask[i]=[1,0]
        good.append(m)


dst_pt = [ kp2[m.trainIdx].pt for m in good ]
print(dst_pt)
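
If the matching coordinates in the query image are needed as well, the same pattern works with queryIdx; a small sketch, assuming the kp1, kp2 and good variables from above:

# matched coordinates in both images, paired by index
src_pt = [ kp1[m.queryIdx].pt for m in good ]   # points in img1 (query)
dst_pt = [ kp2[m.trainIdx].pt for m in good ]   # points in img2 (train / scene)
for p1, p2 in zip(src_pt, dst_pt):
    print(p1, p2)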

For you, Sunreef: the good points are green (just like in the tutorial). I have the original image and Paint (yes, Paint), and I used it to check the coordinates of the points; Paint shows the pixel coordinates. I have about ten points to check.
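
A quicker check than reading pixel positions in Paint is to draw the matched points directly onto the image, just as the code above marks some pairs with purple circles. A minimal sketch, assuming the dst_pt list from above (the output filename is arbitrary):

# mark each matched point in the train image and save it for inspection
vis = cv2.imread('img2.jpg')                                 # color copy for drawing
for (x, y) in dst_pt:
    cv2.circle(vis, (int(x), int(y)), 4, (0, 0, 255), -1)    # filled red dot
cv2.imwrite('img2_matched_points.png', vis)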