How to extract keypoints from the Harris Corner Detector using OpenCV
- First, I use cv::cornerHarris() to detect the corners (I can do that easily).
- Second, I want to extract keypoints from the Harris detector and store them in a std::vector<KeyPoint> (this is the part I don't know how to do). Later I will use them to compute descriptors and match them. I can do all of this easily with SURF, but I want to do it with the Harris corner detector.
/// Detecting corners
cv::cornerHarris(leftRoi, dst, blockSize, apertureSize, k, BORDER_DEFAULT);
/// Normalizing
normalize(dst, dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat());
convertScaleAbs(dst_norm, dst_norm_scaled);
/// Drawing a circle around corners
for (int j = 0; j < dst_norm.rows; j++)
{
    for (int i = 0; i < dst_norm.cols; i++)
    {
        if ((int)dst_norm.at<float>(j, i) > 165)
        {
            circle(dst_norm_scaled, Point(i, j), 5, Scalar(0), 2, 8, 0);
        }
    }
}
/// Showing the result
namedWindow("corners_window", CV_WINDOW_AUTOSIZE);
imshow("corners_window", dst_norm_scaled);
- This is the part I'm stuck on (how do I extract keypoints from the Harris detector?):
std::vector<KeyPoint> keypoints;
Python
Here is what I wrote in Python:
# convert coordinates to Keypoint type
eye_corner_keypoints = [cv2.KeyPoint(crd[0], crd[1], 13) for crd in eye_corner_coordinates]
# compute SIFT descriptors from corner keypoints
sift = cv2.xfeatures2d.SIFT_create()
eye_corner_descriptors = [sift.compute(gray,[kp])[1] for kp in eye_corner_keypoints]
C++
Look at the constructor signature for the KeyPoint class in the OpenCV reference documentation:
KeyPoint (float x, float y, float _size, float _angle=-1, float _response=0, int _octave=0, int _class_id=-1)
It looks like you can loop over your coordinate points and instantiate a KeyPoint object on each iteration, roughly like this:
for (int i = 0; i < num_points; i++) {
    KeyPoint kp(points_x[i], points_y[i], points_size[i]);
    keypoints.push_back(kp);
    /* ... */
}
Caveat: this code is untested and I am not a C++ programmer.
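Tying that back to the question's own code: a minimal sketch (untested; it assumes the dst_norm and leftRoi matrices from the question, reuses the threshold of 165 from the drawing loop, picks an arbitrary keypoint size of 5, and uses ORB purely as a stand-in descriptor extractor, since SURF and SIFT expose the same compute() interface) could look like this:

#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>
#include <vector>

// Collect every pixel whose normalized Harris response exceeds the threshold
std::vector<cv::KeyPoint> keypoints;
for (int j = 0; j < dst_norm.rows; j++)
{
    for (int i = 0; i < dst_norm.cols; i++)
    {
        if ((int)dst_norm.at<float>(j, i) > 165)
        {
            // KeyPoint(x, y, size): x is the column index i, y is the row index j;
            // the size of 5 is an arbitrary choice matching the circle radius drawn above
            keypoints.push_back(cv::KeyPoint((float)i, (float)j, 5.0f));
        }
    }
}

// Compute descriptors at these keypoints; ORB is only an example extractor here
cv::Ptr<cv::ORB> extractor = cv::ORB::create();
cv::Mat descriptors;
extractor->compute(leftRoi, keypoints, descriptors);

Filling the KeyPoint's response parameter with the Harris value at (j, i) is also an option; it makes it easy to sort or prune the keypoints before matching.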
Try my code:
import cv2
import numpy as np
from skimage.io import imread
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
img = imread('images/box.jpg')
img_gray = rgb2gray(img)
img_gray = np.float32(img_gray)
#cv2.imshow("Image",img)
#cv2.imshow("Gray Image",img_gray)
#Ix = cv2.Sobel(img_gray,cv2.CV_64F,1,0,ksize=5)
#Iy = cv2.Sobel(img_gray,cv2.CV_64F,0,1,ksize=5)
kernel_x = np.array([[-1, 0, 1],[-2, 0, 2],[-1, 0, 1]])
kernel_y = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
Ix = cv2.filter2D(img_gray,-1,kernel_x)
Iy = cv2.filter2D(img_gray,-1,kernel_y)
Ixx = Ix**2
Ixy = Ix*Iy
Iyy = Iy**2
#cv2.imshow("Ixx",Ixx)
#cv2.imshow("Iyy Image",Iyy)
#cv2.imshow("Ixy Image",Ixy)
# Loop through image and find our corners
k = 0.05
height = img_gray.shape[0]
width = img_gray.shape[1]
harris_response = []
window_size = 6
offset = int(window_size/2)
for y in range(offset, height-offset):
    for x in range(offset, width-offset):
        # Sum the gradient products over the local window (structure tensor entries)
        Sxx = np.sum(Ixx[y-offset:y+1+offset, x-offset:x+1+offset])
        Syy = np.sum(Iyy[y-offset:y+1+offset, x-offset:x+1+offset])
        Sxy = np.sum(Ixy[y-offset:y+1+offset, x-offset:x+1+offset])
        # Find determinant and trace, use to get corner response
        det = (Sxx * Syy) - (Sxy ** 2)
        trace = Sxx + Syy
        r = det - k * (trace ** 2)
        harris_response.append([x, y, r])
img_copy = np.copy(img)
thresh = 500
#sift = cv2.xfeatures2d.SIFT_create()
#kp,dc = sift.compute(img,None)
for response in harris_response:
    x, y, r = response
    if r > thresh:
        # Mark pixels whose response exceeds the threshold in red
        img_copy[y, x] = [255, 0, 0]
plt.imshow(img_copy)
cv2.waitKey(0)
plt.show()
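For reference, the inner loop above computes the standard Harris response from the windowed sums of the gradient products (note that with window_size = 6 and offset = 3, the slice y-offset : y+1+offset spans 7 pixels, so the effective window is 7x7):

$$
M = \begin{pmatrix} S_{xx} & S_{xy} \\ S_{xy} & S_{yy} \end{pmatrix}, \qquad
R = \det(M) - k\,(\operatorname{trace} M)^2 = S_{xx} S_{yy} - S_{xy}^2 - k\,(S_{xx} + S_{yy})^2
$$

with k = 0.05 as in the code; pixels with R above thresh are then marked in red.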