Can't get ORB algorithm to match my images
I'm trying to use the ORB algorithm on these images. The code runs, but I can't get enough keypoints to match the images. How can I get it to work?
import cv2
import numpy as np
img1 = cv2.imread('cards4/eightofspades100.png', 0)
img2 = cv2.imread('cards4/deck 4.png', 0)
cv2.imshow('img1',img1)
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1,None)
kp2, des2 = orb.detectAndCompute(img2,None)
#des1 and des2 are arrays of 500 features and 32 values
imgKp1 = cv2.drawKeypoints(img1, kp1, None)
imgKp2 = cv2.drawKeypoints(img2, kp2, None)
bf = cv2.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2)
good = []
# m and n are the two nearest neighbours per descriptor (k = 2)
for m, n in matches:
    if m.distance < 1 * n.distance:
        good.append([m])
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags = 2)
cv2.imshow('Kp1', imgKp1)
cv2.imshow('Kp2', imgKp2)
#cv2.imshow('img1', img1)
#cv2.imshow('img2', img2)
#cv2.imshow('img3', img3)
cv2.waitKey(0)
(image: eight of spades)
(image: deck of cards)
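For completeness, the ORB pipeline itself usually needs two changes before it gives usable matches: a Hamming-norm matcher (ORB descriptors are binary strings, so the default L2 norm is a poor fit) and a ratio threshold well below 1.0, since m.distance < 1 * n.distance accepts every candidate match. A minimal sketch along those lines, reusing the same file paths; nfeatures=2000 is an illustrative choice, not from the original code:

# Sketch only: Hamming-norm BFMatcher + Lowe's ratio test (~0.75) for ORB.
# nfeatures=2000 is an assumption; the default is 500.
import cv2

img1 = cv2.imread('cards4/eightofspades100.png', 0)   # template (eight of spades)
img2 = cv2.imread('cards4/deck 4.png', 0)             # scene (whole deck)

orb = cv2.ORB_create(nfeatures=2000)                  # more keypoints than the default
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)

bf = cv2.BFMatcher(cv2.NORM_HAMMING)                  # Hamming norm for binary descriptors
matches = bf.knnMatch(des1, des2, k=2)

good = [[m] for m, n in matches if m.distance < 0.75 * n.distance]  # Lowe's ratio test

img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)
cv2.imshow('matches', img3)
cv2.waitKey(0)

Even with these tweaks, a small, low-texture template like a playing card may still not yield enough good keypoints, which is why the answer below switches to template matching.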
Template matching with cv2.matchTemplate():
import wizzi_utils as wu # pip install wizzi_utils
import cv2
def match():
    src = wu.cvt.load_img(path='cards4/deck 4.png')
    template = wu.cvt.load_img(path='cards4/eightofspades100.png')
    # reduce images size to 30% (too big to see results)
    # works also without resizing - just to visualize
    reduce_factor = 0.3
    src = wu.cvt.resize_opencv_image(img=src, resize=reduce_factor)
    template = wu.cvt.resize_opencv_image(img=template, resize=reduce_factor)
    # display on the right side
    wu.cvt.display_open_cv_image(img=src, ms=1, title='src', loc='top_right')
    wu.cvt.display_open_cv_image(img=template, ms=1, title='template', loc='bottom_right')
    # convert to gray scale
    src_g = wu.cvt.BGR_img_to_gray(bgr_img=src)
    template_g = wu.cvt.BGR_img_to_gray(bgr_img=template)
    t_height, t_width = template_g.shape
    # different methods
    methods = [cv2.TM_CCOEFF, cv2.TM_CCOEFF_NORMED, cv2.TM_CCORR_NORMED, cv2.TM_SQDIFF]
    print('matching:')
    for method_idx in methods:
        result = cv2.matchTemplate(src_g, template_g, method_idx)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
        print('\tfor method_idx {}: min_loc={}, max_loc={}'.format(method_idx, min_loc, max_loc))
        # TM_SQDIFF treats the smallest value as the best match; the other methods use the largest
        location = min_loc if method_idx in [cv2.TM_SQDIFF] else max_loc
        bottom_right = (location[0] + t_width, location[1] + t_height)
        # just for drawing the result
        src_copy = src.copy()
        cv2.rectangle(img=src_copy, pt1=location, pt2=bottom_right, color=wu.pyplt.get_BGR_color('r'), thickness=2)
        wu.cvt.display_open_cv_image(img=src_copy, ms=0, title='method idx {}'.format(method_idx), loc='top_left')
    cv2.destroyAllWindows()
    return


def main():
    match()
    return


if __name__ == '__main__':
    wu.main_wrapper(
        main_function=main,
        seed=42,
        ipv4=False,
        cuda_off=False,
        torch_v=False,
        tf_v=False,
        cv2_v=False,
        with_pip_list=False,
        with_profiler=False
    )
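The same approach works with OpenCV alone if wizzi_utils is not installed. A minimal sketch using only cv2, reusing the asker's file paths and a single method (cv2.TM_CCOEFF_NORMED):

# Sketch only: plain-OpenCV template matching, no wizzi_utils.
import cv2

src = cv2.imread('cards4/deck 4.png', cv2.IMREAD_GRAYSCALE)
template = cv2.imread('cards4/eightofspades100.png', cv2.IMREAD_GRAYSCALE)
t_height, t_width = template.shape

# correlation-based matching; highest value = best match location
result = cv2.matchTemplate(src, template, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)

top_left = max_loc                                   # for TM_SQDIFF* use min_loc instead
bottom_right = (top_left[0] + t_width, top_left[1] + t_height)

# draw the detected region on a colour copy of the scene
src_bgr = cv2.cvtColor(src, cv2.COLOR_GRAY2BGR)
cv2.rectangle(src_bgr, top_left, bottom_right, (0, 0, 255), 2)
cv2.imshow('match', src_bgr)
cv2.waitKey(0)
cv2.destroyAllWindows()

Note that plain cv2.matchTemplate is not scale- or rotation-invariant, so it works here because the card appears in the deck image at roughly the same size and orientation as the template.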