When receiving "ValueError: not enough values to unpack (expected 2, got 1)", how can I force the program to ignore and continue?
When receiving "ValueError: not enough values to unpack (expected 2, got 1)", how can I force the program to ignore and continue?
我正在使用 Python (3) 和 OpenCV (3.3) 在网络摄像头上 运行 实时对象检测,使用样本图像,然后与视频流进行特征匹配。我已经使用 SIFT/SURF 让它工作,但我正在尝试使用 ORB 算法。
我在某些情况下收到以下错误导致程序崩溃:
for i, (m, n) in enumerate(matches):
ValueError: not enough values to unpack (expected 2, got 1)
我明白它崩溃的原因,有时图像之间有很好的匹配,有时没有,导致不匹配。
我的问题是：如何强制程序忽略并跳过没有足够值的情况并继续运行。
问题代码的主要区域:
# NOTE(review): this is the crashing loop — with the LSH index, knnMatch
# can return fewer than 2 DMatch entries for a descriptor, so the (m, n)
# unpack raises "ValueError: not enough values to unpack (expected 2, got 1)".
for i, (m, n) in enumerate(matches):
if m.distance < 0.7*n.distance:
good.append(m)
示例 'matches' 输出:
[[<DMatch 0x11bdcc030>, <DMatch 0x11bbf20b0>], [<DMatch 0x11bbf2490>, <DMatch 0x11bbf24f0>], [<DMatch 0x11bbf2750>, <DMatch 0x11bbf25d0>], [<DMatch 0x11bbf2570>, <DMatch 0x11bbf2150>], etc etc
完整代码:
import numpy as np
import cv2
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
import os, os.path
import math
import time
from datetime import datetime

startTime = datetime.now()

MIN_MATCH_COUNT = 10  # default=10

# Query image, loaded as grayscale (flag 0).
img1 = cv2.imread('Pattern3_small.jpg', 0)  # queryImage

# Create ORB object. You can specify params here or later.
orb = cv2.ORB_create()

cap = cv2.VideoCapture(0)
# cap = cv2.VideoCapture("output_H264_30.mov")

# Find the keypoints and descriptors of the query image once, up front.
kp1, des1 = orb.detectAndCompute(img1, None)

pts_global = []
dst_global = []
position = []
heading = []
# plt.axis([0, 1280, 0, 720])
tbl_upper_horiz = 1539
tbl_lower_horiz = 343
tbl_upper_vert = 1008
tbl_lower_vert = 110
# cv2.namedWindow("Frame", cv2.WINDOW_NORMAL)
# cv2.resizeWindow("Frame", 600,350)

# FLANN matcher configuration for binary (ORB) descriptors: LSH index.
# The matcher is loop-invariant, so build it ONCE instead of every frame.
FLANN_INDEX_KDTREE = 0
FLANN_INDEX_LSH = 6
# index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
index_params = dict(algorithm=FLANN_INDEX_LSH,
                    table_number=6,       # 12, 6
                    key_size=12,          # 20, 12
                    multi_probe_level=1)  # 2, 1
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)

while True:
    # cap.read() returns (success_flag, frame); stop on camera/stream failure
    # instead of passing None into detectAndCompute.
    ok, img2 = cap.read()
    if not ok:
        break

    # Start timer
    timer = cv2.getTickCount()

    # Find the keypoints and descriptors of the current frame with ORB.
    kp2, des2 = orb.detectAndCompute(img2, None)
    if des2 is None:
        # No features detected in this frame — nothing to match against.
        continue

    matches = flann.knnMatch(des1, des2, k=2)
    # print (matches)

    # Calculate Frames per second (FPS)
    fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

    # Store all the good matches as per Lowe's ratio test.
    good = []
    # With the LSH index, knnMatch may return FEWER than 2 neighbours for
    # some descriptors, so guard the unpack instead of letting it raise
    # "ValueError: not enough values to unpack (expected 2, got 1)".
    for pair in matches:
        if len(pair) < 2:
            continue
        m, n = pair
        if m.distance < 0.7 * n.distance:
            good.append(m)
    # Do something afterwards
感谢您的帮助。
将matches
的每个元素视为一个集合并使用异常处理:
# EAFP variant: try the unpack and let a targeted ValueError handler
# skip any under-filled match list.
for i, pair in enumerate(matches):
try:
m, n = pair
if m.distance < 0.7*n.distance:
good.append(m)
except ValueError:
pass
如果您这样做:
for i, (m, n) in enumerate(matches):
而且你不能保证所有的元组总是包含两个元素,那么你应该这样做:
# LBYL variant: check the length explicitly before unpacking, which also
# makes it easy to count how many pairs were discarded.
for i, values in enumerate(matches):
if len(values) < 2:
continue # you don't have the second element to compare against
...
# Do your usual processing here
上面的代码是明确的和可读的,即很明显您需要两个元素才能在处理链中进一步进行。它也不太容易出错,因为您要确保拥有正确的数据才能继续。它还允许您计算有多少元组被丢弃。
我正在使用 Python (3) 和 OpenCV (3.3) 在网络摄像头上 运行 实时对象检测,使用样本图像,然后与视频流进行特征匹配。我已经使用 SIFT/SURF 让它工作,但我正在尝试使用 ORB 算法。
我在某些情况下收到以下错误导致程序崩溃:
for i, (m, n) in enumerate(matches):
ValueError: not enough values to unpack (expected 2, got 1)
我明白它崩溃的原因,有时图像之间有很好的匹配,有时没有,导致不匹配。
我的问题是：如何强制程序忽略并跳过没有足够值的情况并继续运行。
问题代码的主要区域:
# NOTE(review): this is the crashing loop — with the LSH index, knnMatch
# can return fewer than 2 DMatch entries for a descriptor, so the (m, n)
# unpack raises "ValueError: not enough values to unpack (expected 2, got 1)".
for i, (m, n) in enumerate(matches):
if m.distance < 0.7*n.distance:
good.append(m)
示例 'matches' 输出:
[[<DMatch 0x11bdcc030>, <DMatch 0x11bbf20b0>], [<DMatch 0x11bbf2490>, <DMatch 0x11bbf24f0>], [<DMatch 0x11bbf2750>, <DMatch 0x11bbf25d0>], [<DMatch 0x11bbf2570>, <DMatch 0x11bbf2150>], etc etc
完整代码:
import numpy as np
import cv2
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
import os, os.path
import math
import time
from datetime import datetime

startTime = datetime.now()

MIN_MATCH_COUNT = 10  # default=10

# Query image, loaded as grayscale (flag 0).
img1 = cv2.imread('Pattern3_small.jpg', 0)  # queryImage

# Create ORB object. You can specify params here or later.
orb = cv2.ORB_create()

cap = cv2.VideoCapture(0)
# cap = cv2.VideoCapture("output_H264_30.mov")

# Find the keypoints and descriptors of the query image once, up front.
kp1, des1 = orb.detectAndCompute(img1, None)

pts_global = []
dst_global = []
position = []
heading = []
# plt.axis([0, 1280, 0, 720])
tbl_upper_horiz = 1539
tbl_lower_horiz = 343
tbl_upper_vert = 1008
tbl_lower_vert = 110
# cv2.namedWindow("Frame", cv2.WINDOW_NORMAL)
# cv2.resizeWindow("Frame", 600,350)

# FLANN matcher configuration for binary (ORB) descriptors: LSH index.
# The matcher is loop-invariant, so build it ONCE instead of every frame.
FLANN_INDEX_KDTREE = 0
FLANN_INDEX_LSH = 6
# index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
index_params = dict(algorithm=FLANN_INDEX_LSH,
                    table_number=6,       # 12, 6
                    key_size=12,          # 20, 12
                    multi_probe_level=1)  # 2, 1
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)

while True:
    # cap.read() returns (success_flag, frame); stop on camera/stream failure
    # instead of passing None into detectAndCompute.
    ok, img2 = cap.read()
    if not ok:
        break

    # Start timer
    timer = cv2.getTickCount()

    # Find the keypoints and descriptors of the current frame with ORB.
    kp2, des2 = orb.detectAndCompute(img2, None)
    if des2 is None:
        # No features detected in this frame — nothing to match against.
        continue

    matches = flann.knnMatch(des1, des2, k=2)
    # print (matches)

    # Calculate Frames per second (FPS)
    fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

    # Store all the good matches as per Lowe's ratio test.
    good = []
    # With the LSH index, knnMatch may return FEWER than 2 neighbours for
    # some descriptors, so guard the unpack instead of letting it raise
    # "ValueError: not enough values to unpack (expected 2, got 1)".
    for pair in matches:
        if len(pair) < 2:
            continue
        m, n = pair
        if m.distance < 0.7 * n.distance:
            good.append(m)
    # Do something afterwards
感谢您的帮助。
将matches
的每个元素视为一个集合并使用异常处理:
# EAFP variant: try the unpack and let a targeted ValueError handler
# skip any under-filled match list.
for i, pair in enumerate(matches):
try:
m, n = pair
if m.distance < 0.7*n.distance:
good.append(m)
except ValueError:
pass
如果您这样做:
for i, (m, n) in enumerate(matches):
而且你不能保证所有的元组总是包含两个元素,那么你应该这样做:
# LBYL variant: check the length explicitly before unpacking, which also
# makes it easy to count how many pairs were discarded.
for i, values in enumerate(matches):
if len(values) < 2:
continue # you don't have the second element to compare against
...
# Do your usual processing here
上面的代码是明确的和可读的,即很明显您需要两个元素才能在处理链中进一步进行。它也不太容易出错,因为您要确保拥有正确的数据才能继续。它还允许您计算有多少元组被丢弃。