Fine Tuning Hough Line function parameters OpenCV
I have been trying to draw 4 lines around the square so that I can obtain its vertices. I am going with this approach rather than finding the corners directly with Harris or a contour method, for accuracy reasons. Using the built-in houghlines function in OpenCV I am unable to get full-length lines to take intersection points from, and I also get too many irrelevant lines. I would like to know whether the parameters can be fine tuned to get what I need, and if so, how to go about it. My question is exactly the same as this one here. However, I am not getting those lines at all, even after changing those parameters. I have attached the original image along with the code and the output:
Original image:
Code:
#include <Windows.h>
#include "opencv2\highgui.hpp"
#include "opencv2\imgproc.hpp"
#include "opencv2/imgcodecs/imgcodecs.hpp"
#include "opencv2/videoio/videoio.hpp"

using namespace cv;
using namespace std;

int main(int argc, const char** argv)
{
    Mat image, src;
    image = imread("c:/pics/output2_1.bmp");
    src = image.clone();
    cvtColor(image, image, CV_BGR2GRAY);
    threshold(image, image, 0, 255, CV_THRESH_OTSU + CV_THRESH_BINARY_INV);

    namedWindow("thresh", WINDOW_NORMAL);
    resizeWindow("thresh", 600, 400);
    imshow("thresh", image);

    cv::Mat edges;
    cv::Canny(image, edges, 0, 255);

    vector<Vec2f> lines;
    HoughLines(edges, lines, 1, CV_PI / 180, 100, 0, 0);
    for (size_t i = 0; i < lines.size(); i++)
    {
        float rho = lines[i][0], theta = lines[i][1];
        Point pt1, pt2;
        double a = cos(theta), b = sin(theta);
        double x0 = a*rho, y0 = b*rho;
        pt1.x = cvRound(x0 + 1000 * (-b));
        pt1.y = cvRound(y0 + 1000 * (a));
        pt2.x = cvRound(x0 - 1000 * (-b));
        pt2.y = cvRound(y0 - 1000 * (a));
        line(src, pt1, pt2, Scalar(0, 0, 255), 3, CV_AA);
    }

    namedWindow("Edges Structure", WINDOW_NORMAL);
    resizeWindow("Edges Structure", 600, 400);
    imshow("Edges Structure", src);
    waitKey(0);
    return 0;
}
Output image:
Update: there is a frame around this image, so I was able to reduce the irrelevant lines along the image border by removing that frame, but I still do not get complete lines covering the square.
There are many ways to do this; I will just give one example. I am quickest in python, though, so my code example will be in that language. It should not be hard to translate, however (please feel free to edit your post with your C++ solution once you have done it, for the benefit of others).
For preprocessing, I highly suggest dilate()-ing your edge image. This makes the lines thicker, which helps the Hough lines fit better. What the Hough lines function does, in the abstract, is basically lay a grid of lines over a large range of angles and distances, and if one of those lines crosses any white pixels from Canny, it gets a vote for every point it crosses. However, the lines from Canny will not be perfectly straight, so you end up with several different lines voting. Making those Canny lines thicker means every line that is really close to a good fit has a better chance of scoring well.
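For instance, a minimal sketch of that preprocessing (same parameters as in the full listing further down; 'image.png' stands in for whatever your input file is):

import cv2
import numpy as np

img = cv2.imread('image.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150)                             # thin, one-pixel-wide Canny edges
dilated = cv2.dilate(edges, np.ones((3, 3), dtype=np.uint8)) # thicken them so near-fit lines still collect votes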
If you use HoughLinesP instead, your output will be line segments, where all you have is two points on each line.
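For example, continuing from the dilated edge image above (the parameter values are the same ones used in the full listing below, and are just a starting point to tune):

lines = cv2.HoughLinesP(dilated, rho=1, theta=np.pi/180, threshold=100, minLineLength=50, maxLineGap=20)
for line in lines:
    x1, y1, x2, y2 = line[0]  # each detected segment is just its two endpoints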
Since the lines are mostly vertical and horizontal, you can easily split them based on their position. If the two y-coordinates of a line are near each other, the line is mostly horizontal. If the two x-coordinates are near each other, the line is mostly vertical. So you can segment your lines into vertical and horizontal lines that way.
def segment_lines(lines, delta):
    h_lines = []
    v_lines = []
    for line in lines:
        for x1, y1, x2, y2 in line:
            if abs(x2-x1) < delta: # x-values are near; line is vertical
                v_lines.append(line)
            elif abs(y2-y1) < delta: # y-values are near; line is horizontal
                h_lines.append(line)
    return h_lines, v_lines
Then, you can get the intersection point of two line segments from their endpoints using determinants.
def find_intersection(line1, line2):
    # extract points
    x1, y1, x2, y2 = line1[0]
    x3, y3, x4, y4 = line2[0]
    # compute determinant
    Px = ((x1*y2 - y1*x2)*(x3-x4) - (x1-x2)*(x3*y4 - y3*x4))/ \
         ((x1-x2)*(y3-y4) - (y1-y2)*(x3-x4))
    Py = ((x1*y2 - y1*x2)*(y3-y4) - (y1-y2)*(x3*y4 - y3*x4))/ \
         ((x1-x2)*(y3-y4) - (y1-y2)*(x3-x4))
    return Px, Py
So now, if you loop over all your lines, you will have the intersection points of all the horizontal and vertical lines. But since you have many lines, you will also have many intersection points at each corner of the box.
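In code that is just a nested loop over the two groups, collecting every horizontal/vertical crossing (this is the same loop that appears in the full listing below):

Px = []
Py = []
for h_line in h_lines:
    for v_line in v_lines:
        px, py = find_intersection(h_line, v_line)
        Px.append(px)
        Py.append(py)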
However, these are all in one vector, so you need to not only average the points at each corner but actually group them together first. You can achieve this with k-means clustering, which is implemented in OpenCV as kmeans().
def cluster_points(points, nclusters):
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    _, _, centers = cv2.kmeans(points, nclusters, None, criteria, 10, cv2.KMEANS_PP_CENTERS)
    return centers
Finally, we can simply plot those centers onto the image with circle() (making sure we round first, since everything so far has been a float) to check that we got it right.
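For example, a small sketch (here `centers` is the float32 array returned by cluster_points() and `img` is the BGR image we have been drawing on):

for cx, cy in centers:
    cx = np.round(cx).astype(int)  # kmeans centers are floats; circle() wants integer pixel coordinates
    cy = np.round(cy).astype(int)
    cv2.circle(img, (cx, cy), radius=4, color=[0, 0, 255], thickness=-1)  # filled red dot at each corner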
And there we have it: four points, at the corners of the box.
Here is my full code in python, including the code that generates the figures above:
import cv2
import numpy as np

def find_intersection(line1, line2):
    # extract points
    x1, y1, x2, y2 = line1[0]
    x3, y3, x4, y4 = line2[0]
    # compute determinant
    Px = ((x1*y2 - y1*x2)*(x3-x4) - (x1-x2)*(x3*y4 - y3*x4))/ \
         ((x1-x2)*(y3-y4) - (y1-y2)*(x3-x4))
    Py = ((x1*y2 - y1*x2)*(y3-y4) - (y1-y2)*(x3*y4 - y3*x4))/ \
         ((x1-x2)*(y3-y4) - (y1-y2)*(x3-x4))
    return Px, Py

def segment_lines(lines, delta):
    h_lines = []
    v_lines = []
    for line in lines:
        for x1, y1, x2, y2 in line:
            if abs(x2-x1) < delta: # x-values are near; line is vertical
                v_lines.append(line)
            elif abs(y2-y1) < delta: # y-values are near; line is horizontal
                h_lines.append(line)
    return h_lines, v_lines

def cluster_points(points, nclusters):
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    _, _, centers = cv2.kmeans(points, nclusters, None, criteria, 10, cv2.KMEANS_PP_CENTERS)
    return centers

img = cv2.imread('image.png')

# preprocessing
img = cv2.resize(img, None, fx=.5, fy=.5)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150)
dilated = cv2.dilate(edges, np.ones((3,3), dtype=np.uint8))

cv2.imshow("Dilated", dilated)
cv2.waitKey(0)
cv2.imwrite('dilated.png', dilated)

# run the Hough transform
lines = cv2.HoughLinesP(dilated, rho=1, theta=np.pi/180, threshold=100, maxLineGap=20, minLineLength=50)

# segment the lines
delta = 10
h_lines, v_lines = segment_lines(lines, delta)

# draw the segmented lines
houghimg = img.copy()
for line in h_lines:
    for x1, y1, x2, y2 in line:
        color = [0,0,255] # color hoz lines red
        cv2.line(houghimg, (x1, y1), (x2, y2), color=color, thickness=1)
for line in v_lines:
    for x1, y1, x2, y2 in line:
        color = [255,0,0] # color vert lines blue
        cv2.line(houghimg, (x1, y1), (x2, y2), color=color, thickness=1)

cv2.imshow("Segmented Hough Lines", houghimg)
cv2.waitKey(0)
cv2.imwrite('hough.png', houghimg)

# find the line intersection points
Px = []
Py = []
for h_line in h_lines:
    for v_line in v_lines:
        px, py = find_intersection(h_line, v_line)
        Px.append(px)
        Py.append(py)

# draw the intersection points
intersectsimg = img.copy()
for cx, cy in zip(Px, Py):
    cx = np.round(cx).astype(int)
    cy = np.round(cy).astype(int)
    color = np.random.randint(0,255,3).tolist() # random colors
    cv2.circle(intersectsimg, (cx, cy), radius=2, color=color, thickness=-1) # -1: filled circle

cv2.imshow("Intersections", intersectsimg)
cv2.waitKey(0)
cv2.imwrite('intersections.png', intersectsimg)

# use clustering to find the centers of the data clusters
P = np.float32(np.column_stack((Px, Py)))
nclusters = 4
centers = cluster_points(P, nclusters)
print(centers)

# draw the center of the clusters
for cx, cy in centers:
    cx = np.round(cx).astype(int)
    cy = np.round(cy).astype(int)
    cv2.circle(img, (cx, cy), radius=4, color=[0,0,255], thickness=-1) # -1: filled circle

cv2.imshow("Center of intersection clusters", img)
cv2.waitKey(0)
cv2.imwrite('corners.png', img)
Finally, just one question... why not use the Harris corner detector, implemented in OpenCV as cornerHarris()? Because it works really well with very little code. I thresholded the grayscale image, then blurred it a little to remove spurious corners, and then...
This was generated with the following code:
import cv2
import numpy as np
img = cv2.imread('image.png')
# preprocessing
img = cv2.resize(img, None, fx=.5, fy=.5)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
r, gray = cv2.threshold(gray, 120, 255, type=cv2.THRESH_BINARY)
gray = cv2.GaussianBlur(gray, (3,3), 3)
# run harris
gray = np.float32(gray)
dst = cv2.cornerHarris(gray,2,3,0.04)
# dilate the corner points for marking
dst = cv2.dilate(dst,None)
dst = cv2.dilate(dst,None)
# threshold
img[dst>0.01*dst.max()]=[0,0,255]
cv2.imshow('dst',img)
cv2.waitKey(0)
cv2.imwrite('harris.png', img)
I think that with some minor adjustments, the Harris corner detector can probably be much more accurate than extrapolated Hough line intersections.
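One such adjustment, just as a sketch of an idea I have not verified on this image: let goodFeaturesToTrack() keep the four strongest Harris responses and then polish them to sub-pixel accuracy with cornerSubPix():

# sketch only: assumes `gray` is the float32 grayscale image from the Harris snippet above
corners = cv2.goodFeaturesToTrack(gray, maxCorners=4, qualityLevel=0.01, minDistance=20,
                                  useHarrisDetector=True, k=0.04)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
corners = cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1), criteria)
print(corners.reshape(-1, 2))  # four refined (x, y) corner estimates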