Convert image contours to polar coordinates

I am trying to classify the type of puzzle pieces (number of tabs/heads, whether it is an edge or a corner piece, ...) by analyzing their contours.

The approach I am trying to follow is to analyze this kind of plot (from this paper), which "unrolls" the Cartesian coordinates of the puzzle contour by converting them to polar coordinates; however, I have not been able to reproduce it.

I tried:

import cv2
import numpy as np
import matplotlib.pyplot as plt

def cart2pol(x, y):
    rho = np.sqrt(x**2 + y**2)
    phi = np.arctan2(y, x)
    return(rho, phi)

# load image and find contours
img = cv2.imread("example.png", cv2.IMREAD_GRAYSCALE)
contours, _ = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

# get contour points in polar coordinates
rhos = []
for i in range(len(contours[0])):
    x, y = contours[0][i][0]
    rho, _ = cart2pol(x, y)
    rhos.append(rho)

plt.plot(rhos)
plt.show()

But this produces a different plot, shown below:

From this image:

Trying this on other images, I can see how the peaks and valleys correspond to the tabs and holes of the piece, but I would like a plot like the one above (so, from what I can see, this is not the right function). Could you help me obtain it?

Find the center of the piece:

M = cv2.moments(contours[0])
cx = int(M["m10"] / M["m00"])
cy = int(M["m01"] / M["m00"])

Compute the vectors from the center to the points on the contour, and convert those vectors to polar coordinates:

ds, phis = [], []
for i in range(len(contours[0])):
    x, y = contours[0][i][0]
    d, phi = cart2pol(x-cx, y-cy)
    ds.append(d)
    phis.append(phi)

Plot the angle on the x-axis and the distance on the y-axis:

plt.plot(phis, ds)

Full example:

import os
os.chdir(os.path.abspath(os.path.dirname(__file__)))
import cv2
import matplotlib.pyplot as plt
import numpy as np

def cart2pol(x, y):
    rho = np.sqrt(x**2 + y**2)
    phi = np.arctan2(y, x)
    return (rho, phi)

img = cv2.imread('opencv_so_9_example.png', cv2.IMREAD_GRAYSCALE)
contours, _ = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

# center of the piece
M = cv2.moments(contours[0])
cx = int(M["m10"] / M["m00"])
cy = int(M["m01"] / M["m00"])

# polar coordinates of every contour point relative to the center
polar = [cart2pol(c[0][0] - cx, c[0][1] - cy) for c in contours[0][:]]

# rotate the list so it starts at the point with the largest angle,
# which keeps the curve from jumping at the -pi/+pi wrap-around
max_i = polar.index(max(polar, key=lambda x: x[1]))
polar = polar[max_i:] + polar[:max_i]
ds, phis = zip(*polar)

plt.gcf().set_size_inches(6, 3)     
plt.plot(phis, ds)
plt.show()
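
If the end goal is to count tabs and holes from this signature, one possible follow-up (not part of the example above, and assuming SciPy is available) is to run a peak detector over the radial distances. The prominence threshold below is only a guess and will likely need tuning per image:

import numpy as np
from scipy.signal import find_peaks

ds_arr = np.asarray(ds)
mean_d = ds_arr.mean()

# tabs ("heads") appear as peaks well above the mean radius,
# holes as peaks of the inverted signal; 0.2 * mean_d is only a guess
tabs, _ = find_peaks(ds_arr, prominence=0.2 * mean_d)
holes, _ = find_peaks(-ds_arr, prominence=0.2 * mean_d)

# note: the signature wraps around, so a peak that straddles the
# first/last sample could be missed without extra handling
print(f"tabs: {len(tabs)}, holes: {len(holes)}")

Detecting flat sides (for edge or corner pieces) would need a separate check, for example long stretches where the radius stays roughly constant.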

It is also possible to do this with a binary image and warpPolar in Python/OpenCV.

Input:

import cv2
import numpy as np
import math

# read image
img = cv2.imread('puzzle.png')
ht, wd = img.shape[:2]

# convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# threshold
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]

# get non-zero points (i.e. white)
points = np.column_stack(np.where(thresh.transpose() > 0))

# get centroid
M = cv2.moments(points)
cx = int(M["m10"] / M["m00"])
cy = int(M["m01"] / M["m00"]) 

# compute max radius as the distance from the centroid to the bottom-right corner
maxrad = math.sqrt( (wd-cx)*(wd-cx) + (ht-cy)*(ht-cy) )

# convert to polar image relative to centroid
polar1 = cv2.warpPolar(thresh, (ht,wd), (cx,cy), maxrad, cv2.INTER_CUBIC+cv2.WARP_POLAR_LINEAR+cv2.WARP_FILL_OUTLIERS)

# rotate 90 degrees counterclockwise (i.e. 270 clockwise)
polar2 = cv2.rotate(polar1, cv2.ROTATE_90_COUNTERCLOCKWISE)
ht, wd = polar2.shape[:2]

# save image
cv2.imwrite('puzzle_polar1.png',polar1)
cv2.imwrite('puzzle_polar2.png',polar2)

# show the images
cv2.imshow("polar1", polar1)
cv2.imshow("polar2", polar2)
cv2.waitKey(0)
cv2.destroyAllWindows()
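
If a 1-D radius-versus-angle signature (like the plot in the question) is still wanted, one way to get it from the polar image (a sketch, not part of the answer above) is to take, for each row of polar1, the column index of the outermost white pixel, since rows correspond to angles and columns to radius steps:

import numpy as np
import matplotlib.pyplot as plt

mask = polar1 > 0
width = mask.shape[1]

# for each row (angle), find the outermost (right-most) white pixel;
# reversing the columns lets argmax return the last True index
radii = width - 1 - np.argmax(mask[:, ::-1], axis=1)
radii[~mask.any(axis=1)] = 0   # rows with no white pixels at all

plt.plot(radii)
plt.xlabel("angle bin")
plt.ylabel("radius (pixels in the polar image)")
plt.show()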