关于 skimage.transform.PolynomialTransform 的问题
Question about skimage.transform.PolynomialTransform
我有两幅图像(通道 1 和通道 2),我正在尝试计算将一幅图像变形为另一幅图像的多项式变换。首先,我创建了一个 ORB 对象,并计算了两幅图像之间的仿射变换(见链接中的 post-仿射 图)。然后我决定尝试使用 skimage.transform.PolynomialTransform。但是,当我尝试计算该变换时,返回的 NumPy 数组中全是 NaN 值或 0 值,即使原始图像在相应位置具有非零浮点值(见链接中的 post-多项式 图)。我究竟做错了什么?代码附在下面,图片见以下链接:https://drive.google.com/drive/folders/1mWxUvLFLK5-rYCrxs3-uGKFxKq2wXDjS?usp=sharing 提前致谢!
注意:我知道 Image warping with scikit-image and transform.PolynomialTransform 这个问题很相似,但我认为这两个问题并不重复。虽然那个用户的问题是相同的功能,但他们转换后的图像中的像素有值,而我的大体上没有。
import cv2
from ImageConversion import ImageConversion # self-written, irrelevant
import matplotlib
matplotlib.use('macosX')
import matplotlib.pyplot as plt
from scipy.ndimage import uniform_filter
from skimage.draw import circle_perimeter
from skimage.transform import PolynomialTransform, warp
def affine_transform(self):
    """Estimate an affine transform mapping channel 1 onto channel 2 and warp.

    ORB keypoints are detected in 8-bit copies of both channels, matched by
    Hamming distance, and the matched coordinate pairs are fed to
    cv2.estimateAffine2D. The original (float) channel 1 is then warped into
    channel 2's frame.

    Returns:
        tuple: (mat_coeff, dst) where mat_coeff is the 2x3 affine matrix from
        cv2.estimateAffine2D and dst is channel 1 warped by that matrix.
    """
    # Bug fix: the original snippet used np.zeros/np.float32 but never
    # imported numpy anywhere in the file; import it here (function scope)
    # so the function is self-contained.
    import numpy as np

    # ORB's detectAndCompute requires 8-bit input images.
    channel1_u8 = self.channel1.astype('uint8')
    channel2_u8 = self.channel2.astype('uint8')

    orb = cv2.ORB_create(100)
    kp1, des1 = orb.detectAndCompute(channel1_u8, None)
    kp2, des2 = orb.detectAndCompute(channel2_u8, None)

    # Hamming distance is the appropriate metric for ORB's binary descriptors.
    matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = sorted(matcher.match(des1, des2, None), key=lambda m: m.distance)

    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)
    for i, match in enumerate(matches):
        # queryIdx indexes kp1 (channel 1, the image we map onto channel 2);
        # trainIdx indexes kp2 (channel 2).
        points1[i, :] = kp1[match.queryIdx].pt
        points2[i, :] = kp2[match.trainIdx].pt

    # estimateAffine2D returns (matrix, inlier mask); only the matrix is used.
    mat_coeff, inliers = cv2.estimateAffine2D(points1, points2)
    print(mat_coeff)

    rows, cols = channel1_u8.shape
    # Warp the original float image (not the uint8 copy) to preserve precision.
    dst = cv2.warpAffine(self.channel1, mat_coeff, (cols, rows))
    return mat_coeff, dst
# NOTE(review): this is the bug the asker later identifies in the answer
# below — PolynomialTransform.estimate expects (N, 2) arrays of matched
# source/destination *coordinates* (e.g. the ORB keypoint arrays), not whole
# images. Passing the image arrays fits a meaningless polynomial, which is
# why warp() returns mostly NaN/zero pixels.
tform = PolynomialTransform()
tform.estimate(self.channel2, dst, order = 3)  # should receive keypoint coordinate arrays, not images
warped_1 = warp(dst, tform, mode = 'constant')
我发现了错误:我向 PolynomialTransform.estimate 传入的是整幅图像,而它需要的是图像中识别出的关键点坐标。
我有两幅图像(通道 1 和通道 2),我正在尝试计算将一幅图像变形为另一幅图像的多项式变换。首先,我创建了一个 ORB 对象,并计算了两幅图像之间的仿射变换(见链接中的 post-仿射 图)。然后我决定尝试使用 skimage.transform.PolynomialTransform。但是,当我尝试计算该变换时,返回的 NumPy 数组中全是 NaN 值或 0 值,即使原始图像在相应位置具有非零浮点值(见链接中的 post-多项式 图)。我究竟做错了什么?代码附在下面,图片见以下链接:https://drive.google.com/drive/folders/1mWxUvLFLK5-rYCrxs3-uGKFxKq2wXDjS?usp=sharing 提前致谢!
注意:我知道 Image warping with scikit-image and transform.PolynomialTransform 这个问题很相似,但我认为这两个问题并不重复。虽然那个用户的问题是相同的功能,但他们转换后的图像中的像素有值,而我的大体上没有。
import cv2
from ImageConversion import ImageConversion # self-written, irrelevant
import matplotlib
matplotlib.use('macosX')
import matplotlib.pyplot as plt
from scipy.ndimage import uniform_filter
from skimage.draw import circle_perimeter
from skimage.transform import PolynomialTransform, warp
def affine_transform(self):
    """Estimate an affine transform mapping channel 1 onto channel 2 and warp.

    ORB keypoints are detected in 8-bit copies of both channels, matched by
    Hamming distance, and the matched coordinate pairs are fed to
    cv2.estimateAffine2D. The original (float) channel 1 is then warped into
    channel 2's frame.

    Returns:
        tuple: (mat_coeff, dst) where mat_coeff is the 2x3 affine matrix from
        cv2.estimateAffine2D and dst is channel 1 warped by that matrix.
    """
    # Bug fix: the original snippet used np.zeros/np.float32 but never
    # imported numpy anywhere in the file; import it here (function scope)
    # so the function is self-contained.
    import numpy as np

    # ORB's detectAndCompute requires 8-bit input images.
    channel1_u8 = self.channel1.astype('uint8')
    channel2_u8 = self.channel2.astype('uint8')

    orb = cv2.ORB_create(100)
    kp1, des1 = orb.detectAndCompute(channel1_u8, None)
    kp2, des2 = orb.detectAndCompute(channel2_u8, None)

    # Hamming distance is the appropriate metric for ORB's binary descriptors.
    matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = sorted(matcher.match(des1, des2, None), key=lambda m: m.distance)

    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)
    for i, match in enumerate(matches):
        # queryIdx indexes kp1 (channel 1, the image we map onto channel 2);
        # trainIdx indexes kp2 (channel 2).
        points1[i, :] = kp1[match.queryIdx].pt
        points2[i, :] = kp2[match.trainIdx].pt

    # estimateAffine2D returns (matrix, inlier mask); only the matrix is used.
    mat_coeff, inliers = cv2.estimateAffine2D(points1, points2)
    print(mat_coeff)

    rows, cols = channel1_u8.shape
    # Warp the original float image (not the uint8 copy) to preserve precision.
    dst = cv2.warpAffine(self.channel1, mat_coeff, (cols, rows))
    return mat_coeff, dst
# NOTE(review): this is the bug the asker later identifies in the answer
# below — PolynomialTransform.estimate expects (N, 2) arrays of matched
# source/destination *coordinates* (e.g. the ORB keypoint arrays), not whole
# images. Passing the image arrays fits a meaningless polynomial, which is
# why warp() returns mostly NaN/zero pixels.
tform = PolynomialTransform()
tform.estimate(self.channel2, dst, order = 3)  # should receive keypoint coordinate arrays, not images
warped_1 = warp(dst, tform, mode = 'constant')
我发现了错误:我向 PolynomialTransform.estimate 传入的是整幅图像,而它需要的是图像中识别出的关键点坐标。