OpenCV findHomography and WarpPerspective not producing good results
I am trying to use findHomography to find the warping matrix between two cameras and then stitch the images together with warpPerspective. However, the warped image is stretched way too far and flips over to the other side of the screen. Below is some simplified code that shows the strange behaviour:
// manually picked correspondences: "scene" are points in the reference image,
// "obj" are the same features in the image to be warped
vector<Point2f> obj, scene, objCorners, TransformedObjCorners;
scene.push_back(Point2f(324,21));
scene.push_back(Point2f(388,4));
scene.push_back(Point2f(392,110));
scene.push_back(Point2f(322,111));
obj.push_back(Point2f(21,18));
obj.push_back(Point2f(79,45));
obj.push_back(Point2f(76,128));
obj.push_back(Point2f(13,118));
// corners of the 400x300 object image, to check where the warp sends them
objCorners.push_back(Point2f(0,0));
objCorners.push_back(Point2f(400,0));
objCorners.push_back(Point2f(400,300));
objCorners.push_back(Point2f(0,300));
// homography that maps obj points onto scene points
cv::Mat H = findHomography(obj, scene);
// see where the corners of the object image end up after warping
perspectiveTransform(objCorners, TransformedObjCorners, H);
cout << "Transformed object corners are :" << endl;
cout << TransformedObjCorners << endl;
My output is:
Transformed object corners are :
[309.14066, 18.626106;
-2.5252595, 298.53754;
31.930698, 9.6980038;
319.43829, 279.87805]
The transformed coordinates, drawn as the black box:
You can see here that it warps abnormally because of the negative coordinates:
I have spent a few hours trying to track down the problem. Any help/pointers in the right direction would be much appreciated.
Thanks
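For reference, a common way to keep a warped image from landing at negative coordinates is to compose H with a translation before warping. A minimal sketch, not from the original post: it reuses H and TransformedObjCorners from the snippet above, while objImage and outSize are hypothetical placeholders for the image to warp and the output canvas size (the usual OpenCV and <algorithm> includes are assumed).

// shift the warp so that every transformed corner lands at non-negative coordinates
double minX = 0.0, minY = 0.0;
for (const cv::Point2f &p : TransformedObjCorners)
{
    minX = std::min(minX, (double)p.x);
    minY = std::min(minY, (double)p.y);
}
cv::Mat T = (cv::Mat_<double>(3,3) << 1, 0, -minX,
                                      0, 1, -minY,
                                      0, 0, 1);
cv::Mat warped;
// warp with the translated homography; enlarge outSize (placeholder) accordingly
cv::warpPerspective(objImage, warped, T * H, outSize);

Since findHomography returns a CV_64F matrix, the composed matrix T * H stays in double precision and can be passed straight to warpPerspective.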
How do I stitch the left image in as well?
If I stitch three images together, what is the best approach? For now I am trying the left and middle images; my result is below, but it is quite poor:
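For the three-image case, one common layout is to treat the middle image as the reference frame, estimate one homography per neighbour, and warp everything onto a shared canvas before blending. A minimal sketch with entirely hypothetical names (leftPts/midPtsLeft/rightPts/midPtsRight, leftImg/midImg/rightImg, offsetX/offsetY and canvasSize are placeholders, not code from this post):

// homographies that map each outer image into the middle image's frame
cv::Mat H_left_to_mid  = cv::findHomography(leftPts,  midPtsLeft);
cv::Mat H_right_to_mid = cv::findHomography(rightPts, midPtsRight);

// translation so the warped left image is not pushed off the canvas at negative x/y
cv::Mat T = (cv::Mat_<double>(3,3) << 1, 0, offsetX,
                                      0, 1, offsetY,
                                      0, 0, 1);

cv::Mat canvasLeft, canvasMid, canvasRight;
cv::warpPerspective(leftImg,  canvasLeft,  T * H_left_to_mid,  canvasSize);
cv::warpPerspective(rightImg, canvasRight, T * H_right_to_mid, canvasSize);
cv::warpPerspective(midImg,   canvasMid,   T,                  canvasSize);
// then blend the three canvases, e.g. with the primitive blending from the answer below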
I took your code, adjusted the point positions (since your screenshots include a title-bar) and warped one of the images.
These are the input images with the chosen points marked:
Here is the code:
#include <opencv2/opencv.hpp>

int main()
{
cv::Mat input1 = cv::imread("../inputData/panoA.png");
cv::Mat input2 = cv::imread("../inputData/panoB.png");
cv::Mat result;
std::vector<cv::Point2f> obj, scene, objCorners, transformedObjCorners;
std::vector<cv::Point2f> transObj, transScene;
// had to adjust your coordinates since you provided images with title-bar
scene.push_back(cv::Point2f(313,47));
scene.push_back(cv::Point2f(379,21));
scene.push_back(cv::Point2f(385,131));
scene.push_back(cv::Point2f(317,136));
obj.push_back(cv::Point2f(9,41));
obj.push_back(cv::Point2f(70,61));
obj.push_back(cv::Point2f(69,149));
obj.push_back(cv::Point2f(7,145));
objCorners.push_back(cv::Point2f(0,0));
objCorners.push_back(cv::Point2f(input2.cols,0));
objCorners.push_back(cv::Point2f(input2.cols,input2.rows));
objCorners.push_back(cv::Point2f(0,input2.rows));
cv::Mat H = findHomography(obj, scene);
for(unsigned int i=0; i<scene.size(); ++i)
{
cv::circle(input1, scene[i], 5, cv::Scalar(0,255,0));
}
for(unsigned int i=0; i<obj.size(); ++i)
{
cv::circle(input2, obj[i], 5, cv::Scalar(0,255,0));
}
cv::Mat result1;
// warp input2 into input1's coordinate frame; the canvas is twice as wide so the
// warped image is not cropped on the right
cv::warpPerspective(input2, result1, H, cv::Size(input1.cols*2, input1.rows));
cv::Mat result2 = cv::Mat(result1.size(), CV_8UC3, cv::Scalar(0,0,0));
input1.copyTo(result2(cv::Rect(0,0,input1.cols, input1.rows)));
result = result1.clone();
// primitive blending, non-optimized
for(int j=0; j<result1.rows; ++j)
for(int i=0; i<result1.cols; ++i)
{
cv::Vec3b c1(0,0,0);
cv::Vec3b c2(0,0,0);
if(j < result1.rows && i<result1.cols) c1 = result1.at<cv::Vec3b>(j,i);
if(j < result2.rows && i<result2.cols) c2 = result2.at<cv::Vec3b>(j,i);
bool c1_0 = false;
bool c2_0 = false;
if(c1 == cv::Vec3b(0,0,0)) c1_0 = true;
if(c2 == cv::Vec3b(0,0,0)) c2_0 = true;
cv::Vec3b color(0,0,0);
if(!c1_0 && !c2_0)
{
// both nonzero: use mean value:
color = 0.5*(c1+c2);
}
if(c1_0)
{
// c1 zero => use c2
color = c2;
}
if(c2_0)
{
// c2 zero => use c1
color = c1;
}
result.at<cv::Vec3b>(j,i) = color;
}
cv::imshow("input1", input1);
cv::imshow("input2", input2);
cv::imshow("result", result);
cv::imwrite("../outputData/panoResult1.png", input1);
cv::imwrite("../outputData/panoResult2.png", input2);
cv::imwrite("../outputData/panoResult.png", result);
cv::waitKey(0);
return 0;
}
This is the result of the primitive blending:
The distortion comes from mapping the 3D world onto a 2D image plane, plus lens distortion. In addition, your camera motion probably does not give you a perfect homography relation between the two images (that only holds for planar scenes or for a pure camera rotation around the camera centre).
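For completeness, the relation behind that last remark: for a plane with unit normal n at distance d from the first camera, and a second view related by rotation R and translation t (with shared intrinsics K), the induced homography is H = K (R + t nᵀ / d) K⁻¹. For a pure rotation about the camera centre (t = 0) this reduces to H = K R K⁻¹ and holds for the entire scene, which is why a single homography only stitches well in those two situations.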