如何在图像上显示 opencv projectedPoints
How to show opencv projectedPoints on an image
以下代码的目标是将立方体的角顶点和中心点投影到相机图像平面上。但是,创建的图像包含背景屏幕的副本,并且不显示 9 个立方点,或者至少我看不到这 9 个点中的任何一个。如果这段代码有效,我想了解的是旋转和平移向量参数对立方体视图的影响。我想做的是在观察立方体的同时围绕立方体旋转相机。P.S. 任何关于将 3D 点投影到 2D 相机图像平面的数学信息也会有所帮助。P.P.S. 为了使图像更清晰,给顶点编号也很有帮助。
// baseline: https://www.programmersought.com/article/6279272931/
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/viz/types.hpp"
#include <iostream>
#include <string>
#include <math.h>
using namespace std;
vector<cv::Point3f> Generate3DPoints();
int main(int argc, char* argv[])
{
// Generate 3D points
vector<cv::Point3f> objectPoints = Generate3DPoints();
vector<cv::Point2f> imagePoints;
// Camera settings
cv::Mat intrisicMat(3, 3, cv::DataType<float>::type); // Intrisic matrix
intrisicMat.at<float>(0, 0) = 1.6415318549788924e+003;
intrisicMat.at<float>(1, 0) = 0;
intrisicMat.at<float>(2, 0) = 0;
intrisicMat.at<float>(0, 1) = 0;
intrisicMat.at<float>(1, 1) = 1.7067753507885654e+003;
intrisicMat.at<float>(2, 1) = 0;
intrisicMat.at<float>(0, 2) = 5.3262822453148601e+002;
intrisicMat.at<float>(1, 2) = 3.8095355839052968e+002;
intrisicMat.at<float>(2, 2) = 1;
// Rotation vector
cv::Mat rVec(3, 1, cv::DataType<float>::type);
rVec.at<float>(0) = -3.9277902400761393e-002;
rVec.at<float>(1) = 3.7803824407602084e-002;
rVec.at<float>(2) = 2.6445674487856268e-002;
// Translation vector
cv::Mat tVec(3, 1, cv::DataType<float>::type);
tVec.at<float>(0) = 2.1158489381208221e+000;
tVec.at<float>(1) = -7.6847683212704716e+000;
tVec.at<float>(2) = 2.6169795190294256e+001;
// Distortion vector
cv::Mat distCoeffs(5, 1, cv::DataType<float>::type);
distCoeffs.at<float>(0) = -7.9134632415085826e-001;
distCoeffs.at<float>(1) = 1.5623584435644169e+000;
distCoeffs.at<float>(2) = -3.3916502741726508e-002;
distCoeffs.at<float>(3) = -1.3921577146136694e-002;
distCoeffs.at<float>(4) = 1.1430734623697941e-002;
cout << "Intrisic matrix: " << intrisicMat << endl << endl;
cout << "Rotation vector: " << rVec << endl << endl;
cout << "Translation vector: " << tVec << endl << endl;
cout << "Distortion coef: " << distCoeffs << endl << endl;
// Generate the points as viewed from the camera
std::vector<cv::Point2f> projectedPoints;
cv::projectPoints(objectPoints, rVec, tVec, intrisicMat, distCoeffs, projectedPoints);
// Display the points in an image
cv::Mat image(480, 640, CV_8UC3);
const uint black_r(0), black_g(0), black_b(0);
const uint silver_r(192), silver_g(192), silver_b(192);
// image = cv::Scalar(redVal,greenVal,blueVal);
image = cv::Scalar(black_b, black_g, black_r);
// cv::viz::COLOR blk(cv::viz::Color::black());
cv::Vec3b color(silver_b, silver_g, silver_r);
for (unsigned int i = 0; i < projectedPoints.size(); ++i)
{
cout << "Project point " << objectPoints[i] << " to " << projectedPoints[i];
cv::Point2f pt = projectedPoints[i];
if (0<= (pt.x) && (pt.x) <= image.cols && 0<= (-pt.y) && (-pt.y) <= image.rows )
{
unsigned int ix(std::floor(pt.x)), iy(std::ceil(-pt.y));
cout << ", and set image.at(" << ix << ", " << iy << ") = " << color;
image.at<cv::Vec3b>(ix, iy) = color;
}
cout << endl;
}
cout << "calling imshow" << endl;
cv::namedWindow("Projection", cv::WINDOW_AUTOSIZE);
cv::imshow("Projection", image);
cout << "return from imshow" << endl;
cout << "Press any key to exit.";
cin.ignore();
cin.get();
return 0;
}
vector<cv::Point3f> Generate3DPoints()
{
// Build the 9 sample points: the 8 corners of a unit cube centred on
// the origin (every coordinate is +/-0.5), followed by the centre.
vector<cv::Point3f> points;
points.reserve(9);
const float half[2] = { .5f, -.5f };
// Emit the +0.5 z face first, then the -0.5 z face; within each face
// the corners go (+x,+y), (+x,-y), (-x,+y), (-x,-y) -- the same order
// as the original unrolled push_back sequence.
for (float z : half)
    for (float x : half)
        for (float y : half)
            points.push_back(cv::Point3f(x, y, z));
// Cube mid point.
points.push_back(cv::Point3f(0.f, 0.f, 0.f));
return points;
}
您的代码有几个问题:
1。图像尺寸与相机内在参数不匹配
从intrisicMat
可以看出主点（principal point）是 (532.63, 380.95)
。所以图片大小应该是1000*800
左右,但是你用的图片只有640*480
。事实上,9个点中有5个投影到图像区域之外(x>640)。
2。绘图位置错误
cv::Mat::at()
的参数是(row, col)
,所以
image.at<cv::Vec3b>(ix, iy)
应该改为 image.at<cv::Vec3b>(iy, ix)
3。 cv::waitKey()
一定要在cv::imshow()
之后调用cv::waitKey(0);
,否则看不到图像
解决这些问题后，您将看到投影出来的 9 个点。
以下代码的目标是将立方体的角顶点和中心点投影到相机图像平面上。但是,创建的图像包含背景屏幕的副本,并且不显示 9 个立方点,或者至少我看不到这 9 个点中的任何一个。如果这段代码有效,我想了解的是旋转和平移向量参数对立方体视图的影响。我想做的是在观察立方体的同时围绕立方体旋转相机。P.S. 任何关于将 3D 点投影到 2D 相机图像平面的数学信息也会有所帮助。P.P.S. 为了使图像更清晰,给顶点编号也很有帮助。
// baseline: https://www.programmersought.com/article/6279272931/
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/viz/types.hpp"
#include <iostream>
#include <string>
#include <math.h>
using namespace std;
vector<cv::Point3f> Generate3DPoints();
int main(int argc, char* argv[])
{
// Generate 3D points
vector<cv::Point3f> objectPoints = Generate3DPoints();
vector<cv::Point2f> imagePoints;
// Camera settings
cv::Mat intrisicMat(3, 3, cv::DataType<float>::type); // Intrisic matrix
intrisicMat.at<float>(0, 0) = 1.6415318549788924e+003;
intrisicMat.at<float>(1, 0) = 0;
intrisicMat.at<float>(2, 0) = 0;
intrisicMat.at<float>(0, 1) = 0;
intrisicMat.at<float>(1, 1) = 1.7067753507885654e+003;
intrisicMat.at<float>(2, 1) = 0;
intrisicMat.at<float>(0, 2) = 5.3262822453148601e+002;
intrisicMat.at<float>(1, 2) = 3.8095355839052968e+002;
intrisicMat.at<float>(2, 2) = 1;
// Rotation vector
cv::Mat rVec(3, 1, cv::DataType<float>::type);
rVec.at<float>(0) = -3.9277902400761393e-002;
rVec.at<float>(1) = 3.7803824407602084e-002;
rVec.at<float>(2) = 2.6445674487856268e-002;
// Translation vector
cv::Mat tVec(3, 1, cv::DataType<float>::type);
tVec.at<float>(0) = 2.1158489381208221e+000;
tVec.at<float>(1) = -7.6847683212704716e+000;
tVec.at<float>(2) = 2.6169795190294256e+001;
// Distortion vector
cv::Mat distCoeffs(5, 1, cv::DataType<float>::type);
distCoeffs.at<float>(0) = -7.9134632415085826e-001;
distCoeffs.at<float>(1) = 1.5623584435644169e+000;
distCoeffs.at<float>(2) = -3.3916502741726508e-002;
distCoeffs.at<float>(3) = -1.3921577146136694e-002;
distCoeffs.at<float>(4) = 1.1430734623697941e-002;
cout << "Intrisic matrix: " << intrisicMat << endl << endl;
cout << "Rotation vector: " << rVec << endl << endl;
cout << "Translation vector: " << tVec << endl << endl;
cout << "Distortion coef: " << distCoeffs << endl << endl;
// Generate the points as viewed from the camera
std::vector<cv::Point2f> projectedPoints;
cv::projectPoints(objectPoints, rVec, tVec, intrisicMat, distCoeffs, projectedPoints);
// Display the points in an image
cv::Mat image(480, 640, CV_8UC3);
const uint black_r(0), black_g(0), black_b(0);
const uint silver_r(192), silver_g(192), silver_b(192);
// image = cv::Scalar(redVal,greenVal,blueVal);
image = cv::Scalar(black_b, black_g, black_r);
// cv::viz::COLOR blk(cv::viz::Color::black());
cv::Vec3b color(silver_b, silver_g, silver_r);
for (unsigned int i = 0; i < projectedPoints.size(); ++i)
{
cout << "Project point " << objectPoints[i] << " to " << projectedPoints[i];
cv::Point2f pt = projectedPoints[i];
if (0<= (pt.x) && (pt.x) <= image.cols && 0<= (-pt.y) && (-pt.y) <= image.rows )
{
unsigned int ix(std::floor(pt.x)), iy(std::ceil(-pt.y));
cout << ", and set image.at(" << ix << ", " << iy << ") = " << color;
image.at<cv::Vec3b>(ix, iy) = color;
}
cout << endl;
}
cout << "calling imshow" << endl;
cv::namedWindow("Projection", cv::WINDOW_AUTOSIZE);
cv::imshow("Projection", image);
cout << "return from imshow" << endl;
cout << "Press any key to exit.";
cin.ignore();
cin.get();
return 0;
}
vector<cv::Point3f> Generate3DPoints()
{
// Build the 9 sample points: the 8 corners of a unit cube centred on
// the origin (every coordinate is +/-0.5), followed by the centre.
vector<cv::Point3f> points;
points.reserve(9);
const float half[2] = { .5f, -.5f };
// Emit the +0.5 z face first, then the -0.5 z face; within each face
// the corners go (+x,+y), (+x,-y), (-x,+y), (-x,-y) -- the same order
// as the original unrolled push_back sequence.
for (float z : half)
    for (float x : half)
        for (float y : half)
            points.push_back(cv::Point3f(x, y, z));
// Cube mid point.
points.push_back(cv::Point3f(0.f, 0.f, 0.f));
return points;
}
您的代码有几个问题:
1。图像尺寸与相机内在参数不匹配
从intrisicMat
可以看出主点（principal point）是 (532.63, 380.95)
。所以图片大小应该是1000*800
左右,但是你用的图片只有640*480
。事实上,9个点中有5个投影到图像区域之外(x>640)。
2。绘图位置错误
cv::Mat::at()
的参数是(row, col)
,所以
image.at<cv::Vec3b>(ix, iy)
应该改为 image.at<cv::Vec3b>(iy, ix)
3。 cv::waitKey()
一定要在cv::imshow()
之后调用cv::waitKey(0);
,否则看不到图像
解决这些问题后，您将看到投影出来的 9 个点。