OpenCV: project points manually
I am trying to reproduce the behavior of OpenCV's projectPoints() method.
In the two images below, the red/green/blue axes are obtained with OpenCV's method, while the magenta/yellow/cyan axes are obtained with my own method:
[Image 1]
[Image 2]
With my method, the axes appear to have the correct orientation, but their translation is wrong.
Here is my code:
void drawVector(float x, float y, float z, float r, float g, float b, cv::Mat &pose, cv::Mat &cameraMatrix, cv::Mat &dst) {
    //Origin = (0, 0, 0, 1)
    cv::Mat origin(4, 1, CV_64FC1, double(0));
    origin.at<double>(3, 0) = 1;
    //End = (x, y, z, 1)
    cv::Mat end(4, 1, CV_64FC1, double(1));
    end.at<double>(0, 0) = x; end.at<double>(1, 0) = y; end.at<double>(2, 0) = z;
    //multiplies the camera matrix by the 3x4 [R | t] part of the pose
    cv::Mat mat = cameraMatrix * pose.colRange(0, 4).rowRange(0, 3);
    //projects the points
    origin = mat * origin;
    end = mat * end;
    //draws the corresponding line
    cv::line(dst, cv::Point(origin.at<double>(0, 0), origin.at<double>(1, 0)),
             cv::Point(end.at<double>(0, 0), end.at<double>(1, 0)),
             CV_RGB(255 * r, 255 * g, 255 * b));
}
void drawVector_withProjectPointsMethod(float x, float y, float z, float r, float g, float b, cv::Mat &pose, cv::Mat &cameraMatrix, cv::Mat &dst) {
    std::vector<cv::Point3f> points;
    std::vector<cv::Point2f> projectedPoints;
    //fills the input array with 2 points
    points.push_back(cv::Point3f(0, 0, 0));
    points.push_back(cv::Point3f(x, y, z));
    //gets the rotation vector thanks to the cv::Rodrigues() method
    cv::Mat rvec;
    cv::Rodrigues(pose.colRange(0, 3).rowRange(0, 3), rvec);
    //projects the points using the cv::projectPoints method
    cv::projectPoints(points, rvec, pose.colRange(3, 4).rowRange(0, 3), cameraMatrix, std::vector<double>(), projectedPoints);
    //draws the corresponding line
    cv::line(dst, projectedPoints[0], projectedPoints[1],
             CV_RGB(255 * r, 255 * g, 255 * b));
}
void drawAxis(cv::Mat &pose, cv::Mat &cameraMatrix, cv::Mat &dst) {
    drawVector(0.1, 0, 0, 1, 1, 0, pose, cameraMatrix, dst);
    drawVector(0, 0.1, 0, 0, 1, 1, pose, cameraMatrix, dst);
    drawVector(0, 0, 0.1, 1, 0, 1, pose, cameraMatrix, dst);
    drawVector_withProjectPointsMethod(0.1, 0, 0, 1, 0, 0, pose, cameraMatrix, dst);
    drawVector_withProjectPointsMethod(0, 0.1, 0, 0, 1, 0, pose, cameraMatrix, dst);
    drawVector_withProjectPointsMethod(0, 0, 0.1, 0, 0, 1, pose, cameraMatrix, dst);
}
What am I doing wrong?
I had simply forgotten to divide the resulting points by their last component after the projection:
Given the projection matrix of the camera used to take the image, for any point (x, y, z, 1) in 3D space, its projection onto that image is computed as follows:
//point3D has 4 components (x, y, z, w); point2D has 3 (x, y, z).
//"mat" is the 3x4 projection matrix from the code above (cameraMatrix * pose).
point2D = mat * point3D;
//then divide the first 2 components of point2D by the third.
point2D /= point2D.z;
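Concretely, applying this fix to drawVector amounts to one extra division per projected point. A sketch of the corrected function, under the same assumptions about pose and cameraMatrix as above:

void drawVector(float x, float y, float z, float r, float g, float b, cv::Mat &pose, cv::Mat &cameraMatrix, cv::Mat &dst) {
    //Origin = (0, 0, 0, 1), End = (x, y, z, 1)
    cv::Mat origin(4, 1, CV_64FC1, double(0));
    origin.at<double>(3, 0) = 1;
    cv::Mat end(4, 1, CV_64FC1, double(1));
    end.at<double>(0, 0) = x; end.at<double>(1, 0) = y; end.at<double>(2, 0) = z;
    //3x4 projection matrix = intrinsics * [R | t]
    cv::Mat mat = cameraMatrix * pose.colRange(0, 4).rowRange(0, 3);
    origin = mat * origin;
    end = mat * end;
    //the fix: perspective divide by the third (depth) component
    double wOrigin = origin.at<double>(2, 0), wEnd = end.at<double>(2, 0);
    origin /= wOrigin;
    end /= wEnd;
    cv::line(dst, cv::Point(origin.at<double>(0, 0), origin.at<double>(1, 0)),
             cv::Point(end.at<double>(0, 0), end.at<double>(1, 0)),
             CV_RGB(255 * r, 255 * g, 255 * b));
}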