OpenCV real-time stereocam image processing problems (post-calibration)
I built my own stereo camera from two USB cameras (with annoying autofocus). I calibrated them using /opencv/samples/cpp/stereo_calib.cpp, which produced an extrinsics/intrinsics file with an RMS error of 0.4818 and an average reprojection error of 0.5426.
I'm now trying to run real-time depth mapping in OpenCV using Ptr<StereoBM> bm. I'm getting useless images, and I'm wondering if someone can point me in the right direction. I'm also wondering whether the cameras' autofocus, which keeps changing the focal length, is changing the calibration and therefore the results.
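If the autofocus does turn out to matter, one option is to try locking it through OpenCV before calibrating and capturing. This is only a sketch: CAP_PROP_AUTOFOCUS and CAP_PROP_FOCUS are real videoio properties, but whether they take effect depends on the capture backend and the UVC driver, and the device indices below are assumptions.

#include <opencv2/videoio.hpp>
#include <cstdio>

int main()
{
    // Device indices 1 and 2 are assumptions; use whatever indices the rig actually uses.
    cv::VideoCapture capture(1), capture2(2);
    // 0 = autofocus off; set() returns false if the backend/driver ignores the property.
    if (!capture.set(cv::CAP_PROP_AUTOFOCUS, 0) || !capture2.set(cv::CAP_PROP_AUTOFOCUS, 0))
        std::printf("Could not disable autofocus through OpenCV\n");
    // Optionally pin the lens to a fixed position (the value range is driver-specific).
    capture.set(cv::CAP_PROP_FOCUS, 0);
    capture2.set(cv::CAP_PROP_FOCUS, 0);
    return 0;
}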
Code and pictures below.
Bonus points if anyone can recommend anything more robust than StereoBM for matching in OpenCV :)
Checkerboard calibration image:
Left camera image:
After StereoBM processing:
Code snippet:
//Set up stereoBM
Ptr<StereoBM> bm = StereoBM::create(16,9);
//Read intrinsic parameters
string intrinsic_filepath = "/home/will/Desktop/repos3.0/stereo-vision/Stereo-Vision/intrinsics.yml";
FileStorage fs(intrinsic_filepath, FileStorage::READ);
if(!fs.isOpened())
{
printf("Failed to open intrinsics.yml");
return -1;
}
Mat M1, D1, M2, D2;
fs["M1"] >> M1;
fs["D1"] >> D1;
fs["M2"] >> M2;
fs["D2"] >> D2;
//Read Extrinsic Parameters
string extrinsic_filepath = "/home/will/Desktop/repos3.0/stereo-vision/Stereo-Vision/extrinsics.yml";
fs.open(extrinsic_filepath, FileStorage::READ);
if(!fs.isOpened())
{
printf("Failed to open extrinsics");
return -1;
}
Mat R, T, R1, P1, R2, P2;
fs["R"] >> R;
fs["T"] >> T;
Mat frame1,frame2, gray1, gray2;
int counter = 0;
//Grab one frame from each camera to get the image size
capture >> frame1;
capture2 >> frame2;
Size img_size = frame1.size();
Rect roi1, roi2;
Mat Q;
stereoRectify( M1, D1, M2, D2, img_size, R, T, R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY, -1, img_size, &roi1, &roi2 );
Mat map11, map12, map21, map22;
initUndistortRectifyMap(M1, D1, R1, P1, img_size, CV_16SC2, map11, map12);
initUndistortRectifyMap(M2, D2, R2, P2, img_size, CV_16SC2, map21, map22);
int numberOfDisparities = 16;
int SADWindowSize = 9;
bm->setROI1(roi1);
bm->setROI2(roi2);
bm->setPreFilterCap(31);
bm->setBlockSize(SADWindowSize);
bm->setMinDisparity(0);
bm->setNumDisparities(numberOfDisparities);
bm->setTextureThreshold(10);
bm->setUniquenessRatio(15);
bm->setSpeckleWindowSize(100);
bm->setSpeckleRange(32);
bm->setDisp12MaxDiff(1);
while(1){
capture >> frame1;
capture2 >> frame2;
imshow("Cam1", frame1);
imshow("Cam2", frame2);
/************************* STEREO ***********************/
cvtColor(frame1, gray1, COLOR_BGR2GRAY);   //VideoCapture frames are BGR
cvtColor(frame2, gray2, COLOR_BGR2GRAY);
int64 t = getTickCount();
Mat img1r, img2r;
remap(gray1, img1r, map11, map12, INTER_LINEAR);
remap(gray2, img2r, map21, map22, INTER_LINEAR);
Mat disp, disp8;
bm->compute(img1r, img2r, disp);
t = getTickCount() - t;
printf("Time elapsed: %fms\n", t*1000/getTickFrequency());
disp.convertTo(disp8, CV_8U, 255/(numberOfDisparities*16.));
imshow("disparity", disp8);
}
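Since the goal is a depth map rather than just a disparity image, here is a hedged sketch of the step the snippet stops short of: reprojecting the disparity with the Q matrix that stereoRectify already computes. The helper name disparityToPointCloud is just for illustration.

#include <opencv2/calib3d.hpp>
using namespace cv;

// Hypothetical follow-up step not shown in the snippet: reproject the disparity to 3-D
// with the Q matrix from stereoRectify. StereoBM's CV_16S output carries 4 fractional
// bits, so it is divided by 16 before reprojecting (per the OpenCV docs).
Mat disparityToPointCloud(const Mat& disp, const Mat& Q)
{
    Mat disp32, xyz;
    disp.convertTo(disp32, CV_32F, 1.0 / 16.0);
    reprojectImageTo3D(disp32, xyz, Q, true);   // xyz: CV_32FC3, in the units of T
    return xyz;
}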
So, an RMS error of half a pixel means your calibration is basically garbage.
In your calibration image I notice that the target isn't even flat. If I can see that, so can the camera, but the calibration model will still assume it is flat, and that makes the optimizer very unhappy.
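As a hedged diagnostic sketch (it assumes access to the object/image point lists and the per-view rvecs/tvecs from a cv::calibrateCamera run, which stereo_calib.cpp does not expose as-is), computing the per-view reprojection error shows which board images, such as the bent ones, are dragging the calibration down:

#include <opencv2/calib3d.hpp>
#include <opencv2/core.hpp>
#include <cmath>
#include <cstdio>
#include <vector>
using namespace cv;
using namespace std;

// Hypothetical diagnostic: per-view reprojection error after a cv::calibrateCamera run.
void printPerViewErrors(const vector<vector<Point3f> >& objectPoints,
                        const vector<vector<Point2f> >& imagePoints,
                        const vector<Mat>& rvecs, const vector<Mat>& tvecs,
                        const Mat& cameraMatrix, const Mat& distCoeffs)
{
    vector<Point2f> projected;
    for (size_t i = 0; i < objectPoints.size(); ++i)
    {
        projectPoints(objectPoints[i], rvecs[i], tvecs[i], cameraMatrix, distCoeffs, projected);
        double err = norm(imagePoints[i], projected, NORM_L2);     // L2 error over all corners
        double rms = sqrt(err * err / objectPoints[i].size());     // per-view RMS in pixels
        printf("view %zu: reprojection RMS = %.3f px\n", i, rms);  // bent-board views stand out
    }
}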
I suggest you take a look at this answer on calibration.
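On the bonus question: OpenCV's StereoSGBM (semi-global block matching) is generally more robust than StereoBM on real camera footage. A minimal sketch of a drop-in replacement for the StereoBM setup above, using commonly recommended starting parameters rather than values tuned for this rig (the helper name is just for illustration):

#include <opencv2/calib3d.hpp>
using namespace cv;

// Hypothetical drop-in for the StereoBM setup above.
Ptr<StereoSGBM> createSgbmMatcher()
{
    int numberOfDisparities = 16 * 6;   // must stay a multiple of 16
    int blockSize = 9;
    int cn = 1;                         // rectified inputs are single-channel grayscale
    Ptr<StereoSGBM> sgbm = StereoSGBM::create(0, numberOfDisparities, blockSize);
    sgbm->setP1(8 * cn * blockSize * blockSize);    // penalty for small disparity changes
    sgbm->setP2(32 * cn * blockSize * blockSize);   // penalty for large disparity changes
    sgbm->setPreFilterCap(63);
    sgbm->setUniquenessRatio(10);
    sgbm->setSpeckleWindowSize(100);
    sgbm->setSpeckleRange(32);
    sgbm->setDisp12MaxDiff(1);
    return sgbm;                        // in the loop: sgbm->compute(img1r, img2r, disp);
}

StereoSGBM also returns a CV_16S disparity scaled by 16, so the existing convertTo call used for display would not need to change.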