Stitching Video with fast playback of frames

I am trying to stitch two videos together by matching keypoints and finding the homography between the overlapping videos. I have already done this successfully with two separate still images.

For the videos, I load the two separate video files, loop through the frames, and copy each one into an empty Mat per video, cap1frame and cap2frame.

I then send each pair of frames to the stitching function, which matches keypoints, finds the homography between the two frames, stitches them, and displays the resulting image (matching based on the OpenCV example).

The stitching works, but it makes video playback very slow and produces some kind of graphical anomaly along the side of the frame, as seen in the picture.

I would like to know how to make this more efficient so the video plays back at full speed.

int main(int argc, char** argv){
  // Create a VideoCapture object and open the input file
  VideoCapture cap1("left.mov");
  VideoCapture cap2("right.mov");
  // Check if camera opened successfully
  if(!cap1.isOpened() || !cap2.isOpened()){
    cout << "Error opening video stream or file" << endl;
    return -1;
  }
  //Trying to loop frames
  for (;;){
    Mat cap1frame;
    Mat cap2frame;

    cap1 >> cap1frame;
    cap2 >> cap2frame;

    // If the frame is empty, break immediately
    if (cap1frame.empty() || cap2frame.empty())
      break;

    //sending each frame from each video to the stitch function then displaying
    imshow( "Result", Stitching(cap1frame,cap2frame));

    if(waitKey(30) >= 0) break;
    //destroyWindow("Stitching");
    // waitKey(0);
  }
  return 0;
}
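For reference, a quick way to confirm that the per-frame keypoint detection and matching is what slows playback down is to time the call. This is only a diagnostic sketch (my addition, not part of the original code), dropped into the loop above and assuming the question's version of Stitching, which returns the frame to display:

    //time one call to Stitching with cv::getTickCount
    int64 t0 = getTickCount();
    Mat result = Stitching(cap1frame, cap2frame);
    double ms = (getTickCount() - t0) * 1000.0 / getTickFrequency();
    cout << "Stitching took " << ms << " ms" << endl;
    imshow("Result", result);

If the reported time is well above the frame interval (about 33 ms at 30 fps), the display loop cannot keep up no matter what value is passed to waitKey.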

I was able to solve my problem by precomputing the homography using only the first frame of each video. That way the function is only called once.

I then loop through the rest of the videos, applying the warp to the frames so they can be stitched together using the precomputed homography. This step was originally inside my stitching function.

At this point I still had the problem that playback was very slow when calling imshow. I decided to export the resulting video instead, and that worked once the correct fps was set in the VideoWriter object. I wonder whether I simply need to adjust the fps used for the imshow playback, but I am not sure about that.
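If I wanted to keep the imshow preview as well, a possible tweak (just a sketch, assuming the captures report a usable value for CV_CAP_PROP_FPS) is to derive the waitKey delay from the source frame rate instead of hard-coding 30 ms, and to reuse the same value for the VideoWriter:

    //read the source frame rate; some containers report 0, so fall back to roughly 30 fps
    double fps = cap1.get(CV_CAP_PROP_FPS);
    int delay = (fps > 1.0) ? cvRound(1000.0 / fps) : 33;   //ms per displayed frame

    //the measured fps could also replace the hard-coded 30 given to the writer
    VideoWriter video("video/output.avi", CV_FOURCC('M','J','P','G'),
                      (fps > 1.0) ? fps : 30.0, Size(1280, 720));

    //inside the frame loop of the full code below: pace the preview with the derived delay
    imshow("Result", final);
    if (waitKey(delay) >= 0) break;

Note that waitKey only sets a lower bound on the frame interval; the per-frame processing time still adds on top of it, so the preview runs slightly slower than real time even with the correct delay.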

My full code is below:

#include <stdio.h>
#include <iostream>
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/xfeatures2d.hpp"
#include <opencv2/xfeatures2d/nonfree.hpp>
#include <opencv2/xfeatures2d/cuda.hpp>
#include <opencv2/opencv.hpp>
#include <vector>

using namespace cv;
using namespace std;
//Computes the homography between the two images passed in, by matching keypoints.
Mat Stitching(Mat image1, Mat image2){

    Mat I_1 = image1;
    Mat I_2 = image2;
//based on https://docs.opencv.org/3.3.0/d7/dff/tutorial_feature_homography.html
    cv::Ptr<Feature2D> f2d = xfeatures2d::SIFT::create();
        // Step 1: Detect the keypoints:
        std::vector<KeyPoint> keypoints_1, keypoints_2;
        f2d->detect( I_1, keypoints_1 );
        f2d->detect( I_2, keypoints_2 );
        // Step 2: Calculate descriptors (feature vectors)
        Mat descriptors_1, descriptors_2;
        f2d->compute( I_1, keypoints_1, descriptors_1 );
        f2d->compute( I_2, keypoints_2, descriptors_2 );
        // Step 3: Matching descriptor vectors using BFMatcher :
        BFMatcher matcher;
        std::vector< DMatch > matches;
        matcher.match( descriptors_1, descriptors_2, matches );
        // Keep only the best matches for the homography estimation:
        // sort the matches by descriptor distance and take the closest ones
        Mat index;
        int nbMatch = int(matches.size());
        Mat tab(nbMatch, 1, CV_32F);
        for (int i = 0; i < nbMatch; i++)
            tab.at<float>(i, 0) = matches[i].distance;
        sortIdx(tab, index, SORT_EVERY_COLUMN + SORT_ASCENDING);
        vector<DMatch> bestMatches;
        // guard against fewer than 200 matches being available
        int nbBest = nbMatch < 200 ? nbMatch : 200;
        for (int i = 0; i < nbBest; i++)
            bestMatches.push_back(matches[index.at<int>(i, 0)]);
        // 1st image is the destination image and the 2nd image is the src image
        std::vector<Point2f> dst_pts;                   //1st
        std::vector<Point2f> source_pts;                //2nd

        for (vector<DMatch>::iterator it = bestMatches.begin(); it != bestMatches.end(); ++it) {
            //cout << it->queryIdx << "\t" <<  it->trainIdx << "\t"  <<  it->distance << "\n";
            //-- Get the keypoints from the good matches
            dst_pts.push_back( keypoints_1[ it->queryIdx ].pt );
            source_pts.push_back( keypoints_2[ it->trainIdx ].pt );
        }
        Mat H_12 = findHomography( source_pts, dst_pts, CV_RANSAC );
      return H_12;
}
int main(int argc, char** argv){
  //Mats to get the first frame of video and pass to Stitching function.
  Mat I1, h_I1;
  Mat I2, h_I2;
  // Create a VideoCapture object and open the input file
  VideoCapture cap1("left.mov");
  VideoCapture cap2("right.mov");
  cap1.set(CV_CAP_PROP_BUFFERSIZE, 10);
  cap2.set(CV_CAP_PROP_BUFFERSIZE, 10);
  //Check if camera opened successfully
  if(!cap1.isOpened() || !cap2.isOpened()){
    cout << "Error opening video stream or file" << endl;
    return -1;
  }
  //passing the first frame of each video to the Stitching function
  if (cap1.read(I1)){
    h_I1 = I1;
  }

  if (cap2.read(I2)){
    h_I2 = I2;
  }

  Mat homography;
  //computing the homography once, from the first frames only
  homography = Stitching(h_I1, h_I2);
  std::cout << homography << '\n';

  //creating VideoWriter object with defined values.
  VideoWriter video("video/output.avi", CV_FOURCC('M','J','P','G'), 30, Size(1280,720));

  //Looping through frames of both videos.
  for (;;){
    Mat cap1frame;
    Mat cap2frame;

    cap1 >> cap1frame;
    cap2 >> cap2frame;

    // If the frame is empty, break immediately
    if (cap1frame.empty() || cap2frame.empty())
      break;

    Mat warpImage2;
    //warping the second video's cap2frame so it lines up with the first one.
    //size is defined as the final video size
    warpPerspective(cap2frame, warpImage2, homography, Size(1280,720), INTER_CUBIC);
    //final is the canvas both videos are composited onto.
    Mat final (Size(1280,720), CV_8UC3);
    //Mat final(Size(cap1frame.cols*2 + cap1frame.cols, cap1frame.rows*2),CV_8UC3);
    //Using ROIs to get the relevant area of the canvas for each video.
    Mat roi1(final, Rect(0, 0, cap1frame.cols, cap1frame.rows));
    Mat roi2(final, Rect(0, 0, warpImage2.cols, warpImage2.rows));
    //copying the images onto the ROIs, which are views into the final canvas.
    warpImage2.copyTo(roi2);
    cap1frame.copyTo(roi1);
    //writing to video.
    video.write(final);
    //imshow ("Result", final);
    if(waitKey(30) >= 0) break;
  }
  video.release();
  return 0;
}
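One remaining guess about the graphical anomaly along the edge of the frame mentioned in the question: Mat final(Size(1280,720), CV_8UC3) allocates the canvas without initialising its pixels, so any region not covered by a copyTo would show leftover memory. In this version warpImage2 covers the whole 1280x720 canvas, so it should not matter here, but if the anomaly comes back, zero-initialising the canvas is a cheap way to rule that out (my suggestion, not something I have verified against the original footage):

    //start from a black canvas instead of uninitialised memory
    Mat final = Mat::zeros(Size(1280, 720), CV_8UC3);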