Android 使用 OpenCV 在相机视图中进行实时 DFT

Android Real Time DFT in Camera View using OpenCV

我一直在努力实现一个 Android 直接在相机视图上应用 DFT 的应用程序。在 Whosebug 上进行研究,我可以找到以下主题:

SOLVED - Load Image in Mat and Display after DFT process

SOLVED - Load Image in Mat and Display after DFT process

Convert OpenCv DCT to Android

我也尝试过使用 JNI 的不同解决方案: http://allaboutee.com/2011/11/12/discrete-fourier-transform-in-android-with-opencv/

然后我可以实现我的主要 Activity 代码:

package ch.hepia.lsn.opencv_native_androidstudio;

import android.app.Activity;
import android.os.Bundle;
import android.util.Log;
import android.view.SurfaceView;
import android.view.WindowManager;
import org.opencv.android.BaseLoaderCallback;
import org.opencv.android.CameraBridgeViewBase;
import android.hardware.Camera;
import org.opencv.android.LoaderCallbackInterface;
import org.opencv.android.OpenCVLoader;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Rect;
import org.opencv.core.Size;
import org.opencv.imgproc.Imgproc;

import java.util.ArrayList;
import java.util.List;

public class MainActivity extends Activity implements CameraBridgeViewBase.CvCameraViewListener2 {
    private static final String TAG = "OCVSample::Activity";

    private CameraBridgeViewBase mOpenCvCameraView;

    private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
        @Override
        public void onManagerConnected(int status) {
            switch (status) {
                case LoaderCallbackInterface.SUCCESS: {
                    Log.i(TAG, "OpenCV loaded successfully");
                    mOpenCvCameraView.enableView();
                }
                break;
                default: {
                    super.onManagerConnected(status);
                }
            }
        }
    };

    @Override
    public void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);

        // Load ndk built module, as specified
        // in moduleName in build.gradle
        System.loadLibrary("native");

        getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);

        setContentView(R.layout.activity_main);

        mOpenCvCameraView = (CameraBridgeViewBase) findViewById(R.id.main_surface);
        mOpenCvCameraView.setVisibility(SurfaceView.VISIBLE);
        mOpenCvCameraView.setCvCameraViewListener(this);
    }

    @Override
    public void onPause() {
        super.onPause();
        disableCamera();
    }

    @Override
    public void onResume() {
        super.onResume();
        if (!OpenCVLoader.initDebug()) {
            Log.d(TAG, "Internal OpenCV library not found. Using OpenCV Manager for initialization");
            OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_3_0_0, this, mLoaderCallback);
        } else {
            Log.d(TAG, "OpenCV library found inside package. Using it!");
            mLoaderCallback.onManagerConnected(LoaderCallbackInterface.SUCCESS);
        }
    }

    public void onDestroy() {
        super.onDestroy();
        disableCamera();
    }

    public void disableCamera() {
        if (mOpenCvCameraView != null)
            mOpenCvCameraView.disableView();
    }

    public void onCameraViewStarted(int width, int height) {
    }

    public void onCameraViewStopped() {
    }

    private Mat getDFT(Mat singleChannel) {

        singleChannel.convertTo(singleChannel, CvType.CV_64FC1);

        int m = Core.getOptimalDFTSize(singleChannel.rows());
        int n = Core.getOptimalDFTSize(singleChannel.cols()); // on the border
        // add zero
        // values
        // Imgproc.copyMakeBorder(image1,
        // padded, 0, m -
        // image1.rows(), 0, n

        Mat padded = new Mat(new Size(n, m), CvType.CV_64FC1); // expand input
        // image to
        // optimal size

        Core.copyMakeBorder(singleChannel, padded, 0, m - singleChannel.rows(), 0,
                n - singleChannel.cols(), Core.BORDER_CONSTANT);

        List<Mat> planes = new ArrayList<Mat>();
        planes.add(padded);
        planes.add(Mat.zeros(padded.rows(), padded.cols(), CvType.CV_64FC1));

        Mat complexI = Mat.zeros(padded.rows(), padded.cols(), CvType.CV_64FC2);

        Mat complexI2 = Mat
                .zeros(padded.rows(), padded.cols(), CvType.CV_64FC2);

        Core.merge(planes, complexI); // Add to the expanded another plane with
        // zeros

        Core.dft(complexI, complexI2); // this way the result may fit in the
        // source matrix

        // compute the magnitude and switch to logarithmic scale
        // => log(1 + sqrt(Re(DFT(I))^2 + Im(DFT(I))^2))
        Core.split(complexI2, planes); // planes[0] = Re(DFT(I), planes[1] =
        // Im(DFT(I))

        Mat mag = new Mat(planes.get(0).size(), planes.get(0).type());

        Core.magnitude(planes.get(0), planes.get(1), mag);// planes[0]
        // =
        // magnitude

        Mat magI = mag;
        Mat magI2 = new Mat(magI.size(), magI.type());
        Mat magI3 = new Mat(magI.size(), magI.type());
        Mat magI4 = new Mat(magI.size(), magI.type());
        Mat magI5 = new Mat(magI.size(), magI.type());

        Core.add(magI, Mat.ones(padded.rows(), padded.cols(), CvType.CV_64FC1),
                magI2); // switch to logarithmic scale
        Core.log(magI2, magI3);

        Mat crop = new Mat(magI3, new Rect(0, 0, magI3.cols() & -2,
                magI3.rows() & -2));

        magI4 = crop.clone();

        // rearrange the quadrants of Fourier image so that the origin is at the
        // image center
        int cx = magI4.cols() / 2;
        int cy = magI4.rows() / 2;

        Rect q0Rect = new Rect(0, 0, cx, cy);
        Rect q1Rect = new Rect(cx, 0, cx, cy);
        Rect q2Rect = new Rect(0, cy, cx, cy);
        Rect q3Rect = new Rect(cx, cy, cx, cy);

        Mat q0 = new Mat(magI4, q0Rect); // Top-Left - Create a ROI per quadrant
        Mat q1 = new Mat(magI4, q1Rect); // Top-Right
        Mat q2 = new Mat(magI4, q2Rect); // Bottom-Left
        Mat q3 = new Mat(magI4, q3Rect); // Bottom-Right

        Mat tmp = new Mat(); // swap quadrants (Top-Left with Bottom-Right)
        q0.copyTo(tmp);
        q3.copyTo(q0);
        tmp.copyTo(q3);

        q1.copyTo(tmp); // swap quadrant (Top-Right with Bottom-Left)
        q2.copyTo(q1);
        tmp.copyTo(q2);

        Core.normalize(magI4, magI5, 0, 255, Core.NORM_MINMAX);

        Mat realResult = new Mat(magI5.size(), CvType.CV_8UC1);

        magI5.convertTo(realResult, CvType.CV_8UC1);

        return realResult;
    }

    public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
        //System.out.print("teste");
        Mat matGray = inputFrame.gray();
        return getDFT(inputFrame.gray());
    }
}

但问题是,我仍然收到此错误:

07-03 22:46:46.205 13700-28322/ch.hepia.lsn.opencv_native_androidstudio A/libc: Fatal signal 11 (SIGSEGV), code 1, fault addr 0x10 in tid 28322 (Thread-9802)

我认为这是因为某些处理限制,因为我只是复制了与其他用户一起使用普通图像的代码。

我的问题是:

谢谢。

我已将最初 post 编辑的代码移植到 Android Studio(来自 Eclipse)和 OpenCV 3.1.0。我认为此版本的 openCV 中的 Core.add() 函数存在问题 - 请参阅 post here

我按照建议使用了 Core.addWeighted() 并且我至少可以让 dft 显示但不久之后它就会 运行 内存不足。我 认为 类似 split 的函数也使用 add() 所以我认为我们需要在 openCV 中查看该问题的修复程序。

我 post 编辑的代码可以改进以更好地利用资源,例如,保持键数组的静态分配,不要继续调用 size() 但再次保持静态,减少分配的垫子数量等。您还可以减小捕获图像的大小,因为垫子在更现代的手机(我有三星 S6)上会变得很大,所以使用

mOpenCvCameraView.setMaxFrameSize(176, 152);

或任何更易于管理的大小。

如果你想减少帧处理的数量,那么保留一个静态计数器,你在每次捕获帧时增加它,并且只有在计数器可以被 5、10 或其他任何东西整除时才调用 getDFT() 以仅处理那些第5、10帧等