Error reading images using imread OpenCV
I am comparing two images using findHomography(). I added the extra modules from opencv_contrib to OpenCV 3.1.0 in order to use the SURF and SIFT algorithms, and compiled against the latest Android architectures. I can build the libraries successfully with ndk-build.
Problem:
When I run the app on an LG Nexus 5 I can read images using imread, but when I run the same app on an LG Nexus 5X, imread does not read the images. I have also tested on a Samsung S6 and a OnePlus X and hit the same problem. Below is my native method:
#include <jni.h>
#include <string.h>
#include <stdio.h>
#include <android/log.h>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/xfeatures2d/nonfree.hpp"
#include "opencv2/opencv.hpp"
using namespace std;
using namespace cv;
#define LOG_TAG "nonfree_jni"
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO,LOG_TAG,__VA_ARGS__)
// Compares the object image against the scene image; returns true on a match.
jboolean detect_features(JNIEnv * env, jstring scenePath, jstring objectPath) {
const char *nativeScenePath = env->GetStringUTFChars(scenePath, NULL);
const char *nativeObjectPath = env->GetStringUTFChars(objectPath, NULL);
__android_log_print(ANDROID_LOG_VERBOSE, LOG_TAG, "Object path: ----- %s \n", nativeObjectPath);
__android_log_print(ANDROID_LOG_VERBOSE, LOG_TAG, "Scene path: ----- %s \n", nativeScenePath);
Mat img_object = imread( nativeObjectPath, CV_LOAD_IMAGE_GRAYSCALE );
Mat img_scene = imread( nativeScenePath, CV_LOAD_IMAGE_GRAYSCALE );
// Release the JNI strings only after the paths are no longer needed.
env->ReleaseStringUTFChars(scenePath, nativeScenePath);
env->ReleaseStringUTFChars(objectPath, nativeObjectPath);
if( !img_object.data || !img_scene.data){
LOGI(" --(!) Error reading images ");
return false;
}
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
__android_log_print(ANDROID_LOG_VERBOSE, LOG_TAG, "Image comparison rows: ----- %d \n", img_object.rows);
__android_log_print(ANDROID_LOG_VERBOSE, LOG_TAG, "Image comparison colums: ----- %d \n", img_object.cols);
// cv::xfeatures2d::SurfFeatureDetector detector( minHessian );
Ptr<cv::xfeatures2d::SurfFeatureDetector> detector = cv::xfeatures2d::SurfFeatureDetector::create(minHessian);
std::vector<KeyPoint> keypoints_object, keypoints_scene;
detector->detect( img_object, keypoints_object );
detector->detect( img_scene, keypoints_scene );
//-- Step 2: Calculate descriptors (feature vectors)
// cv::xfeatures2d::SurfDescriptorExtractor extractor;
Ptr<cv::xfeatures2d::SurfDescriptorExtractor> extractor = cv::xfeatures2d::SurfDescriptorExtractor::create();
Mat descriptors_object, descriptors_scene;
extractor->compute( img_object, keypoints_object, descriptors_object );
extractor->compute( img_scene, keypoints_scene, descriptors_scene );
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_object, descriptors_scene, matches );
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors_object.rows; i++ )
{
double dist = matches[i].distance;
if (dist == 0) continue;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
__android_log_print(ANDROID_LOG_VERBOSE, LOG_TAG, "-- Max dist : %f \n", max_dist);
__android_log_print(ANDROID_LOG_VERBOSE, LOG_TAG, "-- Min dist : %f \n", min_dist);
//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_object.rows; i++ )
{
if( matches[i].distance <= 0.1 ) //3*min_dist
{
good_matches.push_back( matches[i]);
}
}
__android_log_print(ANDROID_LOG_VERBOSE, LOG_TAG, "FLANN total matches -----: %zu \n", matches.size());
__android_log_print(ANDROID_LOG_VERBOSE, LOG_TAG, "FLANN good matches -----: %zu \n", good_matches.size());
Mat img_matches;
drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Localize the object
std::vector<Point2f> obj;
std::vector<Point2f> scene;
for( int i = 0; i < good_matches.size(); i++ )
{
//-- Get the keypoints from the good matches
obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
}
if (good_matches.size() >= 5)
{
Mat H = findHomography( obj, scene, CV_RANSAC );
//-- Get the corners from the image_1 ( the object to be "detected" )
std::vector<Point2f> obj_corners(4);
obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( img_object.cols, 0 );
obj_corners[2] = cvPoint( img_object.cols, img_object.rows ); obj_corners[3] = cvPoint( 0, img_object.rows );
std::vector<Point2f> scene_corners(4);
Mat output, matrix;
warpPerspective(img_object, output, H, { img_scene.cols, img_scene.rows });
////////////////////////////////////////////////////////////////////////////////
detector->detect( output, keypoints_object );
//-- Step 2: Calculate descriptors (feature vectors)
// Reuse the SURF descriptor extractor created in Step 2 above.
extractor->compute( output, keypoints_object, descriptors_object );
extractor->compute( img_scene, keypoints_scene, descriptors_scene );
std::vector<std::vector<cv::DMatch>> matches2;
BFMatcher bfMatcher; // separate name, so it does not shadow the FLANN matcher above
bfMatcher.knnMatch(descriptors_object, descriptors_scene, matches2, 2);
vector<cv::DMatch> good_matches2;
for (int i = 0; i < matches2.size(); ++i)
{
const float ratio = 0.8; // As in Lowe's paper; can be tuned
if (matches2[i][0].distance < ratio * matches2[i][1].distance)
{
good_matches2.push_back(matches2[i][0]);
}
}
if (matches2.size() == 0 || good_matches2.size() == 0) {
LOGI( "End run!\n");
return false;
}
double ratioOfSimilarity = static_cast<double>(good_matches2.size()) / static_cast<double>(matches2.size());
__android_log_print(ANDROID_LOG_VERBOSE, LOG_TAG, "Bruteforce total matches -----: %zu \n", matches2.size());
__android_log_print(ANDROID_LOG_VERBOSE, LOG_TAG, "Bruteforce good matches -----: %zu \n", good_matches2.size());
__android_log_print(ANDROID_LOG_VERBOSE, LOG_TAG, "Bruteforce similarity ratio -----: %f \n", ratioOfSimilarity);
if(ratioOfSimilarity >= 0.3) {
LOGI( "End run!\n");
return true;
}
LOGI( "End run!\n");
return false;
}
LOGI( "End run!\n");
return false;
}
The method breaks at these lines:
if( !img_object.data || !img_scene.data){
LOGI(" --(!) Error reading images ");
return false;
}
Shouldn't it be !img_object.data?
Right now you log the error and return false when there is data rather than when there is none.
I tested your imread problem on a Nexus 5X device running Android 7.0,
so I only used the imread call in my Android project.
My OpenCV library is the OpenCV 3.1.0 prebuilt library.
After some tests, I can only read images on the Nexus 5X from:
- /sdcard — OK
- /storage/emulated/0/ — fails
I think these are actually the same path, but the image will not load using the second option.
Mat flag=imread("/sdcard/Pictures/mytest.jpg", CV_LOAD_IMAGE_GRAYSCALE);
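As a quick way to narrow this down, it can help to confirm from the Java side that the path is actually visible to the app before calling into native code. A minimal sketch; the helper name isReadableImage is my own, not from the original post:

import java.io.File;

// Hypothetical helper: verify the file exists and is readable by the app
// before passing the path down to the native imread call.
static boolean isReadableImage(String path) {
    File f = new File(path); // e.g. "/sdcard/Pictures/mytest.jpg"
    return f.exists() && f.canRead() && f.length() > 0;
}

If this returns false for the /storage/emulated/0/ form but true for /sdcard, the failure is in path visibility rather than in OpenCV itself.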
In my development experience I have had problems with external storage paths, because some devices emulate external storage and some do not.
So usually, to avoid this problem, I copy my resources out of the .apk into internal storage at execution time.
I store the resources in the res/raw folder and get the internal path with
config_path = m_context.getApplicationContext().getFilesDir().toString();
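For illustration, a minimal sketch of that copy step, assuming a Context field m_context as above; the helper name copyRawToInternal and the resource name are my own:

import android.content.Context;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

// Hypothetical helper: copy a res/raw resource into the app's internal
// files directory and return the absolute path of the copy.
static String copyRawToInternal(Context ctx, int rawId, String fileName) throws IOException {
    File out = new File(ctx.getFilesDir(), fileName);
    try (InputStream in = ctx.getResources().openRawResource(rawId);
         OutputStream os = new FileOutputStream(out)) {
        byte[] buf = new byte[4096];
        int n;
        while ((n = in.read(buf)) > 0) {
            os.write(buf, 0, n);
        }
    }
    return out.getAbsolutePath(); // safe to hand to the native imread
}

Calling copyRawToInternal(m_context, R.raw.mytest, "mytest.jpg") would then give a path that imread can open regardless of how the device emulates external storage.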
Hope my tests help to solve your problem.
Cheers.
Unai.