OpenCV Object detection with Feature Detection and Homography

I'm trying to check whether this image:

is contained in an image like this one:

I'm using feature detection (SURF) and homography, because template matching is not scale invariant. Sadly, all the keypoints except for a few end up in the wrong places. Should I instead try template matching at multiple scales of the image? If so, what would be the best approach to scaling the image?

Code:

import java.util.ArrayList;
import java.util.List;
import org.opencv.calib3d.Calib3d;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.DMatch;
import org.opencv.core.KeyPoint;
import org.opencv.core.Mat;
import org.opencv.core.MatOfByte;
import org.opencv.core.MatOfDMatch;
import org.opencv.core.MatOfKeyPoint;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.Features2d;
import org.opencv.highgui.HighGui;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
import org.opencv.xfeatures2d.SURF;
class SURFFLANNMatchingHomography {
    public void run(String[] args) {
        String filenameObject = args.length > 1 ? args[0] : "../data/box.png";
        String filenameScene = args.length > 1 ? args[1] : "../data/box_in_scene.png";
        Mat imgObject = Imgcodecs.imread(filenameObject, Imgcodecs.IMREAD_GRAYSCALE);
        Mat imgScene = Imgcodecs.imread(filenameScene, Imgcodecs.IMREAD_GRAYSCALE);
        if (imgObject.empty() || imgScene.empty()) {
            System.err.println("Cannot read images!");
            System.exit(0);
        }
        //-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
        double hessianThreshold = 400;
        int nOctaves = 4, nOctaveLayers = 3;
        boolean extended = false, upright = false;
        SURF detector = SURF.create(hessianThreshold, nOctaves, nOctaveLayers, extended, upright);
        MatOfKeyPoint keypointsObject = new MatOfKeyPoint(), keypointsScene = new MatOfKeyPoint();
        Mat descriptorsObject = new Mat(), descriptorsScene = new Mat();
        detector.detectAndCompute(imgObject, new Mat(), keypointsObject, descriptorsObject);
        detector.detectAndCompute(imgScene, new Mat(), keypointsScene, descriptorsScene);
        //-- Step 2: Matching descriptor vectors with a FLANN based matcher
        // Since SURF is a floating-point descriptor NORM_L2 is used
        DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.FLANNBASED);
        List<MatOfDMatch> knnMatches = new ArrayList<>();
        matcher.knnMatch(descriptorsObject, descriptorsScene, knnMatches, 2);
        //-- Filter matches using the Lowe's ratio test
        float ratioThresh = 0.75f;
        List<DMatch> listOfGoodMatches = new ArrayList<>();
        for (int i = 0; i < knnMatches.size(); i++) {
            if (knnMatches.get(i).rows() > 1) {
                DMatch[] matches = knnMatches.get(i).toArray();
                if (matches[0].distance < ratioThresh * matches[1].distance) {
                    listOfGoodMatches.add(matches[0]);
                }
            }
        }
        MatOfDMatch goodMatches = new MatOfDMatch();
        goodMatches.fromList(listOfGoodMatches);
        //-- Draw matches
        Mat imgMatches = new Mat();
        Features2d.drawMatches(imgObject, keypointsObject, imgScene, keypointsScene, goodMatches, imgMatches, Scalar.all(-1),
                Scalar.all(-1), new MatOfByte(), Features2d.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS);
        //-- Localize the object
        List<Point> obj = new ArrayList<>();
        List<Point> scene = new ArrayList<>();
        List<KeyPoint> listOfKeypointsObject = keypointsObject.toList();
        List<KeyPoint> listOfKeypointsScene = keypointsScene.toList();
        for (int i = 0; i < listOfGoodMatches.size(); i++) {
            //-- Get the keypoints from the good matches
            obj.add(listOfKeypointsObject.get(listOfGoodMatches.get(i).queryIdx).pt);
            scene.add(listOfKeypointsScene.get(listOfGoodMatches.get(i).trainIdx).pt);
        }
        MatOfPoint2f objMat = new MatOfPoint2f(), sceneMat = new MatOfPoint2f();
        objMat.fromList(obj);
        sceneMat.fromList(scene);
        double ransacReprojThreshold = 3.0;
        Mat H = Calib3d.findHomography( objMat, sceneMat, Calib3d.RANSAC, ransacReprojThreshold );
        //-- Get the corners from the image_1 ( the object to be "detected" )
        Mat objCorners = new Mat(4, 1, CvType.CV_32FC2), sceneCorners = new Mat();
        float[] objCornersData = new float[(int) (objCorners.total() * objCorners.channels())];
        objCorners.get(0, 0, objCornersData);
        objCornersData[0] = 0;
        objCornersData[1] = 0;
        objCornersData[2] = imgObject.cols();
        objCornersData[3] = 0;
        objCornersData[4] = imgObject.cols();
        objCornersData[5] = imgObject.rows();
        objCornersData[6] = 0;
        objCornersData[7] = imgObject.rows();
        objCorners.put(0, 0, objCornersData);
        Core.perspectiveTransform(objCorners, sceneCorners, H);
        float[] sceneCornersData = new float[(int) (sceneCorners.total() * sceneCorners.channels())];
        sceneCorners.get(0, 0, sceneCornersData);
        //-- Draw lines between the corners (the mapped object in the scene - image_2 )
        Imgproc.line(imgMatches, new Point(sceneCornersData[0] + imgObject.cols(), sceneCornersData[1]),
                new Point(sceneCornersData[2] + imgObject.cols(), sceneCornersData[3]), new Scalar(0, 255, 0), 4);
        Imgproc.line(imgMatches, new Point(sceneCornersData[2] + imgObject.cols(), sceneCornersData[3]),
                new Point(sceneCornersData[4] + imgObject.cols(), sceneCornersData[5]), new Scalar(0, 255, 0), 4);
        Imgproc.line(imgMatches, new Point(sceneCornersData[4] + imgObject.cols(), sceneCornersData[5]),
                new Point(sceneCornersData[6] + imgObject.cols(), sceneCornersData[7]), new Scalar(0, 255, 0), 4);
        Imgproc.line(imgMatches, new Point(sceneCornersData[6] + imgObject.cols(), sceneCornersData[7]),
                new Point(sceneCornersData[0] + imgObject.cols(), sceneCornersData[1]), new Scalar(0, 255, 0), 4);
        //-- Show detected matches
        HighGui.imshow("Good Matches & Object detection", imgMatches);
        HighGui.waitKey(0);
        System.exit(0);
    }
}
public class SURFFLANNMatchingHomographyDemo {
    public static void main(String[] args) {
        // Load the native OpenCV library
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        new SURFFLANNMatchingHomography().run(args);
    }
}

Resulting image:

If looking for a specific color is an option, you can rely on segmentation to quickly find candidates, regardless of size. But you will have to add some post-filtering.
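
As an illustration of that idea, here's a minimal sketch of color-based segmentation in Python, under the assumption that the target is mostly cyan; the HSV bounds and the minimum contour area are hypothetical values you would have to tune:

import cv2
import numpy as np

# Read the input image (path is a placeholder):
inputImage = cv2.imread("screen.png")

# Convert to HSV, where a color range is easier to express:
hsvImage = cv2.cvtColor(inputImage, cv2.COLOR_BGR2HSV)

# Hypothetical bounds for a cyan-ish hue (OpenCV's hue range is 0-179); tune these for your target:
lowerBound = np.array([80, 100, 100])
upperBound = np.array([100, 255, 255])
colorMask = cv2.inRange(hsvImage, lowerBound, upperBound)

# Post-filter: keep only blobs whose area is plausible for the target:
contours, _ = cv2.findContours(colorMask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
    if cv2.contourArea(c) > 1000:  # hypothetical minimum area
        x, y, w, h = cv2.boundingRect(c)
        cv2.rectangle(inputImage, (x, y), (x + w, y + h), (0, 0, 255), 2)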

Here's a possible solution. The code is in Python, but the operations are very straightforward; hopefully you'll be able to port it to Java. I'm using template matching. The gist, I guess, is that I'm performing template matching on a binary mask obtained from the Cyan (C) component of the input image. The steps are these:

  1. Trim your image to remove unwanted noise
  2. Convert the image to the CMYK color space and get the Cyan channel
  3. Clean up the Cyan channel
  4. Read the template
  5. Convert the template to a binary image
  6. Perform template matching

Let's see. The template's location in the target image seems to be constant, so we can crop the image to get rid of some portions where we are sure the template won't be found. I cropped the image to eliminate part of the "header" and "footer" by specifying the coordinates (top left x, top left y, width, height) of a Region of Interest (ROI), like this:

# imports:
import numpy as np
import cv2

# image path
path = "D://opencvImages//"
fileName = "screen.png"

# Reading an image in default mode:
inputImage = cv2.imread(path + fileName)

# Deep copy for results:
inputImageCopy = inputImage.copy()

# Get image dimensions:
(imageHeight, imageWidth) = inputImage.shape[:2]

# Set the ROI location:
roiX = 0
roiY = 225
roiWidth = imageWidth
roiHeight = 1390

# Crop the ROI (note: roiWidth/roiHeight are used here as absolute right/bottom coordinates):
imageROI = inputImage[roiY:roiHeight, roiX:roiWidth]

# Store a deep copy of this image for results:
imageROIcopy = imageROI.copy()

You'll get the following cropped image:

You could crop even more, but I'm not sure about your requirements. Let's work with this and convert the new image to the CMYK color space. Then, extract the Cyan channel, since the template seems to have most of its content in that particular channel. There's no direct conversion to CMYK in OpenCV, so I applied the conversion formula directly: with R', G', B' being the RGB components scaled to [0, 1], the Key channel is K = 1 - max(R', G', B') and the Cyan channel is C = (1 - R' - K) / (1 - K). We could get every component of the color space from that formula, but we're only interested in the C channel, which just needs the K (Key) channel to be computed beforehand. It can be calculated like this:

# Convert the image to float and divide by 255 (np.float64 avoids the removed np.float alias):
floatImage = imageROI.astype(np.float64) / 255.

# Calculate channel K (Key):
kChannel = 1 - np.max(floatImage, axis=2)

# Calculate channel C (Cyan); index 2 is the Red channel, since OpenCV loads images in BGR order:
cChannel = np.where(kChannel < 0.9, (1 - floatImage[..., 2] - kChannel) / (1 - kChannel), 0)

# Convert Cyan channel to uint 8:
cChannel = (255*cChannel).astype(np.uint8)

Be careful with your data types. We need to operate on float arrays, so that's the first conversion I perform. After getting the C channel, we convert the image back to an unsigned 8-bit array. This is the image you get for the C channel:

Next, get a binary mask from it via Otsu's thresholding:

# Threshold via Otsu:
_, binaryImage = cv2.threshold(cChannel, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

This is the mask:

We can "erase" some of the white areas by flood-filling them with black. Let's apply four flood-fill operations on the binary image, seeded at the top-left, top-right, bottom-left, and bottom-right corners:

# Get the dimensions of the cropped image:
(imageHeight, imageWidth) = binaryImage.shape[:2]

# Apply flood-fill at seed point (0,0) - Top Left:
cv2.floodFill(binaryImage, mask=None, seedPoint=(0, 0), newVal=0)

# Apply flood-fill at seed point (imageWidth - 1, 0) - Top Right:
cv2.floodFill(binaryImage, mask=None, seedPoint=(imageWidth - 1, 0), newVal=0)

# Apply flood-fill at seed point (0, imageHeight - 1) - Bottom Left:
cv2.floodFill(binaryImage, mask=None, seedPoint=(0, imageHeight - 1), newVal=0)

# Apply flood-fill at seed point (imageWidth - 1, imageHeight - 1) - Bottom Right:
cv2.floodFill(binaryImage, mask=None, seedPoint=(imageWidth - 1, imageHeight - 1), newVal=0)

This is the result. Note that the sub-image we're looking for is isolated and most of the big noise blobs are gone:

You could probably run an area filter to get rid of the smaller (and larger) blobs of noise, but let's work with this result for now (a possible filter is sketched below). Alright, the first part is done. Let's read the template and perform template matching. Now, your template has an alpha channel that is of no use here. I opened your image in GIMP and replaced the alpha channel with plain white; this is the template I got:
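
As a side note, the area filter mentioned above could be a connected-components pass that keeps only mid-sized blobs. This is just a sketch, not part of the original pipeline, and the minArea/maxArea bounds are assumptions you'd have to tune:

# Optional area filter (a sketch; the area bounds are assumptions to tune):
# Label every connected blob in the binary mask, together with its statistics:
totalComponents, labels, stats, _ = cv2.connectedComponentsWithStats(binaryImage, connectivity=4)

minArea = 500      # hypothetical lower bound, in pixels
maxArea = 100000   # hypothetical upper bound, in pixels

# Keep only the blobs whose area falls inside the bounds (label 0 is the background):
filteredImage = np.zeros_like(binaryImage)
for i in range(1, totalComponents):
    blobArea = stats[i, cv2.CC_STAT_AREA]
    if minArea < blobArea < maxArea:
        filteredImage[labels == i] = 255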

Let's read the template, convert it to grayscale, and apply Otsu's thresholding to get a binary image:

# Read template:
template = cv2.imread(path+"colorTemplate.png")

# Convert it to grayscale:
template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)

# Threshold via Otsu:
_, template = cv2.threshold(template, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

This is the binary template:

Now, you could implement a progressive scaling mechanism here: resize this template by scale steps, run template matching against the target image at each step, look for the best matching result across the whole run, and compare it against a minimum threshold. But let's test the template as-is:

# Get template dimensions:
(templateHeight, templateWidth) = template.shape[:2]

# Run Template Matching:
result = cv2.matchTemplate(binaryImage, template, cv2.TM_CCOEFF_NORMED)

# Get Template Matching Results:
(minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(result)

# Get Matching Score:
matchScore = maxVal
print("Match Score: "+str(matchScore))

With this template, I got the following matchScore:

Match Score: 0.806335985660553

That seems pretty acceptable. Let's draw a nice rectangle at the location where the maximum matching score was found, just to visualize the result:

# Set ROI where the largest matching score was found:
matchX = maxLoc[0]
matchY = maxLoc[1]
matchWidth = matchX + templateWidth
matchHeight = matchY + templateHeight

# Draw the ROI on the copy of the cropped BGR image:
cv2.rectangle(imageROIcopy, (matchX, matchY), (matchWidth, matchHeight), (0, 0, 255), 2)
# Show the result:
cv2.imshow("Result (Local)", imageROIcopy)
cv2.waitKey(0)

This is the (cropped) result:

Looking good. Since we cropped the image to run this operation, let's locate the matched ROI on the actual, uncropped image:

# Show result on original image:
matchX = roiX + matchX
matchY = roiY + matchY
matchWidth = matchX + templateWidth
matchHeight = matchY + templateHeight

# Draw the ROI on the original BGR image:
cv2.rectangle(inputImage, (matchX, matchY), (matchWidth, matchHeight), (0, 0, 255), 2)

Additionally, we can draw a nice label with the matching score inside it. This is optional, just to have all the information drawn on the original image:

# Draw label with match result:
# Format the match score to two decimal places:
matchScore = "{:.2f}".format(matchScore)

# Draw a filled rectangle:
labelOrigin = (matchX-1, matchY - 40)
(labelWidth, labelHeight) = (matchWidth+1, matchY)
cv2.rectangle(inputImage, labelOrigin, (labelWidth, labelHeight), (0, 0, 255), -1)

# Draw the text:
labelOrigin = (matchX-1, matchY - 10)
cv2.putText(inputImage, str(matchScore), labelOrigin, cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 0), 2)

cv2.imshow("Result (Global)", inputImage)
cv2.waitKey(0)

This is the (full-size) result:


Edit: Handling the new image

I noticed that your new image is different from the original one. It seems you're capturing screens from different phones with different resolutions. Now, the problem is that if you change the size of the target image, the template has to be rescaled accordingly; otherwise it will be too small (or too big) for the new matching, yielding poor results. You could implement the rescaling mechanism I mentioned above to scale up the template; eventually you'd find a decent result at one of the rescaled sizes. That's one available option.
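
That mechanism could look something like the following sketch, assuming binaryImage and template are the binary mask and binary template from earlier; the scale range, step, and minimum score are all values you'd have to tune:

# Multi-scale template matching (a sketch; scale range and threshold are assumptions):
bestScore = 0
bestLocation = None
bestSize = None

# Try scales from 50% to 200% of the original template size, in 10% steps:
for scale in np.arange(0.5, 2.01, 0.1):

    # Resize the template for this scale step:
    newWidth = int(templateWidth * scale)
    newHeight = int(templateHeight * scale)
    scaledTemplate = cv2.resize(template, (newWidth, newHeight), interpolation=cv2.INTER_AREA)

    # Skip scales where the template no longer fits inside the target:
    if newHeight > binaryImage.shape[0] or newWidth > binaryImage.shape[1]:
        continue

    # Match at this scale and keep the best score seen so far:
    result = cv2.matchTemplate(binaryImage, scaledTemplate, cv2.TM_CCOEFF_NORMED)
    _, maxVal, _, maxLoc = cv2.minMaxLoc(result)
    if maxVal > bestScore:
        bestScore = maxVal
        bestLocation = maxLoc
        bestSize = (newWidth, newHeight)

# Accept the detection only above a minimum score (hypothetical threshold):
minScore = 0.6
if bestScore >= minScore:
    print("Best match: score=" + str(bestScore) + " at " + str(bestLocation) + ", size=" + str(bestSize))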

The other option is to rescale the new image to a size similar to the original one. Your original image has a size of 1125 x 2001, while the new one is 1600 x 2560. That's an important difference. Let's resize the new image so it has the same width as the original. The beginning of the code would then be modified to this:

# image path
path = "D://opencvImages//"
fileName = "newScreen.png"

# Reading an image in default mode:
inputImage = cv2.imread(path + fileName)

# Set the reference width:
referenceWidth = 1125

# Get image dimensions:
(imageHeight, imageWidth) = inputImage.shape[:2]

# Check input width vs reference width:
if imageWidth != referenceWidth:

    # Get original aspect ratio:
    aspectRatio = imageWidth / imageHeight
    # Compute new height using the reference width:
    newHeight = referenceWidth / aspectRatio
    # Set the new dimensions as a tuple:
    dim = (int(referenceWidth), int(newHeight))
    # Resize the image:
    inputImage = cv2.resize(inputImage, dim, interpolation=cv2.INTER_AREA)
    # Get new dimensions for further processing:
    (imageHeight, imageWidth) = inputImage.shape[:2]


# Deep copy for results:
inputImageCopy = inputImage.copy()

# Set the ROI location:
roiX = 0
roiY = 225
roiWidth = imageWidth
roiHeight = 1390

Here, I set the reference width to 1125 pixels, get the input image dimensions via shape, and check whether the input width differs from the reference. If it does, I resize the image to the reference width while preserving the original aspect ratio. The rest of the code needs no modification. The result on the new image would look like this: