人脸检测 swift 视觉套件

Face detection swift vision kit

我正在尝试 iOS11 的 Vision 套件。我可以使用 Vision 找到人脸的边界框值。但我不知道如何使用这些点绘制矩形。我希望我的问题表述清楚了。

想必您已经在使用 VNDetectFaceRectanglesRequest 并能够检测到人脸。要显示矩形框,有很多方法可以实现,但最简单的方法是使用 CAShapeLayer,为检测到的每张脸在图像上层绘制一个图层。

假设您的 VNDetectFaceRectanglesRequest 如下所示:

// Build a face-detection request; the completion handler runs when Vision finishes.
// NOTE(review): `[unowned self]` crashes if self is deallocated before the handler
// fires — prefer `[weak self]` unless self's lifetime is provably longer.
let request = VNDetectFaceRectanglesRequest { [unowned self] request, error in
            if let error = error {
                // Something went wrong; the error is currently ignored.
            }
            else {
                // Detection finished; hand the observations off for drawing.
                self.handleFaces(with: request)
            }
        }
        // Run the request synchronously against the CIImage.
        let handler = VNImageRequestHandler(ciImage: ciImage, options: [:])
        do {
            try handler.perform([request])
        }
        catch {
           // perform(_:) throws on failure; the error is swallowed here.
        }

您可以实现一个名为 handleFaces 的简单方法,为检测到的每张脸使用 VNFaceObservation 的属性绘制一个 CAShapeLayer:

/// Draws a red border layer over every face Vision reported in `request`.
/// Each normalized bounding box (bottom-left origin, 0...1) is converted
/// into the image view's UIKit coordinate space (top-left origin, points).
func handleFaces(with request: VNRequest) {
    // Clear any boxes drawn for a previous image.
    if let existing = imageView.layer.sublayers {
        for sublayer in existing {
            sublayer.removeFromSuperlayer()
        }
    }

    guard let faces = request.results as? [VNFaceObservation] else {
        return
    }

    let viewWidth = imageView.bounds.width
    let viewHeight = imageView.bounds.height

    for face in faces {
        let box = face.boundingBox
        let boxSize = CGSize(width: box.width * viewWidth,
                             height: box.height * viewHeight)
        // Flip the y-axis, then shift up by the box height for a top-left origin.
        let boxOrigin = CGPoint(x: box.minX * viewWidth,
                                y: (1 - face.boundingBox.minY) * viewHeight - boxSize.height)

        let outline = CAShapeLayer()
        outline.frame = CGRect(origin: boxOrigin, size: boxSize)
        outline.borderColor = UIColor.red.cgColor
        outline.borderWidth = 2

        imageView.layer.addSublayer(outline)
    }
}

可以在 GitHub 存储库 iOS-11-by-Examples 中找到更多信息。

这是绘制方框的简单方法。

// Face-detection request that calls `faceDetection(request:error:)` on completion.
let faceRequest = VNDetectFaceRectanglesRequest(completionHandler:self.faceDetection)

/// Completion handler for the face-detection request.
/// Draws a red box view over every detected face on the main queue.
/// - Parameters:
///   - request: The finished Vision request; results are `VNFaceObservation`s.
///   - error: Any error reported by Vision (currently unused).
func faceDetection (request: VNRequest, error: Error?) {
    guard let observations = request.results as? [VNFaceObservation] else {
        print("unexpected result type from VNFaceObservation")
        return
    }
    // Nothing to draw when no faces were found.
    guard !observations.isEmpty else {
        return
    }

    // UIKit work must happen on the main queue; Vision may call back elsewhere.
    DispatchQueue.main.async {
        // Remove boxes left over from a previous detection pass.
        self.resultImageView.subviews.forEach { subview in
            subview.removeFromSuperview()
        }

        // Show the pre-processed image once, not once per detected face
        // (the original re-assigned it inside the loop).
        self.analyzedImageView.image = self.originalImageView.image

        for face in observations {
            // Fixed: the original called `CreateBoxView`, but the helper
            // defined below is named `boxView`.
            let view = self.boxView(withColor: UIColor.red)
            // NOTE(review): the rect is scaled to `analyzedImageView` but the box
            // is added to `resultImageView` — confirm both views share a frame.
            view.frame = self.transformRect(fromRect: face.boundingBox, toViewRect: self.analyzedImageView)
            self.resultImageView.addSubview(view)
        }
    }
}

 // MARK: - Instance Methods
/// Builds a transparent view whose layer shows a 2-pt border in the given
/// color, used as a face-bounding-box overlay.
func boxView(withColor : UIColor) -> UIView {
    let box = UIView()
    box.backgroundColor = UIColor.clear
    box.layer.borderWidth = 2.0
    box.layer.borderColor = withColor.cgColor
    return box
}


//Convert Vision Frame to UIKit Frame
func transformRect(fromRect: CGRect , toViewRect :UIView) -> CGRect {

    var toRect = CGRect()
    toRect.size.width = fromRect.size.width * toViewRect.frame.size.width
    toRect.size.height = fromRect.size.height * toViewRect.frame.size.height
    toRect.origin.y =  (toViewRect.frame.height) - (toViewRect.frame.height * fromRect.origin.y )
    toRect.origin.y  = toRect.origin.y -  toRect.size.height
    toRect.origin.x =  fromRect.origin.x * toViewRect.frame.size.width

    return toRect
}