Cut a rounded image with the face from CIDetector and CIFaceFeature

How can I crop the frame that I get back as faceViewBounds so that I end up with a big circle around the face, like a face badge?

Maybe I should take the center of faceViewBounds, find that center in theImageView.image, draw a big circle around it, and then cut away everything outside the circle, but I don't have the code and don't know how to do it. Any suggestions?

func detectFaceFrom(ImageView theImageView: UIImageView) -> UIImage? {

    guard let personImage = CIImage(image: theImageView.image!) else {
        return nil
    }

    let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyLow]
    let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)
    let faces = faceDetector?.features(in: personImage)

    // CIImage coordinates are flipped vertically relative to UIKit, so flip the bounds
    let ciImageSize = personImage.extent.size
    var transform = CGAffineTransform(scaleX: 1, y: -1)
    transform = transform.translatedBy(x: 0, y: -ciImageSize.height)

    if faces?.count == 1 {
        for face in faces as! [CIFaceFeature] {
            var faceViewBounds = face.bounds.applying(transform)

            // Scale and offset the face rect from image coordinates to view coordinates
            let viewSize = theImageView.bounds.size
            let scale = min(viewSize.width / ciImageSize.width,
                            viewSize.height / ciImageSize.height)
            let offsetX = (viewSize.width - ciImageSize.width * scale) / 2
            let offsetY = (viewSize.height - ciImageSize.height * scale) / 2

            faceViewBounds = faceViewBounds.applying(CGAffineTransform(scaleX: scale, y: scale))
            faceViewBounds.origin.x += offsetX
            faceViewBounds.origin.y += offsetY

            let faceBox = UIView(frame: faceViewBounds)
            faceBox.layer.borderWidth = 3
            faceBox.layer.borderColor = UIColor.green.cgColor
            faceBox.backgroundColor = UIColor.clear

            // This is the part I'm missing:
            // drawCircleFromCenter(faceViewBounds.center ???
        }
        return cuttedCircleWithFace // the circular face image I want to produce
    } else {
        return theImageView.image
    }
}
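For reference, here is a rough idea of what the missing circular-crop step could look like. This is only a sketch: circularFaceImage(from:around:) is a hypothetical helper name, and it assumes faceRect is the face rectangle in the image's pixel coordinate space (face.bounds after the vertical flip, before scaling into view coordinates):

func circularFaceImage(from image: UIImage, around faceRect: CGRect) -> UIImage? {
    // Square region that contains the face, with some padding around it
    let side = max(faceRect.width, faceRect.height) + 60
    let square = CGRect(x: faceRect.midX - side / 2,
                        y: faceRect.midY - side / 2,
                        width: side,
                        height: side)
    guard let cgFace = image.cgImage?.cropping(to: square) else { return nil }

    // Draw the cropped face inside a circular clipping path
    let renderer = UIGraphicsImageRenderer(size: square.size)
    return renderer.image { _ in
        UIBezierPath(ovalIn: CGRect(origin: .zero, size: square.size)).addClip()
        UIImage(cgImage: cgFace).draw(in: CGRect(origin: .zero, size: square.size))
    }
}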

I just saw an ad on Facebook that does exactly what I'm trying to accomplish:

If you just want to focus on a face inside the image, you should first set up an image view and mask it into a circle:

let image = UIImage(named: "face.jpg")
let imageView = UIImageView(frame: CGRect(x: 0, y: 0, width: 50.0, height: 50.0))
imageView.image = image
imageView.contentMode = .scaleAspectFill
imageView.layer.cornerRadius = imageView.bounds.height * 0.5
imageView.layer.masksToBounds = true

Then you run the CIDetector:

func focusOnFace(in imageView: UIImageView)
{

    guard let image = imageView.image,
          var personImage = CIImage(image: image) else { return }

    let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyLow]
    let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)
    // This will just take the first detected face but you can do something more sophisticated
    guard let face = faceDetector?.features(in: personImage).first as? CIFaceFeature else { return }

    // Make the facial rect a square so it will mask nicely to a circle (may not be strictly necessary as `CIFaceFeature` bounds is typically a square)
    var rect = face.bounds
    rect.size.height = max(face.bounds.height, face.bounds.width)
    rect.size.width = max(face.bounds.height, face.bounds.width)
    rect = rect.insetBy(dx: -30, dy: -30) // Adds padding around the face so it's not so tightly cropped

    // Crop to the face detected
    personImage = personImage.cropped(to: rect)

    // Set the new cropped image as the image view image
    imageView.image = UIImage(ciImage: personImage)
}
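A brief usage sketch, assuming imageView is the circular image view configured above. One caveat: UIImage(ciImage:) is only rendered lazily, so if you need a bitmap-backed image afterwards (for example its cgImage), you can render it through a CIContext:

focusOnFace(in: imageView)

// Optional: convert the CIImage-backed result into a bitmap-backed UIImage
if let ciImage = imageView.image?.ciImage,
   let cgImage = CIContext().createCGImage(ciImage, from: ciImage.extent) {
    imageView.image = UIImage(cgImage: cgImage)
}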

Example

Before running focusOnFace:

After running focusOnFace:

Updated example

Before running focusOnFace:

After running focusOnFace:

The problem is that you should use image.size instead of theImageView.bounds.size. You should also handle the features option CIDetectorImageOrientation.

extension UIImage{
    var faces: [UIImage] {
        guard let ciimage = CIImage(image: self) else { return [] }
        var orientation: NSNumber {
            switch imageOrientation {
            case .up:            return 1
            case .upMirrored:    return 2
            case .down:          return 3
            case .downMirrored:  return 4
            case .leftMirrored:  return 5
            case .right:         return 6
            case .rightMirrored: return 7
            case .left:          return 8
            }
        }
        return CIDetector(ofType: CIDetectorTypeFace, context: nil, options: [CIDetectorAccuracy: CIDetectorAccuracyLow])?
            .features(in: ciimage, options: [CIDetectorImageOrientation: orientation])
            .compactMap {
                let rect = $0.bounds.insetBy(dx: -10, dy: -10)
                UIGraphicsBeginImageContextWithOptions(rect.size, false, scale)
                defer { UIGraphicsEndImageContext() }
                UIImage(ciImage: ciimage.cropped(to: rect)).draw(in: CGRect(origin: .zero, size: rect.size))
                guard let face = UIGraphicsGetImageFromCurrentImageContext() else { return nil }
                // now that you have your face image you need to properly apply a circle mask to it
                let size = face.size
                let breadth = min(size.width, size.height)
                let breadthSize = CGSize(width: breadth, height: breadth)
                UIGraphicsBeginImageContextWithOptions(breadthSize, false, scale)
                defer { UIGraphicsEndImageContext() }
                guard let cgImage = face.cgImage?.cropping(to: CGRect(origin: CGPoint(x: size.width > size.height ? (size.width-size.height).rounded(.down)/2 : 0, y: size.height > size.width ? (size.height-size.width).rounded(.down)/2 : 0), size: breadthSize))
                    else { return nil }
                let faceRect = CGRect(origin: .zero, size: CGSize(width: min(size.width, size.height), height: min(size.width, size.height)))
                UIBezierPath(ovalIn: faceRect).addClip()
                UIImage(cgImage: cgImage).draw(in: faceRect)
                return UIGraphicsGetImageFromCurrentImageContext()
            } ?? []
    }
}

let profilePicture = UIImage(data: try! Data(contentsOf: URL(string:"http://i.stack.imgur.com/Xs4RX.jpg")!))!
if let face =  profilePicture.faces.first {
    print(face.size)
}
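Since the returned face image is already clipped to a circle (with transparency outside the oval), it can be dropped straight into an image view. A small sketch, where badgeView is just a placeholder name:

if let face = profilePicture.faces.first {
    let badgeView = UIImageView(frame: CGRect(x: 0, y: 0, width: 50, height: 50))
    badgeView.contentMode = .scaleAspectFill
    badgeView.image = face // already masked to a circle by the extension above
}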