Swift 4: How to create a face map with the iOS 11 Vision framework from face landmark points
I'm using the iOS 11 Vision framework to generate face landmark points in real time. I'm able to get the landmark points and overlay the camera layer with a UIBezierPath built from them. However, I'd like to end up with something like the picture on the bottom right. Right now I have something that looks like the picture on the left, and I've tried looping through the points and adding midpoints, but I don't know how to generate all those triangles from the points. How would I go about generating the map on the right from the points on the left?
I'm not sure I can use all of my points, not that it would help much, but I also have the points from the bounding box of the whole face. Finally, if there's any framework that would let me detect all the points I need, such as OpenCV or something else, please let me know. Thanks!
Here's the code I've been using, from https://github.com/DroidsOnRoids/VisionFaceDetection:
func detectLandmarks(on image: CIImage) {
    try? faceLandmarksDetectionRequest.perform([faceLandmarks], on: image)
    if let landmarksResults = faceLandmarks.results as? [VNFaceObservation] {
        for observation in landmarksResults {
            DispatchQueue.main.async {
                if let boundingBox = self.faceLandmarks.inputFaceObservations?.first?.boundingBox {
                    let faceBoundingBox = boundingBox.scaled(to: self.view.bounds.size)

                    // Different types of landmarks
                    let faceContour = observation.landmarks?.faceContour
                    self.convertPointsForFace(faceContour, faceBoundingBox)

                    let leftEye = observation.landmarks?.leftEye
                    self.convertPointsForFace(leftEye, faceBoundingBox)

                    let rightEye = observation.landmarks?.rightEye
                    self.convertPointsForFace(rightEye, faceBoundingBox)

                    let leftPupil = observation.landmarks?.leftPupil
                    self.convertPointsForFace(leftPupil, faceBoundingBox)

                    let rightPupil = observation.landmarks?.rightPupil
                    self.convertPointsForFace(rightPupil, faceBoundingBox)

                    let nose = observation.landmarks?.nose
                    self.convertPointsForFace(nose, faceBoundingBox)

                    let lips = observation.landmarks?.innerLips
                    self.convertPointsForFace(lips, faceBoundingBox)

                    let leftEyebrow = observation.landmarks?.leftEyebrow
                    self.convertPointsForFace(leftEyebrow, faceBoundingBox)

                    let rightEyebrow = observation.landmarks?.rightEyebrow
                    self.convertPointsForFace(rightEyebrow, faceBoundingBox)

                    let noseCrest = observation.landmarks?.noseCrest
                    self.convertPointsForFace(noseCrest, faceBoundingBox)

                    let outerLips = observation.landmarks?.outerLips
                    self.convertPointsForFace(outerLips, faceBoundingBox)
                }
            }
        }
    }
}
func convertPointsForFace(_ landmark: VNFaceLandmarkRegion2D?, _ boundingBox: CGRect) {
    if let points = landmark?.points, let count = landmark?.pointCount {
        // `convert` (a helper from the linked repo) turns the landmark's
        // point buffer into an array of (x, y) tuples normalized to [0, 1].
        let convertedPoints = convert(points, with: count)
        // Scale the normalized points into the face's bounding box in
        // view coordinates.
        let faceLandmarkPoints = convertedPoints.map { (point: (x: CGFloat, y: CGFloat)) -> (x: CGFloat, y: CGFloat) in
            let pointX = point.x * boundingBox.width + boundingBox.origin.x
            let pointY = point.y * boundingBox.height + boundingBox.origin.y
            return (x: pointX, y: pointY)
        }
        DispatchQueue.main.async {
            self.draw(points: faceLandmarkPoints)
        }
    }
}
func draw(points: [(x: CGFloat, y: CGFloat)]) {
    guard let first = points.first else { return }

    let newLayer = CAShapeLayer()
    newLayer.strokeColor = UIColor.blue.cgColor
    newLayer.lineWidth = 4.0

    // Connect the points in order, then close the contour back to the start.
    let path = UIBezierPath()
    path.move(to: CGPoint(x: first.x, y: first.y))
    for point in points.dropFirst() {
        path.addLine(to: CGPoint(x: point.x, y: point.y))
    }
    path.close()

    newLayer.path = path.cgPath
    shapeLayer.addSublayer(newLayer)
}
What you want in the picture on the right is the Candide mesh. You just need to map these points onto that mesh, and that's it. I don't think you need to go down the route discussed in the comments.
P.S. I found Candide while digging through the APK contents of a well-known filter app (it reminded me of Casper). I haven't had time to try it myself yet.
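To make the idea concrete, here's a minimal sketch of what mapping points onto such a mesh could look like. Everything here is hypothetical: `TriangleIndices`, `meshTopology`, and `meshPath` are made-up names, and the index values are placeholders rather than the real Candide topology, which ships as fixed vertex and triangle lists.

import UIKit

// Hypothetical sketch: a Candide-style mesh is a fixed triangle-index table
// over a known vertex ordering. These indices are placeholders, not the
// real Candide topology.
struct TriangleIndices {
    let a: Int, b: Int, c: Int
}

let meshTopology: [TriangleIndices] = [
    TriangleIndices(a: 0, b: 1, c: 2),  // placeholder
    TriangleIndices(a: 1, b: 3, c: 2),  // placeholder
]

// Build one path that strokes every triangle in the mesh, given the
// landmark points arranged in the vertex order the topology expects.
func meshPath(for points: [CGPoint], topology: [TriangleIndices]) -> UIBezierPath {
    let path = UIBezierPath()
    for tri in topology {
        path.move(to: points[tri.a])
        path.addLine(to: points[tri.b])
        path.addLine(to: points[tri.c])
        path.close()
    }
    return path
}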
I eventually found a solution that works. I used Delaunay triangulation via https://github.com/AlexLittlejohn/DelaunaySwift, and I modified it to work with the points generated by the Vision framework's face landmark detection request. This isn't easy to explain in a code snippet, so I've linked my GitHub repo below, which shows my solution. Note that this doesn't get points from the forehead, since the Vision framework only provides points from the eyebrows down.
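For reference, a rough sketch of what the triangulation step can look like. It assumes DelaunaySwift exposes a `Delaunay().triangulate(_:)` method over `Vertex` values and returns `Triangle`s with `vertex1`/`vertex2`/`vertex3` members; the library's API has changed between versions, so check the repo for the exact names. `triangulatedPath` is a made-up helper.

import UIKit
// Plus the DelaunaySwift sources from the repo linked above.

// Sketch: triangulate the converted landmark points and stroke each triangle.
func triangulatedPath(for landmarkPoints: [(x: CGFloat, y: CGFloat)]) -> UIBezierPath {
    // Feed every landmark point (already in view coordinates) into the
    // triangulation.
    let vertices = landmarkPoints.map { Vertex(x: Double($0.x), y: Double($0.y)) }
    let triangles = Delaunay().triangulate(vertices)

    // Stroke each resulting triangle; together they form the face mesh.
    let path = UIBezierPath()
    for triangle in triangles {
        path.move(to: CGPoint(x: triangle.vertex1.x, y: triangle.vertex1.y))
        path.addLine(to: CGPoint(x: triangle.vertex2.x, y: triangle.vertex2.y))
        path.addLine(to: CGPoint(x: triangle.vertex3.x, y: triangle.vertex3.y))
        path.close()
    }
    return path
}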