Tracking multiple objects in iOS 13
My app uses Apple's sample code to track multiple objects in a video with Vision (in my case, it tracks the path of a barbell during weightlifting exercises), but after updating to iOS 13 the video no longer displays correctly. The video is now cropped so that only a small portion of it is visible, instead of filling the screen as it used to. I have spoken with Apple technical support; they acknowledged the bug but have no fix planned.
What bothers me most is that a) landscape videos work but portrait videos do not, and b) the bug only occurs on a real device, not in the simulator. Below is the portion of the code that displays the video depending on its aspect ratio (landscape or portrait).
private func scaleImage(to viewSize: CGSize) -> UIImage? {
    guard self.image != nil && self.image.size != CGSize.zero else {
        return nil
    }

    self.imageAreaRect = CGRect.zero

    // There are two possible cases to fully fit self.image into the ImageTrackingView area:
    // Option 1) image.width = view.width ==> image.height <= view.height
    // Option 2) image.height = view.height ==> image.width <= view.width
    let imageAspectRatio = self.image.size.width / self.image.size.height

    // Check if we're in the Option 1) case and initialize self.imageAreaRect accordingly
    let imageSizeOption1 = CGSize(width: viewSize.width, height: floor(viewSize.width / imageAspectRatio))
    if imageSizeOption1.height <= viewSize.height {
        print("Landscape. View size: \(viewSize)")
        let imageX: CGFloat = 0
        let imageY = floor((viewSize.height - imageSizeOption1.height) / 2.0)
        self.imageAreaRect = CGRect(x: imageX,
                                    y: imageY,
                                    width: imageSizeOption1.width,
                                    height: imageSizeOption1.height)
    }

    if self.imageAreaRect == CGRect.zero {
        // Check if we're in the Option 2) case if Option 1) didn't work out, and initialize imageAreaRect accordingly
        print("portrait. View size: \(viewSize)")
        let imageSizeOption2 = CGSize(width: floor(viewSize.height * imageAspectRatio), height: viewSize.height)
        if imageSizeOption2.width <= viewSize.width {
            let imageX = floor((viewSize.width - imageSizeOption2.width) / 2.0)
            let imageY: CGFloat = 0
            self.imageAreaRect = CGRect(x: imageX,
                                        y: imageY,
                                        width: imageSizeOption2.width,
                                        height: imageSizeOption2.height)
        }
    }

    // In the next line, pass 0.0 to use the current device's pixel scaling factor (and thus account for Retina resolution).
    // Pass 1.0 to force exact pixel size.
    UIGraphicsBeginImageContextWithOptions(self.imageAreaRect.size, false, 0.0)
    self.image.draw(in: CGRect(x: 0.0, y: 0.0, width: self.imageAreaRect.size.width, height: self.imageAreaRect.size.height))

    let newImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()

    return newImage
}
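(For reference, the Option 1/Option 2 logic above is just a centered aspect-fit calculation. The same rectangle can also be obtained with AVFoundation's AVMakeRect(aspectRatio:insideRect:); this is only an equivalent sketch for illustration, not part of Apple's sample.)

import AVFoundation
import UIKit

// Illustration only: computes the same centered aspect-fit rect as the
// Option 1/Option 2 branches above (apart from the explicit floor() rounding).
func aspectFitRect(for imageSize: CGSize, in viewSize: CGSize) -> CGRect {
    return AVMakeRect(aspectRatio: imageSize,
                      insideRect: CGRect(origin: .zero, size: viewSize))
}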
Any help would be greatly appreciated.
Thanks
After following up with Apple tech support for a few weeks, they came up with a very simple workaround. Basically, you just need to convert through a CGImage instead of going directly from CIImage to UIImage.
Here is the old code (as you can see it in the current sample in Apple's documentation):
if let frame = frame {
    let ciImage = CIImage(cvPixelBuffer: frame).transformed(by: transform)
    let uiImage = UIImage(ciImage: ciImage)
    self.trackingView.image = uiImage
}
Here is the correction:
if let frame = frame {
    let ciImage = CIImage(cvPixelBuffer: frame).transformed(by: transform)
    guard let cgImage = CIContext().createCGImage(ciImage, from: ciImage.extent) else {
        return
    }
    let uiImage = UIImage(cgImage: cgImage)
    self.trackingView.image = uiImage
}
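One note on the workaround (my own observation, not from Apple support): CIContext is comparatively expensive to create, so if this code runs once per video frame it may be worth keeping a single context around instead of building a new one each time. A minimal sketch of that idea; the names FrameRenderer, trackingView, and show(frame:transform:) are illustrative, not from Apple's sample:

import CoreImage
import CoreVideo
import UIKit

// Sketch only: reuse one CIContext for every frame instead of creating
// a new context per frame.
final class FrameRenderer {
    let trackingView = UIImageView()
    private let ciContext = CIContext()

    func show(frame: CVPixelBuffer?, transform: CGAffineTransform) {
        guard let frame = frame else { return }
        let ciImage = CIImage(cvPixelBuffer: frame).transformed(by: transform)
        guard let cgImage = ciContext.createCGImage(ciImage, from: ciImage.extent) else {
            return
        }
        trackingView.image = UIImage(cgImage: cgImage)
    }
}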
Hope this helps!