How to overcome slowness of live camera view in iOS
I am trying to develop an image segmentation app that runs the live camera view through my Core ML model, but the output is somewhat slow: the camera view with the mask prediction lags. Below are my vision manager class, which runs the prediction on the pixel buffer, and the function that calls this class to convert the result to colors before continuing with the camera output. Has anyone faced this issue before? Do you see any mistake in my code that causes it to run slowly?
The VisionManager class:
import AVFoundation
import CoreML
import Vision

class VisionManager: NSObject {
    static let shared = VisionManager()
    static let MODEL = ba_224_segm().model

    // Built once and reused for every frame.
    private lazy var predictionRequest: VNCoreMLRequest = {
        do {
            let model = try VNCoreMLModel(for: VisionManager.MODEL)
            let request = VNCoreMLRequest(model: model)
            request.imageCropAndScaleOption = VNImageCropAndScaleOption.centerCrop
            return request
        } catch {
            fatalError("can't load Vision ML Model")
        }
    }()

    // Runs the segmentation model on a single camera frame and hands the raw
    // observations back to the caller.
    func predict(pixelBuffer: CVImageBuffer, sampleBuffer: CMSampleBuffer, onResult: ((_ observations: [VNCoreMLFeatureValueObservation]) -> Void)) {
        var requestOptions: [VNImageOption: Any] = [:]
        if let cameraIntrinsicData = CMGetAttachment(sampleBuffer, key: kCMSampleBufferAttachmentKey_CameraIntrinsicMatrix, attachmentModeOut: nil) {
            requestOptions = [.cameraIntrinsics: cameraIntrinsicData]
        }
        let handler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: requestOptions)
        do {
            try handler.perform([predictionRequest])
        } catch {
            print("error handler")
        }
        guard let observations = predictionRequest.results as? [VNCoreMLFeatureValueObservation] else {
            fatalError("unexpected result type from VNCoreMLRequest")
        }
        onResult(observations)
    }
}
The function that predicts on the camera output:
func handleCameraOutput(pixelBuffer: CVImageBuffer, sampleBuffer: CMSampleBuffer, onFinish: @escaping ((_ image: UIImage?) -> Void)) {
    VisionManager.shared.predict(pixelBuffer: pixelBuffer, sampleBuffer: sampleBuffer) { [weak self] (observations) in
        if let multiArray: MLMultiArray = observations[0].featureValue.multiArrayValue {
            mask = maskEdit.maskToRGBA(maskArray: MultiArray<Float32>(multiArray), rgba: (Float(r), Float(g), Float(b), Float(a)))!
            maskInverted = maskEdit.maskToRGBAInvert(maskArray: MultiArray<Float32>(multiArray), rgba: (r: 1.0, g: 1.0, b: 1.0, a: 0.4))!
            let image = maskEdit.mergeMaskAndBackground(invertedMask: maskInverted, mask: mask, background: pixelBuffer, size: Int(size))
            DispatchQueue.main.async {
                onFinish(image)
            }
        }
    }
}
I call these under viewDidAppear as follows:
CameraManager.shared.setDidOutputHandler { [weak self] (output, pixelBuffer, sampleBuffer, connection) in
    self!.maskColor.getRed(&self!.r, green: &self!.g, blue: &self!.b, alpha: &self!.a)
    self!.a = 0.5
    self?.handleCameraOutput(pixelBuffer: pixelBuffer, sampleBuffer: sampleBuffer, onFinish: { (image) in
        self?.predictionView.image = image
    })
}
Your model takes time to perform the segmentation, and converting the output into an image then takes time as well. There is not much you can do to reduce this latency other than making the model smaller and making sure the output -> image conversion code is as fast as possible.
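To see which of the two stages dominates, it can help to time them separately. A minimal sketch, assuming the timing lines are dropped into the existing do block in predict (the same idea applies around the maskEdit calls in handleCameraOutput):

let start = CFAbsoluteTimeGetCurrent()
do {
    try handler.perform([predictionRequest])   // model inference
} catch {
    print("error handler")
}
let elapsedMs = (CFAbsoluteTimeGetCurrent() - start) * 1000
print("inference took \(elapsedMs) ms")        // compare against the camera's frame interval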
I found out the problem was that I wasn't using a different thread. Since I am a new developer I didn't know about such details and am still learning, thanks to the experts in this field and the knowledge they share. Please see my old and new captureOutput functions below. Using a different thread solved my problem:
Old version:
public func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
        else { return }
    self.handler?(output, pixelBuffer, sampleBuffer, connection)
    self.onCapture?(pixelBuffer, sampleBuffer)
    self.onCapture = nil
}
And the new version:
public func captureOutput(_ output: AVCaptureOutput,
                          didOutput sampleBuffer: CMSampleBuffer,
                          from connection: AVCaptureConnection) {
    // Only start a new prediction once the previous frame has been fully processed.
    if currentBuffer == nil {
        let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
        currentBuffer = pixelBuffer
        // Hand the frame off to a background queue so the capture callback is not blocked.
        DispatchQueue.global(qos: .userInitiated).async {
            self.handler?(output, self.currentBuffer!, sampleBuffer, connection)
            self.currentBuffer = nil
        }
    }
}
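As a side note, the capture side can also be configured so that the session itself drops frames that arrive while the delegate is still busy, and delivers frames on a dedicated serial queue. The CameraManager setup is not shown in the question, so the following is only a sketch of a typical AVCaptureVideoDataOutput configuration (captureSession is assumed to exist, and self is assumed to conform to AVCaptureVideoDataOutputSampleBufferDelegate):

let videoOutput = AVCaptureVideoDataOutput()
// Drop frames that arrive while the delegate is still processing the previous one.
videoOutput.alwaysDiscardsLateVideoFrames = true
// Deliver frames on a dedicated serial queue instead of the main thread.
let videoQueue = DispatchQueue(label: "camera.video.output")
videoOutput.setSampleBufferDelegate(self, queue: videoQueue)
if captureSession.canAddOutput(videoOutput) {
    captureSession.addOutput(videoOutput)
}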