Aspect fill AVCaptureVideoDataOutput when drawing in GLKView with CIContext
I'm drawing the camera output from an AVCaptureVideoDataOutput in a GLKView, but the camera is 4:3, which doesn't match the aspect ratio of the GLKView (which is full screen). I'm trying to aspect-fill, but the camera output seems to get squashed so that it doesn't extend past the edges of the view's frame. How can I get a full-screen camera view with a GLKView without distorting the aspect ratio?
Initializing the view:
videoDisplayView = GLKView(frame: superview.bounds, context: EAGLContext(api: .openGLES2)!)
videoDisplayView.transform = CGAffineTransform(rotationAngle: CGFloat(M_PI_2))
videoDisplayView.frame = superview.bounds
superview.addSubview(videoDisplayView)
superview.sendSubview(toBack: videoDisplayView)
renderContext = CIContext(eaglContext: videoDisplayView.context)
sessionQueue = DispatchQueue(label: "AVSessionQueue", attributes: [])
videoDisplayView.bindDrawable()
videoDisplayViewBounds = CGRect(x: 0, y: 0, width: videoDisplayView.drawableWidth, height: videoDisplayView.drawableHeight)
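Note that drawableWidth and drawableHeight are the framebuffer size in pixels rather than points, which is presumably why bindDrawable() is called before videoDisplayViewBounds is captured; this is the rectangle the CIContext renders into later.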
Initializing the video output:
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self, queue: sessionQueue)
if captureSession.canAddOutput(videoOutput) {
captureSession.addOutput(videoOutput)
}
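Not part of the original code, but as an optional aside: the data output can be asked for BGRA frames, which Core Image can wrap directly (the default biplanar YCbCr format also works with CIImage(cvPixelBuffer:), so this is not required):

// Optional, not in the original post: request BGRA frames for Core Image
videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]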
Rendering the output:
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
// Need to shimmy this through type-hell
let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
// Force the type change - pass through opaque buffer
let opaqueBuffer = Unmanaged<CVImageBuffer>.passUnretained(imageBuffer!).toOpaque()
let pixelBuffer = Unmanaged<CVPixelBuffer>.fromOpaque(opaqueBuffer).takeUnretainedValue()
let sourceImage = CIImage(cvPixelBuffer: pixelBuffer, options: nil)
// Do some detection on the image
let detectionResult = applyFilter?(sourceImage)
var outputImage = sourceImage
if detectionResult != nil {
outputImage = detectionResult!
}
if videoDisplayView.context != EAGLContext.current() {
EAGLContext.setCurrent(videoDisplayView.context)
}
videoDisplayView.bindDrawable()
// Clear the EAGL view to grey
glClearColor(0.5, 0.5, 0.5, 1.0)
glClear(0x00004000) // GL_COLOR_BUFFER_BIT
// Set the blend mode to "source over" so that Core Image composites over the clear colour
glEnable(0x0BE2) // GL_BLEND
glBlendFunc(1, 0x0303) // GL_ONE, GL_ONE_MINUS_SRC_ALPHA
renderContext.draw(outputImage, in: videoDisplayViewBounds, from: outputImage.extent)
videoDisplayView.display()
}
Things I have tried:
// Results in 4:3 stream leaving a gap at the bottom
renderContext.draw(outputImage, in: outputImage.extent, from: outputImage.extent)
// Results in same 4:3 stream
let rect = CGRect(x: 0, y: 0, width: outputImage.extent.width, height: videoDisplayViewBounds.height)
renderContext.draw(outputImage, in: rect, from: outputImage.extent)
I ended up actually having to crop my output to the aspect ratio of the view I was displaying it in.
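The reason is that CIContext.draw(_:in:from:) scales the from region of the image to fill the in rect exactly (in the drawable's pixel coordinates), so drawing the full 4:3 extent either leaves a gap or distorts the image. To aspect-fill, the source has to be cropped to the drawable's aspect ratio first (as below), or the destination rect has to be made larger than the drawable itself.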
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
// Need to shimmy this through type-hell
let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
// Force the type change - pass through opaque buffer
let opaqueBuffer = Unmanaged<CVImageBuffer>.passUnretained(imageBuffer!).toOpaque()
let pixelBuffer = Unmanaged<CVPixelBuffer>.fromOpaque(opaqueBuffer).takeUnretainedValue()
let sourceImage = CIImage(cvPixelBuffer: pixelBuffer, options: nil)
// Make a rect to crop to that's the size of the view we want to display the image in
let cropRect = AVMakeRect(aspectRatio: CGSize(width: videoDisplayViewBounds.width, height: videoDisplayViewBounds.height), insideRect: sourceImage.extent)
// Crop
let croppedImage = sourceImage.cropping(to: cropRect)
// Cropping changes the origin coordinates of the cropped image, so move it back to 0
let translatedImage = croppedImage.applying(CGAffineTransform(translationX: 0, y: -croppedImage.extent.origin.y))
// Do some detection on the image
let detectionResult = applyFilter?(translatedImage)
var outputImage = translatedImage
if detectionResult != nil {
outputImage = detectionResult!
}
if videoDisplayView.context != EAGLContext.current() {
EAGLContext.setCurrent(videoDisplayView.context)
}
videoDisplayView.bindDrawable()
// Clear the EAGL view to grey
glClearColor(0.5, 0.5, 0.5, 1.0)
glClear(0x00004000) // GL_COLOR_BUFFER_BIT
// Set the blend mode to "source over" so that Core Image composites over the clear colour
glEnable(0x0BE2) // GL_BLEND
glBlendFunc(1, 0x0303) // GL_ONE, GL_ONE_MINUS_SRC_ALPHA
renderContext.draw(outputImage, in: videoDisplayViewBounds, from: outputImage.extent)
videoDisplayView.display()
}
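For concreteness, here is a small sketch of what the crop computes, using hypothetical sizes (a 16:9 drawable and a 4:3 camera buffer; the real numbers depend on the device and session preset):

import AVFoundation
import CoreGraphics

let drawableSize = CGSize(width: 1920, height: 1080)             // stand-in for videoDisplayViewBounds.size
let sourceExtent = CGRect(x: 0, y: 0, width: 1440, height: 1080) // stand-in for sourceImage.extent
let cropRect = AVMakeRect(aspectRatio: drawableSize, insideRect: sourceExtent)
// AVMakeRect returns the largest rect with the requested aspect ratio that fits
// inside insideRect, centred: here (x: 0, y: 135, width: 1440, height: 810).
// Cropping to it (and translating y back to 0) yields an image whose aspect ratio
// matches the drawable, so drawing it into videoDisplayViewBounds fills the view
// without distortion.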