AVCaptureConnection.previewLayer 在委托 AVCaptureMetadataOutputObjectsDelegate 中为 nil
AVCaptureConnection.previewLayer is nil in delegate AVCaptureMetadataOutputObjectsDelegate
下面的代码可以 运行 在 iPhone 上,当针对任何 QR 码时,应该打印内容。下面二维码指向example.com.
问题是代理应该提供 connection: AVCaptureConnection
而它确实提供了,但是它的 previewLayer
属性 是 nil
.
可以将以下代码粘贴到一个新的空 Xcode 项目中。如果您禁用(注释掉)第 57 行,并启用第 56 行,它工作正常。但我想将委托放在 CaptureView
class 之外。我如何设置捕获使得 AVCaptureMetadataOutputObjectsDelegate
它的 previewLayer
属性 不是 nil
?
import UIKit
import AVFoundation
/// Transforms each captured metadata object into the preview layer's
/// coordinate space and logs the string payload of any machine-readable code.
/// - Parameters:
///   - output: The metadata output that produced the objects (unused here).
///   - metadataObjects: Raw objects in capture-device coordinates.
///   - previewLayer: Layer used to convert objects into view coordinates.
func printMetadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], previewLayer: AVCaptureVideoPreviewLayer) {
    for metadataObject in metadataObjects {
        // Convert from device coordinates to the layer's coordinate space.
        guard let transformed = previewLayer.transformedMetadataObject(for: metadataObject),
              let readableCode = transformed as? AVMetadataMachineReadableCodeObject else {
            NSLog("Ignoring object that is not AVMetadataMachineReadableCodeObject")
            continue
        }
        if let payload = readableCode.stringValue {
            NSLog("Captured string %@", payload)
        } else {
            NSLog("Captured something that's not a string")
        }
    }
}
// View that owns the capture session and preview layer for QR scanning.
// The metadata delegate is injected so it can live outside this class —
// the question is why connection.videoPreviewLayer is nil in that delegate.
class CaptureView: UIView, AVCaptureMetadataOutputObjectsDelegate {
private let previewLayer = AVCaptureVideoPreviewLayer()
// Used only when `self` is the delegate (commented-out line below);
// resolves the layer from the stored property, not the connection.
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
printMetadataOutput(output, didOutput: metadataObjects, previewLayer: self.previewLayer)
}
override func layoutSubviews() {
super.layoutSubviews()
// NOTE(review): self.frame is in the superview's coordinate space; a
// sublayer's frame is usually self.bounds — confirm this is intended.
self.previewLayer.frame = self.frame
}
init(frame: CGRect, delegate: AVCaptureMetadataOutputObjectsDelegate) {
guard let captureDevice = AVCaptureDevice.default(for: .video) else {
fatalError("Couldn't find default capture device")
}
// If the camera input cannot be created (e.g. permission denied),
// fall back to a plain view with no session configured.
guard let captureDeviceInput = try? AVCaptureDeviceInput(device: captureDevice) else {
super.init(frame: frame)
return
}
let captureSession = AVCaptureSession()
captureSession.addInput(captureDeviceInput)
self.previewLayer.session = captureSession
self.previewLayer.videoGravity = .resizeAspectFill
super.init(frame: frame)
self.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = self.frame
// NOTE(review): startRunning() blocks until the session starts; Apple
// recommends calling it off the main thread — confirm acceptable here.
captureSession.startRunning()
let metadataOutput = AVCaptureMetadataOutput()
// metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
metadataOutput.setMetadataObjectsDelegate(delegate, queue: DispatchQueue.main)
metadataOutput.rectOfInterest = CGRect(x: 0, y: 0, width: 1, height: 1)
if captureSession.canAddOutput(metadataOutput) {
captureSession.addOutput(metadataOutput)
} else {
fatalError("Can't add metadata output to capture session")
}
// Restrict detection to QR codes; must be set after adding the output.
metadataOutput.metadataObjectTypes = [.qr]
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
}
/// External delegate that reads the preview layer off the connection.
/// This is the variant that fails: connection.videoPreviewLayer is nil
/// for connections formed automatically by addInput/addOutput.
class MetadataDelegate: NSObject, AVCaptureMetadataOutputObjectsDelegate {
    func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
        if let layer = connection.videoPreviewLayer {
            printMetadataOutput(output, didOutput: metadataObjects, previewLayer: layer)
        } else {
            print("previewLayer was nil")
        }
    }
}
/// Hosts a full-screen CaptureView and supplies the external metadata delegate.
class ViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
    private let metadataDelegate = MetadataDelegate()
    override func viewDidLoad() {
        // Fix: overrides of viewDidLoad must call super per UIKit's contract.
        super.viewDidLoad()
        let captureView = CaptureView(frame: CGRect(), delegate: self.metadataDelegate)
        captureView.frame = self.view.frame
        captureView.autoresizingMask = [.flexibleHeight, .flexibleWidth]
        self.view.addSubview(captureView)
    }
}
我发现了这个错误。
事实上,即使当你启用这条线时:
metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
在CaptureView
的对应metadataOutput(_, didOutput:, from:,)
中,connection.videoPreviewLayer
仍然是nil。正如 开发人员文档 所说:
This property is set if you initialized the connection using init(inputPort:videoPreviewLayer:) or connectionWithInputPort:videoPreviewLayer:.
所以,在这两种情况下,connection.videoPreviewLayer
都将为零。
我已经稍微更新了您的代码以使其按您想要的方式工作。
import UIKit
import AVFoundation
/// Transforms each captured metadata object into the preview layer's
/// coordinate space and logs the string payload of any machine-readable code.
/// - Parameters:
///   - output: The metadata output that produced the objects (unused here).
///   - metadataObjects: Raw objects in capture-device coordinates.
///   - previewLayer: Layer used to convert objects into view coordinates.
func printMetadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], previewLayer: AVCaptureVideoPreviewLayer) {
    for metadataObject in metadataObjects {
        // Convert from device coordinates to the layer's coordinate space.
        guard let transformed = previewLayer.transformedMetadataObject(for: metadataObject),
              let readableCode = transformed as? AVMetadataMachineReadableCodeObject else {
            NSLog("Ignoring object that is not AVMetadataMachineReadableCodeObject")
            continue
        }
        if let payload = readableCode.stringValue {
            NSLog("Captured string %@", payload)
        } else {
            NSLog("Captured something that's not a string")
        }
    }
}
// View owning the capture session and preview layer. In this revision the
// layer is exposed (non-private) so the owner can hand it to the external
// delegate, working around connection.videoPreviewLayer being nil.
class CaptureView: UIView, AVCaptureMetadataOutputObjectsDelegate {
let previewLayer = AVCaptureVideoPreviewLayer()
// Used only when `self` is the delegate; demonstrates that even then the
// connection's videoPreviewLayer is nil for auto-formed connections.
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
if connection.videoPreviewLayer == nil {
print("connection.videoPreviewLayer was nil")
}
printMetadataOutput(output, didOutput: metadataObjects, previewLayer: self.previewLayer)
}
override func layoutSubviews() {
super.layoutSubviews()
// NOTE(review): self.frame is in the superview's coordinate space; a
// sublayer's frame is usually self.bounds — confirm this is intended.
self.previewLayer.frame = self.frame
}
init(frame: CGRect, delegate: AVCaptureMetadataOutputObjectsDelegate) {
guard let captureDevice = AVCaptureDevice.default(for: .video) else {
fatalError("Couldn't find default capture device")
}
// If the camera input cannot be created (e.g. permission denied),
// fall back to a plain view with no session configured.
guard let captureDeviceInput = try? AVCaptureDeviceInput(device: captureDevice) else {
super.init(frame: frame)
return
}
let captureSession = AVCaptureSession()
captureSession.addInput(captureDeviceInput)
self.previewLayer.session = captureSession
self.previewLayer.videoGravity = .resizeAspectFill
super.init(frame: frame)
self.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = self.frame
// NOTE(review): startRunning() blocks until the session starts; Apple
// recommends calling it off the main thread — confirm acceptable here.
captureSession.startRunning()
let metadataOutput = AVCaptureMetadataOutput()
// metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
metadataOutput.setMetadataObjectsDelegate(delegate, queue: DispatchQueue.main)
metadataOutput.rectOfInterest = CGRect(x: 0, y: 0, width: 1, height: 1)
if captureSession.canAddOutput(metadataOutput) {
captureSession.addOutput(metadataOutput)
} else {
fatalError("Can't add metadata output to capture session")
}
// Restrict detection to QR codes; must be set after adding the output.
metadataOutput.metadataObjectTypes = [.qr]
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
}
/// External delegate; the preview layer is injected by the owner instead of
/// being read from the connection (whose videoPreviewLayer is nil for
/// auto-formed connections).
class MetadataDelegate: NSObject, AVCaptureMetadataOutputObjectsDelegate {
    // Set by the owner once the CaptureView exists.
    var previewLayer: AVCaptureVideoPreviewLayer?
    func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
        if let layer = previewLayer {
            printMetadataOutput(output, didOutput: metadataObjects, previewLayer: layer)
        } else {
            print("previewLayer was nil")
        }
    }
}
/// Hosts a full-screen CaptureView and wires its preview layer into the
/// external metadata delegate so coordinate transforms work.
class ViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
    private let metadataDelegate = MetadataDelegate()
    override func viewDidLoad() {
        // Fix: overrides of viewDidLoad must call super per UIKit's contract.
        super.viewDidLoad()
        let captureView = CaptureView(frame: CGRect(), delegate: self.metadataDelegate)
        // Hand the view's preview layer to the delegate, since the delegate
        // cannot obtain it from the connection.
        metadataDelegate.previewLayer = captureView.previewLayer
        captureView.frame = self.view.frame
        captureView.autoresizingMask = [.flexibleHeight, .flexibleWidth]
        self.view.addSubview(captureView)
    }
}
正如 videoPreviewLayer 文档所述:
This property is set if you initialized the connection using
init(inputPort:videoPreviewLayer:)
or
connectionWithInputPort:videoPreviewLayer:
.
因此,为了在 videoPreviewLayer
属性 中获取值,您必须手动设置 AVCaptureConnection
对象。
相反,我建议将 AVCaptureMetadataOutputObjectsDelegate
隐藏在您可以声明的自定义协议后面:
// Custom delegate protocol mirroring AVCaptureMetadataOutputObjectsDelegate,
// but additionally handing over the preview layer needed to transform
// metadata objects into view coordinates.
protocol CaptureViewMetadataOutputObjectsDelegate {
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection, previewLayer: AVCaptureVideoPreviewLayer)
}
然后,在您的 CaptureView
中实施 AVCaptureMetadataOutputObjectsDelegate
协议,并调用您的协议的函数传递所需的 AVCaptureVideoPreviewLayer
。您的代码将是这样的:
// View owning the capture session. In this design CaptureView itself is the
// AVFoundation delegate and forwards callbacks — together with its preview
// layer — to a custom, injected CaptureViewMetadataOutputObjectsDelegate.
class CaptureView: UIView, AVCaptureMetadataOutputObjectsDelegate {
private let previewLayer = AVCaptureVideoPreviewLayer()
private let delegate: CaptureViewMetadataOutputObjectsDelegate
// AVFoundation callback; forwards to the custom delegate with the layer.
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
delegate.metadataOutput(output, didOutput: metadataObjects, from: connection, previewLayer: previewLayer)
// printMetadataOutput(output, didOutput: metadataObjects, previewLayer: self.previewLayer)
}
override func layoutSubviews() {
super.layoutSubviews()
// NOTE(review): self.frame is in the superview's coordinate space; a
// sublayer's frame is usually self.bounds — confirm this is intended.
self.previewLayer.frame = self.frame
}
init(frame: CGRect, delegate: CaptureViewMetadataOutputObjectsDelegate) {
self.delegate = delegate
guard let captureDevice = AVCaptureDevice.default(for: .video) else {
fatalError("Couldn't find default capture device")
}
// If the camera input cannot be created (e.g. permission denied),
// fall back to a plain view with no session configured.
guard let captureDeviceInput = try? AVCaptureDeviceInput(device: captureDevice) else {
super.init(frame: frame)
return
}
let captureSession = AVCaptureSession()
captureSession.addInput(captureDeviceInput)
self.previewLayer.session = captureSession
self.previewLayer.videoGravity = .resizeAspectFill
super.init(frame: frame)
self.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = self.frame
// NOTE(review): startRunning() blocks until the session starts; Apple
// recommends calling it off the main thread — confirm acceptable here.
captureSession.startRunning()
let metadataOutput = AVCaptureMetadataOutput()
metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
// metadataOutput.setMetadataObjectsDelegate(delegate, queue: DispatchQueue.main)
metadataOutput.rectOfInterest = CGRect(x: 0, y: 0, width: 1, height: 1)
if captureSession.canAddOutput(metadataOutput) {
captureSession.addOutput(metadataOutput)
} else {
fatalError("Can't add metadata output to capture session")
}
// Restrict detection to QR codes; must be set after adding the output.
metadataOutput.metadataObjectTypes = [.qr]
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
}
// Concrete implementation of the custom delegate protocol; forwards to the
// logging helper using the preview layer supplied by CaptureView.
class MetadataDelegate: CaptureViewMetadataOutputObjectsDelegate {
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection, previewLayer: AVCaptureVideoPreviewLayer) {
printMetadataOutput(output, didOutput: metadataObjects, previewLayer: previewLayer)
}
}
/// Hosts a full-screen CaptureView; the custom-protocol delegate needs no
/// extra wiring because CaptureView passes its own preview layer along.
class ViewController: UIViewController {
    private let metadataDelegate = MetadataDelegate()
    override func viewDidLoad() {
        // Fix: overrides of viewDidLoad must call super per UIKit's contract.
        super.viewDidLoad()
        let captureView = CaptureView(frame: CGRect(), delegate: self.metadataDelegate)
        captureView.frame = self.view.frame
        captureView.autoresizingMask = [.flexibleHeight, .flexibleWidth]
        self.view.addSubview(captureView)
    }
}
更新:经过一番研究,我确实在init(inputPort:videoPreviewLayer:)函数文档中找到了这条语句:
When using addInput(_:)
or addOutput(_:)
, connections are
automatically formed between all compatible inputs and outputs. You do
not need to manually create and add connections to the session unless
you use the primitive addInputWithNoConnections(_:)
and
addOutputWithNoConnections(_:)
methods.
这意味着当您将设备相机添加为输入并将 AVCaptureMetadataOutput
添加为输出时,所有兼容的 AVCaptureConnection
都会自动创建。
我尝试使用以下代码创建另一个 AVCaptureConnection
:
// Attempt to form an extra connection that carries the preview layer.
// Fix: the closure shorthand `$0` was garbled as `[=12=]` in transcription,
// which does not compile.
if let port = captureDeviceInput.ports.first(where: { $0.mediaType == .video }) {
    let con = AVCaptureConnection(inputPort: port, videoPreviewLayer: self.previewLayer)
    if captureSession.canAddConnection(con) {
        captureSession.addConnection(con)
    }
}
但是 canAddConnection(_:)
函数总是 returns false
.
之后我打印了 AVCaptureSession
的 connections
数组,我看到了以下内容:
(lldb) po captureSession.connections
[<AVCaptureConnection: 0x280d67980 (AVCaptureDeviceInput: 0x280d119a0 Back Camera) -> (AVCaptureVideoPreviewLayer: 0x280d6ba40) [type:vide][enabled:1][active:1]>, <AVCaptureConnection: 0x280d7bee0 (AVCaptureDeviceInput: 0x280d119a0 Back Camera) -> (AVCaptureMetadataOutput: 0x280d700e0) [type:mobj][enabled:1][active:1]>]
因此,已创建一个 AVCaptureConnection
并使用后置摄像头作为输入和一个 AVCaptureVideoPreviewLayer
实例(可能是您创建的 previewLayer
属性)另一个以后置摄像头作为输入,你传递给 AVCaptureSession
.
的 AVCaptureMetadataOutput
显然,第一个确实对 videoPreviewLayer
属性 有一定的价值:
(lldb) po captureSession.connections[0].videoPreviewLayer
▿ Optional<AVCaptureVideoPreviewLayer>
- some : <AVCaptureVideoPreviewLayer:0x280d6ba40; position = CGPoint (0 0); bounds = CGRect (0 0; 0 0); sublayers = (<CALayer: 0x280d6bc20>); masksToBounds = YES; allowsGroupOpacity = YES; inheritsTiming = NO; >
显然,您在 metadataOutput(_:didOutput:from:)
函数中获得的 AVCaptureConnection
实例将始终是第二个实例——即将后置摄像头与 AVCaptureMetadataOutput 相关联的那个。
下面的代码可以 运行 在 iPhone 上,当针对任何 QR 码时,应该打印内容。下面二维码指向example.com.
问题是代理应该提供 connection: AVCaptureConnection
而它确实提供了,但是它的 previewLayer
属性 是 nil
.
可以将以下代码粘贴到一个新的空 Xcode 项目中。如果您禁用(注释掉)第 57 行,并启用第 56 行,它工作正常。但我想将委托放在 CaptureView
class 之外。我如何设置捕获使得 AVCaptureMetadataOutputObjectsDelegate
它的 previewLayer
属性 不是 nil
?
import UIKit
import AVFoundation
/// Transforms each captured metadata object into the preview layer's
/// coordinate space and logs the string payload of any machine-readable code.
/// - Parameters:
///   - output: The metadata output that produced the objects (unused here).
///   - metadataObjects: Raw objects in capture-device coordinates.
///   - previewLayer: Layer used to convert objects into view coordinates.
func printMetadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], previewLayer: AVCaptureVideoPreviewLayer) {
    for metadataObject in metadataObjects {
        // Convert from device coordinates to the layer's coordinate space.
        guard let transformed = previewLayer.transformedMetadataObject(for: metadataObject),
              let readableCode = transformed as? AVMetadataMachineReadableCodeObject else {
            NSLog("Ignoring object that is not AVMetadataMachineReadableCodeObject")
            continue
        }
        if let payload = readableCode.stringValue {
            NSLog("Captured string %@", payload)
        } else {
            NSLog("Captured something that's not a string")
        }
    }
}
// View that owns the capture session and preview layer for QR scanning.
// The metadata delegate is injected so it can live outside this class —
// the question is why connection.videoPreviewLayer is nil in that delegate.
class CaptureView: UIView, AVCaptureMetadataOutputObjectsDelegate {
private let previewLayer = AVCaptureVideoPreviewLayer()
// Used only when `self` is the delegate (commented-out line below);
// resolves the layer from the stored property, not the connection.
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
printMetadataOutput(output, didOutput: metadataObjects, previewLayer: self.previewLayer)
}
override func layoutSubviews() {
super.layoutSubviews()
// NOTE(review): self.frame is in the superview's coordinate space; a
// sublayer's frame is usually self.bounds — confirm this is intended.
self.previewLayer.frame = self.frame
}
init(frame: CGRect, delegate: AVCaptureMetadataOutputObjectsDelegate) {
guard let captureDevice = AVCaptureDevice.default(for: .video) else {
fatalError("Couldn't find default capture device")
}
// If the camera input cannot be created (e.g. permission denied),
// fall back to a plain view with no session configured.
guard let captureDeviceInput = try? AVCaptureDeviceInput(device: captureDevice) else {
super.init(frame: frame)
return
}
let captureSession = AVCaptureSession()
captureSession.addInput(captureDeviceInput)
self.previewLayer.session = captureSession
self.previewLayer.videoGravity = .resizeAspectFill
super.init(frame: frame)
self.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = self.frame
// NOTE(review): startRunning() blocks until the session starts; Apple
// recommends calling it off the main thread — confirm acceptable here.
captureSession.startRunning()
let metadataOutput = AVCaptureMetadataOutput()
// metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
metadataOutput.setMetadataObjectsDelegate(delegate, queue: DispatchQueue.main)
metadataOutput.rectOfInterest = CGRect(x: 0, y: 0, width: 1, height: 1)
if captureSession.canAddOutput(metadataOutput) {
captureSession.addOutput(metadataOutput)
} else {
fatalError("Can't add metadata output to capture session")
}
// Restrict detection to QR codes; must be set after adding the output.
metadataOutput.metadataObjectTypes = [.qr]
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
}
/// External delegate that reads the preview layer off the connection.
/// This is the variant that fails: connection.videoPreviewLayer is nil
/// for connections formed automatically by addInput/addOutput.
class MetadataDelegate: NSObject, AVCaptureMetadataOutputObjectsDelegate {
    func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
        if let layer = connection.videoPreviewLayer {
            printMetadataOutput(output, didOutput: metadataObjects, previewLayer: layer)
        } else {
            print("previewLayer was nil")
        }
    }
}
/// Hosts a full-screen CaptureView and supplies the external metadata delegate.
class ViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
    private let metadataDelegate = MetadataDelegate()
    override func viewDidLoad() {
        // Fix: overrides of viewDidLoad must call super per UIKit's contract.
        super.viewDidLoad()
        let captureView = CaptureView(frame: CGRect(), delegate: self.metadataDelegate)
        captureView.frame = self.view.frame
        captureView.autoresizingMask = [.flexibleHeight, .flexibleWidth]
        self.view.addSubview(captureView)
    }
}
我发现了这个错误。
事实上,即使当你启用这条线时:
metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
在CaptureView
的对应metadataOutput(_, didOutput:, from:,)
中,connection.videoPreviewLayer
仍然是nil。正如 开发人员文档 所说:
This property is set if you initialized the connection using init(inputPort:videoPreviewLayer:) or connectionWithInputPort:videoPreviewLayer:.
所以,在这两种情况下,connection.videoPreviewLayer
都将为零。
我已经稍微更新了您的代码以使其按您想要的方式工作。
import UIKit
import AVFoundation
/// Transforms each captured metadata object into the preview layer's
/// coordinate space and logs the string payload of any machine-readable code.
/// - Parameters:
///   - output: The metadata output that produced the objects (unused here).
///   - metadataObjects: Raw objects in capture-device coordinates.
///   - previewLayer: Layer used to convert objects into view coordinates.
func printMetadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], previewLayer: AVCaptureVideoPreviewLayer) {
    for metadataObject in metadataObjects {
        // Convert from device coordinates to the layer's coordinate space.
        guard let transformed = previewLayer.transformedMetadataObject(for: metadataObject),
              let readableCode = transformed as? AVMetadataMachineReadableCodeObject else {
            NSLog("Ignoring object that is not AVMetadataMachineReadableCodeObject")
            continue
        }
        if let payload = readableCode.stringValue {
            NSLog("Captured string %@", payload)
        } else {
            NSLog("Captured something that's not a string")
        }
    }
}
// View owning the capture session and preview layer. In this revision the
// layer is exposed (non-private) so the owner can hand it to the external
// delegate, working around connection.videoPreviewLayer being nil.
class CaptureView: UIView, AVCaptureMetadataOutputObjectsDelegate {
let previewLayer = AVCaptureVideoPreviewLayer()
// Used only when `self` is the delegate; demonstrates that even then the
// connection's videoPreviewLayer is nil for auto-formed connections.
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
if connection.videoPreviewLayer == nil {
print("connection.videoPreviewLayer was nil")
}
printMetadataOutput(output, didOutput: metadataObjects, previewLayer: self.previewLayer)
}
override func layoutSubviews() {
super.layoutSubviews()
// NOTE(review): self.frame is in the superview's coordinate space; a
// sublayer's frame is usually self.bounds — confirm this is intended.
self.previewLayer.frame = self.frame
}
init(frame: CGRect, delegate: AVCaptureMetadataOutputObjectsDelegate) {
guard let captureDevice = AVCaptureDevice.default(for: .video) else {
fatalError("Couldn't find default capture device")
}
// If the camera input cannot be created (e.g. permission denied),
// fall back to a plain view with no session configured.
guard let captureDeviceInput = try? AVCaptureDeviceInput(device: captureDevice) else {
super.init(frame: frame)
return
}
let captureSession = AVCaptureSession()
captureSession.addInput(captureDeviceInput)
self.previewLayer.session = captureSession
self.previewLayer.videoGravity = .resizeAspectFill
super.init(frame: frame)
self.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = self.frame
// NOTE(review): startRunning() blocks until the session starts; Apple
// recommends calling it off the main thread — confirm acceptable here.
captureSession.startRunning()
let metadataOutput = AVCaptureMetadataOutput()
// metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
metadataOutput.setMetadataObjectsDelegate(delegate, queue: DispatchQueue.main)
metadataOutput.rectOfInterest = CGRect(x: 0, y: 0, width: 1, height: 1)
if captureSession.canAddOutput(metadataOutput) {
captureSession.addOutput(metadataOutput)
} else {
fatalError("Can't add metadata output to capture session")
}
// Restrict detection to QR codes; must be set after adding the output.
metadataOutput.metadataObjectTypes = [.qr]
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
}
/// External delegate; the preview layer is injected by the owner instead of
/// being read from the connection (whose videoPreviewLayer is nil for
/// auto-formed connections).
class MetadataDelegate: NSObject, AVCaptureMetadataOutputObjectsDelegate {
    // Set by the owner once the CaptureView exists.
    var previewLayer: AVCaptureVideoPreviewLayer?
    func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
        if let layer = previewLayer {
            printMetadataOutput(output, didOutput: metadataObjects, previewLayer: layer)
        } else {
            print("previewLayer was nil")
        }
    }
}
/// Hosts a full-screen CaptureView and wires its preview layer into the
/// external metadata delegate so coordinate transforms work.
class ViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
    private let metadataDelegate = MetadataDelegate()
    override func viewDidLoad() {
        // Fix: overrides of viewDidLoad must call super per UIKit's contract.
        super.viewDidLoad()
        let captureView = CaptureView(frame: CGRect(), delegate: self.metadataDelegate)
        // Hand the view's preview layer to the delegate, since the delegate
        // cannot obtain it from the connection.
        metadataDelegate.previewLayer = captureView.previewLayer
        captureView.frame = self.view.frame
        captureView.autoresizingMask = [.flexibleHeight, .flexibleWidth]
        self.view.addSubview(captureView)
    }
}
正如 videoPreviewLayer 文档所述:
This property is set if you initialized the connection using
init(inputPort:videoPreviewLayer:)
or connectionWithInputPort:videoPreviewLayer:
.
因此,为了在 videoPreviewLayer
属性 中获取值,您必须手动设置 AVCaptureConnection
对象。
相反,我建议将 AVCaptureMetadataOutputObjectsDelegate
隐藏在您可以声明的自定义协议后面:
// Custom delegate protocol mirroring AVCaptureMetadataOutputObjectsDelegate,
// but additionally handing over the preview layer needed to transform
// metadata objects into view coordinates.
protocol CaptureViewMetadataOutputObjectsDelegate {
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection, previewLayer: AVCaptureVideoPreviewLayer)
}
然后,在您的 CaptureView
中实施 AVCaptureMetadataOutputObjectsDelegate
协议,并调用您的协议的函数传递所需的 AVCaptureVideoPreviewLayer
。您的代码将是这样的:
// View owning the capture session. In this design CaptureView itself is the
// AVFoundation delegate and forwards callbacks — together with its preview
// layer — to a custom, injected CaptureViewMetadataOutputObjectsDelegate.
class CaptureView: UIView, AVCaptureMetadataOutputObjectsDelegate {
private let previewLayer = AVCaptureVideoPreviewLayer()
private let delegate: CaptureViewMetadataOutputObjectsDelegate
// AVFoundation callback; forwards to the custom delegate with the layer.
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
delegate.metadataOutput(output, didOutput: metadataObjects, from: connection, previewLayer: previewLayer)
// printMetadataOutput(output, didOutput: metadataObjects, previewLayer: self.previewLayer)
}
override func layoutSubviews() {
super.layoutSubviews()
// NOTE(review): self.frame is in the superview's coordinate space; a
// sublayer's frame is usually self.bounds — confirm this is intended.
self.previewLayer.frame = self.frame
}
init(frame: CGRect, delegate: CaptureViewMetadataOutputObjectsDelegate) {
self.delegate = delegate
guard let captureDevice = AVCaptureDevice.default(for: .video) else {
fatalError("Couldn't find default capture device")
}
// If the camera input cannot be created (e.g. permission denied),
// fall back to a plain view with no session configured.
guard let captureDeviceInput = try? AVCaptureDeviceInput(device: captureDevice) else {
super.init(frame: frame)
return
}
let captureSession = AVCaptureSession()
captureSession.addInput(captureDeviceInput)
self.previewLayer.session = captureSession
self.previewLayer.videoGravity = .resizeAspectFill
super.init(frame: frame)
self.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = self.frame
// NOTE(review): startRunning() blocks until the session starts; Apple
// recommends calling it off the main thread — confirm acceptable here.
captureSession.startRunning()
let metadataOutput = AVCaptureMetadataOutput()
metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
// metadataOutput.setMetadataObjectsDelegate(delegate, queue: DispatchQueue.main)
metadataOutput.rectOfInterest = CGRect(x: 0, y: 0, width: 1, height: 1)
if captureSession.canAddOutput(metadataOutput) {
captureSession.addOutput(metadataOutput)
} else {
fatalError("Can't add metadata output to capture session")
}
// Restrict detection to QR codes; must be set after adding the output.
metadataOutput.metadataObjectTypes = [.qr]
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
}
// Concrete implementation of the custom delegate protocol; forwards to the
// logging helper using the preview layer supplied by CaptureView.
class MetadataDelegate: CaptureViewMetadataOutputObjectsDelegate {
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection, previewLayer: AVCaptureVideoPreviewLayer) {
printMetadataOutput(output, didOutput: metadataObjects, previewLayer: previewLayer)
}
}
/// Hosts a full-screen CaptureView; the custom-protocol delegate needs no
/// extra wiring because CaptureView passes its own preview layer along.
class ViewController: UIViewController {
    private let metadataDelegate = MetadataDelegate()
    override func viewDidLoad() {
        // Fix: overrides of viewDidLoad must call super per UIKit's contract.
        super.viewDidLoad()
        let captureView = CaptureView(frame: CGRect(), delegate: self.metadataDelegate)
        captureView.frame = self.view.frame
        captureView.autoresizingMask = [.flexibleHeight, .flexibleWidth]
        self.view.addSubview(captureView)
    }
}
更新:经过一番研究,我确实在init(inputPort:videoPreviewLayer:)函数文档中找到了这条语句:
When using
addInput(_:)
or addOutput(_:)
, connections are automatically formed between all compatible inputs and outputs. You do not need to manually create and add connections to the session unless you use the primitive addInputWithNoConnections(_:)
and addOutputWithNoConnections(_:)
methods.
这意味着当您将设备相机添加为输入并将 AVCaptureMetadataOutput
添加为输出时,所有兼容的 AVCaptureConnection
都会自动创建。
我尝试使用以下代码创建另一个 AVCaptureConnection
:
// Attempt to form an extra connection that carries the preview layer.
// Fix: the closure shorthand `$0` was garbled as `[=12=]` in transcription,
// which does not compile.
if let port = captureDeviceInput.ports.first(where: { $0.mediaType == .video }) {
    let con = AVCaptureConnection(inputPort: port, videoPreviewLayer: self.previewLayer)
    if captureSession.canAddConnection(con) {
        captureSession.addConnection(con)
    }
}
但是 canAddConnection(_:)
函数总是 returns false
.
之后我打印了 AVCaptureSession
的 connections
数组,我看到了以下内容:
(lldb) po captureSession.connections
[<AVCaptureConnection: 0x280d67980 (AVCaptureDeviceInput: 0x280d119a0 Back Camera) -> (AVCaptureVideoPreviewLayer: 0x280d6ba40) [type:vide][enabled:1][active:1]>, <AVCaptureConnection: 0x280d7bee0 (AVCaptureDeviceInput: 0x280d119a0 Back Camera) -> (AVCaptureMetadataOutput: 0x280d700e0) [type:mobj][enabled:1][active:1]>]
因此,已创建一个 AVCaptureConnection
并使用后置摄像头作为输入和一个 AVCaptureVideoPreviewLayer
实例(可能是您创建的 previewLayer
属性)另一个以后置摄像头作为输入,你传递给 AVCaptureSession
.
AVCaptureMetadataOutput
显然,第一个确实对 videoPreviewLayer
属性 有一定的价值:
(lldb) po captureSession.connections[0].videoPreviewLayer
▿ Optional<AVCaptureVideoPreviewLayer>
- some : <AVCaptureVideoPreviewLayer:0x280d6ba40; position = CGPoint (0 0); bounds = CGRect (0 0; 0 0); sublayers = (<CALayer: 0x280d6bc20>); masksToBounds = YES; allowsGroupOpacity = YES; inheritsTiming = NO; >
显然,您在 metadataOutput(_:didOutput:from:)
函数中获得的 AVCaptureConnection
实例将始终是第二个实例。将后置摄像头与 AVCaptureMetadataOutput
.