Camera View adjusting

I'm not very familiar with Swift or Xcode, so any help would be greatly appreciated!

I made a separate .swift file for my QR/camera controller. I found this tutorial online on how to build a QR code reader, and I typed in the code it provides. Everything works fine, except that the camera view is not displayed correctly on the screen (on an iPhone 8). How do I adjust the video view?

Code:

import UIKit
import AVFoundation

class CameraController: UIViewController, UIImagePickerControllerDelegate, UINavigationControllerDelegate, AVCapturePhotoCaptureDelegate, AVCaptureMetadataOutputObjectsDelegate {

    @IBOutlet weak var previewView: UIView!
    @IBOutlet weak var lblOutput: UILabel!

    var imageOrientation: AVCaptureVideoOrientation?
    var captureSession: AVCaptureSession?
    var videoPreviewLayer: AVCaptureVideoPreviewLayer?
    var capturePhotoOutput: AVCapturePhotoOutput?

    override func viewDidLoad() {
        super.viewDidLoad()

        // Get an instance of the AVCaptureDevice class to initialize a
        // device object and provide the video as the media type parameter
        guard let captureDevice = AVCaptureDevice.default(for: AVMediaType.video) else {
            fatalError("No video device found")
        }
        // handler called when the orientation is changed
        self.imageOrientation = AVCaptureVideoOrientation.portrait

        do {
            // Get an instance of the AVCaptureDeviceInput class using the previous device object
            let input = try AVCaptureDeviceInput(device: captureDevice)

            // Initialize the captureSession object
            captureSession = AVCaptureSession()

            // Set the input device on the capture session
            captureSession?.addInput(input)

            // Get an instance of the AVCapturePhotoOutput class
            capturePhotoOutput = AVCapturePhotoOutput()
            capturePhotoOutput?.isHighResolutionCaptureEnabled = true

            // Set the output on the capture session
            captureSession?.addOutput(capturePhotoOutput!)
            captureSession?.sessionPreset = .high

            // Initialize an AVCaptureMetadataOutput object and add it as an output to the capture session
            let captureMetadataOutput = AVCaptureMetadataOutput()
            captureSession?.addOutput(captureMetadataOutput)

            // Set delegate and use the default dispatch queue to execute the call back
            captureMetadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
            captureMetadataOutput.metadataObjectTypes = [AVMetadataObject.ObjectType.qr]

            //Initialise the video preview layer and add it as a sublayer to the viewPreview view's layer
            videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
            videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
            videoPreviewLayer?.frame = view.layer.bounds
            previewView.layer.addSublayer(videoPreviewLayer!)

            //start video capture
            captureSession?.startRunning()

        } catch {
            //If any error occurs, simply print it out
            print(error)
            return
        }

    }

    override func viewWillAppear(_ animated: Bool) {
        navigationController?.setNavigationBarHidden(true, animated: false)
        self.captureSession?.startRunning()
    }

    // Find a camera with the specified AVCaptureDevicePosition, returning nil if one is not found
    func cameraWithPosition(position: AVCaptureDevice.Position) -> AVCaptureDevice? {
        let discoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .unspecified)
        for device in discoverySession.devices {
            if device.position == position {
                return device
            }
        }

        return nil
    }

    func metadataOutput(_ captureOutput: AVCaptureMetadataOutput,
                        didOutput metadataObjects: [AVMetadataObject],
                        from connection: AVCaptureConnection) {
        // Check if the metadataObjects array contains at least one object.
        if metadataObjects.count == 0 {
            return
        }

        //self.captureSession?.stopRunning()

        // Get the metadata object.
        let metadataObj = metadataObjects[0] as! AVMetadataMachineReadableCodeObject

        if metadataObj.type == AVMetadataObject.ObjectType.qr {
            if let outputString = metadataObj.stringValue {
                DispatchQueue.main.async {
                    print(outputString)
                    self.lblOutput.text = outputString
                }
            }
        }

    }

}

Image of the current view:

The highlighted white box is the UIView

You should use NSLayoutConstraint in the storyboard.

Step 1

This is your current state

Step 2

Add top, leading, trailing, and bottom constraints

Step 3

Final result
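
If you would rather set the same constraints in code, here is a minimal sketch. It assumes the previewView outlet from the question and simply pins it to the edges of the controller's view; pin to view.safeAreaLayoutGuide instead of view if you want to respect the safe area.

override func viewDidLoad() {
    super.viewDidLoad()

    // Pin previewView to all four edges of the controller's view.
    // previewView is assumed to be the IBOutlet from the question.
    previewView.translatesAutoresizingMaskIntoConstraints = false
    NSLayoutConstraint.activate([
        previewView.topAnchor.constraint(equalTo: view.topAnchor),
        previewView.leadingAnchor.constraint(equalTo: view.leadingAnchor),
        previewView.trailingAnchor.constraint(equalTo: view.trailingAnchor),
        previewView.bottomAnchor.constraint(equalTo: view.bottomAnchor)
    ])

    // ... camera setup from the question ...
}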

I expect one of the following is happening:

- You are not setting your constraints correctly
- Your view is being resized
- You are using the wrong view to set the size of the layer

Setting up constraints is almost impossible to explain in writing. There are many ways to do it, so I made a very short video about setting constraints that explains one way (or two).

The second and the third are addressed by this snippet:

override func viewDidLoad() {
    super.viewDidLoad()

        ...

    videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
    videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
    previewView.layer.addSublayer(videoPreviewLayer!)
    updatePreviewLayerFrame()

        ...
}

override func viewDidLayoutSubviews() {
    super.viewDidLayoutSubviews()
    updatePreviewLayerFrame()
}

private func updatePreviewLayerFrame() {
    videoPreviewLayer?.frame = previewView.bounds
}

Overriding viewDidLayoutSubviews should resize the layer, because this method is called whenever the view controller "resizes". It is also called shortly after viewDidLoad. Note also that previewView is used to determine the frame: videoPreviewLayer?.frame = previewView.bounds.

Layers are not resized automatically with their parent view. That means your videoPreviewLayer takes its frame from the original (not yet laid out) previewView and never changes it afterwards. To update the layer you can override this method:

override func viewDidLayoutSubviews() {
    super.viewDidLayoutSubviews()
    // you need to keep a reference for that
    self.videoPreviewLayer?.frame = self.previewView.bounds
}

Alternatively, and I think this is better, you can look at how the preview view is implemented in Apple's AVCam example app. With their approach, resizing is handled by Auto Layout.
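
For reference, the core of the AVCam approach is a small UIView subclass whose backing layer is the preview layer itself, so Auto Layout keeps the preview in sync with the view's bounds automatically. A rough sketch (the class name PreviewView is only illustrative, and the outlet type in the question would need to change accordingly):

import UIKit
import AVFoundation

// A view backed directly by an AVCaptureVideoPreviewLayer,
// so the preview always matches the view's bounds under Auto Layout.
class PreviewView: UIView {

    override class var layerClass: AnyClass {
        return AVCaptureVideoPreviewLayer.self
    }

    var videoPreviewLayer: AVCaptureVideoPreviewLayer {
        return layer as! AVCaptureVideoPreviewLayer
    }
}

With that in place you would set the custom class of previewView to PreviewView in the storyboard and assign previewView.videoPreviewLayer.session = captureSession, instead of adding a sublayer and managing its frame by hand.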

The mistake is that you use the frame of view, but add the videoPreviewLayer to the (smaller) previewView, as shown in your storyboard.

In the videoPreviewLayer frame configuration

        //Initialise the video preview layer and add it as a sublayer to the viewPreview view's layer
        videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
        videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
        videoPreviewLayer?.frame = view.layer.bounds
        previewView.layer.addSublayer(videoPreviewLayer!)

replace this line

        videoPreviewLayer?.frame = view.layer.bounds

with this one:

        videoPreviewLayer?.frame = previewView.layer.bounds