SwiftUI doesn't update the state of an @ObservedObject cameraViewModel object
I'm new to SwiftUI and to manual camera features, and I could really use some help.
I'm trying to build a SwiftUI camera view that wraps a UIKit camera, so that the focus lens position can be controlled through a SwiftUI picker view, with the focus value displayed below it. Eventually I'd like to map AVCaptureDevice.lensPosition (which runs from 0 to 1.0) onto the feet values shown in the focus picker. For now, though, I just want to display that focus number on screen.
The problem is that when I try to update the focus through the Coordinator's KVO observer and set it on the camera view model, nothing happens. Please help.
Here is the code:
import SwiftUI
import AVFoundation
import Combine
struct ContentView: View {
@State private var didTapCapture = false
@State private var focusLensPosition: Float = 0
@ObservedObject var cameraViewModel = CameraViewModel(focusLensPosition: 0)
var body: some View {
VStack {
ZStack {
CameraPreviewRepresentable(didTapCapture: $didTapCapture, cameraViewModel: cameraViewModel)
.frame(maxWidth: .infinity, maxHeight: .infinity, alignment: .center)
VStack {
FocusPicker(selectedFocus: $focusLensPosition)
Text(String(cameraViewModel.focusLensPosition))
.foregroundColor(.red)
.font(.largeTitle)
}
.frame(maxWidth: .infinity, alignment: .leading)
}
.edgesIgnoringSafeArea(.all)
Spacer()
CaptureButton(didTapCapture: $didTapCapture)
.frame(width: 100, height: 100, alignment: .center)
.padding(.bottom, 20)
}
}
}
struct ContentView_Previews: PreviewProvider {
static var previews: some View {
ContentView()
}
}
struct CaptureButton: View {
@Binding var didTapCapture : Bool
var body: some View {
Button {
didTapCapture.toggle()
} label: {
Image(systemName: "photo")
.font(.largeTitle)
.padding(30)
.background(Color.red)
.foregroundColor(.white)
.clipShape(Circle())
.overlay(
Circle()
.stroke(Color.red)
)
}
}
}
struct CameraPreviewRepresentable: UIViewControllerRepresentable {
@Environment(\.presentationMode) var presentationMode
@Binding var didTapCapture: Bool
@ObservedObject var cameraViewModel: CameraViewModel
let cameraController: CustomCameraController = CustomCameraController()
func makeUIViewController(context: Context) -> CustomCameraController {
cameraController.delegate = context.coordinator
return cameraController
}
func updateUIViewController(_ cameraViewController: CustomCameraController, context: Context) {
if (self.didTapCapture) {
cameraViewController.didTapRecord()
}
}
func makeCoordinator() -> Coordinator {
Coordinator(self, cameraViewModel: cameraViewModel)
}
class Coordinator: NSObject, UINavigationControllerDelegate, AVCapturePhotoCaptureDelegate {
let parent: CameraPreviewRepresentable
var cameraViewModel: CameraViewModel
var focusLensPositionObserver: NSKeyValueObservation?
init(_ parent: CameraPreviewRepresentable, cameraViewModel: CameraViewModel) {
self.parent = parent
self.cameraViewModel = cameraViewModel
super.init()
focusLensPositionObserver = self.parent.cameraController.currentCamera?.observe(\.lensPosition, options: [.new]) { [weak self] camera, _ in
print(Float(camera.lensPosition))
//announcing changes via Publisher
self?.cameraViewModel.focusLensPosition = camera.lensPosition
}
}
deinit {
focusLensPositionObserver = nil
}
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
parent.didTapCapture = false
if let imageData = photo.fileDataRepresentation(), let image = UIImage(data: imageData) {
UIImageWriteToSavedPhotosAlbum(image, nil, nil, nil)
}
parent.presentationMode.wrappedValue.dismiss()
}
}
}
class CameraViewModel: ObservableObject {
@Published var focusLensPosition: Float = 0
init(focusLensPosition: Float) {
self.focusLensPosition = focusLensPosition
}
}
class CustomCameraController: UIViewController {
var image: UIImage?
var captureSession = AVCaptureSession()
var backCamera: AVCaptureDevice?
var frontCamera: AVCaptureDevice?
var currentCamera: AVCaptureDevice?
var photoOutput: AVCapturePhotoOutput?
var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
//DELEGATE
var delegate: AVCapturePhotoCaptureDelegate?
func showFocusLensPosition() -> Float {
// guard let camera = currentCamera else { return 0 }
// try! currentCamera!.lockForConfiguration()
// currentCamera!.focusMode = .autoFocus
//// currentCamera!.setFocusModeLocked(lensPosition: currentCamera!.lensPosition, completionHandler: nil)
// currentCamera!.unlockForConfiguration()
return currentCamera!.lensPosition
}
func didTapRecord() {
let settings = AVCapturePhotoSettings()
photoOutput?.capturePhoto(with: settings, delegate: delegate!)
}
override func viewDidLoad() {
super.viewDidLoad()
setup()
}
func setup() {
setupCaptureSession()
setupDevice()
setupInputOutput()
setupPreviewLayer()
startRunningCaptureSession()
}
func setupCaptureSession() {
captureSession.sessionPreset = .photo
}
func setupDevice() {
let deviceDiscoverySession =
AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera],
mediaType: .video,
position: .unspecified)
for device in deviceDiscoverySession.devices {
switch device.position {
case .front:
self.frontCamera = device
case .back:
self.backCamera = device
default:
break
}
}
self.currentCamera = self.backCamera
}
func setupInputOutput() {
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera!)
captureSession.addInput(captureDeviceInput)
photoOutput = AVCapturePhotoOutput()
captureSession.addOutput(photoOutput!)
} catch {
print(error)
}
}
func setupPreviewLayer() {
self.cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
self.cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
let deviceOrientation = UIDevice.current.orientation
cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation(rawValue: deviceOrientation.rawValue)!
self.cameraPreviewLayer?.frame = self.view.frame
// view.transform = CGAffineTransform(scaleX: 0.5, y: 0.5)
self.view.layer.insertSublayer(cameraPreviewLayer!, at: 0)
}
func startRunningCaptureSession() {
captureSession.startRunning()
}
}
struct FocusPicker: View {
var feets = ["∞ ft", "30", "15", "10", "7", "5", "4", "3.5", "3", "2.5", "2", "1.5", "1", "0.5", "Auto"]
@Binding var selectedFocus: Float
var body: some View {
Picker(selection: $selectedFocus, label: Text("")) {
ForEach(0 ..< feets.count) {
Text(feets[$0])
.foregroundColor(.white)
.font(.subheadline)
.fontWeight(.medium)
}
.animation(.none)
.background(Color.clear)
.pickerStyle(WheelPickerStyle())
}
.frame(width: 60, height: 200)
.border(Color.gray, width: 5)
.clipped()
}
}
The problem with the code you provided is that the type of selectedFocus in the FocusPicker view needs to be an Int rather than a Float: the rows produced by ForEach(0 ..< feets.count) are tagged with Int values, so a Float selection never matches any row. One option is therefore to change that type to Int and find a way to represent AVCaptureDevice.lensPosition as an Int within a given range; a rough sketch of that idea is shown below.
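For illustration, here is a minimal sketch of that first option (IntFocusPicker, labels and selectedIndex are names I made up, not from your code): the picker's selection is an Int index, and that index is then mapped onto the 0...1 range of lensPosition.
import SwiftUI
// Minimal sketch of option 1 (hypothetical names): an Int-based picker whose
// index is mapped onto AVCaptureDevice.lensPosition's 0...1 range.
struct IntFocusPicker: View {
    // Display labels for each step; purely illustrative values.
    let labels = ["0.5", "1", "2", "5", "10", "∞ ft"]
    @Binding var selectedIndex: Int
    var body: some View {
        Picker(selection: $selectedIndex, label: Text("")) {
            // Tag each row with its Int index so the tag type matches the binding type.
            ForEach(0 ..< labels.count, id: \.self) { index in
                Text(labels[index]).tag(index)
            }
        }
        .pickerStyle(WheelPickerStyle())
        .frame(width: 60, height: 200)
    }
    // Derive a lens position in 0...1 from the selected index.
    var lensPosition: Float {
        Float(selectedIndex) / Float(labels.count - 1)
    }
}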
The second option is to replace the feets array with an enum. By making the enum conform to the CustomStringConvertible protocol you can even provide a proper description for each case. See my example below.
I trimmed your code down, since as a first step you only want to display the number; that keeps the example easier to follow.
My working example:
import SwiftUI
import Combine
struct ContentView: View {
@ObservedObject var cameraViewModel = CameraViewModel(focusLensPosition: 0.5)
var body: some View {
VStack {
ZStack {
VStack {
FocusPicker(selectedFocus: $cameraViewModel.focusLensPosition)
Text(String(self.cameraViewModel.focusLensPosition))
.foregroundColor(.red)
.font(.largeTitle)
}
.frame(maxWidth: .infinity, alignment: .leading)
}
.edgesIgnoringSafeArea(.all)
}
}
}
struct ContentView_Previews: PreviewProvider {
static var previews: some View {
ContentView()
}
}
class CameraViewModel: ObservableObject {
@Published var focusLensPosition: Float
init(focusLensPosition: Float) {
self.focusLensPosition = focusLensPosition
}
}
enum Feets: Float, CustomStringConvertible, CaseIterable, Identifiable {
case case1 = 0.0
case case2 = 0.5
case case3 = 1.0
var id: Float { self.rawValue }
var description: String {
get {
switch self {
case .case1:
return "∞ ft"
case .case2:
return "4"
case .case3:
return "Auto"
}
}
}
}
struct FocusPicker: View {
@Binding var selectedFocus: Float
var body: some View {
Picker(selection: $selectedFocus, label: Text("")) {
ForEach(Feets.allCases) { feet in
Text(feet.description)
}
.animation(.none)
.background(Color.clear)
.pickerStyle(WheelPickerStyle())
}
.frame(width: 60, height: 200)
.border(Color.gray, width: 5)
.clipped()
}
}
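One more note, going beyond the trimmed example: if you reconnect the camera code and keep the KVO approach from your original Coordinator, make sure the @Published property is updated on the main thread, since SwiftUI observes it there. A hedged sketch, reusing your CameraViewModel:
import AVFoundation
// Assumption: the KVO callback may arrive on a background queue, so hop to the
// main queue before writing to the @Published property.
final class FocusObserver {
    private var observation: NSKeyValueObservation?
    func start(camera: AVCaptureDevice, viewModel: CameraViewModel) {
        observation = camera.observe(\.lensPosition, options: [.new]) { camera, _ in
            DispatchQueue.main.async {
                viewModel.focusLensPosition = camera.lensPosition
            }
        }
    }
}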