AVAssetWriter 无法用视频录制音频 |崩溃
AVAssetWriter Unable to record audio with video | Crashing
我正在尝试从 CMSampleBuffer
捕获 video/Audio 帧,但完全无法获得正确的视频记录。
预期输出:
.mp4
格式的视频文件,同时包含音频(来自麦克风)和视频帧。
当前输出:
没有音频的空 Directory/A 视频文件。
在 运行 上崩溃:Media type of sample buffer must match receiver's media type ("soun")
我尝试了几乎所有在线可用的方法来解决此问题。我的最后期限快到了,我只是抓着头发想弄清楚到底发生了什么。非常感谢任何 help/pointers。
以下是来源。
CameraController.swift
class CameraController: UIViewController, SFrameCaptureDelegate {
override func viewDidLoad() {
super.viewDidLoad()
setupUI()
assetWriter = AssetManager(filename: UUID().uuidString.appending(".mp4"))
frameBuffer.delegate = self
frameBuffer.startSession()
}
var previewView: PreviewView = {
let instance = PreviewView()
return instance
}()
var frameBuffer = FrameCapture(type: .AudioVideo)
var captureButton: UIButton = {
let instance = UIButton()
instance.setTitle("Capture", for: .normal)
instance.backgroundColor = .white
return instance
}()
// if the user is recording the frames from the phone
var frameCaptureRunning = false
var assetWriter : AssetManager!
var videoDirectoryPath = SFileManager.shared.getDocumentDirectory()
func setupUI() {
view.addSubview(previewView)
previewView.top(to: view)
previewView.left(to: view)
previewView.right(to: view)
previewView.height(view.frame.height)
previewView.session = frameBuffer.session
view.addSubview(captureButton)
captureButton.size(CGSize(width: 100, height: 100))
captureButton.centerX(to: view)
captureButton.bottom(to: view, offset: -20)
captureButton.addTarget(self, action: #selector(startpic), for: .touchDown)
captureButton.addTarget(self, action: #selector(stopic), for: .touchUpInside)
}
@objc func startpic() {
frameCaptureRunning = true
assetWriter.isRecording = true
}
@objc func stopic() {
frameCaptureRunning = false
assetWriter.isRecording = false
assetWriter.finish {
DispatchQueue.main.async {
let activity = UIActivityViewController(activityItems: [self.assetWriter.url!], applicationActivities: nil)
self.present(activity, animated: true, completion: nil)
}
print("This -- ",self.assetWriter.url.path)
do {
let attr = try FileManager.default.attributesOfItem(atPath: self.assetWriter.url.path)
let fileSize = attr[FileAttributeKey.size] as! UInt64
print("H264 file size = \(fileSize)")
DispatchQueue.main.async {
let player = AVPlayer(url: self.assetWriter.url)
let playerLayer = AVPlayerLayer(player: player)
playerLayer.videoGravity = .resizeAspectFill
playerLayer.frame = self.view.bounds
playerLayer.backgroundColor = UIColor.red.cgColor
self.view.layer.addSublayer(playerLayer)
player.play()
}
}catch{
print("issues with finishing")
}
}
}
func capturedFrame(buffers: CMSampleBuffer) {
if !frameCaptureRunning { return }
assetWriter.write(buffer: buffers)
}
}
FrameCapture.swift
protocol SFrameCaptureDelegate: class {
func capturedFrame(buffers: CMSampleBuffer)
}
class FrameCapture: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAudioDataOutputSampleBufferDelegate {
init(type: SessionType) {
super.init()
print("SFC - Frame Buffers initialized with Config - ", type.self)
sessionType = type
}
func startSession() {
print("SFC - Frame Buffers Session Starting")
sessionQueue.async {
self.configureSession(type: self.sessionType)
self.session.startRunning()
}
}
weak var delegate: SFrameCaptureDelegate?
enum SessionSetupResult {
case success
case notAuthorized
case configurationFailed
}
enum SessionType {
case Audio
case Video
case AudioVideo
}
let session = AVCaptureSession()
let sessionQueue = DispatchQueue(label: "sessionQueue", qos: .userInitiated)
let videoQueue = DispatchQueue(label: "videoQueue", qos: .userInitiated)
let audioQueue = DispatchQueue(label: "audioQueue", qos: .userInitiated)
var setupResult: SessionSetupResult = .success
var sessionType: SessionType = .Video
@objc dynamic var videoDeviceInput: AVCaptureDeviceInput!
let videoOutput = AVCaptureVideoDataOutput()
let audioOutput = AVCaptureAudioDataOutput()
var photoQualityPrioritizationMode: AVCapturePhotoOutput.QualityPrioritization = .balanced
/// MARK: SessionConfig
func configureSession(type: SessionType) {
if setupResult != .success { return }
session.beginConfiguration()
session.sessionPreset = .high
do {
var defaultVideoDevice: AVCaptureDevice?
if let dualCameraDevice = AVCaptureDevice.default(.builtInDualWideCamera, for: .video, position: .back) {
defaultVideoDevice = dualCameraDevice
} else if let backCameraDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back){
defaultVideoDevice = backCameraDevice
} else if let frontCameraDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front){
defaultVideoDevice = frontCameraDevice
}
guard let videoDevice = defaultVideoDevice else {
print("CAM - Camera unavailable")
setupResult = .configurationFailed
session.commitConfiguration()
return
}
let videoInputDevice = try AVCaptureDeviceInput(device: videoDevice)
if session.canAddInput(videoInputDevice) {
session.addInput(videoInputDevice)
videoDeviceInput = videoInputDevice
} else {
print("CAM - Couldn't add input to the session")
setupResult = .configurationFailed
session.commitConfiguration()
return
}
} catch {
print("CAM - Couldn't create device input. Error - ", error.localizedDescription)
setupResult = .configurationFailed
session.commitConfiguration()
return
}
if sessionType == .AudioVideo {
do {
let audioDevice = AVCaptureDevice.default(for: .audio)
let audioDeviceInput = try AVCaptureDeviceInput(device: audioDevice!)
print("SFC - in audio device input")
if session.canAddInput(audioDeviceInput) {
session.addInput(audioDeviceInput)
} else { print("CAM - Couldn't add audio input device to session.") }
} catch { print("couldn't create audio input device. Error - ",error.localizedDescription) }
}
videoOutput.setSampleBufferDelegate(self, queue: videoQueue)
if session.canAddOutput(videoOutput) {
session.addOutput(videoOutput)
photoQualityPrioritizationMode = .balanced
} else {
print("Could not add photo output to the session")
setupResult = .configurationFailed
session.commitConfiguration()
return
}
if sessionType == .AudioVideo {
audioOutput.setSampleBufferDelegate(self, queue: audioQueue)
if session.canAddOutput(audioOutput) {
session.addOutput(audioOutput)
} else {
print("Couldn't add audio output")
setupResult = .configurationFailed
session.commitConfiguration()
}
}
videoOutput.connections.first?.videoOrientation = .portrait
videoOutput.videoSettings = [ kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA ]
videoOutput.alwaysDiscardsLateVideoFrames = true
session.commitConfiguration()
}
/// MARK: CMSampleBufferDelegate
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
self.delegate?.capturedFrame(buffers: sampleBuffer)
}
}
AssetManager.swift
class AssetManager: NSObject {
private var assetWriter: AVAssetWriter?
private var videoInput: AVAssetWriterInput?
private var audioInput: AVAssetWriterInput?
var url: URL!
let writerQueue = DispatchQueue(label: "writerQueue", qos: .utility)
var isRecording = false
var video_frames_written = false
init(filename: String) {
super.init()
self.videoDirectory.appendPathComponent(filename)
self.url = self.videoDirectory
}
private var videoDirectory = SFileManager.shared.getDocumentDirectory()
private func setupWriter() {
SFileManager.shared.clearPreviousFiles(withPath: videoDirectory.path)
SFileManager.shared.createNewDirectory(withPath: videoDirectory.path)
printLog(item: self.videoDirectory)
self.assetWriter = try? AVAssetWriter(outputURL: self.videoDirectory, fileType: AVFileType.mp4)
let videoOutputSettings = [
AVVideoCodecKey: AVVideoCodecType.h264,
AVVideoHeightKey: 1280,
AVVideoWidthKey:720
] as [String : Any]
self.videoInput = AVAssetWriterInput(mediaType: .video, outputSettings: videoOutputSettings)
self.videoInput?.expectsMediaDataInRealTime = true
if let videoInput = self.videoInput, (self.assetWriter?.canAdd(videoInput))! {
self.assetWriter?.add(videoInput)
}
let audioOutputSettings = [
AVFormatIDKey: kAudioFormatMPEG4AAC,
AVNumberOfChannelsKey: 1,
AVSampleRateKey: 44100,
AVEncoderBitRateKey: 64000
] as [String: Any]
self.audioInput = AVAssetWriterInput(mediaType: .audio, outputSettings: audioOutputSettings)
self.audioInput?.expectsMediaDataInRealTime = true
if let audioInput = self.audioInput, (self.assetWriter?.canAdd(audioInput))! {
self.assetWriter?.add(audioInput)
printDone(item: "Asset writer added, \(String(describing: self.audioInput))")
} else {
printError(item: "No audio Input")
}
}
public func write(buffer: CMSampleBuffer) {
writerQueue.sync {
if assetWriter == nil { self.setupWriter() }
if self.assetWriter?.status == .unknown {
self.assetWriter?.startWriting()
self.assetWriter?.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(buffer))
printDone(item: "Started AssetWriter")
}
if self.assetWriter?.status == .failed {
printError(item: "Asset Writer Failed with Error: \(String(describing: self.assetWriter?.error))")
}
if CMSampleBufferDataIsReady(buffer) {
if let videoInput = self.videoInput, videoInput.isReadyForMoreMediaData {
videoInput.append(buffer)
}
if let audioInput = self.audioInput, audioInput.isReadyForMoreMediaData {
audioInput.append(buffer) // Crashes at this line
}
}
}
}
public func finish(completion: @escaping (() -> Void)) {
writerQueue.async {
self.assetWriter?.finishWriting(completionHandler: { [self] in
printDone(item: "Finished Writing")
completion()
})
}
}
}
您正在向 audioInput
写入视频缓冲区,根据缓冲区到达的方式,您还可以向 videoInput
.
写入音频缓冲区
在您的例子中,CMSampleBuffer
包含音频或视频,因此您将音频缓冲区附加到 audioInput
,将视频缓冲区附加到 videoInput
。
您可以通过比较 captureOutput:didOutput:
中的 output
与您的 audioInput
和 videoOutput
或查看缓冲区的 CMSampleBufferGetFormatDescription()
的CMFormatDescriptionGetMediaType()
,但更复杂。
我正在尝试从 CMSampleBuffer
捕获 video/Audio 帧,但完全无法获得正确的视频记录。
预期输出:
.mp4
格式的视频文件,同时包含音频(来自麦克风)和视频帧。
当前输出: 没有音频的空 Directory/A 视频文件。
在 运行 上崩溃:Media type of sample buffer must match receiver's media type ("soun")
我尝试了几乎所有在线可用的方法来解决此问题。我的最后期限快到了,我只是抓着头发想弄清楚到底发生了什么。非常感谢任何 help/pointers。
以下是来源。
CameraController.swift
class CameraController: UIViewController, SFrameCaptureDelegate {
override func viewDidLoad() {
super.viewDidLoad()
setupUI()
assetWriter = AssetManager(filename: UUID().uuidString.appending(".mp4"))
frameBuffer.delegate = self
frameBuffer.startSession()
}
var previewView: PreviewView = {
let instance = PreviewView()
return instance
}()
var frameBuffer = FrameCapture(type: .AudioVideo)
var captureButton: UIButton = {
let instance = UIButton()
instance.setTitle("Capture", for: .normal)
instance.backgroundColor = .white
return instance
}()
// if the user is recording the frames from the phone
var frameCaptureRunning = false
var assetWriter : AssetManager!
var videoDirectoryPath = SFileManager.shared.getDocumentDirectory()
func setupUI() {
view.addSubview(previewView)
previewView.top(to: view)
previewView.left(to: view)
previewView.right(to: view)
previewView.height(view.frame.height)
previewView.session = frameBuffer.session
view.addSubview(captureButton)
captureButton.size(CGSize(width: 100, height: 100))
captureButton.centerX(to: view)
captureButton.bottom(to: view, offset: -20)
captureButton.addTarget(self, action: #selector(startpic), for: .touchDown)
captureButton.addTarget(self, action: #selector(stopic), for: .touchUpInside)
}
@objc func startpic() {
frameCaptureRunning = true
assetWriter.isRecording = true
}
@objc func stopic() {
frameCaptureRunning = false
assetWriter.isRecording = false
assetWriter.finish {
DispatchQueue.main.async {
let activity = UIActivityViewController(activityItems: [self.assetWriter.url!], applicationActivities: nil)
self.present(activity, animated: true, completion: nil)
}
print("This -- ",self.assetWriter.url.path)
do {
let attr = try FileManager.default.attributesOfItem(atPath: self.assetWriter.url.path)
let fileSize = attr[FileAttributeKey.size] as! UInt64
print("H264 file size = \(fileSize)")
DispatchQueue.main.async {
let player = AVPlayer(url: self.assetWriter.url)
let playerLayer = AVPlayerLayer(player: player)
playerLayer.videoGravity = .resizeAspectFill
playerLayer.frame = self.view.bounds
playerLayer.backgroundColor = UIColor.red.cgColor
self.view.layer.addSublayer(playerLayer)
player.play()
}
}catch{
print("issues with finishing")
}
}
}
func capturedFrame(buffers: CMSampleBuffer) {
if !frameCaptureRunning { return }
assetWriter.write(buffer: buffers)
}
}
FrameCapture.swift
protocol SFrameCaptureDelegate: class {
func capturedFrame(buffers: CMSampleBuffer)
}
class FrameCapture: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAudioDataOutputSampleBufferDelegate {
init(type: SessionType) {
super.init()
print("SFC - Frame Buffers initialized with Config - ", type.self)
sessionType = type
}
func startSession() {
print("SFC - Frame Buffers Session Starting")
sessionQueue.async {
self.configureSession(type: self.sessionType)
self.session.startRunning()
}
}
weak var delegate: SFrameCaptureDelegate?
enum SessionSetupResult {
case success
case notAuthorized
case configurationFailed
}
enum SessionType {
case Audio
case Video
case AudioVideo
}
let session = AVCaptureSession()
let sessionQueue = DispatchQueue(label: "sessionQueue", qos: .userInitiated)
let videoQueue = DispatchQueue(label: "videoQueue", qos: .userInitiated)
let audioQueue = DispatchQueue(label: "audioQueue", qos: .userInitiated)
var setupResult: SessionSetupResult = .success
var sessionType: SessionType = .Video
@objc dynamic var videoDeviceInput: AVCaptureDeviceInput!
let videoOutput = AVCaptureVideoDataOutput()
let audioOutput = AVCaptureAudioDataOutput()
var photoQualityPrioritizationMode: AVCapturePhotoOutput.QualityPrioritization = .balanced
/// MARK: SessionConfig
func configureSession(type: SessionType) {
if setupResult != .success { return }
session.beginConfiguration()
session.sessionPreset = .high
do {
var defaultVideoDevice: AVCaptureDevice?
if let dualCameraDevice = AVCaptureDevice.default(.builtInDualWideCamera, for: .video, position: .back) {
defaultVideoDevice = dualCameraDevice
} else if let backCameraDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back){
defaultVideoDevice = backCameraDevice
} else if let frontCameraDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front){
defaultVideoDevice = frontCameraDevice
}
guard let videoDevice = defaultVideoDevice else {
print("CAM - Camera unavailable")
setupResult = .configurationFailed
session.commitConfiguration()
return
}
let videoInputDevice = try AVCaptureDeviceInput(device: videoDevice)
if session.canAddInput(videoInputDevice) {
session.addInput(videoInputDevice)
videoDeviceInput = videoInputDevice
} else {
print("CAM - Couldn't add input to the session")
setupResult = .configurationFailed
session.commitConfiguration()
return
}
} catch {
print("CAM - Couldn't create device input. Error - ", error.localizedDescription)
setupResult = .configurationFailed
session.commitConfiguration()
return
}
if sessionType == .AudioVideo {
do {
let audioDevice = AVCaptureDevice.default(for: .audio)
let audioDeviceInput = try AVCaptureDeviceInput(device: audioDevice!)
print("SFC - in audio device input")
if session.canAddInput(audioDeviceInput) {
session.addInput(audioDeviceInput)
} else { print("CAM - Couldn't add audio input device to session.") }
} catch { print("couldn't create audio input device. Error - ",error.localizedDescription) }
}
videoOutput.setSampleBufferDelegate(self, queue: videoQueue)
if session.canAddOutput(videoOutput) {
session.addOutput(videoOutput)
photoQualityPrioritizationMode = .balanced
} else {
print("Could not add photo output to the session")
setupResult = .configurationFailed
session.commitConfiguration()
return
}
if sessionType == .AudioVideo {
audioOutput.setSampleBufferDelegate(self, queue: audioQueue)
if session.canAddOutput(audioOutput) {
session.addOutput(audioOutput)
} else {
print("Couldn't add audio output")
setupResult = .configurationFailed
session.commitConfiguration()
}
}
videoOutput.connections.first?.videoOrientation = .portrait
videoOutput.videoSettings = [ kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA ]
videoOutput.alwaysDiscardsLateVideoFrames = true
session.commitConfiguration()
}
/// MARK: CMSampleBufferDelegate
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
self.delegate?.capturedFrame(buffers: sampleBuffer)
}
}
AssetManager.swift
class AssetManager: NSObject {
private var assetWriter: AVAssetWriter?
private var videoInput: AVAssetWriterInput?
private var audioInput: AVAssetWriterInput?
var url: URL!
let writerQueue = DispatchQueue(label: "writerQueue", qos: .utility)
var isRecording = false
var video_frames_written = false
init(filename: String) {
super.init()
self.videoDirectory.appendPathComponent(filename)
self.url = self.videoDirectory
}
private var videoDirectory = SFileManager.shared.getDocumentDirectory()
private func setupWriter() {
SFileManager.shared.clearPreviousFiles(withPath: videoDirectory.path)
SFileManager.shared.createNewDirectory(withPath: videoDirectory.path)
printLog(item: self.videoDirectory)
self.assetWriter = try? AVAssetWriter(outputURL: self.videoDirectory, fileType: AVFileType.mp4)
let videoOutputSettings = [
AVVideoCodecKey: AVVideoCodecType.h264,
AVVideoHeightKey: 1280,
AVVideoWidthKey:720
] as [String : Any]
self.videoInput = AVAssetWriterInput(mediaType: .video, outputSettings: videoOutputSettings)
self.videoInput?.expectsMediaDataInRealTime = true
if let videoInput = self.videoInput, (self.assetWriter?.canAdd(videoInput))! {
self.assetWriter?.add(videoInput)
}
let audioOutputSettings = [
AVFormatIDKey: kAudioFormatMPEG4AAC,
AVNumberOfChannelsKey: 1,
AVSampleRateKey: 44100,
AVEncoderBitRateKey: 64000
] as [String: Any]
self.audioInput = AVAssetWriterInput(mediaType: .audio, outputSettings: audioOutputSettings)
self.audioInput?.expectsMediaDataInRealTime = true
if let audioInput = self.audioInput, (self.assetWriter?.canAdd(audioInput))! {
self.assetWriter?.add(audioInput)
printDone(item: "Asset writer added, \(String(describing: self.audioInput))")
} else {
printError(item: "No audio Input")
}
}
public func write(buffer: CMSampleBuffer) {
writerQueue.sync {
if assetWriter == nil { self.setupWriter() }
if self.assetWriter?.status == .unknown {
self.assetWriter?.startWriting()
self.assetWriter?.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(buffer))
printDone(item: "Started AssetWriter")
}
if self.assetWriter?.status == .failed {
printError(item: "Asset Writer Failed with Error: \(String(describing: self.assetWriter?.error))")
}
if CMSampleBufferDataIsReady(buffer) {
if let videoInput = self.videoInput, videoInput.isReadyForMoreMediaData {
videoInput.append(buffer)
}
if let audioInput = self.audioInput, audioInput.isReadyForMoreMediaData {
audioInput.append(buffer) // Crashes at this line
}
}
}
}
public func finish(completion: @escaping (() -> Void)) {
writerQueue.async {
self.assetWriter?.finishWriting(completionHandler: { [self] in
printDone(item: "Finished Writing")
completion()
})
}
}
}
您正在向 audioInput
写入视频缓冲区,根据缓冲区到达的方式,您还可以向 videoInput
.
在您的例子中,CMSampleBuffer
包含音频或视频,因此您将音频缓冲区附加到 audioInput
,将视频缓冲区附加到 videoInput
。
您可以通过比较 captureOutput:didOutput:
中的 output
与您的 audioInput
和 videoOutput
或查看缓冲区的 CMSampleBufferGetFormatDescription()
的CMFormatDescriptionGetMediaType()
,但更复杂。