AVAssetWriter 视频输出不播放附加音频
AVAssetWriter Video Output Does Not Play Appended Audio
我有一个 avassetwriter
可以录制带有应用滤镜的视频,然后通过 avqueueplayer
播放。
我的问题是音频输出附加到音频输入,但播放时没有声音播放。还没有遇到任何现有的解决方案,如果有任何可用的指导,我们将不胜感激..
其次,我必须循环播放的 .AVPlayerItemDidPlayToEndTime
通知观察器也没有触发..
AVCaptureSession 设置
func setupSession() {
let session = AVCaptureSession()
session.sessionPreset = .medium
guard
let camera = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front),
let mic = AVCaptureDevice.default(.builtInMicrophone, for: .audio, position: .unspecified),
let videoInput = try? AVCaptureDeviceInput(device: camera),
let audioInput = try? AVCaptureDeviceInput(device: mic),
session.canAddInput(videoInput), session.canAddInput(audioInput) else { return }
let videoOutput = AVCaptureVideoDataOutput()
let audioOutput = AVCaptureAudioDataOutput()
guard session.canAddOutput(videoOutput), session.canAddOutput(audioOutput) else { return }
let queue = DispatchQueue(label: "recordingQueue", qos: .userInteractive)
videoOutput.setSampleBufferDelegate(self, queue: queue)
audioOutput.setSampleBufferDelegate(self, queue: queue)
session.beginConfiguration()
session.addInput(videoInput)
session.addInput(audioInput)
session.addOutput(videoOutput)
session.addOutput(audioOutput)
session.commitConfiguration()
if let connection = videoOutput.connection(with: AVMediaType.video) {
if connection.isVideoStabilizationSupported { connection.preferredVideoStabilizationMode = .auto }
connection.isVideoMirrored = true
connection.videoOrientation = .portrait
}
_videoOutput = videoOutput
_audioOutput = audioOutput
_captureSession = session
DispatchQueue.global(qos: .default).async { session.startRunning() }
}
AVAssetWriter 设置 + didOutput 委托
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
let timestamp = CMSampleBufferGetPresentationTimeStamp(sampleBuffer).seconds
if output == _videoOutput {
if connection.isVideoOrientationSupported { connection.videoOrientation = .portrait }
guard let cvImageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
let ciImage = CIImage(cvImageBuffer: cvImageBuffer)
guard let filteredCIImage = applyFilters(inputImage: ciImage) else { return }
self.ciImage = filteredCIImage
guard let cvPixelBuffer = getCVPixelBuffer(from: filteredCIImage) else { return }
self.cvPixelBuffer = cvPixelBuffer
self.ciContext.render(filteredCIImage, to: cvPixelBuffer, bounds: filteredCIImage.extent, colorSpace: CGColorSpaceCreateDeviceRGB())
metalView.draw()
}
switch _captureState {
case .start:
guard let outputUrl = tempURL else { return }
let writer = try! AVAssetWriter(outputURL: outputUrl, fileType: .mp4)
let videoSettings = _videoOutput!.recommendedVideoSettingsForAssetWriter(writingTo: .mp4)
let videoInput = AVAssetWriterInput(mediaType: .video, outputSettings: videoSettings)
videoInput.mediaTimeScale = CMTimeScale(bitPattern: 600)
videoInput.expectsMediaDataInRealTime = true
let pixelBufferAttributes = [
kCVPixelBufferCGImageCompatibilityKey: NSNumber(value: true),
kCVPixelBufferCGBitmapContextCompatibilityKey: NSNumber(value: true),
kCVPixelBufferPixelFormatTypeKey: NSNumber(value: Int32(kCVPixelFormatType_32ARGB))
] as [String:Any]
let adapter = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoInput, sourcePixelBufferAttributes: pixelBufferAttributes)
if writer.canAdd(videoInput) { writer.add(videoInput) }
let audioSettings = _audioOutput!.recommendedAudioSettingsForAssetWriter(writingTo: .mp4) as? [String:Any]
let audioInput = AVAssetWriterInput(mediaType: .audio, outputSettings: audioSettings)
audioInput.expectsMediaDataInRealTime = true
if writer.canAdd(audioInput) { writer.add(audioInput) }
_filename = outputUrl.absoluteString
_assetWriter = writer
_assetWriterVideoInput = videoInput
_assetWriterAudioInput = audioInput
_adapter = adapter
_captureState = .capturing
_time = timestamp
writer.startWriting()
writer.startSession(atSourceTime: .zero)
case .capturing:
if output == _videoOutput {
if _assetWriterVideoInput?.isReadyForMoreMediaData == true {
let time = CMTime(seconds: timestamp - _time, preferredTimescale: CMTimeScale(600))
_adapter?.append(self.cvPixelBuffer, withPresentationTime: time)
}
} else if output == _audioOutput {
if _assetWriterAudioInput?.isReadyForMoreMediaData == true {
_assetWriterAudioInput?.append(sampleBuffer)
}
}
break
case .end:
guard _assetWriterVideoInput?.isReadyForMoreMediaData == true, _assetWriter!.status != .failed else { break }
_assetWriterVideoInput?.markAsFinished()
_assetWriterAudioInput?.markAsFinished()
_assetWriter?.finishWriting { [weak self] in
guard let output = self?._assetWriter?.outputURL else { return }
self?._captureState = .idle
self?._assetWriter = nil
self?._assetWriterVideoInput = nil
self?._assetWriterAudioInput = nil
self?.previewRecordedVideo(with: output)
}
default:
break
}
}
从您遇到的第一个音频或视频样本缓冲区的演示时间戳开始您的时间线:
writer.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
之前您从零开始时间线,但捕获的样本缓冲区具有时间戳,通常似乎与自系统启动以来经过的时间量相关,因此在文件“开始”( sourceTime
for AVAssetWriter
) 以及视频和音频出现时。
你的问题并没有说你不看视频,而且我希望一些视频播放器能够跳过一大堆内容直到您的样本开始的时间线,但无论如何文件都是错误的。
我有一个 avassetwriter
可以录制带有应用滤镜的视频,然后通过 avqueueplayer
播放。
我的问题是音频输出附加到音频输入,但播放时没有声音播放。还没有遇到任何现有的解决方案,如果有任何可用的指导,我们将不胜感激..
其次,我必须循环播放的 .AVPlayerItemDidPlayToEndTime
通知观察器也没有触发..
AVCaptureSession 设置
func setupSession() {
let session = AVCaptureSession()
session.sessionPreset = .medium
guard
let camera = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front),
let mic = AVCaptureDevice.default(.builtInMicrophone, for: .audio, position: .unspecified),
let videoInput = try? AVCaptureDeviceInput(device: camera),
let audioInput = try? AVCaptureDeviceInput(device: mic),
session.canAddInput(videoInput), session.canAddInput(audioInput) else { return }
let videoOutput = AVCaptureVideoDataOutput()
let audioOutput = AVCaptureAudioDataOutput()
guard session.canAddOutput(videoOutput), session.canAddOutput(audioOutput) else { return }
let queue = DispatchQueue(label: "recordingQueue", qos: .userInteractive)
videoOutput.setSampleBufferDelegate(self, queue: queue)
audioOutput.setSampleBufferDelegate(self, queue: queue)
session.beginConfiguration()
session.addInput(videoInput)
session.addInput(audioInput)
session.addOutput(videoOutput)
session.addOutput(audioOutput)
session.commitConfiguration()
if let connection = videoOutput.connection(with: AVMediaType.video) {
if connection.isVideoStabilizationSupported { connection.preferredVideoStabilizationMode = .auto }
connection.isVideoMirrored = true
connection.videoOrientation = .portrait
}
_videoOutput = videoOutput
_audioOutput = audioOutput
_captureSession = session
DispatchQueue.global(qos: .default).async { session.startRunning() }
}
AVAssetWriter 设置 + didOutput 委托
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
let timestamp = CMSampleBufferGetPresentationTimeStamp(sampleBuffer).seconds
if output == _videoOutput {
if connection.isVideoOrientationSupported { connection.videoOrientation = .portrait }
guard let cvImageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
let ciImage = CIImage(cvImageBuffer: cvImageBuffer)
guard let filteredCIImage = applyFilters(inputImage: ciImage) else { return }
self.ciImage = filteredCIImage
guard let cvPixelBuffer = getCVPixelBuffer(from: filteredCIImage) else { return }
self.cvPixelBuffer = cvPixelBuffer
self.ciContext.render(filteredCIImage, to: cvPixelBuffer, bounds: filteredCIImage.extent, colorSpace: CGColorSpaceCreateDeviceRGB())
metalView.draw()
}
switch _captureState {
case .start:
guard let outputUrl = tempURL else { return }
let writer = try! AVAssetWriter(outputURL: outputUrl, fileType: .mp4)
let videoSettings = _videoOutput!.recommendedVideoSettingsForAssetWriter(writingTo: .mp4)
let videoInput = AVAssetWriterInput(mediaType: .video, outputSettings: videoSettings)
videoInput.mediaTimeScale = CMTimeScale(bitPattern: 600)
videoInput.expectsMediaDataInRealTime = true
let pixelBufferAttributes = [
kCVPixelBufferCGImageCompatibilityKey: NSNumber(value: true),
kCVPixelBufferCGBitmapContextCompatibilityKey: NSNumber(value: true),
kCVPixelBufferPixelFormatTypeKey: NSNumber(value: Int32(kCVPixelFormatType_32ARGB))
] as [String:Any]
let adapter = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoInput, sourcePixelBufferAttributes: pixelBufferAttributes)
if writer.canAdd(videoInput) { writer.add(videoInput) }
let audioSettings = _audioOutput!.recommendedAudioSettingsForAssetWriter(writingTo: .mp4) as? [String:Any]
let audioInput = AVAssetWriterInput(mediaType: .audio, outputSettings: audioSettings)
audioInput.expectsMediaDataInRealTime = true
if writer.canAdd(audioInput) { writer.add(audioInput) }
_filename = outputUrl.absoluteString
_assetWriter = writer
_assetWriterVideoInput = videoInput
_assetWriterAudioInput = audioInput
_adapter = adapter
_captureState = .capturing
_time = timestamp
writer.startWriting()
writer.startSession(atSourceTime: .zero)
case .capturing:
if output == _videoOutput {
if _assetWriterVideoInput?.isReadyForMoreMediaData == true {
let time = CMTime(seconds: timestamp - _time, preferredTimescale: CMTimeScale(600))
_adapter?.append(self.cvPixelBuffer, withPresentationTime: time)
}
} else if output == _audioOutput {
if _assetWriterAudioInput?.isReadyForMoreMediaData == true {
_assetWriterAudioInput?.append(sampleBuffer)
}
}
break
case .end:
guard _assetWriterVideoInput?.isReadyForMoreMediaData == true, _assetWriter!.status != .failed else { break }
_assetWriterVideoInput?.markAsFinished()
_assetWriterAudioInput?.markAsFinished()
_assetWriter?.finishWriting { [weak self] in
guard let output = self?._assetWriter?.outputURL else { return }
self?._captureState = .idle
self?._assetWriter = nil
self?._assetWriterVideoInput = nil
self?._assetWriterAudioInput = nil
self?.previewRecordedVideo(with: output)
}
default:
break
}
}
从您遇到的第一个音频或视频样本缓冲区的演示时间戳开始您的时间线:
writer.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
之前您从零开始时间线,但捕获的样本缓冲区具有时间戳,通常似乎与自系统启动以来经过的时间量相关,因此在文件“开始”( sourceTime
for AVAssetWriter
) 以及视频和音频出现时。
你的问题并没有说你不看视频,而且我希望一些视频播放器能够跳过一大堆内容直到您的样本开始的时间线,但无论如何文件都是错误的。