AVAssetWriter - Capturing video but no audio

I'm making an app that records video. So far I've been able to record video and audio successfully with AVCaptureMovieFileOutput, but I now need to edit the video frames in real time to overlay some data onto the video, so I've started switching to AVAssetWriter.

After making the switch, I can record video (with my overlay) using AVCaptureVideoDataOutput, but AVCaptureAudioDataOutput never calls its delegate method, so my audio is never recorded.

Here is how I set up the AVCaptureSession:

    fileprivate func setupCamera() {

        //Set queues
        //The sample buffer delegate queue should be a serial queue, so callbacks arrive in order
        queue = DispatchQueue(label: "myqueue", qos: .utility, autoreleaseFrequency: .inherit, target: DispatchQueue.global())


        //The size of output video will be 720x1280
        print("Established AVCaptureSession")
        cameraSession.sessionPreset = AVCaptureSession.Preset.hd1280x720

        //Setup your camera
        //Detect which type of camera should be used via `isUsingFrontFacingCamera`
        let videoDevice: AVCaptureDevice
        videoDevice = AVCaptureDevice.default(AVCaptureDevice.DeviceType.builtInWideAngleCamera, for: AVMediaType.video, position: AVCaptureDevice.Position.front)!
        print("Created AVCaptureDeviceInput: video")

        //Setup your microphone
        var audioDevice: AVCaptureDevice
        //audioDevice = AVCaptureDevice.default(for: AVMediaType.audio)!
        audioDevice = AVCaptureDevice.default(AVCaptureDevice.DeviceType.builtInMicrophone, for: AVMediaType.audio, position: AVCaptureDevice.Position.unspecified)!
        print("Created AVCaptureDeviceInput: audio")


        do {
            cameraSession.beginConfiguration()
            cameraSession.automaticallyConfiguresApplicationAudioSession = false
            cameraSession.usesApplicationAudioSession = true


            // Add camera to your session
            let videoInput = try AVCaptureDeviceInput(device: videoDevice)
            if cameraSession.canAddInput(videoInput) {
                cameraSession.addInput(videoInput)
                print("Added AVCaptureDeviceInput: video")
            } else {
                print("Could not add VIDEO!!!")
            }

            // Add microphone to your session
            let audioInput = try AVCaptureDeviceInput(device: audioDevice)
            if cameraSession.canAddInput(audioInput) {
                cameraSession.addInput(audioInput)
                print("Added AVCaptureDeviceInput: audio")
            } else {
                print("Could not add MIC!!!")
            }


            //Define your video output
            videoDataOutput.videoSettings = [
                kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA,
            ]
            videoDataOutput.alwaysDiscardsLateVideoFrames = true
            if cameraSession.canAddOutput(videoDataOutput) {
                videoDataOutput.setSampleBufferDelegate(self, queue: queue)
                cameraSession.addOutput(videoDataOutput)
                print("Added AVCaptureDataOutput: video")
            }


            //Define your audio output
            if cameraSession.canAddOutput(audioDataOutput) {
                audioDataOutput.setSampleBufferDelegate(self, queue: queue)
                cameraSession.addOutput(audioDataOutput)
                print("Added AVCaptureDataOutput: audio")
            }


            //Set up the AVAssetWriter (to write to file)
            do {
                videoWriter = try AVAssetWriter(outputURL: getURL()!, fileType: AVFileType.mp4)
                print("Setup AVAssetWriter")


                //Video Settings
                let videoSettings: [String : Any] = [
                    AVVideoCodecKey  : AVVideoCodecType.h264,
                    AVVideoWidthKey  : 720,
                    AVVideoHeightKey : 1280,
                    ]
                videoWriterVideoInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: videoSettings)
                videoWriterVideoInput?.expectsMediaDataInRealTime = true
                print("Setup AVAssetWriterInput: Video")
                if videoWriter?.canAdd(videoWriterVideoInput!) == true {
                    videoWriter?.add(videoWriterVideoInput!)
                    print("Added AVAssetWriterInput: Video")
                } else {
                    print("Could not add VideoWriterInput to VideoWriter")
                }


                // Add the audio input

                //Audio Settings
                let audioSettings : [String : Any] = [
                    AVFormatIDKey : kAudioFormatMPEG4AAC,
                    AVSampleRateKey : 44100,
                    AVEncoderBitRateKey : 64000,
                    AVNumberOfChannelsKey: 1
                ]
                videoWriterAudioInput = AVAssetWriterInput(mediaType: AVMediaType.audio, outputSettings: audioSettings)
                videoWriterAudioInput?.expectsMediaDataInRealTime = true
                print("Setup AVAssetWriterInput: Audio")
                if videoWriter?.canAdd(videoWriterAudioInput!) == true {
                    videoWriter?.add(videoWriterAudioInput!)
                    print("Added AVAssetWriterInput: Audio")
                } else {
                    print("Could not add AudioWriterInput to VideoWriter")
                }
            }
            catch {
                print("ERROR setting up AVAssetWriter: \(error)")
                return
            }



            //PixelWriter
            videoWriterInputPixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterVideoInput!, sourcePixelBufferAttributes: [
                kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA,
                kCVPixelBufferWidthKey as String: 1280,
                kCVPixelBufferHeightKey as String: 768,
                kCVPixelBufferOpenGLESCompatibilityKey as String: true,
                ])
            print("Created AVAssetWriterInputPixelBufferAdaptor")


            //Present the preview of video
            previewLayer = AVCaptureVideoPreviewLayer(session: cameraSession)
            previewLayer.position = CGPoint.init(x: CGFloat(self.view.frame.width/2), y: CGFloat(self.view.frame.height/2))
            previewLayer.bounds = self.view.bounds
            previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
            cameraView.layer.addSublayer(previewLayer)
            print("Created AVCaptureVideoPreviewLayer")

            //Don't forget to start running your session
            //(this doesn't start recording yet!)
            cameraSession.commitConfiguration()
            cameraSession.startRunning()
        }
        catch let error {
            debugPrint(error.localizedDescription)
        }
    }
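
One note on the two audio-session flags above: with `automaticallyConfiguresApplicationAudioSession = false` and `usesApplicationAudioSession = true`, the capture session relies on the app's shared `AVAudioSession`, and the app itself has to put that session into a record-capable category and activate it, otherwise the microphone may deliver nothing. A minimal sketch of that configuration (the category, mode, and options here are assumptions, not taken from the post):

    import AVFoundation

    //Configure and activate the shared audio session before starting the capture session.
    //Assumed choices: .playAndRecord so the mic is usable, .videoRecording mode.
    func activateAudioSessionForRecording() {
        let session = AVAudioSession.sharedInstance()
        do {
            try session.setCategory(.playAndRecord, mode: .videoRecording, options: [.defaultToSpeaker])
            try session.setActive(true)
        } catch {
            print("Could not activate AVAudioSession: \(error)")
        }
    }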

Start recording:

    func startRecording()
    {
        print("Begin Recording...")
        let recordingClock = self.cameraSession.masterClock
        videoWriter?.startWriting()
        videoWriter?.startSession(atSourceTime: CMClockGetTime(recordingClock!))
        //Flip the flag only after the writer session has started,
        //so the delegate can't try to append buffers too early
        isRecording = true
    }
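
A note on `atSourceTime`: the sample buffers delivered by the capture outputs are timestamped against the session's `masterClock`, so `CMClockGetTime(recordingClock!)` keeps the writer's timeline aligned with the buffers appended later. An equivalent pattern, shown here only as a sketch (`sessionStarted` is an assumed Bool property, not part of the code above), is to defer `startSession` until the first buffer reaches the delegate and use that buffer's own timestamp:

        //Sketch: inside captureOutput(_:didOutput:from:), start the writer's
        //session from the first buffer's presentation timestamp instead of the clock.
        if isRecording && !sessionStarted {
            sessionStarted = true
            videoWriter?.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
        }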

Stop recording:

    func stopRecording()
    {

        if videoWriter?.status == .writing {
            videoWriterVideoInput?.markAsFinished()
            videoWriterAudioInput?.markAsFinished()
            print("video finished")
            print("audio finished")
        } else {
            print("not writing")
        }

        self.videoWriter?.finishWriting {
            self.isRecording = false
            print("finished writing")
            DispatchQueue.main.async {
                if self.videoWriter?.status == AVAssetWriterStatus.failed {
                    print("status: failed")
                } else if self.videoWriter?.status == AVAssetWriterStatus.completed {
                    print("status: completed")
                } else if self.videoWriter?.status == AVAssetWriterStatus.cancelled {
                    print("status: cancelled")
                } else {
                    print("status: unknown")
                }

                if let e = self.videoWriter?.error {
                    print("stop record error:", e)
                }
            }
        }

        print("Stop Recording!")

    }

Here is the delegate method. It gets called for video, but never for audio:

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {

        autoreleasepool {

                //The parameters here are non-optional, so just make sure the buffer's data is ready
                guard CMSampleBufferDataIsReady(sampleBuffer) else {
                    return
                }

                if (connection.isVideoOrientationSupported) {
                    connection.videoOrientation = currentVideoOrientation()
                } else {
                    return
                }

                if (connection.isVideoStabilizationSupported) {
                    //connection.preferredVideoStabilizationMode = AVCaptureVideoStabilizationMode.auto
                }


                if !self.isRecording
                {
                    return
                }


                if let audio = self.videoWriterAudioInput,
                    connection.audioChannels.count > 0 {
                    //EXECUTION NEVER REACHES HERE
                    if audio.isReadyForMoreMediaData {
                        queue!.async {
                            audio.append(sampleBuffer)
                        }
                        return
                    }
                }


                if let camera = self.videoWriterVideoInput, camera.isReadyForMoreMediaData {

                    //This is getting called!!!
                    //(`image` and `timestamp` come from code omitted from this snippet)
                    queue!.async {
                        self.videoWriterInputPixelBufferAdaptor.append(self.imageToBuffer(from: image!)!, withPresentationTime: timestamp)
                    }
                }
        }//End autoreleasepool


    }




}

I'm certain the problem isn't with my devices or inputs, because I was able to record video and audio successfully with AVCaptureMovieFileOutput. I've also read the related posts, without any luck:

AVAssetWriter audio with video together

I tore my hair out over this for days. My mistake turned out to be simple: the delegate method was being called, but it returned before it ever reached my audio statements. These are the culprits, which needed to be moved below the audio-handling part of my code (a sketch of the reordered delegate follows the snippet):

            if (connection.isVideoOrientationSupported) {
                connection.videoOrientation = currentVideoOrientation()
            } else {
                return
            }

            if (connection.isVideoStabilizationSupported) {
                //connection.preferredVideoStabilizationMode = AVCaptureVideoStabilizationMode.auto
            }
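
For anyone hitting the same thing: the single delegate receives sample buffers from both the video and the audio data output, and the `else { return }` on the orientation check threw every audio buffer away before the audio code could run. Below is a rough sketch of the reordered delegate (structure only, using the properties defined earlier; the overlay rendering and timestamp handling are omitted just as in the original):

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {

        guard CMSampleBufferDataIsReady(sampleBuffer), isRecording else { return }

        //Handle audio buffers FIRST, before any video-only checks
        if connection.audioChannels.count > 0, let audio = videoWriterAudioInput {
            if audio.isReadyForMoreMediaData {
                audio.append(sampleBuffer)
            }
            return
        }

        //Video-only checks come after the audio path, so they can no longer swallow audio buffers
        if connection.isVideoOrientationSupported {
            connection.videoOrientation = currentVideoOrientation()
        } else {
            return
        }

        if let camera = videoWriterVideoInput, camera.isReadyForMoreMediaData {
            //...render the overlay and append through videoWriterInputPixelBufferAdaptor,
            //exactly as in the delegate shown above
        }
    }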