Failure starting audio queue ≥˚˛ˇ

I'm trying to build an app that, when the user doesn't know the answer, speaks a piece of text that changes at every level. The goal is to read the sentence shown on screen aloud (yes, it's made for kids):

@IBAction func dontknow(_ sender: Any) {
    // textLabel.text is a String?, so it needs unwrapping here
    let utterance = AVSpeechUtterance(string: textLabel.text ?? "")
    utterance.voice = AVSpeechSynthesisVoice(language: "fr-FR")
    utterance.rate = 0.4

    let synthesizer = AVSpeechSynthesizer()
    synthesizer.speak(utterance)
}

The app is structured like this: if the user doesn't know the sentence, he can tap a button to have the text read out; otherwise, if he says it correctly, he moves on to the next level.

The first time the user taps the speak button, the app says the text. But once the user has tried to say the sentence himself and then taps the speak button on the next level, nothing happens. It just throws this error: Failure starting audio queue ≥˚˛ˇ

Full code:

import UIKit
import AVFoundation
import Speech

class ReadViewController: UIViewController, SFSpeechRecognizerDelegate {
    var texts = ["Je mange des pâtes", "Bonjour Jean comment vas-tu", "Qui est-ce", "J'en ai marre", "Je ne te trouve pas gentil", "Pourquoi tu ne veux pas","Tu es si gentil", "Tu es beau", "Dans combien de temps", "Tu as fait de beaux rêves", "Cette application est une révolution"];
    var text = ""
    var transcriptedText = "";
    var effect:UIVisualEffect!




    @IBOutlet weak var dontknowButton: UIButton!
    @IBOutlet weak var listenButton: UIButton!
    @IBOutlet weak var visualEffectView: UIVisualEffectView!
    @IBOutlet var alertView: UIView!
    @IBOutlet weak var regameButton: UIButton!
    @IBOutlet weak var textView: UILabel!
    @IBOutlet weak var textLabel: UILabel!
    @IBOutlet weak var microphoneButton: UIButton!
    @IBOutlet weak var transci: UILabel!


    private let speechRecognizer = SFSpeechRecognizer(locale: Locale.init(identifier: "fr-FR"))  //1
    private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?
    private var recognitionTask: SFSpeechRecognitionTask?
    private let audioEngine = AVAudioEngine()
    var recordingSession: AVAudioSession!
    var player: AVAudioPlayer!

    override func viewDidLoad() {
        super.viewDidLoad()
        listenButton.layer.cornerRadius = 10
        dontknowButton.layer.cornerRadius = dontknowButton.frame.width / 2
        self.restart()
        // Do any additional setup after loading the view.

        effect = visualEffectView.effect
        visualEffectView.layer.opacity = 0
        visualEffectView.effect = nil
        regameButton.layer.cornerRadius = 10


        microphoneButton.layer.cornerRadius = 10

        microphoneButton.isEnabled = false  //2

        speechRecognizer?.delegate = self  //3


        SFSpeechRecognizer.requestAuthorization { (authStatus) in  //4

            var isButtonEnabled = false

            switch authStatus {  //5
            case .authorized:
                isButtonEnabled = true

            case .denied:
                isButtonEnabled = false
                self.alert()

            case .restricted:
                isButtonEnabled = false
                self.alert()

            case .notDetermined:
                isButtonEnabled = false
                print("Speech recognition not yet authorized")
            }

            OperationQueue.main.addOperation() {
                self.microphoneButton.isEnabled = isButtonEnabled
            }
        }

    }
    func alert() {
        let alertController = UIAlertController (title: "Désolé", message: "Pour le bon fonctionnement de l'application, vous devez activer la reconnaissance vocale dans les réglages.", preferredStyle: .alert)

        let settingsAction = UIAlertAction(title: "Réglages", style: .default) { (_) -> Void in
            guard let settingsUrl = URL(string: UIApplicationOpenSettingsURLString) else {
                return
            }

            if UIApplication.shared.canOpenURL(settingsUrl) {
                UIApplication.shared.open(settingsUrl, completionHandler: { (success) in
                    print("Settings opened: \(success)") // Prints true
                })
            }
        }
        alertController.addAction(settingsAction)


        present(alertController, animated: true, completion: nil)
    }
    func restart() {
        let randomNumber = random(0..<texts.count)
        text = texts[randomNumber]
        textLabel.text = text
    }

    func startRecording() {

        if recognitionTask != nil {
            recognitionTask?.cancel()
            recognitionTask = nil
        }

        let audioSession = AVAudioSession.sharedInstance()
        do {
            try audioSession.setCategory(AVAudioSessionCategoryRecord)
            try audioSession.setMode(AVAudioSessionModeMeasurement)
            try audioSession.setActive(true, with: .notifyOthersOnDeactivation)
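            // The session stays in the record category from here on; nothing
            // in this class switches it back to a playback category before the
            // speech synthesizer tries to start its audio queue.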
        } catch {
            print("audioSession properties weren't set because of an error.")
        }

        recognitionRequest = SFSpeechAudioBufferRecognitionRequest()

        let inputNode = audioEngine.inputNode

        guard let recognitionRequest = recognitionRequest else {
            fatalError("Unable to create an SFSpeechAudioBufferRecognitionRequest object")
        }

        recognitionRequest.shouldReportPartialResults = true

        recognitionTask = speechRecognizer?.recognitionTask(with: recognitionRequest, resultHandler: { (result, error) in

            var isFinal = false

            if let result = result {
                self.transcriptedText = result.bestTranscription.formattedString
                self.transci.text = self.transcriptedText
                isFinal = result.isFinal
            }

            if error != nil || isFinal {
                self.audioEngine.stop()
                inputNode.removeTap(onBus: 0)

                self.recognitionRequest = nil
                self.recognitionTask = nil
                self.transci.text = ""

                self.microphoneButton.isEnabled = true
            }
        })

        let recordingFormat = inputNode.outputFormat(forBus: 0)
        inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer, when) in
            self.recognitionRequest?.append(buffer)
        }

        audioEngine.prepare()

        do {
            try audioEngine.start()
        } catch {
            print("audioEngine couldn't start because of an error.")
        }


    }


    func speechRecognizer(_ speechRecognizer: SFSpeechRecognizer, availabilityDidChange available: Bool) {
        microphoneButton.isEnabled = available
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }

    func random(_ range: Range<Int>) -> Int {
        return range.lowerBound + Int(arc4random_uniform(UInt32(range.upperBound - range.lowerBound)))
    }


    @IBAction func start(_ sender: Any) {

        if audioEngine.isRunning {
            if self.transcriptedText == self.text {
                // When user won
                self.restart()
            }
            else {
                // When user loses

                animateIn()

            }
            audioEngine.stop()
            recognitionRequest?.endAudio()
            microphoneButton.isEnabled = false
            microphoneButton.setTitle("Commencer", for: .normal)

        } else {
            startRecording()
            microphoneButton.setTitle("Arrêter", for: .normal)
        }

    }
    @IBAction func listen(_ sender: Any) {
        let utterance = AVSpeechUtterance(string: "wesh")
        utterance.voice = AVSpeechSynthesisVoice(language: "fr-FR")!
        utterance.rate = 0.4

        let synthesizer = AVSpeechSynthesizer()
        synthesizer.speak(utterance)
    }
    @IBAction func reGameOn(_ sender: Any) {
        animateOut()
    }

    @IBAction func dontknow(_ sender: Any) {
        let utterance = AVSpeechUtterance(string: textLabel.text ?? "")
        utterance.voice = AVSpeechSynthesisVoice(language: "fr-FR")!
        utterance.rate = 0.4

        let synthesizer = AVSpeechSynthesizer()
        synthesizer.speak(utterance)
        print(synthesizer.isSpeaking)
        print(synthesizer.isPaused)
    }

    func animateIn() {
        self.view.addSubview(alertView)
        alertView.center = self.view.center

        alertView.transform = CGAffineTransform.init(scaleX: 1.3, y: 1.3)
        alertView.alpha = 0

        UIView.animate(withDuration: 0.4) {
            self.alertView.layer.cornerRadius = 10
            self.visualEffectView.layer.opacity = 1
            self.visualEffectView.effect = self.effect
            self.alertView.alpha = 1
            self.alertView.transform = CGAffineTransform.identity
        }

    }


    func animateOut () {
        UIView.animate(withDuration: 0.3, animations: {
            self.alertView.layer.cornerRadius = 0
            self.alertView.transform = CGAffineTransform.init(scaleX: 1.3, y: 1.3)
            self.alertView.alpha = 0

            self.visualEffectView.effect = nil
            self.visualEffectView.layer.opacity = 0
        }) { (success:Bool) in
            self.alertView.removeFromSuperview()
        }
    }
}

Try adding the following code at the end of @IBAction func dontknow. It might work:

do {
    try AVAudioSession.sharedInstance().setCategory(AVAudioSessionCategoryPlayback,
                                                    with: .duckOthers)
} catch {
    print(error)
}
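
For context: startRecording() leaves the shared AVAudioSession in the record category, and nothing in the class restores a playback category afterwards, which is why the synthesizer's audio queue fails to start on the second tap. Below is a minimal sketch of the handler with that reset applied, under two assumptions beyond the original post: the category switch happens before speak(_:) rather than after (the synthesizer starts its audio queue almost immediately), and the synthesizer is held as a property, since a local AVSpeechSynthesizer can be deallocated before it finishes speaking.

    let synthesizer = AVSpeechSynthesizer()  // held as a property so it outlives the @IBAction

    @IBAction func dontknow(_ sender: Any) {
        // Assumption: restore a playback-capable category before speaking;
        // startRecording() left the shared session in the record category.
        do {
            try AVAudioSession.sharedInstance().setCategory(AVAudioSessionCategoryPlayback,
                                                            with: .duckOthers)
            try AVAudioSession.sharedInstance().setActive(true)
        } catch {
            print(error)
        }

        let utterance = AVSpeechUtterance(string: textLabel.text ?? "")
        utterance.voice = AVSpeechSynthesisVoice(language: "fr-FR")
        utterance.rate = 0.4
        synthesizer.speak(utterance)
    }

startRecording() already sets the record category at the top of the method, so the next recording pass reconfigures the session on its own.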