ios speech recognition Error Domain=kAFAssistantErrorDomain Code=216 "(null)"

Basically I am learning the iOS speech recognition module, following this tutorial: https://medium.com/ios-os-x-development/speech-recognition-with-swift-in-ios-10-50d5f4e59c48

But when I test it on my iPhone 6, I always get this error: Error Domain=kAFAssistantErrorDomain Code=216 "(null)"

I have searched online, but I could find very little information about it.

Here is my code:

//
//  ViewController.swift
//  speech_sample
//
//  Created by Peizheng Ma on 6/22/17.
//  Copyright © 2017 Peizheng Ma. All rights reserved.
//

import UIKit
import AVFoundation
import Speech

class ViewController: UIViewController, SFSpeechRecognizerDelegate {

//MARK: speech recognize variables
let audioEngine = AVAudioEngine()
let speechRecognizer: SFSpeechRecognizer? = SFSpeechRecognizer(locale: Locale.init(identifier: "en-US"))
var request = SFSpeechAudioBufferRecognitionRequest()
var recognitionTask: SFSpeechRecognitionTask?
var isRecording = false

override func viewDidLoad() {
    // super.viewDidLoad()
    // get Authorization
    self.requestSpeechAuthorization()
}

override func didReceiveMemoryWarning() {
    super.didReceiveMemoryWarning()
    // Dispose of any resources that can be recreated.
}

//MARK: properties
@IBOutlet weak var detectText: UILabel!
@IBOutlet weak var startButton: UIButton!

//MARK: actions
@IBAction func startButtonTapped(_ sender: UIButton) {
    if isRecording == true {


        audioEngine.stop()
//            if let node = audioEngine.inputNode {
//                node.removeTap(onBus: 0)
//            }
        audioEngine.inputNode?.removeTap(onBus: 0)
        // Indicate that the audio source is finished and no more audio will be appended
        self.request.endAudio()

        // Cancel the previous task if it's running
        if let recognitionTask = recognitionTask {
            recognitionTask.cancel()
            self.recognitionTask = nil
        }


        //recognitionTask?.cancel()
        //self.recognitionTask = nil
        isRecording = false
        startButton.backgroundColor = UIColor.gray
    } else {
        self.recordAndRecognizeSpeech()
        isRecording = true
        startButton.backgroundColor = UIColor.red
    }
}

//MARK: show alert
func showAlert(title: String, message: String, handler: ((UIAlertAction) -> Swift.Void)? = nil) {
    DispatchQueue.main.async { [unowned self] in
        let alertController = UIAlertController(title: title, message: message, preferredStyle: .alert)
        alertController.addAction(UIAlertAction(title: "OK", style: .cancel, handler: handler))
        self.present(alertController, animated: true, completion: nil)
    }
}

//MARK: Recognize Speech
func recordAndRecognizeSpeech() {
    // Setup Audio Session
    guard let node = audioEngine.inputNode else { return }
    let recordingFormat = node.outputFormat(forBus: 0)
    node.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { buffer, _ in
        self.request.append(buffer)
    }
    audioEngine.prepare()
    do {
        try audioEngine.start()
    } catch {
        self.showAlert(title: "SpeechNote", message: "There has been an audio engine error.", handler: nil)
        return print(error)
    }
    guard let myRecognizer = SFSpeechRecognizer() else {
        self.showAlert(title: "SpeechNote", message: "Speech recognition is not supported for your current locale.", handler: nil)
        return
    }
    if !myRecognizer.isAvailable {
        self.showAlert(title: "SpeechNote", message: "Speech recognition is not currently available. Check back at a later time.", handler: nil)
        // Recognizer is not available right now
        return
    }
    recognitionTask = speechRecognizer?.recognitionTask(with: request, resultHandler: { result, error in
        if let result = result {

            let bestString = result.bestTranscription.formattedString
            self.detectText.text = bestString

//                var lastString: String = ""
//                for segment in result.bestTranscription.segments {
//                    let indexTo = bestString.index(bestString.startIndex, offsetBy: segment.substringRange.location)
//                    lastString = bestString.substring(from: indexTo)
//                }
//                self.checkForColorsSaid(resultString: lastString)
        } else if let error = error {
            self.showAlert(title: "SpeechNote", message: "There has been a speech recognition error.", handler: nil)
            print(error)
        }
    })
}

//MARK: - Check Authorization Status
func requestSpeechAuthorization() {
    SFSpeechRecognizer.requestAuthorization { authStatus in
        OperationQueue.main.addOperation {
            switch authStatus {
            case .authorized:
                self.startButton.isEnabled = true
            case .denied:
                self.startButton.isEnabled = false
                self.detectText.text = "User denied access to speech recognition"
            case .restricted:
                self.startButton.isEnabled = false
                self.detectText.text = "Speech recognition restricted on this device"
            case .notDetermined:
                self.startButton.isEnabled = false
                self.detectText.text = "Speech recognition not yet authorized"
            }
        }
    }
}


}

Thank you very much.

Hey, I ran into the same error, but now it works absolutely fine. I hope this code helps you too :)

import UIKit
import Speech

class SpeechVC: UIViewController {

@IBOutlet weak var slabel: UILabel!
@IBOutlet weak var sbutton: UIButton!

let audioEngine = AVAudioEngine()
let SpeechRecognizer : SFSpeechRecognizer? = SFSpeechRecognizer()
let request = SFSpeechAudioBufferRecognitionRequest()
var recognitionTask:SFSpeechRecognitionTask?

var isRecording = false
override func viewDidLoad() {
    super.viewDidLoad()


    self.requestSpeechAuthorization()

    // Do any additional setup after loading the view, typically from a nib.
}
func recordAndRecognizeSpeech()
{
    guard let node = audioEngine.inputNode else { return }
    let recordingFormat = node.outputFormat(forBus: 0)
    node.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { buffer , _ in

        self.request.append(buffer)
    }
    audioEngine.prepare()
    do
    {
        try audioEngine.start()
    }catch
    {
        return print(error)
    }
    guard let myRecognizer = SFSpeechRecognizer() else {
        return
    }
    if !myRecognizer.isAvailable
    {
        return
    }
    recognitionTask = SpeechRecognizer?.recognitionTask(with: request, resultHandler: { result, error in

        if let result = result
        {
            let bestString = result.bestTranscription.formattedString
            self.slabel.text = bestString

            var lastString : String = ""
            for segment in result.bestTranscription.segments
            {
                let indexTo = bestString.index(bestString.startIndex, offsetBy: segment.substringRange.location)
                lastString = bestString.substring(from: indexTo)
            }

        }else if let error = error
        {
            print(error)
        }
    })
}


@IBAction func startAction(_ sender: Any) {
    if isRecording == true
    {
        audioEngine.stop()
        recognitionTask?.cancel()
        isRecording = false
        sbutton.backgroundColor = UIColor.gray
    }
    else{
        self.recordAndRecognizeSpeech()
        isRecording = true
        sbutton.backgroundColor = UIColor.red
    }

}
func cancelRecording()
{
    audioEngine.stop()
    if let node = audioEngine.inputNode
    {
        node.removeTap(onBus: 0)
    }
    recognitionTask?.cancel()

}


func requestSpeechAuthorization()
{
    SFSpeechRecognizer.requestAuthorization { authStatus in
        OperationQueue.main.addOperation {
            switch authStatus
            {
            case .authorized :
                self.sbutton.isEnabled = true
            case .denied :
                self.sbutton.isEnabled = false
                self.slabel.text = "User denied access to speech recognition"
            case .restricted :
                self.sbutton.isEnabled = false
                self.slabel.text = "Speech Recognition is restricted on this Device"
            case .notDetermined :
                self.sbutton.isEnabled = false
                self.slabel.text = "Speech Recognition not yet authorized"
            }
        }

    }
}
}

I ran into the same problem while following the same (excellent) tutorial, even when using the sample code from GitHub. To fix it, I had to do two things:

First, add request.endAudio() at the start of the code that stops the recording in the startButtonTapped action. This marks the end of the recording. I see you have already done that in your sample code.

Second, in the recordAndRecognizeSpeech function, when the recognitionTask starts, if no speech is detected then result will be nil and the error case fires. So I test for result != nil before trying to assign the result.

So the code for those two functions looks like this. 1. Updated startButtonTapped:

@IBAction func startButtonTapped(_ sender: UIButton) {
    if isRecording {

        request.endAudio() // Added line to mark end of recording
        audioEngine.stop()

        if let node = audioEngine.inputNode {
            node.removeTap(onBus: 0)
        }
        recognitionTask?.cancel()

        isRecording = false
        startButton.backgroundColor = UIColor.gray

    } else {

        self.recordAndRecognizeSpeech()
        isRecording = true
        startButton.backgroundColor = UIColor.red
    }
}

and 2. the updated code from the recognitionTask = ... line onward inside recordAndRecognizeSpeech:

    recognitionTask = speechRecognizer?.recognitionTask(with: request, resultHandler: { (result, error) in
        if result != nil { // check to see if result is empty (i.e. no speech found)
            if let result = result {
                let bestString = result.bestTranscription.formattedString
                self.detectedTextLabel.text = bestString

                var lastString: String = ""
                for segment in result.bestTranscription.segments {
                    let indexTo = bestString.index(bestString.startIndex, offsetBy: segment.substringRange.location)
                    lastString = bestString.substring(from: indexTo)
                }
                self.checkForColoursSaid(resultString: lastString)

            } else if let error = error {
                self.sendAlert(message: "There has been a speech recognition error")
                print(error)
            }
        }

    }) 

Hope that helps.

This will prevent two errors: the Code=216 error mentioned above and the 'SFSpeechAudioBufferRecognitionRequest cannot be re-used' error.

  1. Stop recognition with finish rather than cancel

  2. Stop the audio

Like this:

    // stop recognition
    recognitionTask?.finish()
    recognitionTask = nil

    // stop audio
    request.endAudio()
    audioEngine.stop()
    audioEngine.inputNode.removeTap(onBus: 0) // Remove tap on bus when stopping recording.

P.S. audioEngine.inputNode no longer seems to be an optional, so I did not use an if let construct.
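
Pulled together, the stop logic above could live in a single helper. This is only a minimal sketch (the stopRecording name is illustrative, it reuses the properties declared earlier, and it assumes an SDK where inputNode is non-optional, as noted in the P.S.):

    // Illustrative helper combining the steps above; not from the original answer.
    func stopRecording() {
        // Finish (rather than cancel) so the final recognition result is still delivered.
        recognitionTask?.finish()
        recognitionTask = nil

        // Signal that no more audio is coming, then tear down the engine and the tap.
        request.endAudio()
        audioEngine.stop()
        audioEngine.inputNode.removeTap(onBus: 0)
    }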

I got this error because I was running the app on the simulator. Running it on a real device fixed the problem.
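
If you still want the project to build in the simulator without hitting that path, one option is to guard the call site. This is a minimal sketch, assuming Swift 4.1 or later for targetEnvironment and reusing detectText and recordAndRecognizeSpeech from the question's code:

    #if targetEnvironment(simulator)
    // Speech recognition is unreliable in the simulator, so show a hint instead of starting the engine.
    self.detectText.text = "Run on a real device to test speech recognition."
    #else
    self.recordAndRecognizeSpeech()
    #endif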