SpeechRecognition: Looping through a set of questions, waiting for each oral response before asking the next question

I have a questionnaire with 6 questions, which are presented via speechSynthesis. After each question I need to wait for the oral response (which I will then process) before asking the next question. The code below is my attempt. It does reach the callback, but how do I make the logic run sequentially: 'state question', 'listen', 'state next question', 'listen', ...?


//..ToDo: Because we need a verbal response for each question,
//..   we need to change the recognition.onresult callback

function processPromptedInteraction(event)
{
    var speechToText = event.results[0][0].transcript;
    if (speechToText.includes('yes'))
    {    }
    else if (speechToText.includes('no'))
    {    }
    else
    {    }
}

var strQuestion = '';
for (var i = 0; i < questions.length; i++)
{
    recognition.onresult = processPromptedInteraction; //.. Callback function
    strQuestion = questions[i].question;
    say(strQuestion);
}

Events are asynchronous, so your code does not wait for the user to answer the questions one by one. Hopefully the following solution works for you. Let me know if I missed anything.

Note: for browser security reasons the code may not run here. Use the jsfiddle link below to run it; the final result is printed to the browser console.

https://jsfiddle.net/ajai/L8p4aqtr/

class Questions {
  constructor(questions) {
    this.questions = questions;
    this.currentIndex = 0;
    this.MAX = this.questions.length - 1;

    // answers hash
    this.answers = questions.reduce((hash, q) => {
      hash[q] = '';
      return hash;
    }, {});

    this.initSpeech();
  }

  initSpeech() {
    const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;

    this.speechSynthesis = window.speechSynthesis;

    this.recognition = new SpeechRecognition();

    this.recognition.continuous = true;
    this.recognition.interimResults = false;

    this.recognition.onresult = this.recognize.bind(this);
  }

  recognize(event) {
    const last = event.results.length - 1;
    const result = event.results[last][0].transcript;

    if (result.includes('yes')) {
      this.setAnswer('Yes');
      this.next();
    } else if (result.includes('no')) {
      this.setAnswer('No');
      this.next();
    } else {
      // ask same question again
      this.say('Can\'t recognize your answer');
      this.ask();
    }
  }

  setAnswer(answer) {
    this.answers[this.questions[this.currentIndex]] = answer;
  }

  start() {
    this.currentIndex = 0;
    this.recognition.start();

    this.ask();

    return this;
  }

  stop() {
    this.recognition.stop();

    this.onComplete && this.onComplete(this.answers);
  }

  ask() {
    const questionToAsk = this.questions[this.currentIndex];
    this.say(questionToAsk);
  }

  say(msg) {
    const synth = new SpeechSynthesisUtterance(msg);
    this.speechSynthesis.speak(synth);
  }

  next() {
    if (this.currentIndex < this.MAX) {
      this.currentIndex++;
      this.ask();
    } else {
      this.stop();
    }
  }

  getAnswers() {
    return this.answers;
  }

  static create(questions) {
    return new Questions(questions);
  }
}

// const q = new Questions(['Question 1?', 'Question 2?', 'Question 3?']);
const q = Questions.create(['Question 1?', 'Question 2?', 'Question 3?']);

q.start().onComplete = function(result) {
  console.log(result);
};
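
If you prefer to drive the 'state question, listen, state next question' flow in one place, below is a minimal sketch (not part of the original answer) of the same idea using Promises and async/await. It assumes the same Web Speech APIs; listenOnce and askAll are helper names introduced here only for illustration.

const SpeechRecognitionCtor = window.SpeechRecognition || window.webkitSpeechRecognition;

// Speak a message and resolve once the utterance has finished.
function say(msg) {
  return new Promise((resolve) => {
    const utterance = new SpeechSynthesisUtterance(msg);
    utterance.onend = resolve;
    window.speechSynthesis.speak(utterance);
  });
}

// Listen for a single response and resolve with its transcript.
function listenOnce() {
  return new Promise((resolve, reject) => {
    const recognition = new SpeechRecognitionCtor();
    recognition.interimResults = false;
    recognition.onresult = (event) => {
      resolve(event.results[0][0].transcript);
      recognition.stop();
    };
    recognition.onerror = (event) => reject(event.error);
    recognition.start();
  });
}

// Ask each question in turn, waiting for the oral reply before moving on.
async function askAll(questions) {
  const answers = {};
  for (const question of questions) {
    await say(question);                              // state the question
    const reply = (await listenOnce()).toLowerCase(); // wait for the response
    answers[question] = reply.includes('yes') ? 'Yes'
                      : reply.includes('no')  ? 'No'
                      : reply;
  }
  return answers;
}

// askAll(['Question 1?', 'Question 2?', 'Question 3?']).then(console.log);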