IBM Watson Speech to Text Service is not giving response in Unity3d

I have an ExampleStreaming class, which I actually took from the GitHub repo of the IBM Watson SDK (the speech-to-text service demo). Here it is:

using UnityEngine;
using System.Collections;
using IBM.Watson.DeveloperCloud.Logging;
using IBM.Watson.DeveloperCloud.Services.SpeechToText.v1;
using IBM.Watson.DeveloperCloud.Utilities;
using IBM.Watson.DeveloperCloud.DataTypes;

public class ExampleStreaming : MonoBehaviour
{
  private int m_RecordingRoutine = 0;
  private string m_MicrophoneID = null;
  private AudioClip m_Recording = null;
  private int m_RecordingBufferSize = 5;
  private int m_RecordingHZ = 22050;

  private SpeechToText m_SpeechToText = new SpeechToText();

  void Start()
  {
    LogSystem.InstallDefaultReactors();
    Log.Debug("ExampleStreaming", "Start();");

    Active = true;
        Debug.Log("start");
    StartRecording();
  }

  public void Update()
  {
    Debug.Log(m_SpeechToText.IsListening);
  }

  public bool Active
  {
    get { return m_SpeechToText.IsListening; }
    set
    {
      if (value && !m_SpeechToText.IsListening)
      {
        m_SpeechToText.DetectSilence = true;
        m_SpeechToText.EnableWordConfidence = false;
        m_SpeechToText.EnableTimestamps = false;
        m_SpeechToText.SilenceThreshold = 0.03f;
        m_SpeechToText.MaxAlternatives = 1;
        m_SpeechToText.EnableContinousRecognition = true;
        m_SpeechToText.EnableInterimResults = true;
        m_SpeechToText.OnError = OnError;
        m_SpeechToText.StartListening(OnRecognize);
      }
      else if (!value && m_SpeechToText.IsListening)
      {
        m_SpeechToText.StopListening();
      }
    }
  }

  private void StartRecording()
  {
    if (m_RecordingRoutine == 0)
    {
            Debug.Log("m_RecordingRoutine");
            UnityObjectUtil.StartDestroyQueue();
      m_RecordingRoutine = Runnable.Run(RecordingHandler());
    }
  }

  private void StopRecording()
  {
    if (m_RecordingRoutine != 0)
    {
      Microphone.End(m_MicrophoneID);
      Runnable.Stop(m_RecordingRoutine);
      m_RecordingRoutine = 0;
    }
  }

  private void OnError(string error)
  {
    Active = false;

    Log.Debug("ExampleStreaming", "Error! {0}", error);
  }

  private IEnumerator RecordingHandler()
  {
    Log.Debug("ExampleStreaming", "devices: {0}", Microphone.devices);

    m_MicrophoneID = Microphone.devices[0];
    Debug.Log("m_MicrophoneID : " + m_MicrophoneID);
    m_Recording = Microphone.Start(m_MicrophoneID, true, m_RecordingBufferSize, m_RecordingHZ);
    yield return null;      // let m_RecordingRoutine get set..

    if (m_Recording == null)
    {
      Debug.Log("m_Recording is null");
      StopRecording();
      yield break;
    }
    Debug.Log("m_Recording : " + m_Recording.length);

    bool bFirstBlock = true;
    int midPoint = m_Recording.samples / 2;
    float[] samples = null;

    while (m_RecordingRoutine != 0 && m_Recording != null)
    {
      int writePos = Microphone.GetPosition(m_MicrophoneID);
      if (writePos > m_Recording.samples || !Microphone.IsRecording(m_MicrophoneID))
      {
        Log.Error("MicrophoneWidget", "Microphone disconnected.");

        StopRecording();
        yield break;
      }

      if ((bFirstBlock && writePos >= midPoint)
        || (!bFirstBlock && writePos < midPoint))
      {
        // front block is recorded, make a RecordClip and pass it onto our callback.
        samples = new float[midPoint];
        m_Recording.GetData(samples, bFirstBlock ? 0 : midPoint);

        AudioData record = new AudioData();
        record.MaxLevel = Mathf.Max(samples);
        record.Clip = AudioClip.Create("Recording", midPoint, m_Recording.channels, m_RecordingHZ, false);
        record.Clip.SetData(samples, 0);

        m_SpeechToText.OnListen(record);

        bFirstBlock = !bFirstBlock;
      }
      else
      {
        // calculate the number of samples remaining until we ready for a block of audio, 
        // and wait that amount of time it will take to record.
        int remaining = bFirstBlock ? (midPoint - writePos) : (m_Recording.samples - writePos);
        float timeRemaining = (float)remaining / (float)m_RecordingHZ;

        yield return new WaitForSeconds(timeRemaining);
      }

    }

    yield break;
  }

  private void OnRecognize(SpeechRecognitionEvent result)
  {
        Debug.Log("OnRecognize");
        if (result != null && result.results.Length > 0)
    {
      foreach (var res in result.results)
      {
        foreach (var alt in res.alternatives)
        {
          string text = alt.transcript;
          Debug.Log(text);

          Log.Debug("ExampleStreaming", string.Format("{0} ({1}, {2:0.00})\n", text, res.final ? "Final" : "Interim", alt.confidence));
        }
      }
    }
  }
}

This is the line I added to get the microphone, in the RecordingHandler function. I just edited it to supply the microphone device at index zero, because the microphone ID was actually left empty in the sample (I don't know why, whether that was intentional or a mistake):

 m_MicrophoneID = Microphone.devices[0];
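
For reference, a slightly more defensive version of that edit (my own sketch, not part of the SDK sample) would check that a microphone actually exists before indexing into the array:

 // Guard against an empty device list before picking index zero.
 if (Microphone.devices.Length == 0)
 {
   Log.Error("ExampleStreaming", "No microphone devices found.");
   StopRecording();
   yield break;
 }
 m_MicrophoneID = Microphone.devices[0];
 Debug.Log("m_MicrophoneID : " + m_MicrophoneID);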

But unfortunately it does not show any log output in EventOnRecognize, which I expected to be executed.

It only shows the logs below after a few seconds (because I set the audio length to 5). What am I doing wrong? I cannot work out how the speech-to-text flow is supposed to behave.

[DEBUG] OnListenClosed(), State = DISCONNECTED 
[DEBUG] KeepAlive exited.

I have also tried the IBM Watson Speech To Text example scene, and it does not show anything either.

I am not able to stream the output in real time yet, but I can convert an audio clip to text through the Watson service. Here is the simple code (it took me three days):

using UnityEngine;
using System.Collections;
using IBM.Watson.DeveloperCloud.Services.SpeechToText.v1;

public class AudioClipToTextWatson : MonoBehaviour {
    // Non-streaming
    SpeechToText m_SpeechToText = new SpeechToText();
    public AudioClip m_AudioClip;
    public bool on = false;

    void Start () {
        m_AudioClip = Microphone.Start(Microphone.devices[0], false, 4, 44100);

        m_SpeechToText.Recognize(m_AudioClip, OnRecognize);
        //  Streaming
        m_SpeechToText.StartListening(OnRecognize);
        //  Stop listening
        m_SpeechToText.StopListening();
    }


    private void OnRecognize(SpeechRecognitionEvent result)
    {
        Debug.Log("result : " + result);
        if (result != null && result.results.Length > 0)
        {
            foreach (var res in result.results)
            {
                foreach (var alt in res.alternatives)
                {
                    string text = alt.transcript;
                    Debug.Log(text);
                    Debug.Log(res.final);
                }
            }
        }
    }

}

Note: You can record an audio clip with the microphone and convert it to text. If you already have the audio, drop it onto the component in the Inspector and comment out the first line in the Start event.
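
For example, a minimal sketch of that Inspector-clip variant (my own illustration; the class name PreRecordedClipToText is made up, but the Watson calls are the same ones used above) could look like this:

using UnityEngine;
using IBM.Watson.DeveloperCloud.Services.SpeechToText.v1;

public class PreRecordedClipToText : MonoBehaviour
{
    // Drag an existing AudioClip onto this field in the Inspector.
    public AudioClip m_AudioClip;

    SpeechToText m_SpeechToText = new SpeechToText();

    void Start()
    {
        // The clip already contains the audio, so there is no Microphone.Start call here.
        m_SpeechToText.Recognize(m_AudioClip, OnRecognize);
    }

    private void OnRecognize(SpeechRecognitionEvent result)
    {
        if (result == null || result.results.Length == 0)
            return;

        foreach (var res in result.results)
            foreach (var alt in res.alternatives)
                Debug.Log(alt.transcript + " (final: " + res.final + ")");
    }
}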

I solved the error.

I had the same problem with Unity 2018.3.14f1. I just changed the Player Settings and then it worked fine:

File -> Build Settings -> Player Settings -> Other Settings

Configuration

  • Scripting Runtime Version: .NET 4.x Equivalent
  • Api Compatibility Level: .NET 4.x
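
If you prefer to apply the same settings from code rather than through the UI, a minimal editor-script sketch (my own, assuming a Unity 2018.x project where these PlayerSettings APIs are available; the class name and menu path are made up) would be:

#if UNITY_EDITOR
using UnityEditor;

public static class WatsonPlayerSettingsFix
{
    // Hypothetical menu item that applies the two settings listed above.
    [MenuItem("Tools/Apply Watson Player Settings")]
    public static void Apply()
    {
        // Scripting Runtime Version: .NET 4.x Equivalent
        PlayerSettings.scriptingRuntimeVersion = ScriptingRuntimeVersion.Latest;

        // Api Compatibility Level: .NET 4.x (Standalone is just an example build target group)
        PlayerSettings.SetApiCompatibilityLevel(BuildTargetGroup.Standalone, ApiCompatibilityLevel.NET_4_6);
    }
}
#endif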