Microsoft Cognitive Speech Service - Android
I am using Microsoft Cognitive Services - Speech Recognition in an Android application. Everything works fine while the code lives in my main activity, but when I move the speech-recognition part into a new class, it throws an error.
Here are the code samples:
Main activity:
int m_waitSeconds = 0;
MicrophoneRecognitionClient micClient = null;
FinalResponseStatus isReceivedResponse = FinalResponseStatus.NotReceived;
Boolean speechIntent = false;
SpeechRecognitionMode speechMode = SpeechRecognitionMode.ShortPhrase;

public enum FinalResponseStatus { NotReceived, OK }

/**
 * Gets the primary subscription key
 */
public String getPrimaryKey() {
    return this.getString(R.string.primaryKey);
}

/**
 * Gets the secondary subscription key
 */
public String getSecondaryKey() {
    return this.getString(R.string.secondaryKey);
}

/**
 * Gets the LUIS application identifier.
 * @return The LUIS application identifier.
 */
private String getLuisAppId() {
    return this.getString(R.string.luisAppID);
}

/**
 * Gets the LUIS subscription identifier.
 * @return The LUIS subscription identifier.
 */
private String getLuisSubscriptionID() {
    return this.getString(R.string.luisSubscriptionID);
}

/**
 * Gets the default locale.
 * @return The default locale.
 */
private String getDefaultLocale() {
    return "en-us";
}

/**
 * Handles the Click event of the _startButton control.
 */
private void StartButton_Click(View v) {
    this.m_waitSeconds = this.speechMode == SpeechRecognitionMode.ShortPhrase ? 20 : 200;
    if (this.micClient == null) {
        if (this.speechIntent) {
            this.WriteLine("--- Start microphone dictation with Intent detection ----");
            this.micClient = SpeechRecognitionServiceFactory.createMicrophoneClientWithIntent(
                    this,
                    this.getDefaultLocale(),
                    this,
                    this.getPrimaryKey(),
                    this.getSecondaryKey(),
                    this.getLuisAppId(),
                    this.getLuisSubscriptionID());
        }
        else {
            this.micClient = SpeechRecognitionServiceFactory.createMicrophoneClient(
                    this,
                    this.speechMode,
                    this.getDefaultLocale(),
                    this,
                    this.getPrimaryKey(),
                    this.getSecondaryKey());
        }
    }
    this.micClient.startMicAndRecognition();
}

public void onFinalResponseReceived(final RecognitionResult response) {
    boolean isFinalDicationMessage = this.speechMode == SpeechRecognitionMode.LongDictation &&
            (response.RecognitionStatus == RecognitionStatus.EndOfDictation ||
             response.RecognitionStatus == RecognitionStatus.DictationEndSilenceTimeout);
    if (null != this.micClient && ((this.speechMode == SpeechRecognitionMode.ShortPhrase) || isFinalDicationMessage)) {
        // We got the final result, so we can end the mic recognition. No need to do this
        // for dataReco, since we already called endAudio() on it as soon as we were done
        // sending all the data.
        this.micClient.endMicAndRecognition();
    }
    if (isFinalDicationMessage) {
        this.isReceivedResponse = FinalResponseStatus.OK;
    }
    Confidence cMax = Confidence.Low;
    int iMax = 0;
    if (!isFinalDicationMessage && response.Results.length != 0) {
        for (int i = 0; i < response.Results.length; i++) {
            // get the text with the highest confidence:
            if (response.Results[i].Confidence.getValue() > cMax.getValue()) {
                cMax = response.Results[i].Confidence;
                iMax = i;
            }
        }
        this.WriteLine(response.Results[iMax].DisplayText);
    }
}

/**
 * Called when a final response is received and its intent is parsed
 */
public void onIntentReceived(final String payload) {
    this.WriteLine("--- Intent received by onIntentReceived() ---");
    this.WriteLine(payload);
    this.WriteLine();
}

public void onPartialResponseReceived(final String response) {
    this.WriteLine("--- Partial result received by onPartialResponseReceived() ---");
    this.WriteLine(response);
    this.WriteLine();
}

public void onError(final int errorCode, final String response) {
    this.WriteLine("--- Error received by onError() ---");
    this.WriteLine("Error code: " + SpeechClientStatus.fromInt(errorCode) + " " + errorCode);
    this.WriteLine("Error text: " + response);
    this.WriteLine();
}

/**
 * Called when the microphone status has changed.
 * @param recording The current recording state
 */
public void onAudioEvent(boolean recording) {
    if (!recording) {
        this.micClient.endMicAndRecognition();
    }
}

/**
 * Writes the line.
 */
private void WriteLine() {
    this.WriteLine("");
}

/**
 * Writes the line.
 * @param text The line to write.
 */
private void WriteLine(String text) {
    System.out.println(text);
}
In the separate class:
public class SpeechRecognition implements ISpeechRecognitionServerEvents {

    int m_waitSeconds = 0;
    private MicrophoneRecognitionClient micClient = null;
    private FinalResponseStatus isReceivedResponse = FinalResponseStatus.NotReceived;
    private Boolean speechIntent = false;
    private SpeechRecognitionMode speechMode = SpeechRecognitionMode.ShortPhrase;

    public enum FinalResponseStatus { NotReceived, OK }

    /**
     * Gets the primary subscription key
     */
    public String getPrimaryKey() {
        return Integer.toString(R.string.primaryKey);
    }

    /**
     * Gets the secondary subscription key
     */
    public String getSecondaryKey() {
        return Integer.toString(R.string.secondaryKey);
    }

    /**
     * Gets the LUIS application identifier.
     * @return The LUIS application identifier.
     */
    private String getLuisAppId() {
        return Integer.toString(R.string.luisAppID);
    }

    /**
     * Gets the LUIS subscription identifier.
     * @return The LUIS subscription identifier.
     */
    private String getLuisSubscriptionID() {
        return Integer.toString(R.string.luisSubscriptionID);
    }

    /**
     * Gets the default locale.
     * @return The default locale.
     */
    private String getDefaultLocale() {
        return "en-us";
    }

    public void startSpeechRecognition() {
        this.m_waitSeconds = this.speechMode == SpeechRecognitionMode.ShortPhrase ? 20 : 200;
        if (this.micClient == null) {
            if (this.speechIntent) {
                this.micClient = SpeechRecognitionServiceFactory.createMicrophoneClientWithIntent(
                        this.getDefaultLocale(),
                        this,
                        this.getPrimaryKey(),
                        this.getSecondaryKey(),
                        this.getLuisAppId(),
                        this.getLuisSubscriptionID());
            }
            else {
                this.micClient = SpeechRecognitionServiceFactory.createMicrophoneClient(
                        this.speechMode,
                        this.getDefaultLocale(),
                        this,
                        this.getPrimaryKey(),
                        this.getSecondaryKey());
            }
        }
        this.micClient.startMicAndRecognition();
    }

    public void endSpeechRecognition() {
        this.micClient.endMicAndRecognition();
    }

    public void onFinalResponseReceived(final RecognitionResult response) {
        boolean isFinalDicationMessage = this.speechMode == SpeechRecognitionMode.LongDictation &&
                (response.RecognitionStatus == RecognitionStatus.EndOfDictation ||
                 response.RecognitionStatus == RecognitionStatus.DictationEndSilenceTimeout);
        if (null != this.micClient && ((this.speechMode == SpeechRecognitionMode.ShortPhrase) || isFinalDicationMessage)) {
            // We got the final result, so we can end the mic recognition. No need to do this
            // for dataReco, since we already called endAudio() on it as soon as we were done
            // sending all the data.
            this.micClient.endMicAndRecognition();
        }
        if (isFinalDicationMessage) {
            this.isReceivedResponse = FinalResponseStatus.OK;
        }
        Confidence cMax = Confidence.Low;
        int iMax = 0;
        if (!isFinalDicationMessage && response.Results.length != 0) {
            for (int i = 0; i < response.Results.length; i++) {
                // get the text with the highest confidence:
                if (response.Results[i].Confidence.getValue() > cMax.getValue()) {
                    cMax = response.Results[i].Confidence;
                    iMax = i;
                }
            }
            System.out.println("Action to take: " + response.Results[iMax].DisplayText);
        }
    }

    /**
     * Called when a final response is received and its intent is parsed
     */
    public void onIntentReceived(final String payload) {
        System.out.println("--- Intent received by onIntentReceived() ---");
        System.out.println(payload);
    }

    public void onPartialResponseReceived(final String response) {
        System.out.println("--- Partial result received by onPartialResponseReceived() ---");
        System.out.println(response);
    }

    public void onError(final int errorCode, final String response) {
        System.err.println("--- Error received by onError() ---");
        System.err.println("Error code: " + SpeechClientStatus.fromInt(errorCode) + " " + errorCode);
        System.err.println("Error text: " + response);
    }

    /**
     * Called when the microphone status has changed.
     * @param recording The current recording state
     */
    public void onAudioEvent(boolean recording) {
        System.out.println("--- Microphone status change received by onAudioEvent() ---");
        System.out.println("********* Microphone status: " + recording + " *********");
        if (recording) {
            System.out.println("Please start speaking.");
        }
        if (!recording) {
            this.micClient.endMicAndRecognition();
        }
    }
}
You can clearly see that it is essentially the same code, yet after I call this.micClient.startMicAndRecognition() it gives me this error in the onError callback:
Error code: LoginFailed -1910505470
I solved the problem.
I was getting a wrong primary key from this line of code:
public String getPrimaryKey() {
    return Integer.toString(R.string.primaryKey);
}
The reason is that R.string.primaryKey is only the integer resource ID that Android generates, so Integer.toString(R.string.primaryKey) returns that ID as text rather than the actual subscription key; resolving the real string requires a Context and getString(). I therefore had to read the key in the main activity with getString(R.string.primaryKey) and pass it as a parameter to the constructor of the SpeechRecognition class.
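For illustration, here is a minimal sketch of that fix; the constructor signature and field names are my own (not from the original post), and everything else in the class stays as shown above:

public class SpeechRecognition implements ISpeechRecognitionServerEvents {

    // Actual key values, resolved by the activity and passed in.
    private final String primaryKey;
    private final String secondaryKey;

    // Hypothetical constructor: the activity has a Context, so it resolves the
    // string resources and hands the real values to this class.
    public SpeechRecognition(String primaryKey, String secondaryKey) {
        this.primaryKey = primaryKey;
        this.secondaryKey = secondaryKey;
    }

    public String getPrimaryKey() {
        return this.primaryKey;   // the key itself, not a resource ID
    }

    public String getSecondaryKey() {
        return this.secondaryKey;
    }

    // ... remaining fields and callbacks unchanged ...
}

// In the main activity:
SpeechRecognition recognizer =
        new SpeechRecognition(getString(R.string.primaryKey), getString(R.string.secondaryKey));
recognizer.startSpeechRecognition();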