REST API azure speech to text (RECOGNIZED: Text=undefined)
I am trying to use the Azure API (speech to text), but when I run the code it does not give me a result for the audio.
The audio is in the requested format (.WAV).
Code sample from the documentation:
const fs = require('fs');
const sdk = require("microsoft-cognitiveservices-speech-sdk");

const speechConfig = sdk.SpeechConfig.fromSubscription("---", "eastus2");

function fromFile() {
    let pushStream = sdk.AudioInputStream.createPushStream();

    fs.createReadStream("audio/aboutSpeechSdk.wav").on('data', function (arrayBuffer) {
        pushStream.write(arrayBuffer.slice());
    }).on('end', function () {
        pushStream.close();
    });

    let audioConfig = sdk.AudioConfig.fromStreamInput(pushStream);
    let recognizer = new sdk.SpeechRecognizer(speechConfig, audioConfig);
    recognizer.recognizeOnceAsync(result => {
        console.log(`RECOGNIZED: Text=${result.text}`);
        recognizer.close();
    });
}

fromFile();
Based on the code you provided, it seems you have not configured the speech recognition language. Please add speechConfig.speechRecognitionLanguage = "" to your sample. For more details about languages, please refer to here.
For example, you can download the video and run a test:
var sdk = require("microsoft-cognitiveservices-speech-sdk");
var fs = require("fs");

var subscriptionKey = "";
var serviceRegion = "";
var language = "en-US";

function openPushStream(filename) {
    // create the push stream we need for the speech sdk.
    var pushStream = sdk.AudioInputStream.createPushStream();

    // open the file and push it to the push stream.
    fs.createReadStream(filename)
        .on("data", function (arrayBuffer) {
            pushStream.write(arrayBuffer.slice());
        })
        .on("end", function () {
            pushStream.close();
        });

    return pushStream;
}

var audioConfig = sdk.AudioConfig.fromStreamInput(
    openPushStream("aboutSpeechSdk.wav")
);
var speechConfig = sdk.SpeechConfig.fromSubscription(
    subscriptionKey,
    serviceRegion
);
speechConfig.speechRecognitionLanguage = language;

var recognizer = new sdk.SpeechRecognizer(speechConfig, audioConfig);

recognizer.recognizeOnceAsync(
    function (result) {
        console.log(result.text);
        recognizer.close();
        recognizer = undefined;
    },
    function (err) {
        console.log(err);
        recognizer.close();
        recognizer = undefined;
    }
);
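If the result still comes back empty, a quick way to see why Text is undefined is to check result.reason in the callback. Below is a minimal sketch (the key, region, and file name are placeholders, and it reads the WAV file into a buffer with fromWavFileInput for brevity); it uses the SDK's ResultReason and CancellationDetails to report whether the audio was recognized, produced no match, or the request was canceled (for example, because of an invalid key, wrong region, or unsupported audio format):

var sdk = require("microsoft-cognitiveservices-speech-sdk");
var fs = require("fs");

var speechConfig = sdk.SpeechConfig.fromSubscription("<your-key>", "<your-region>");
speechConfig.speechRecognitionLanguage = "en-US";

// read the whole WAV file into a buffer and hand it to the recognizer
var audioConfig = sdk.AudioConfig.fromWavFileInput(fs.readFileSync("aboutSpeechSdk.wav"));
var recognizer = new sdk.SpeechRecognizer(speechConfig, audioConfig);

recognizer.recognizeOnceAsync(function (result) {
    switch (result.reason) {
        case sdk.ResultReason.RecognizedSpeech:
            // speech was recognized; result.text holds the transcript
            console.log("RECOGNIZED: Text=" + result.text);
            break;
        case sdk.ResultReason.NoMatch:
            // the audio was processed but no speech could be matched
            console.log("NOMATCH: Speech could not be recognized.");
            break;
        case sdk.ResultReason.Canceled:
            // the request was canceled; print the cancellation details
            var cancellation = sdk.CancellationDetails.fromResult(result);
            console.log("CANCELED: Reason=" + cancellation.reason);
            console.log("CANCELED: ErrorDetails=" + cancellation.errorDetails);
            break;
    }
    recognizer.close();
});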
For more details, please refer to the blog.