如何让麦克风直接流式传输到 watson speechToText 服务
how to get the microphone streamed directly to the watson speechToText service
我们想将麦克风直接通过管道传输到 Watson 语音转文本(Speech to Text)服务,但似乎必须先经过 .wav 文件?请查看以下代码,特别是我试图让麦克风直接流式传输到 speechToText 服务的部分。我认为直接使用麦克风流才是最常见的方式,而不是先将其写入 .wav,再把 .wav 文件流式传输到 STT:
// --- Question code: capture microphone audio with arecord and stream it to Watson STT ---
var mic;
var SpeechToTextV1 = require('watson-developer-cloud/speech-to-text/v1');
var fs = require('fs');
var watson = require('watson-developer-cloud');
var cp = require('child_process');
// Spawn arecord producing raw signed 16-bit little-endian PCM, 44.1 kHz, mono.
// NOTE(review): this is headerless PCM (audio/l16), NOT a .wav container —
// whatever content_type is sent to the service must match this raw format.
mic = cp.spawn('arecord', ['--device=plughw:1,0', '--format=S16_LE', '--rate=44100', '--channels=1']); //, '--duration=10'
// Surface arecord's diagnostics on our own stderr.
mic.stderr.pipe(process.stderr);
stt();
// Open a Watson Speech to Text recognize stream and pipe audio into it,
// writing the transcription to transcription.txt and logging stream events.
function stt() {
console.log("openCMDS");
var speech_to_text = new SpeechToTextV1({
username: '',
password: ''
});
var params = {
// NOTE(review): 'audio/wav' does not match the raw PCM that arecord emits;
// 'audio/l16; rate=44100' would match — likely the root cause of the failure.
content_type: 'audio/wav',
model: 'zh-CN_BroadbandModel',
continuous: true,
inactivity_timeout: -1
};
// FIXME: missing var/let/const — recognizeStream leaks into the global scope.
recognizeStream = speech_to_text.createRecognizeStream(params);
mic.stdout.pipe(recognizeStream);
//mic.stdout.pipe(require('fs').createWriteStream('test.wav'));
// Pipe in the audio.
// NOTE(review): both the live microphone (above) and this file are piped into
// the SAME recognize stream — the audio interleaves; keep only one source.
fs.createReadStream('test.wav').pipe(recognizeStream);
recognizeStream.pipe(fs.createWriteStream('transcription.txt'));
recognizeStream.setEncoding('utf8');
console.log("start record");
recognizeStream.on('data', function(event) { onEvent('Data:', event); });
recognizeStream.on('error', function(event) { onEvent('Error:', event); });
recognizeStream.on('close', function(event) { onEvent('Close:', event); });
// Display events on the console.
function onEvent(name, event) {
console.log(name, JSON.stringify(event, null, 2));
}
}
Speech to Text 服务需要知道您尝试发送的音频格式。我看到的 99% 的问题是因为该服务期望使用与用户使用的音频格式不同的音频格式。
'--format=S16_LE', '--rate=44100', '--channels=1'
这看起来像是 44.1kHz PCM 格式。
在您的代码中指定:
content_type: 'audio/wav'
也许可以试试 `audio/l16; rate=44100;`。您还可以录制其他格式的音频。
最后看一下javascript-speech-sdk。我们有如何从浏览器流式传输麦克风的示例。
更新
// --- Answer code: stream the microphone directly to Watson STT via the `mic` package ---
const mic = require('mic');
const SpeechToTextV1 = require('watson-developer-cloud/speech-to-text/v1');

const speechToText = new SpeechToTextV1({
  username: 'YOUR USERNAME',
  password: 'YOUR PASSWORD',
  url: 'YOUR SERVICE URL',
  version: 'v1'
});

// 1. Microphone settings — raw 16-bit PCM at 44.1 kHz, 2 channels.
const micInstance = mic({
  rate: 44100,
  channels: 2,
  debug: false,
  exitOnSilence: 6
});

// 2. Service recognize settings — content_type must match the mic settings above
//    (raw linear PCM, same rate, same channel count).
const recognizeStream = speechToText.createRecognizeStream({
  content_type: 'audio/l16; rate=44100; channels=2',
  model: 'zh-CN_BroadbandModel',
  interim_results: true,
}); // fixed: terminate the statement with a semicolon (was relying on ASI)

// 3. Start recording
const micInputStream = micInstance.getAudioStream();
micInstance.start();
console.log('Watson is listening, you may speak now.');

// 4. Pipe audio to the service; the recognize stream emits transcription text.
const textStream = micInputStream.pipe(recognizeStream).setEncoding('utf8');
textStream.on('data', user_speech_text => console.log('Watson hears:', user_speech_text));
textStream.on('error', e => console.log(`error: ${e}`));
textStream.on('close', e => console.log(`close: ${e}`));
我们想将麦克风直接通过管道传输到 Watson 语音转文本(Speech to Text)服务,但似乎必须先经过 .wav 文件?请查看以下代码,特别是我试图让麦克风直接流式传输到 speechToText 服务的部分。我认为直接使用麦克风流才是最常见的方式,而不是先将其写入 .wav,再把 .wav 文件流式传输到 STT:
// --- Question code: capture microphone audio with arecord and stream it to Watson STT ---
var mic;
var SpeechToTextV1 = require('watson-developer-cloud/speech-to-text/v1');
var fs = require('fs');
var watson = require('watson-developer-cloud');
var cp = require('child_process');
// Spawn arecord producing raw signed 16-bit little-endian PCM, 44.1 kHz, mono.
// NOTE(review): this is headerless PCM (audio/l16), NOT a .wav container —
// whatever content_type is sent to the service must match this raw format.
mic = cp.spawn('arecord', ['--device=plughw:1,0', '--format=S16_LE', '--rate=44100', '--channels=1']); //, '--duration=10'
// Surface arecord's diagnostics on our own stderr.
mic.stderr.pipe(process.stderr);
stt();
// Open a Watson Speech to Text recognize stream and pipe audio into it,
// writing the transcription to transcription.txt and logging stream events.
function stt() {
console.log("openCMDS");
var speech_to_text = new SpeechToTextV1({
username: '',
password: ''
});
var params = {
// NOTE(review): 'audio/wav' does not match the raw PCM that arecord emits;
// 'audio/l16; rate=44100' would match — likely the root cause of the failure.
content_type: 'audio/wav',
model: 'zh-CN_BroadbandModel',
continuous: true,
inactivity_timeout: -1
};
// FIXME: missing var/let/const — recognizeStream leaks into the global scope.
recognizeStream = speech_to_text.createRecognizeStream(params);
mic.stdout.pipe(recognizeStream);
//mic.stdout.pipe(require('fs').createWriteStream('test.wav'));
// Pipe in the audio.
// NOTE(review): both the live microphone (above) and this file are piped into
// the SAME recognize stream — the audio interleaves; keep only one source.
fs.createReadStream('test.wav').pipe(recognizeStream);
recognizeStream.pipe(fs.createWriteStream('transcription.txt'));
recognizeStream.setEncoding('utf8');
console.log("start record");
recognizeStream.on('data', function(event) { onEvent('Data:', event); });
recognizeStream.on('error', function(event) { onEvent('Error:', event); });
recognizeStream.on('close', function(event) { onEvent('Close:', event); });
// Display events on the console.
function onEvent(name, event) {
console.log(name, JSON.stringify(event, null, 2));
}
}
Speech to Text 服务需要知道您尝试发送的音频格式。我看到的 99% 的问题是因为该服务期望使用与用户使用的音频格式不同的音频格式。
'--format=S16_LE', '--rate=44100', '--channels=1'
这看起来像是 44.1kHz PCM 格式。
在您的代码中指定:
content_type: 'audio/wav'
也许可以试试 `audio/l16; rate=44100;`。您还可以录制其他格式的音频。
最后看一下javascript-speech-sdk。我们有如何从浏览器流式传输麦克风的示例。
更新
// --- Answer code: stream the microphone directly to Watson STT via the `mic` package ---
const mic = require('mic');
const SpeechToTextV1 = require('watson-developer-cloud/speech-to-text/v1');

const speechToText = new SpeechToTextV1({
  username: 'YOUR USERNAME',
  password: 'YOUR PASSWORD',
  url: 'YOUR SERVICE URL',
  version: 'v1'
});

// 1. Microphone settings — raw 16-bit PCM at 44.1 kHz, 2 channels.
const micInstance = mic({
  rate: 44100,
  channels: 2,
  debug: false,
  exitOnSilence: 6
});

// 2. Service recognize settings — content_type must match the mic settings above
//    (raw linear PCM, same rate, same channel count).
const recognizeStream = speechToText.createRecognizeStream({
  content_type: 'audio/l16; rate=44100; channels=2',
  model: 'zh-CN_BroadbandModel',
  interim_results: true,
}); // fixed: terminate the statement with a semicolon (was relying on ASI)

// 3. Start recording
const micInputStream = micInstance.getAudioStream();
micInstance.start();
console.log('Watson is listening, you may speak now.');

// 4. Pipe audio to the service; the recognize stream emits transcription text.
const textStream = micInputStream.pipe(recognizeStream).setEncoding('utf8');
textStream.on('data', user_speech_text => console.log('Watson hears:', user_speech_text));
textStream.on('error', e => console.log(`error: ${e}`));
textStream.on('close', e => console.log(`close: ${e}`));