如何将 websocket 二进制消息作为流发送到 Google 语音 API?
How to send websocket binary messages as stream to Google Speech API?
我正在尝试将 websocket 连接的音频流发送到 Google 语音 API。 Websocket 以 20 毫秒为增量发送二进制消息。它以增量方式发送它让我相信我将不得不以某种方式临时读取数据并将数据写入本地文件以避免终止与 Google 的连接。然而,这并不理想。
有没有办法直接将 websocket 流通过管道传输到 recognizeStream
?
Google streamingRecognize 来自文档的示例:
// Streaming-recognition request built from the sample's variables
// (`encoding` and `sampleRate` are defined earlier in the docs example).
const request = {
  config: { encoding, sampleRate },
};

// Duplex stream: audio is written in, recognition results are emitted
// as 'data' events.
const recognizeStream = speech.createRecognizeStream(request);
recognizeStream.on('error', console.error);
recognizeStream.on('data', (data) => process.stdout.write(data.results));

// Pipe microphone audio straight into the recognizer.
const recording = record.start({ sampleRate, threshold: 0 });
recording.pipe(recognizeStream);
Websocket 连接:
// HTTP + WebSocket server that receives audio frames from the client.
// Fix: `http` was used below (http.createServer) but never required.
var http = require('http');
var HttpDispatcher = require('httpdispatcher');
var dispatcher = new HttpDispatcher();
var WebSocketServer = require('websocket').server;
var server = http.createServer(handleRequest);
var wsServer = new WebSocketServer({
  httpServer: server,
  // NOTE(review): auto-accepting skips origin checks — fine for a demo,
  // but validate the origin before accepting in production.
  autoAcceptConnections: true,
});

/**
 * Plain-HTTP handler: logs the requested URL, then hands the request to
 * the dispatcher. Errors are logged rather than crashing the server.
 * @param {http.IncomingMessage} request
 * @param {http.ServerResponse} response
 */
function handleRequest(request, response){
  try {
    //log the request on console
    console.log(request.url);
    //Dispatch
    dispatcher.dispatch(request, response);
  } catch(err) {
    console.log(err);
  }
}

// One 'message' listener per connection: text frames are logged,
// binary frames (20 ms audio chunks) are where the Speech API write goes.
wsServer.on('connect', function(connection) {
  console.log((new Date()) + ' Connection accepted' + ' - Protocol Version ' + connection.webSocketVersion);
  connection.on('message', function(message) {
    if (message.type === 'utf8') {
      console.log(message.utf8Data);
    }
    else if (message.type === 'binary') {
      //Send to Google Speech API by passing into recognizeStream
    }
  });
  connection.on('close', function(reasonCode, description) {
    console.log((new Date()) + ' Peer ' + connection.remoteAddress + ' disconnected.');
  });
});
最好的解决方案是使用专门的流媒体方案,而不是自己动手。它会替你处理所有缓冲工作,并提供适合 Google 语音 API 的稳定音频流。可以尝试使用类似的现成流媒体工具。
这个其实很简单——简单到我因为之前没有发现而有点不好意思。按照 OP 中代码的写法,下面这样就能完美工作:
// Each binary WebSocket frame (a 20 ms audio chunk) is written directly
// into the duplex recognize stream — it buffers internally, so no
// temporary file is needed.
else if (message.type === 'binary') {
//Send to Google Speech API by passing into recognizeStream
recognizeStream.write(message.binaryData)
}
我正在尝试将 websocket 连接的音频流发送到 Google 语音 API。 Websocket 以 20 毫秒为增量发送二进制消息。它以增量方式发送它让我相信我将不得不以某种方式临时读取数据并将数据写入本地文件以避免终止与 Google 的连接。然而,这并不理想。
有没有办法直接将 websocket 流通过管道传输到 recognizeStream
?
Google streamingRecognize 来自文档的示例:
// Streaming-recognition request built from the sample's variables
// (`encoding` and `sampleRate` are defined earlier in the docs example).
const request = {
  config: { encoding, sampleRate },
};

// Duplex stream: audio is written in, recognition results are emitted
// as 'data' events.
const recognizeStream = speech.createRecognizeStream(request);
recognizeStream.on('error', console.error);
recognizeStream.on('data', (data) => process.stdout.write(data.results));

// Pipe microphone audio straight into the recognizer.
const recording = record.start({ sampleRate, threshold: 0 });
recording.pipe(recognizeStream);
Websocket 连接:
// HTTP + WebSocket server that receives audio frames from the client.
// Fix: `http` was used below (http.createServer) but never required.
var http = require('http');
var HttpDispatcher = require('httpdispatcher');
var dispatcher = new HttpDispatcher();
var WebSocketServer = require('websocket').server;
var server = http.createServer(handleRequest);
var wsServer = new WebSocketServer({
  httpServer: server,
  // NOTE(review): auto-accepting skips origin checks — fine for a demo,
  // but validate the origin before accepting in production.
  autoAcceptConnections: true,
});

/**
 * Plain-HTTP handler: logs the requested URL, then hands the request to
 * the dispatcher. Errors are logged rather than crashing the server.
 * @param {http.IncomingMessage} request
 * @param {http.ServerResponse} response
 */
function handleRequest(request, response){
  try {
    //log the request on console
    console.log(request.url);
    //Dispatch
    dispatcher.dispatch(request, response);
  } catch(err) {
    console.log(err);
  }
}

// One 'message' listener per connection: text frames are logged,
// binary frames (20 ms audio chunks) are where the Speech API write goes.
wsServer.on('connect', function(connection) {
  console.log((new Date()) + ' Connection accepted' + ' - Protocol Version ' + connection.webSocketVersion);
  connection.on('message', function(message) {
    if (message.type === 'utf8') {
      console.log(message.utf8Data);
    }
    else if (message.type === 'binary') {
      //Send to Google Speech API by passing into recognizeStream
    }
  });
  connection.on('close', function(reasonCode, description) {
    console.log((new Date()) + ' Peer ' + connection.remoteAddress + ' disconnected.');
  });
});
最好的解决方案是使用专门的流媒体方案,而不是自己动手。它会替你处理所有缓冲工作,并提供适合 Google 语音 API 的稳定音频流。可以尝试使用类似的现成流媒体工具。
这个其实很简单——简单到我因为之前没有发现而有点不好意思。按照 OP 中代码的写法,下面这样就能完美工作:
// Each binary WebSocket frame (a 20 ms audio chunk) is written directly
// into the duplex recognize stream — it buffers internally, so no
// temporary file is needed.
else if (message.type === 'binary') {
//Send to Google Speech API by passing into recognizeStream
recognizeStream.write(message.binaryData)
}