将麦克风录音机从浏览器发送到 google 语音到文本 - Javascript

Send microphone audio recorder from browser to google speech to text - Javascript

将麦克风录音机从浏览器发送到 google 语音到文本。不需要流媒体和套接字,也不需要通过 Node.js 到 Google 服务器的 HTTP 请求,也不需要通过客户端(浏览器)端的 HTTP 请求。

我面临的问题:

客户端实现完成,服务器端实现也完成,两种实现各自独立工作正常。我可以从麦克风获取音频数据并播放它,也能够使用 Google 给出的 audio.raw 示例测试服务器端实现。

但是,当我尝试将麦克风数据从浏览器发送到我的 Node 服务器、再转发到 Google 服务器时,我遇到了编码问题:从 Google 服务器得到的是空响应("Getting an empty response from google server")。

我的问题是如何更改音频文件的编码,然后使用 Javascript 将其发送到 Google Speech to Text 服务器。

我尝试了一下,可以使用 Google API 和浏览器录音将语音转换为文本。我怀疑配置对象可能是您遇到问题的原因。

我使用的组件是 Node.js 服务器:server.js 和一个简单的客户端(index.html 和客户端-app.js)。都在同一个文件夹中。

我为此使用 Google Speech to Text Client Library,因此您需要添加 Google API 密钥文件 (APIKey.json) 以提供凭据。

如果您运行 Node 服务器,然后将浏览器指向 http://localhost:3000/,就可以测试这段代码。

我从 here 中提取了很多客户端代码,也使用了 Matt Diamond 的 Recorder.js 代码。

server.js

// Express serves the static client files and receives the recorded audio;
// multer parses the multipart upload.
const express = require('express');
const multer = require('multer');
// NOTE(review): `fs` appears unused in this snippet — confirm before removing.
const fs = require('fs');

// multer() with no storage options keeps uploads in memory, so each file
// is available as a Buffer on req.files[n].buffer.
const upload = multer();

const app = express();
const port = 3000;

// Serve index.html and client-app.js from the current folder.
app.use(express.static('./'));

/**
 * Sends a recorded audio buffer to the Google Speech-to-Text API and
 * returns the recognized text.
 *
 * @param {Buffer} audioBuffer - Raw audio bytes uploaded by the client.
 * @returns {Promise<string>} Transcript alternatives joined with newlines.
 */
async function testGoogleTextToSpeech(audioBuffer) {
    const speech = require('@google-cloud/speech');
    // Credentials come from the service-account key file in this folder.
    const client = new speech.SpeechClient( { keyFilename: "APIKey.json"});

    // The recognize API expects base64-encoded audio content.
    const request = {
        audio: { content: audioBuffer.toString('base64') },
        config: { languageCode: 'en-US' },
    };

    const [response] = await client.recognize(request);
    // Take the top alternative of each result and join them line by line.
    return response.results
        .map((result) => result.alternatives[0].transcript)
        .join('\n');
}

// Receives the uploaded recording and responds with its transcription.
// The try/catch is required: without it a failed recognition leaves an
// unhandled promise rejection and the request hangs (matches the error
// handling used by the router version of this endpoint).
app.post('/upload_sound', upload.any(), async (req, res) => {
    console.log("Getting text transcription..");
    try {
        // multer memory storage exposes the upload as req.files[0].buffer.
        const transcription = await testGoogleTextToSpeech(req.files[0].buffer);
        console.log("Text transcription: " + transcription);
        res.status(200).send(transcription);
    } catch (error) {
        console.error(error);
        res.status(400).send(String(error));
    }
});

app.listen(port, () => {
    console.log(`Express server listening on port: ${port}...`);
});

index.html

<!DOCTYPE html>
<html>
<head>
    <meta charset="UTF-8">
    <title>Speech to text test</title>
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <link rel="stylesheet" type="text/css" href="https://bootswatch.com/4/cerulean/bootstrap.min.css">
</head>
<body style="padding:50px;">
    <h1>Speech to text test</h1>
    <!-- "Record" starts capturing; "Get transcription" stops and uploads. -->
    <div id="controls">
    <button id="recordButton">Record</button>
    <button id="transcribeButton" disabled>Get transcription</button>
    </div>
    <!-- client-app.js injects the transcription result here. -->
    <div id="output"></div>
    <!-- Matt Diamond's Recorder.js supplies the Recorder / exportWAV API. -->
    <script src="https://cdn.rawgit.com/mattdiamond/Recorderjs/08e7abd9/dist/recorder.js"></script>
    <script src="client-app.js"></script>
</body>
</html>

client-app.js

// Recorder.js instance and the raw MediaStream; both are kept at module
// scope so transcribeText() can stop them after recording.
let rec = null;
let audioStream = null;

const recordButton = document.getElementById("recordButton");
const transcribeButton = document.getElementById("transcribeButton");

recordButton.addEventListener("click", startRecording);
transcribeButton.addEventListener("click", transcribeText);

/**
 * Requests microphone access and starts a mono Recorder.js capture.
 * Disables "Record" and enables "Get transcription" while recording.
 */
function startRecording() {

    const constraints = { audio: true, video: false };

    recordButton.disabled = true;
    transcribeButton.disabled = false;

    navigator.mediaDevices.getUserMedia(constraints).then(function(stream) {
        // webkit fallback for Safari, matching the React version of this app.
        const audioContext = new (window.AudioContext || window.webkitAudioContext)();
        audioStream = stream;
        const input = audioContext.createMediaStreamSource(stream);
        // Record a single channel: Speech-to-Text defaults expect mono audio.
        rec = new Recorder(input, { numChannels: 1 });
        rec.record();
    }).catch(function(err) {
        // Log the failure instead of swallowing it, then restore the buttons.
        console.error('Unable to access microphone:', err);
        recordButton.disabled = false;
        transcribeButton.disabled = true;
    });
}

/**
 * Stops recording, releases the microphone track and exports the capture
 * as a WAV blob, which uploadSoundData() then sends to the server.
 */
function transcribeText() {
    transcribeButton.disabled = true;
    recordButton.disabled = false;

    rec.stop();
    // Release the microphone so the browser's recording indicator goes away.
    const [track] = audioStream.getAudioTracks();
    track.stop();
    rec.exportWAV(uploadSoundData);
}

/**
 * POSTs the recorded WAV blob to /upload_sound and renders the server's
 * transcription into the #output element.
 *
 * @param {Blob} blob - WAV-encoded recording produced by Recorder.js.
 */
function uploadSoundData(blob) {
    // Timestamp doubles as a unique-enough upload filename.
    const filename = new Date().toISOString();

    const xhr = new XMLHttpRequest();
    xhr.onload = function(e) {
        if (this.readyState === 4) {
            document.getElementById("output").innerHTML = `<br><br><strong>Result: </strong>${e.target.responseText}`
        }
    };

    const formData = new FormData();
    formData.append("audio_data", blob, filename);
    xhr.open("POST", "/upload_sound", true);
    xhr.send(formData);
}

@terry-lennox 非常感谢你清晰明确的回答。

但我使用 React 作为前端,所以改用了一个名为 recorder-js 的 npm 包。

下面贴出代码,供以后看到这个帖子的人参考。

import React, { Component } from 'react';
import Recorder from 'recorder-js';

import micGrey from './mic-grey.svg';
import micWhite from './mic-white.svg';

import './App.css';

var recorder = null;
var audioStream = null;

class App extends Component {
  constructor(props) {
    super(props);
    this.mic = React.createRef();

    this.accessMic = this.accessMic.bind(this);
    this.handleClick = this.handleClick.bind(this);
    this.handleClick = this.handleClick.bind(this);
    this.handleSuccess = this.handleSuccess.bind(this);

    this.stopAccessingMic = this.stopAccessingMic.bind(this);
    this.getTextFromGoogle = this.getTextFromGoogle.bind(this);

    this.state = {
      isMicActive: false
    };
  }

  accessMic() {
    const audioContext = new (window.AudioContext ||
      window.webkitAudioContext)();

    recorder = new Recorder(audioContext);

    navigator.mediaDevices
      .getUserMedia({ audio: true })
      .then(this.handleSuccess)
      .catch(err => console.log('Uh oh... unable to get stream...', err));
  }

  handleSuccess(stream) {
    audioStream = stream;

    recorder.init(stream);
    recorder.start();
  }

  getTextFromGoogle(blob) {
    let filename = new Date().toISOString();
    let xhr = new XMLHttpRequest();
    xhr.onload = function(e) {
      if (this.readyState === 4) {
        console.log(e.target.responseText);
      }
    };
    let formData = new FormData();
    formData.append('audio_data', blob, filename);
    xhr.open('POST', 'http://localhost:3000/', true);
    xhr.send(formData);
  }

  handleClick() {
    const isMicActive = this.state.isMicActive;

    this.setState({
      isMicActive: !isMicActive
    });

    if (!isMicActive) {
      this.checkPermissions();
      this.accessMic();
    } else {
      this.stopAccessingMic();
    }
  }

  stopAccessingMic() {
    audioStream && audioStream.getTracks()[0].stop();
    recorder.stop().then(({ blob, buffer }) => {
      this.getTextFromGoogle(blob);
    });
  }

  checkPermissions() {
    navigator.permissions
      .query({ name: 'microphone' })
      .then(permissionObj => {
        console.log('Permission status - ', permissionObj.state);
      })
      .catch(error => {
        console.log('Permission status - Got error :', error);
      });
  }

  render() {
    return (
      <div className='App'>
        <div
          id='mic'
          ref={this.mic}
          onClick={this.handleClick}
          className={
            this.state.isMicActive ? 'mic-btn mic-btn-active' : 'mic-btn'
          }
        >
          <img src={this.state.isMicActive ? micWhite : micGrey} alt='mic' />
        </div>
      </div>
    );
  }
}
export default App;

以下是供参考的后端代码。这里我遇到过一个错误:必须使用单声道(mono)音频。为了解决它我参考了 Link、Link,只需在 config 中添加 audioChannelCount: 2(这是一处很小的改动)。

// NOTE(review): `express` is used here but never required in this snippet —
// presumably it is imported earlier in the file; confirm.
var router = express.Router();
const multer = require('multer');
// NOTE(review): `fs` appears unused in this snippet — confirm before removing.
const fs = require('fs');

// Memory storage: uploads arrive as Buffers on req.files[n].buffer.
const upload = multer();

// Point the Google client library at the service-account key file; the
// SpeechClient below is constructed without an explicit keyFilename.
process.env.GOOGLE_APPLICATION_CREDENTIALS =
  'C:/Users/user/Desktop/Speech-to-Text-e851cb3889e5.json';

/* GET home page. */
router.post('/', upload.any(), async (req, res, next) => {
  console.log('Getting text transcription..');
  try {
    let transcription = await testGoogleTextToSpeech(req.files[0].buffer);
    console.log('Text transcription: ' + transcription);
    res.status(200).send(transcription);
  } catch (error) {
    console.log(error);
    res.status(400).send(error);
  }
});

/**
 * Sends recorded audio to Google Speech-to-Text and returns the transcript.
 *
 * `audioChannelCount: 2` is required because recorder-js captures stereo
 * audio; without it the API rejects the request ("must use single channel
 * (mono) audio").
 *
 * @param {Buffer} audioBuffer - Uploaded audio file contents.
 * @returns {Promise<string>} Transcript lines joined with newlines.
 * @throws Propagates recognition errors so the route handler's try/catch
 *   can respond with HTTP 400. (The previous `catch { return error; }`
 *   swallowed failures and made the route send an Error object with 200.)
 */
async function testGoogleTextToSpeech(audioBuffer) {
  const speech = require('@google-cloud/speech');
  // Credentials come from GOOGLE_APPLICATION_CREDENTIALS set above.
  const client = new speech.SpeechClient();

  const audio = {
    content: audioBuffer.toString('base64')
  };
  const config = {
    languageCode: 'en-US',
    audioChannelCount: 2
  };
  const request = {
    audio: audio,
    config: config
  };

  const [response] = await client.recognize(request);
  const transcription = response.results
    .map(result => result.alternatives[0].transcript)
    .join('\n');
  return transcription;
}
module.exports = router;