JavaScript: How to split an audio blob into 1-second chunks and export to wav files using recorder.js?
I want to record speech, automatically split the recorded audio (or audio blob) into 1-second chunks, export each chunk as a wav file, and send it to the backend. This should happen asynchronously while the user is speaking.
I currently use the following recorder.js library for this task:
https://cdn.rawgit.com/mattdiamond/Recorderjs/08e7abd9/dist/recorder.js
My problem is that the blob/wave files grow larger over time. I think this is because the data accumulates, making each chunk bigger, so over time I'm not actually sending consecutive 1-second chunks but cumulative ones.
I can't figure out where in my code this problem originates. It may be happening inside the recorder.js library. If anyone has used recorder.js, or any other JavaScript approach for a similar task, please take a look at this code and let me know where it breaks.
Here is my JS code:
var gumStream; // Stream from getUserMedia()
var rec; // Recorder.js object
var input; // MediaStreamAudioSourceNode we'll be recording
var recordingNotStopped; // True while the user has pressed record and has not yet pressed stop
const trackLengthInMS = 1000; // Length of each audio chunk in milliseconds
const maxNumOfSecs = 1000; // Maximum number of one-second chunks we support per recording
// Shim for AudioContext when it's not available.
var AudioContext = window.AudioContext || window.webkitAudioContext;
var audioContext; // Audio context to help us record
var recordButton = document.getElementById("recordButton");
var stopButton = document.getElementById("stopButton");
//Event handlers for above 2 buttons
recordButton.addEventListener("click", startRecording);
stopButton.addEventListener("click", stopRecording);
//Asynchronous function to stop the recording every second and export the blob to a wav file
const sleep = time => new Promise(resolve => setTimeout(resolve, time));
const asyncFn = async () => {
  for (let i = 0; i < maxNumOfSecs; i++) {
    if (recordingNotStopped) {
      rec.record();
      await sleep(trackLengthInMS);
      rec.stop();
      //stop microphone access
      gumStream.getAudioTracks()[0].stop();
      //Create the wav blob and pass it on to createWaveBlob
      rec.exportWAV(createWaveBlob);
    }
  }
}
function startRecording() {
  console.log("recordButton clicked");
  recordingNotStopped = true;
  var constraints = {
    audio: true,
    video: false
  }
  recordButton.disabled = true;
  stopButton.disabled = false;
  //Using the standard promise based getUserMedia()
  navigator.mediaDevices.getUserMedia(constraints).then(function(stream) {
    //Create an audio context after getUserMedia is called
    audioContext = new AudioContext();
    // Assign to gumStream for later use
    gumStream = stream;
    //Use the stream
    input = audioContext.createMediaStreamSource(stream);
    //Create the Recorder object and configure to record mono sound (1 channel)
    rec = new Recorder(input, {
      numChannels: 1
    });
    //Call the asynchronous function to split and export audio
    asyncFn();
    console.log("Recording started");
  }).catch(function(err) {
    //Enable the record button if getUserMedia() fails
    recordButton.disabled = false;
    stopButton.disabled = true;
  });
}
function stopRecording() {
  console.log("stopButton clicked");
  recordingNotStopped = false;
  //disable the stop button and enable the record button to allow for new recordings
  stopButton.disabled = true;
  recordButton.disabled = false;
  //Set the recorder to stop the recording
  rec.stop();
  //stop microphone access
  gumStream.getAudioTracks()[0].stop();
}
function createWaveBlob(blob) {
  var url = URL.createObjectURL(blob);
  //Convert the blob to a wav file and call the sendBlob function to send the wav file to the server
  var convertedfile = new File([blob], 'filename.wav');
  sendBlob(convertedfile);
}
Recorder.js keeps a record buffer of the audio it captures. When exportWAV is called, the record buffer is encoded but not cleared. Before calling record again, you need to call clear on the recorder so that the previous audio chunk is removed from the record buffer.
Here is how the code above can be fixed:
//Extend the Recorder prototype with a step() method that clears the record buffer
Recorder.prototype.step = function () {
  this.clear();
};
//After calling exportWAV(), call the step() method
rec.exportWAV(createWaveBlob);
rec.step();
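Putting it together, here is a minimal sketch of what the chunking loop can look like with the buffer cleared after each export (my adaptation, not code from the original post; it also keeps the microphone track alive until stopRecording, since stopping it inside the loop, as the question's code does, would end capture after the first chunk):
// Sketch of the corrected loop; rec.clear() is what the step() wrapper above calls.
const asyncFn = async () => {
  for (let i = 0; i < maxNumOfSecs && recordingNotStopped; i++) {
    rec.record();                  // resume capturing into the record buffer
    await sleep(trackLengthInMS);  // let roughly one second of audio accumulate
    rec.stop();                    // pause capture while we export
    rec.exportWAV(createWaveBlob); // encode only the current buffer to a wav blob
    rec.clear();                   // empty the buffer so the next chunk starts fresh
  }
};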