Azure Speech javascript SDK:以 mp3 格式输出音频
Azure Speech javascript SDK: Output audio in mp3
我使用 sdk.connection 方法将语音中的音频捕获到文本识别器。它创建我想转换成 MP3 的 PCM 音频。
这是初始化连接的方式:
const con = SpeechSDK.Connection.fromRecognizer(this.recognizer);
con.messageSent = args => {
// Only record outbound audio mesages that have data in them.
if (
args.message.path === "audio" &&
args.message.isBinaryMessage &&
args.message.binaryMessage !== null
) {
this.wavFragments[this.wavFragmentCount++] =
args.message.binaryMessage;
}
};
这是 wav 文件构建:
let byteCount = 0;
for (let i = 0; i < this.wavFragmentCount; i++) {
byteCount += this.wavFragments[i].byteLength;
}
// Output array.
const sentAudio = new Uint8Array(byteCount);
byteCount = 0;
for (let i = 0; i < this.wavFragmentCount; i++) {
sentAudio.set(new Uint8Array(this.wavFragments[i]), byteCount);
byteCount += this.wavFragments[i].byteLength;
} // Write the audio back to disk.
// Set the file size in the wave header:
const view = new DataView(sentAudio.buffer);
view.setUint32(4, byteCount, true);
view.setUint32(40, byteCount, true);
我尝试使用 lamejs 将 'sentAudio' 转换为 MP3。
import {lamejs} from "../../modules/lame.min.js";
const wavBlob = new Blob([sentAudio]);
const reader = new FileReader();
reader.onload = evt => {
const audioData = evt.target.result;
const wav = lamejs.WavHeader.readHeader(new DataView(audioData));
const mp3enc = new lamejs.Mp3Encoder(1, wav.sampleRate, 128);
const samples = new Int8Array(audioData, wav.dataOffset, wav.dataLen / 2);
let mp3Tmp = mp3enc.encodeBuffer(samples); // encode mp3
// Push encode buffer to mp3Data variable
const mp3Data = [];
mp3Data.push(mp3Tmp);
// Get end part of mp3
mp3Tmp = mp3enc.flush();
// Write last data to the output data, too
// mp3Data contains now the complete mp3Data
mp3Data.push(mp3Tmp);
const blob = new Blob(mp3Data, { type: "audio/mp3" });
this.createDownloadLink(blob, "mp3");
};
reader.readAsArrayBuffer(wavBlob);
MP3 Blob 为空或包含听不见的声音。
我也尝试过使用 this example 中描述的 'encodeMP3' 方法,但它给出了相同的输出。
是否有支持此 mp3 转换的现有解决方案?
问题请参考以下代码
let byteCount = 0;
for (let i= 0; i < wavFragmentCount; i++) {
byteCount += wavFragments[i].byteLength;
}
// Output array.
const sentAudio: Uint8Array = new Uint8Array(byteCount);
byteCount = 0;
for (let i: number = 0; i < wavFragmentCount; i++) {
sentAudio.set(new Uint8Array(wavFragments[i]), byteCount);
byteCount += wavFragments[i].byteLength;
}
// create wav file blob
const view = new DataView(sentAudio.buffer);
view.setUint32(4, byteCount, true);
view.setUint32(40, byteCount, true);
let wav = new Blob([view], { type: 'audio/wav' });
// read wave file as base64
var reader = new FileReader();
reader.readAsDataURL(wav);
reader.onload = () => {
var base64String = reader.result.toString();
base64String = base64String.split(',')[1];
// convert to buffer
var binary_string = window.atob(base64String);
var len = binary_string.length;
var bytes = new Uint8Array(len);
for (var i = 0; i < len; i++) {
bytes[i] = binary_string.charCodeAt(i);
}
// convert to mp3 with lamejs
var wavHdr = lamejs.WavHeader.readHeader(
new DataView(bytes.buffer)
);
console.log(wavHdr);
var wavSamples = new Int16Array(
bytes.buffer,
0,
wavHdr.dataLen / 2
);
let mp3 = this.wavToMp3(
wavHdr.channels,
wavHdr.sampleRate,
wavSamples
);
reader.readAsDataURL(mp3);
reader.onload = () => {
var base64String = reader.result;
console.log(base64String);
};
};
function wavToMp3(channels, sampleRate, samples) {
console.log(channels);
console.log(sampleRate);
var buffer = [];
var mp3enc = new lamejs.Mp3Encoder(channels, sampleRate, 128);
var remaining = samples.length;
var maxSamples = 1152;
for (var i = 0; remaining >= maxSamples; i += maxSamples) {
var mono = samples.subarray(i, i + maxSamples);
var mp3buf = mp3enc.encodeBuffer(mono);
if (mp3buf.length > 0) {
buffer.push(new Int8Array(mp3buf));
}
remaining -= maxSamples;
}
var d = mp3enc.flush();
if (d.length > 0) {
buffer.push(new Int8Array(d));
}
console.log('done encoding, size=', buffer.length);
var blob = new Blob(buffer, { type: 'audio/mp3' });
var bUrl = window.URL.createObjectURL(blob);
console.log('Blob created, URL:', bUrl);
return blob;
}
我使用 sdk.connection 方法将语音中的音频捕获到文本识别器。它创建我想转换成 MP3 的 PCM 音频。
这是初始化连接的方式:
const con = SpeechSDK.Connection.fromRecognizer(this.recognizer);
con.messageSent = args => {
// Only record outbound audio mesages that have data in them.
if (
args.message.path === "audio" &&
args.message.isBinaryMessage &&
args.message.binaryMessage !== null
) {
this.wavFragments[this.wavFragmentCount++] =
args.message.binaryMessage;
}
};
这是 wav 文件构建:
let byteCount = 0;
for (let i = 0; i < this.wavFragmentCount; i++) {
byteCount += this.wavFragments[i].byteLength;
}
// Output array.
const sentAudio = new Uint8Array(byteCount);
byteCount = 0;
for (let i = 0; i < this.wavFragmentCount; i++) {
sentAudio.set(new Uint8Array(this.wavFragments[i]), byteCount);
byteCount += this.wavFragments[i].byteLength;
} // Write the audio back to disk.
// Set the file size in the wave header:
const view = new DataView(sentAudio.buffer);
view.setUint32(4, byteCount, true);
view.setUint32(40, byteCount, true);
我尝试使用 lamejs 将 'sentAudio' 转换为 MP3。
import {lamejs} from "../../modules/lame.min.js";
const wavBlob = new Blob([sentAudio]);
const reader = new FileReader();
reader.onload = evt => {
const audioData = evt.target.result;
const wav = lamejs.WavHeader.readHeader(new DataView(audioData));
const mp3enc = new lamejs.Mp3Encoder(1, wav.sampleRate, 128);
const samples = new Int8Array(audioData, wav.dataOffset, wav.dataLen / 2);
let mp3Tmp = mp3enc.encodeBuffer(samples); // encode mp3
// Push encode buffer to mp3Data variable
const mp3Data = [];
mp3Data.push(mp3Tmp);
// Get end part of mp3
mp3Tmp = mp3enc.flush();
// Write last data to the output data, too
// mp3Data contains now the complete mp3Data
mp3Data.push(mp3Tmp);
const blob = new Blob(mp3Data, { type: "audio/mp3" });
this.createDownloadLink(blob, "mp3");
};
reader.readAsArrayBuffer(wavBlob);
MP3 Blob 为空或包含听不见的声音。 我也尝试过使用 this example 中描述的 'encodeMP3' 方法,但它给出了相同的输出。
是否有支持此 mp3 转换的现有解决方案?
问题请参考以下代码
let byteCount = 0;
for (let i= 0; i < wavFragmentCount; i++) {
byteCount += wavFragments[i].byteLength;
}
// Output array.
const sentAudio: Uint8Array = new Uint8Array(byteCount);
byteCount = 0;
for (let i: number = 0; i < wavFragmentCount; i++) {
sentAudio.set(new Uint8Array(wavFragments[i]), byteCount);
byteCount += wavFragments[i].byteLength;
}
// create wav file blob
const view = new DataView(sentAudio.buffer);
view.setUint32(4, byteCount, true);
view.setUint32(40, byteCount, true);
let wav = new Blob([view], { type: 'audio/wav' });
// read wave file as base64
var reader = new FileReader();
reader.readAsDataURL(wav);
reader.onload = () => {
var base64String = reader.result.toString();
base64String = base64String.split(',')[1];
// convert to buffer
var binary_string = window.atob(base64String);
var len = binary_string.length;
var bytes = new Uint8Array(len);
for (var i = 0; i < len; i++) {
bytes[i] = binary_string.charCodeAt(i);
}
// convert to mp3 with lamejs
var wavHdr = lamejs.WavHeader.readHeader(
new DataView(bytes.buffer)
);
console.log(wavHdr);
var wavSamples = new Int16Array(
bytes.buffer,
0,
wavHdr.dataLen / 2
);
let mp3 = this.wavToMp3(
wavHdr.channels,
wavHdr.sampleRate,
wavSamples
);
reader.readAsDataURL(mp3);
reader.onload = () => {
var base64String = reader.result;
console.log(base64String);
};
};
function wavToMp3(channels, sampleRate, samples) {
console.log(channels);
console.log(sampleRate);
var buffer = [];
var mp3enc = new lamejs.Mp3Encoder(channels, sampleRate, 128);
var remaining = samples.length;
var maxSamples = 1152;
for (var i = 0; remaining >= maxSamples; i += maxSamples) {
var mono = samples.subarray(i, i + maxSamples);
var mp3buf = mp3enc.encodeBuffer(mono);
if (mp3buf.length > 0) {
buffer.push(new Int8Array(mp3buf));
}
remaining -= maxSamples;
}
var d = mp3enc.flush();
if (d.length > 0) {
buffer.push(new Int8Array(d));
}
console.log('done encoding, size=', buffer.length);
var blob = new Blob(buffer, { type: 'audio/mp3' });
var bUrl = window.URL.createObjectURL(blob);
console.log('Blob created, URL:', bUrl);
return blob;
}