Getting PCM data from the wavesurfer.js backend / Web Audio API
I'm building an online multi-track player with wavesurfer.js and want to export a mixed-down version of the combined tracks, including things like panning.

First, I have an audioFiles array and use it to create an array of wavesurfer instances:
for (var i = 0; i < audiofiles.length; i++) {
  spectrum[i] = WaveSurfer.create({
  });
}
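The create() options are omitted above. For reference only, a minimal per-track setup might look something like this sketch; the container selector and colours are placeholders, not values from the original code:

for (var i = 0; i < audiofiles.length; i++) {
  spectrum[i] = WaveSurfer.create({
    container: '#waveform-' + i,   // placeholder: one container element per track
    waveColor: '#999',             // placeholder styling
    progressColor: '#555'
  });
}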
Then, for each of these, I create a buffer from the wavesurfer backend:
for (var i = 0; i < audiofiles.length; i++) {
  var ctx = spectrum[i].backend.ac;
  var length = spectrum[i].getDuration() * sample_rate * 2;
  var ctx_buffer = ctx.createBuffer(2, length, ctx.sampleRate);
  // pass raw pcm buffer to download function
}
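As a side note, ctx.createBuffer() takes its length in sample-frames per channel (duration * sampleRate, without the * 2), and the buffer it returns is allocated silent; no decoded audio is copied into it. A quick illustrative check:

// A freshly created buffer contains only zeros, whatever its length.
var samples = ctx_buffer.getChannelData(0);   // Float32Array for channel 0
console.log(samples.slice(0, 10));            // prints all zeros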
Finally, I got some help with the download function here.

My current problem is that whatever I pass to the download function doesn't seem to be in the right format. I've only just started working with audio and I'm not sure what I'm doing.

If I pass the ctx_buffer variable to the function from that other question (using it in place of the buffer variable taken directly from a PCM file), the download succeeds but the file is empty, even though it has the correct length (omitting the *2 in the length above gives an empty file exactly half the length of my original).

wavesurfer.js also has an exportPCM() function (https://wavesurfer-js.org/docs/methods.html), but I'm not sure how that works either.
EDIT:
buttons.save_all.addEventListener("click", function () {
  document.getElementById("download_icon").className = "fas fa-spinner loader";
  document.getElementById("download_text").innerText = "Loading...";

  var length = spectrum[0].getDuration() * 44100 * 2;

  for (var i = 0; i < audiofiles.length; i++) {
    var ctx = spectrum[i].backend.ac;
    var sample_rate = ctx.sampleRate;
    var length = spectrum[i].getDuration() * sample_rate * 2;
    var ctx_buffer = ctx.createBuffer(2, length, ctx.sampleRate);
    download_init(ctx_buffer, length, sample_rate).catch(showError);
  }
}, false);

function showError(e) {
  console.log(`ERROR: ${e}`);
}

var x = 0;

async function download_init(ctx_buffer, length, sample_rate) {
  // const buffer = await (await fetch(url)).arrayBuffer()
  const wavBytes = getWavBytes(ctx_buffer, {
    numFrames: ctx_buffer.length,
    numChannels: 1,
    sampleRate: sample_rate,
    isFloat: false
  });
  console.log(wavBytes);

  blobs[x] = URL.createObjectURL(
    new Blob([wavBytes], { type: 'audio/wav' })
  );

  document.getElementById("btn_download").href = blobs[0];
  document.getElementById("btn_download").setAttribute('download', 'download.wav');
  document.getElementById("btn_save_all").hidden = true;
  document.getElementById("btn_download").hidden = false;
  x++;
}
Given that you currently have an array of AudioBuffer objects, you can interleave the Float32Array PCM data contained in each AudioBuffer and then create a RIFF/WAV file for download from the interleaved PCM. If each AudioBuffer is one track, all of the left/right channels in the array would have to be combined separately and interleaved at the end. Here's how to start with a single AudioBuffer track:
(The question author's solution, moved to the answer section.)
Final code, thanks to @AnthumChris:
buttons.save_all.addEventListener("click", function () {
  document.getElementById("download_icon").className = "fas fa-spinner loader";
  document.getElementById("download_text").innerText = "Loading...";

  for (let i = 0; i < audiofiles.length; i++) {
    const track = spectrum[i];
    const sampleRate = track.backend.ac.sampleRate;
    const audioBuffer = track.backend.buffer;
    const interleavedSamples = getInterleavedStereo(audioBuffer);
    const file_name = `track-${i+1}.wav`;

    download_init(interleavedSamples.buffer, sampleRate, file_name);
  }
  console.log(blobs);
}, false);

function showError(e) {
  console.log(`ERROR: ${e}`);
}

var x = 0;

function download_init(ctx_buffer, sample_rate, file_name) {
  const wavBytes = getWavBytes(ctx_buffer, {
    numChannels: 2,
    sampleRate: sample_rate,
    isFloat: true   // samples are 32-bit floats straight from the AudioBuffer
  });

  blobs[x] = URL.createObjectURL(
    new Blob([wavBytes], { type: 'audio/wav' })
  );

  buttons.download.href = blobs[0];
  buttons.download.setAttribute('download', file_name);
  buttons.save_all.hidden = true;
  buttons.download.hidden = false;
  x++;
}

// Interleave the left/right channel data of a stereo AudioBuffer
// into a single Float32Array: L0, R0, L1, R1, ...
function getInterleavedStereo(audioBuffer) {
  if (audioBuffer.numberOfChannels !== 2) {
    throw Error('source audio is not stereo');
  }

  const [left, right] = [audioBuffer.getChannelData(0), audioBuffer.getChannelData(1)];
  const interleaved = new Float32Array(left.length + right.length);

  for (let src = 0, dst = 0; src < left.length; src++, dst += 2) {
    interleaved[dst] = left[src];
    interleaved[dst + 1] = right[src];
  }

  return interleaved;
}

// Prepend a 44-byte RIFF/WAVE header to the raw PCM bytes.
function getWavBytes(buffer, options) {
  const type = options.isFloat ? Float32Array : Uint16Array;
  const numFrames = buffer.byteLength / type.BYTES_PER_ELEMENT;

  const headerBytes = getWavHeader(Object.assign({}, options, { numFrames }));
  const wavBytes = new Uint8Array(headerBytes.length + buffer.byteLength);

  wavBytes.set(headerBytes, 0);
  wavBytes.set(new Uint8Array(buffer), headerBytes.length);

  return wavBytes;
}

function getWavHeader(options) {
  const numFrames = options.numFrames;
  const numChannels = options.numChannels || 2;
  const sampleRate = options.sampleRate || 44100;
  const bytesPerSample = options.isFloat ? 4 : 2;
  const format = options.isFloat ? 3 : 1;   // 3 = IEEE float, 1 = integer PCM

  const blockAlign = numChannels * bytesPerSample;
  const byteRate = sampleRate * blockAlign;
  const dataSize = numFrames * blockAlign;

  const buffer = new ArrayBuffer(44);
  const dv = new DataView(buffer);
  let p = 0;

  function writeString(s) {
    for (let i = 0; i < s.length; i++) {
      dv.setUint8(p + i, s.charCodeAt(i));
    }
    p += s.length;
  }

  function writeUint32(d) {
    dv.setUint32(p, d, true);
    p += 4;
  }

  function writeUint16(d) {
    dv.setUint16(p, d, true);
    p += 2;
  }

  writeString('RIFF');              // ChunkID
  writeUint32(dataSize + 36);       // ChunkSize
  writeString('WAVE');              // Format
  writeString('fmt ');              // Subchunk1ID
  writeUint32(16);                  // Subchunk1Size
  writeUint16(format);              // AudioFormat
  writeUint16(numChannels);         // NumChannels
  writeUint32(sampleRate);          // SampleRate
  writeUint32(byteRate);            // ByteRate
  writeUint16(blockAlign);          // BlockAlign
  writeUint16(bytesPerSample * 8);  // BitsPerSample
  writeString('data');              // Subchunk2ID
  writeUint32(dataSize);            // Subchunk2Size

  return new Uint8Array(buffer);
}
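The listing above exports each track as its own stereo WAV. For the original goal of a single mixdown of all tracks, the per-track channels would need to be summed before interleaving, as described at the start of the answer. A rough sketch of that step, assuming every spectrum[i].backend.buffer is a decoded stereo AudioBuffer of the same length and sample rate, and that plain summing without clipping protection is acceptable:

function getMixdownInterleaved(tracks) {
  // tracks: array of decoded stereo AudioBuffers of equal length and sample rate
  const frames = tracks[0].length;
  const left = new Float32Array(frames);
  const right = new Float32Array(frames);

  // Sum each track's channel data into the combined left/right buffers.
  for (const buf of tracks) {
    const l = buf.getChannelData(0);
    const r = buf.getChannelData(1);
    for (let i = 0; i < frames; i++) {
      left[i] += l[i];
      right[i] += r[i];
    }
  }

  // Interleave the combined channels: L0, R0, L1, R1, ...
  const interleaved = new Float32Array(frames * 2);
  for (let i = 0; i < frames; i++) {
    interleaved[i * 2] = left[i];
    interleaved[i * 2 + 1] = right[i];
  }
  return interleaved;
}

The result could then go through the same download path, for example download_init(getMixdownInterleaved(spectrum.map(t => t.backend.buffer)).buffer, sampleRate, 'mixdown.wav'). Per-track gain and panning would have to be applied to the samples (or rendered with an OfflineAudioContext) before the sum; that part is not covered by the code above.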