OfflineAudioContext Latency Shift
I'm using an OfflineAudioContext to download an input file with effects applied. The download works great and is very fast, but the problem I'm running into is that when I apply gain, I'm using an analyser to signal when the gain should be increased or decreased.
This works fine while the audio is playing through an AudioContext, but the offline version produces a very noticeable shift in the timing of the gain: rises start late and falls start late, as if there were an overall latency shift.
Is there a way to counter this shift? I'm fine with the rendering process taking longer.
var chunks = [];
var fileInput = document.getElementById("input");
var process = document.getElementById("process");
//Load audio file listener
process.addEventListener(
"click",
function () {
// Web Audio
var audioCtx2 = new (window.AudioContext || window.webkitAudioContext)();
// Reset buttons and log
$("#log").empty();
$("#download_link").addClass("d-none");
$("#repeat_link").addClass("d-none");
// Check for file
if (fileInput.files[0] == undefined) {
if ($("#upload_err").hasClass("d-none")) {
$("#upload_err").removeClass("d-none");
}
return false;
}
var reader1 = new FileReader();
reader1.onload = function (ev) {
// console.log("Reader loaded.");
var tempBuffer = audioCtx2.createBufferSource();
// Decode audio
audioCtx2.decodeAudioData(ev.target.result).then(function (buffer) {
// console.log("Duration1 = " + buffer.duration);
var offlineAudioCtx = new OfflineAudioContext({
numberOfChannels: 2,
length: 44100 * buffer.duration,
sampleRate: 44100,
});
// console.log("test 1");
// Audio Buffer Source
var soundSource = offlineAudioCtx.createBufferSource();
var analyser2d = offlineAudioCtx.createAnalyser();
var dgate1 = offlineAudioCtx.createGain();
var dhpf = offlineAudioCtx.createBiquadFilter();
var dhum60 = offlineAudioCtx.createBiquadFilter();
var dcompressor = offlineAudioCtx.createDynamicsCompressor();
dhpf.type = "highpass";
dhpf.Q.value = 0.5;
dhum60.type = "notch";
dhum60.Q.value = 130;
dcompressor.knee.setValueAtTime(40, offlineAudioCtx.currentTime);
dcompressor.attack.setValueAtTime(0.1, offlineAudioCtx.currentTime);
dcompressor.release.setValueAtTime(0.2, offlineAudioCtx.currentTime);
var reader2 = new FileReader();
// console.log("Created Reader");
reader2.onload = function (ev) {
// console.log("Reading audio data to buffer...");
$("#log").append("<p>Buffering...</p>");
soundSource.buffer = buffer;
let context = offlineAudioCtx;
//Before Effects
analyser2d = context.createAnalyser();
analyser2d.fftSize = 2048;
analyser2d.smoothingTimeConstant = 0.85;
const sampleBuffer = new Float32Array(analyser2d.fftSize);
function loop() {
analyser2d.getFloatTimeDomainData(sampleBuffer);
let sumOfSquares = 0;
for (let i = 0; i < sampleBuffer.length; i++) {
sumOfSquares += sampleBuffer[i] ** 2;
}
const avgPowerDecibels = Math.round(
10 * Math.log10(sumOfSquares / sampleBuffer.length)
);
const gainset = avgPowerDecibels > -50 ? 1 : 0;
//real-time effects choices start
if (
document.getElementById("gate").getAttribute("data-active") ===
"true"
) {
dgate1.gain.setTargetAtTime(
gainset,
offlineAudioCtx.currentTime,
0.05
);
} else if (
document.getElementById("gate").getAttribute("data-active") ===
"false"
) {
dgate1.gain.setTargetAtTime(
1,
offlineAudioCtx.currentTime,
0.05
);
}
if (
document.getElementById("hpf").getAttribute("data-active") ===
"true"
) {
dhpf.frequency.value = 90;
} else if (
document.getElementById("hpf").getAttribute("data-active") ===
"false"
) {
dhpf.frequency.value = 0;
}
if (
document.getElementById("hum").getAttribute("data-active") ===
"true"
) {
dhum60.frequency.value = 60;
} else if (
document.getElementById("hum").getAttribute("data-active") ===
"false"
) {
dhum60.frequency.value = 0;
}
if (
document.getElementById("comp").getAttribute("data-active") ===
"true"
) {
dcompressor.threshold.setValueAtTime(
-30,
offlineAudioCtx.currentTime
);
dcompressor.ratio.setValueAtTime(
3.5,
offlineAudioCtx.currentTime
);
} else if (
document.getElementById("comp").getAttribute("data-active") ===
"false"
) {
dcompressor.threshold.setValueAtTime(
0,
offlineAudioCtx.currentTime
);
dcompressor.ratio.setValueAtTime(1, offlineAudioCtx.currentTime);
}
// Display value.
requestAnimationFrame(loop);
}
loop();
soundSource
.connect(analyser2d)
.connect(dhpf)
.connect(dhum60)
.connect(dgate1)
.connect(dcompressor);
dcompressor.connect(offlineAudioCtx.destination);
offlineAudioCtx
.startRendering()
.then(function (renderedBuffer) {
// console.log('Rendering completed successfully.');
$("#log").append("<p>Rendering new file...</p>");
//var song = offlineAudioCtx.createBufferSource();
console.log(
"OfflineAudioContext.length = " + offlineAudioCtx.length
);
split(renderedBuffer, offlineAudioCtx.length);
$("#log").append("<p>Finished!</p>");
})
.catch(function (err) {
// console.log('Rendering failed: ' + err);
$("#log").append("<p>Rendering failed.</p>");
});
soundSource.loop = false;
};
reader2.readAsArrayBuffer(fileInput.files[0]);
soundSource.start(0);
});
};
reader1.readAsArrayBuffer(fileInput.files[0]);
},
false
);
I've included what I believe are the relevant parts of the code. Let me know if more is needed. Thanks!
This doesn't work because an OfflineAudioContext runs faster than real time (sometimes hundreds of times faster). The loop that grabs the analyser data runs roughly every 16 ms. In a real-time system that may be accurate enough, but an offline context can run much faster than real time, so by the time you grab the analyser data, far more audio time has already elapsed. (You can see this by printing the context's currentTime inside the loop; it will likely increase by much more than 16 ms per iteration.)
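To make the mismatch concrete, here is a minimal probe you could drop in place of the analysis work in the question's loop; it assumes only the offlineAudioCtx from the code above. On a real-time context the logged delta hovers around 16 ms, while on an offline context it is usually far larger:
// Hypothetical probe: log how far the offline clock advances between
// requestAnimationFrame callbacks.
let lastTime = offlineAudioCtx.currentTime;
function probe() {
  const now = offlineAudioCtx.currentTime;
  console.log("clock advanced by " + (now - lastTime) * 1000 + " ms");
  lastTime = now;
  requestAnimationFrame(probe);
}
probe();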
The best way to handle this is to use context.suspend(t) to suspend the context at known times so that you can grab the analyser data synchronously. Note that the suspend time is rounded, so it may not be exactly the time you want, but it is probably close enough. Also note that, AFAIK, Firefox has not implemented this, and neither has Safari (but it will soon).
Here is a short snippet of how suspend might be used, though there's more than one way to do it. It's untested, but I think the general idea is right:
// Grab the data every 16 ms, roughly. `suspend` rounds the time up to
// the nearest multiple of 128 frames (or 128/context.sampleRate time).
for (let t = 0; t < <length of buffer>; t += 0.016) {
  context.suspend(t)
    .then(() => {
      // Use the analyser to grab the data and compute the values.
      // Use setValueAtTime and friends to adjust the gain and compressor
      // appropriately. Use context.currentTime to know at what time
      // the context was suspended, and schedule the modifications to be
      // at least 128 samples in the future. (I think.)
    })
    .then(() => context.resume());
}
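For concreteness, here is one way that loop might be fleshed out against the question's own variables (offlineAudioCtx, analyser2d, dgate1, sampleBuffer, buffer). All suspensions are scheduled before startRendering() so that none of them are missed. Like the snippet above, this is an untested sketch, not a drop-in replacement:
const step = 0.016; // analyse roughly every 16 ms of rendered audio
for (let t = step; t < buffer.duration; t += step) {
  offlineAudioCtx.suspend(t).then(() => {
    // The context is frozen near time t, so the analyser now reads the
    // samples that were just rendered.
    analyser2d.getFloatTimeDomainData(sampleBuffer);
    let sumOfSquares = 0;
    for (let i = 0; i < sampleBuffer.length; i++) {
      sumOfSquares += sampleBuffer[i] ** 2;
    }
    const avgPowerDecibels =
      10 * Math.log10(sumOfSquares / sampleBuffer.length);
    // Schedule the gate at least one render quantum (128 frames) ahead
    // of the point where the context was actually suspended.
    const when =
      offlineAudioCtx.currentTime + 128 / offlineAudioCtx.sampleRate;
    dgate1.gain.setTargetAtTime(avgPowerDecibels > -50 ? 1 : 0, when, 0.05);
    return offlineAudioCtx.resume();
  });
}
// ...then call offlineAudioCtx.startRendering() as in the question.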
An alternative is to create a real-time context and process the audio as you do now; when the buffer has finished playing, close the context. Of course, you'll have to add something (a ScriptProcessorNode, an AudioWorkletNode, or a MediaRecorder) to capture the rendered data.
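A rough sketch of that alternative, assuming the node chain from the question is rebuilt on a real-time context (nodes cannot be shared across contexts); lastNode below is a hypothetical stand-in for the final node of that rebuilt chain:
const liveCtx = new AudioContext();
const source = liveCtx.createBufferSource();
source.buffer = buffer; // the decoded buffer from the question

// Rebuild the analyser/filter/gate/compressor chain on liveCtx here and
// connect `source` through it; `lastNode` is its final node.
const lastNode = source; // placeholder: replace with the rebuilt chain's end

const streamDest = liveCtx.createMediaStreamDestination();
lastNode.connect(streamDest);

const recordedChunks = [];
const recorder = new MediaRecorder(streamDest.stream);
recorder.ondataavailable = (e) => recordedChunks.push(e.data);
recorder.onstop = () => {
  const blob = new Blob(recordedChunks, { type: recorder.mimeType });
  // Hand `blob` to the existing download logic.
};

recorder.start();
source.start();
source.onended = () => {
  recorder.stop();
  liveCtx.close(); // done with the real-time context
};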
If none of these work for you, I'm not sure what the alternatives would be.