Web Audio API split mono to stereo
Say I have a source node connected to the destination node. Even when the audio is mono, I want to control the volume of each ear independently, just as I can with stereo audio using splitter and merger nodes.
I have already tried using splitter and merger nodes on a mono source node, but the right channel is empty.
Stereo example:
var audioCtx = new AudioContext();
var source = audioCtx.createMediaElementSource(myAudio);
var gainNodeL = audioCtx.createGain();
var gainNodeR = audioCtx.createGain();
var splitter = audioCtx.createChannelSplitter(2);
var merger = audioCtx.createChannelMerger(2);

// Split the stereo source, run each channel through its own gain node,
// then merge the two channels back together.
source.connect(splitter);
splitter.connect(gainNodeL, 0);
splitter.connect(gainNodeR, 1);
gainNodeL.connect(merger, 0, 0);
gainNodeR.connect(merger, 0, 1);
merger.connect(audioCtx.createMediaStreamDestination());
When I do this with mono audio, the right channel is empty.
If the signal is only mono (in other words, its channelCount is 1), a ChannelSplitterNode is not needed. A mono signal only has channel 0, so the splitter's second output carries silence, which is why your right channel ends up empty. I modified your example slightly; it now splits the oscillator's mono signal.
var audioCtx = new AudioContext();
var oscillator = audioCtx.createOscillator();
var gainNodeL = audioCtx.createGain();
var gainNodeR = audioCtx.createGain();
var merger = audioCtx.createChannelMerger(2);

// The mono oscillator output fans out to both gain nodes; each gain node
// then feeds one input (channel) of the merger.
oscillator.connect(gainNodeL);
oscillator.connect(gainNodeR);
gainNodeL.connect(merger, 0, 0);
gainNodeR.connect(merger, 0, 1);
merger.connect(audioCtx.destination);
oscillator.start();

// Pan hard left, hard right, or back to center by adjusting the two gains.
function left () {
  gainNodeL.gain.value = 1;
  gainNodeR.gain.value = 0;
}
function right () {
  gainNodeL.gain.value = 0;
  gainNodeR.gain.value = 1;
}
function center () {
  gainNodeL.gain.value = 1;
  gainNodeR.gain.value = 1;
}
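The same idea should carry over to the media-element source from your question: a mono output can fan out to any number of nodes, so you can connect it straight to both gain nodes and merge the results. A minimal sketch, assuming myAudio is the mono <audio> element from your example (the node names are just illustrative):

var audioCtx = new AudioContext();
var source = audioCtx.createMediaElementSource(myAudio);
var gainNodeL = audioCtx.createGain();
var gainNodeR = audioCtx.createGain();
var merger = audioCtx.createChannelMerger(2);

// Fan the single mono output out to both gain nodes; no splitter needed.
source.connect(gainNodeL);
source.connect(gainNodeR);

// Feed each gain node into its own channel of the merger.
gainNodeL.connect(merger, 0, 0);
gainNodeR.connect(merger, 0, 1);
merger.connect(audioCtx.destination);

If clicks are audible when switching sides, you could also ramp the gains with gainNodeL.gain.setTargetAtTime(value, audioCtx.currentTime, 0.01) instead of assigning .value directly.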