Web Audio API: stop sound gracefully
The Web Audio API provides a .stop() method to stop a sound.
I want my sound to lower its volume before it stops, and for that I use a gain node. However, I'm running into some weird issues where some sounds just don't play, and I can't figure out why.
Here is a simplified version of what I do:
https://jsfiddle.net/01p1t09n/1/
If you remove the line with setTimeout(), you will hear every sound play. With the setTimeout in place, not all of them play. What really confuses me is that I push and shift consistently, so I should be getting back the right sound source, yet it seems to be a different sound that stops playing. The only way I can see this happening is if AudioContext.decodeAudioData is not synchronous. Try the jsfiddle to get a better idea of what happens, and obviously use headphones.
Here is the code of the jsfiddle:
let url = "https://raw.githubusercontent.com/gleitz/midi-js-soundfonts/gh-pages/MusyngKite/acoustic_guitar_steel-mp3/A4.mp3";
let soundContainer = {};
let notesMap = {"A4": [] };
let _AudioContext_ = AudioContext || webkitAudioContext;
let audioContext = new _AudioContext_();
var oReq = new XMLHttpRequest();
oReq.open("GET", url, true);
oReq.responseType = "arraybuffer";
oReq.onload = function (oEvent) {
var arrayBuffer = oReq.response;
makeLoop(arrayBuffer);
};
oReq.send(null);
function makeLoop(arrayBuffer){
    soundContainer["A4"] = arrayBuffer;
    let currentTime = audioContext.currentTime;
    for(let i = 0; i < 10; i++){
        // playing at regular intervals
        play("A4", currentTime + i * 0.5);
        setTimeout(() => stop("A4"), 500 + i * 500); // remove this line and you will hear all the sounds
    }
}
function play(notePlayed, start) {
    audioContext.decodeAudioData(soundContainer[notePlayed], (buffer) => {
        let source = audioContext.createBufferSource();
        let gainNode = audioContext.createGain();
        // pushing notes into the note map
        notesMap[notePlayed].push({ source, gainNode });
        source.buffer = buffer;
        source.connect(gainNode);
        gainNode.connect(audioContext.destination);
        gainNode.gain.value = 1;
        source.start(start);
    });
}
function stop(notePlayed){
    let note = notesMap[notePlayed].shift();
    note.source.stop();
}
What follows only explains why I do it this way; you can skip it. It is just here to explain why I don't simply use stop().
I do all this because I want to stop the sounds gracefully, so if there is a way to do that without setTimeout, I would gladly take it.
Basically, at the top I have a map containing my sounds (notes such as A1, A#1, B1, ...).
soundMap = {"A": [], "lot": [], "of": [], "sounds": []};
and a play() function where I fill the array whenever I play a sound:
play(sound) {
    // sound is just { soundName, velocity, start }
    let source;
    let gainNode;
    // soundContainer is just a map from sound name to the sound data
    this.audioContext.decodeAudioData(this.soundContainer[sound.soundName], (buffer) => {
        source = this.audioContext.createBufferSource();
        gainNode = this.audioContext.createGain();
        gainNode.gain.value = sound.velocity;
        // pushing the sound into the sound map
        this.soundMap[sound.soundName].push({ source, gainNode });
        source.buffer = buffer;
        source.connect(gainNode);
        gainNode.connect(this.audioContext.destination);
        source.start(sound.start);
    });
}
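For context, a call would look something like this (the values are made up; the shape matches the comment above):

this.play({ soundName: "A4", velocity: 0.8, start: this.audioContext.currentTime });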
Now the part that stops the sounds:
stop(sound){
    // remember: soundMap is a map from "soundName" to { source, gainNode }
    let dasound = this.soundMap[sound.soundName].shift();
    let gain = dasound.gainNode.gain.value - 0.1;
    // we lower the gain in small increments so the sound does not stop abruptly
    let i = 0;
    for(; gain > 0; i++, gain -= 0.1){ // watch out, funky syntax
        ((gain, i) => {
            setTimeout(() => dasound.gainNode.gain.value = gain, 50 * i);
        })(gain, i);
    }
    // we stop the source once the gain has reached 0 (setTimeout delays are in ms)
    setTimeout(() => dasound.source.stop(), i * 50);
}
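For reference, the same stepped fade can be written without the self-invoking function: let in a for loop header gives each iteration its own binding, so the closure capture comes for free. A sketch of the same 50 ms / 0.1 fade:

stop(sound){
    let dasound = this.soundMap[sound.soundName].shift();
    let startGain = dasound.gainNode.gain.value;
    let steps = Math.ceil(startGain / 0.1);
    for(let i = 1; i <= steps; i++){
        // each iteration captures its own i, no IIFE needed
        setTimeout(() => {
            dasound.gainNode.gain.value = Math.max(startGain - i * 0.1, 0);
        }, 50 * i);
    }
    // stop the source once the fade has finished
    setTimeout(() => dasound.source.stop(), (steps + 1) * 50);
}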
Aaaaah! By finally bothering to read "everything" in the documentation (diagonally), I found tons of things. Let me tell you, this API is a diamond in the rough. Anyway, they actually have the AudioParam I wanted:
The AudioParam interface represents an audio-related parameter, usually a parameter of an AudioNode (such as GainNode.gain). An AudioParam can be set to a specific value or a change in value, and can be scheduled to happen at a specific time and following a specific pattern.
It has a function linearRampToValueAtTime(), and they even have an example of exactly what I was asking about!
// create audio context
var AudioContext = window.AudioContext || window.webkitAudioContext;
var audioCtx = new AudioContext();

// set basic variables for example
var myAudio = document.querySelector('audio');
var pre = document.querySelector('pre');
var myScript = document.querySelector('script');

pre.innerHTML = myScript.innerHTML;

var linearRampPlus = document.querySelector('.linear-ramp-plus');
var linearRampMinus = document.querySelector('.linear-ramp-minus');

// Create a MediaElementAudioSourceNode
// Feed the HTMLMediaElement into it
var source = audioCtx.createMediaElementSource(myAudio);

// Create a gain node and start it muted
var gainNode = audioCtx.createGain();

// connect the source to the gainNode
// and the gainNode to the destination
gainNode.gain.setValueAtTime(0, audioCtx.currentTime);
source.connect(gainNode);
gainNode.connect(audioCtx.destination);

// set buttons to do something onclick
linearRampPlus.onclick = function() {
    gainNode.gain.linearRampToValueAtTime(1.0, audioCtx.currentTime + 2);
};
linearRampMinus.onclick = function() {
    gainNode.gain.linearRampToValueAtTime(0, audioCtx.currentTime + 2);
};
They also have different kinds of timing, such as an exponential ramp instead of a linear one, which I guess fits this situation even better.
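So a stop() based on scheduled ramps could look roughly like this, using the first snippet's names. This is only a sketch (stopGracefully is my name, not from the fiddle): exponentialRampToValueAtTime cannot ramp to exactly 0, so it aims for a tiny value instead, and source.stop() conveniently also accepts a time in seconds on the same AudioContext clock, so no setTimeout is needed at all:

function stopGracefully(notePlayed, fadeTime){
    let note = notesMap[notePlayed].shift();
    let gain = note.gainNode.gain;
    let now = audioContext.currentTime;
    // anchor the current value so the ramp has a starting point (assumes gain > 0)
    gain.setValueAtTime(gain.value, now);
    // exponential ramps cannot reach 0, so aim for a tiny value
    gain.exponentialRampToValueAtTime(0.001, now + fadeTime);
    // stop() takes a time in seconds on the AudioContext clock
    note.source.stop(now + fadeTime);
}

// e.g. instead of setTimeout(() => stop("A4"), ...):
// stopGracefully("A4", 0.3);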