How to record directly from a webpage using javascript
I'm making a small music app. I want to be able to record the sounds made in the browser without relying on the microphone. Everything I have seen so far about the MediaRecorder api suggests that it relies on the mic. Ideally, I'd like to achieve this without using any external libraries.
For reference, here is how I make a very simple sound:
var congo = new Audio('http://www.denhaku.com/r_box/sr16/sr16perc/hi conga.wav');
var drumpad = document.getElementById('drumpad');
drumpad.addEventListener('click', function(){
congo.play();
});
Thanks.
Edit: To be clearer, how would I record the sound made by the code snippet I included, without relying on the computer's built-in microphone? For example, say a user is making sounds with the drum pad while wearing headphones; the microphone would then be useless. And even without headphones, it would still pick up a lot of background noise. I want to isolate what gets recorded to just the music being made in the specific browser tab the user has this app open in.
So far, everything I have seen about the MediaRecorder api suggests that it relies on the mic.
No. The MediaRecorder API does rely on MediaStreams, but these MediaStreams don't have to be LocalMediaStreams (i.e. from gUM):
You can get a MediaStream from a MediaElement's (<audio>, <video>) captureStream() method, provided the loaded media complies with the same-origin policy.
But this will return one MediaStream per MediaElement, which in your case is probably not the best solution.
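For completeness, here is a minimal sketch of that element-capture approach (the file URL is a placeholder, and note that Firefox still prefixes the method as mozCaptureStream()):

const audio = new Audio('conga.wav'); // placeholder; must satisfy the same-origin policy
// captureStream() is unprefixed in Chromium; Firefox exposes mozCaptureStream()
const stream = audio.captureStream ? audio.captureStream() : audio.mozCaptureStream();
const recorder = new MediaRecorder(stream); // one stream, and thus one recorder, per element
audio.play();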
Instead, jump to the Web Audio API, which is better suited to an application like a drum-pad anyway.
The Web Audio API does have a createMediaStreamDestination() method, which returns a MediaStreamAudioDestinationNode holding a MediaStream in its .stream property. Every other node you connect to this MediaStreamAudioDestinationNode will be fed into that MediaStream, and you will be able to record it with a MediaRecorder.
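The core routing is only a few lines. A minimal sketch (the OscillatorNode is just a stand-in for your real sources):

const a_ctx = new AudioContext();
const dest = a_ctx.createMediaStreamDestination(); // the MediaStreamAudioDestinationNode
const osc = a_ctx.createOscillator();              // stand-in source; any node works
osc.connect(dest);                                 // whatever connects to dest ends up in dest.stream
osc.connect(a_ctx.destination);                    // also route to the speakers so we can hear it
const recorder = new MediaRecorder(dest.stream);   // record the tab's own audio, no mic involved
recorder.start();
osc.start();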
Let's recycle this drum-kit to include a recorder:
(function myFirstDrumKit() {

  const db_url = 'https://dl.dropboxusercontent.com/s/'; // all our media files are stored on dropbox

  // we'll need to first load all the audio files
  function initAudios() {
    const promises = drum.parts.map(part => {
      return fetch(db_url + part.audio_src) // fetch the file
        .then(resp => resp.arrayBuffer()) // as an arrayBuffer
        .then(buf => drum.a_ctx.decodeAudioData(buf)) // then decode its audio data
        .then(AudioBuf => {
          part.buf = AudioBuf; // store the audioBuffer (won't change)
          return Promise.resolve(part); // done
        });
    });
    return Promise.all(promises); // when all are loaded
  }

  function initImages() {
    // in this version we have only a static image,
    // but we could have multiple per part, with the same logic as for the audio files
    var img = new Image();
    img.src = db_url + drum.bg_src;
    drum.bg = img;
    return new Promise((res, rej) => {
      img.onload = res;
      img.onerror = rej;
    });
  }

  let general_solo = false;
  let part_solo = false;

  const drum = {
    a_ctx: new AudioContext(),
    generate_sound: (part) => {
      // called each time we need to play a source
      const source = drum.a_ctx.createBufferSource();
      source.buffer = part.buf;
      source.connect(drum.gain);
      // to keep only one playing at a time,
      // simply store this sourceNode, and stop the previous one
      if (general_solo) {
        // stop all playing sources
        drum.parts.forEach(p => (p.source && p.source.stop(0)));
      } else if (part_solo && part.source) {
        // stop only the one of this part
        part.source.stop(0);
      }
      // store the source
      part.source = source;
      source.start(0);
    },
    parts: [{
        name: 'hihat',
        x: 90,
        y: 116,
        w: 160,
        h: 70,
        audio_src: 'kbgd2jm7ezk3u3x/hihat.mp3'
      },
      {
        name: 'snare',
        x: 79,
        y: 192,
        w: 113,
        h: 58,
        audio_src: 'h2j6vm17r07jf03/snare.mp3'
      },
      {
        name: 'kick',
        x: 80,
        y: 250,
        w: 200,
        h: 230,
        audio_src: '1cdwpm3gca9mlo0/kick.mp3'
      },
      {
        name: 'tom',
        x: 290,
        y: 210,
        w: 110,
        h: 80,
        audio_src: 'h8pvqqol3ovyle8/tom.mp3'
      }
    ],
    bg_src: '0jkaeoxls18n3y5/_drumkit.jpg?dl=0',

    //////////////////////
    /// The recording part
    //////////////////////
    record: function record(e) {
      const btn = document.getElementById('record');
      const chunks = [];
      // init a new MediaRecorder with our StreamNode's stream
      const recorder = new MediaRecorder(drum.streamNode.stream);
      // save every chunk
      recorder.ondataavailable = e => chunks.push(e.data);
      // once we're done recording
      recorder.onstop = e => {
        // export our recording
        const blob = new Blob(chunks);
        const url = URL.createObjectURL(blob);
        // here in an <audio> element
        const a = new Audio(url);
        a.controls = true;
        document.getElementById('records').appendChild(a);
        // reset default click handler
        btn.onclick = drum.record;
        btn.textContent = 'record';
      };
      btn.onclick = function () {
        recorder.stop();
      };
      // start recording
      recorder.start();
      btn.textContent = 'stop recording';
    }
  };

  drum.gain = drum.a_ctx.createGain();
  drum.gain.gain.value = .5;
  drum.gain.connect(drum.a_ctx.destination);
  // for recording
  drum.streamNode = drum.a_ctx.createMediaStreamDestination();
  drum.gain.connect(drum.streamNode);

  document.getElementById('record').onclick = drum.record;

  /////////////
  // Unrelated to current question
  ////////////
  function initCanvas() {
    const c = drum.canvas = document.createElement('canvas');
    const ctx = drum.ctx = c.getContext('2d');
    c.width = drum.bg.width;
    c.height = drum.bg.height;
    ctx.drawImage(drum.bg, 0, 0);
    document.body.appendChild(c);
    addEvents(c);
  }

  const isHover = (x, y) =>
    (drum.parts.filter(p => (p.x < x && p.x + p.w > x && p.y < y && p.y + p.h > y))[0] || false);

  function addEvents(canvas) {
    let mouse_hovered = false;
    canvas.addEventListener('mousemove', e => {
      mouse_hovered = isHover(e.pageX - canvas.offsetLeft, e.pageY - canvas.offsetTop);
      if (mouse_hovered) {
        canvas.style.cursor = 'pointer';
      } else {
        canvas.style.cursor = 'default';
      }
    });
    canvas.addEventListener('mousedown', e => {
      e.preventDefault();
      if (mouse_hovered) {
        drum.generate_sound(mouse_hovered);
      }
    });
    const checkboxes = document.querySelectorAll('input');
    checkboxes[0].onchange = function () {
      general_solo = this.checked;
      general_solo && (checkboxes[1].checked = part_solo = true);
    };
    checkboxes[1].onchange = function () {
      part_solo = this.checked;
      !part_solo && (checkboxes[0].checked = general_solo = false);
    };
  }

  Promise.all([initAudios(), initImages()])
    .then(initCanvas);

})()
label{float: right}
<button id="record">record</button>
<label>general solo<input type="checkbox"></label><br>
<label>part solo<input type="checkbox"></label><br>
<div id="records"></div>
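If you'd rather offer the recording as a download than play it back in an <audio> element, the onstop handler could build a link instead. A small sketch (the 'audio/webm' fallback and the file name are assumptions; the actual container depends on the browser):

// inside recorder.onstop, reusing the chunks array from above
const blob = new Blob(chunks, { type: recorder.mimeType || 'audio/webm' }); // assumed fallback type
const link = document.createElement('a');
link.href = URL.createObjectURL(blob);
link.download = 'recording.webm'; // hypothetical file name
link.textContent = 'download recording';
document.getElementById('records').appendChild(link);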