您能否将 AudioContext/Analyser 附加到已加载 src 的 HTML 音频节点?
Can you attach an AudioContext/Analyser to an HTML audio node with a src already loaded?
我有一个基本的 HTML 音频源,我只是想将一个分析器节点连接到它的输出,以便在您播放文件时显示可视化工具。有什么想法吗?
当我尝试连接这些节点时也卡住了。当我创建一个 AudioContext 并将其附加到源时,没有得到任何输出。我确定只是路由/操作方式有误。有什么帮助吗?提前致谢!
js:
$(document).ready(function ()
{
  const audio = document.getElementById("audio");
  const audioCtx = new AudioContext();
  const canvas = document.getElementById("canvas");
  const canvasCtx = canvas.getContext("2d");
  const co = "https://cors-anywhere.herokuapp.com/";
  const audioSrc = "https://pineprojectmusic.s3-us-west-2.amazonaws.com/StreamingSongs/Radio.mp3";
  var track;
  // Fetch mp3
  audio.addEventListener("play", function ()
  {
    // Circumvent Chrome autoplay AudioContext issues: resume the context
    // inside a user-initiated "play" event.
    if (audioCtx.state === "suspended")
    {
      audioCtx.resume();
    }
    // Create the media-element source once and route it to the speakers.
    // createMediaElementSource() throws if called twice on the same element,
    // so guard with the `track` variable.
    if (!track)
    {
      track = audioCtx.createMediaElementSource(audio);
      track.connect(audioCtx.destination);
    }
    // BUG FIX: the original second branch repeated the `=== "false"` test and
    // both branches set `playing` to true, so the pause path never executed.
    if (this.dataset.playing === "false")
    {
      alert("Playing");
      audio.play();
      this.dataset.playing = "true";
    } else if (this.dataset.playing === "true")
    {
      alert("Stopped");
      audio.pause();
      this.dataset.playing = "false";
    }
  }, false);

  // Resume a previously-suspended AudioContext (helper, currently unused by
  // the listener above which resumes inline).
  function setUpContext()
  {
    if (typeof audioCtx != 'undefined')
    {
      audioCtx.resume();
    }
  }

  // Where we fetch the mp3 file from S3 Bucket
  /*
  fetch(co + audioSrc)
      .then(response => response.arrayBuffer())
      .then(data => loadSong(data)); */

  // Decode a fetched ArrayBuffer and play it through the context.
  // BUG FIX: decodeAudioData() is a method of the AudioContext, not of the
  // ArrayBuffer; MediaSource has no createMediaElementSource(); and
  // `audio.src` is a string, which cannot be connect()-ed to a node.
  function loadSong(data)
  {
    console.log(data);
    audioCtx.decodeAudioData(data, function (decodedAudio)
    {
      const bufferSrc = audioCtx.createBufferSource();
      bufferSrc.buffer = decodedAudio;
      bufferSrc.connect(audioCtx.destination);
      bufferSrc.start();
    });
  }
}); // end of DOM event
/* Hide the default focus outline around the audio element. */
#audio:focus {
outline: none;
}
/* #thefile element (not shown in this snippet) pinned to the top-left
   corner, layered above the canvas. */
#thefile {
position: fixed;
top: 10px;
left: 10px;
z-index: 100;
}
/* Full-viewport canvas for the visualiser. */
#canvas {
position: fixed;
left: 0;
top: 0;
width: 100%;
height: 100%;
}
/* Audio controls pinned along the bottom edge with 10px side margins. */
audio {
position: fixed;
left: 10px;
bottom: 10px;
width: calc(100% - 20px);
}
<!-- jQuery is required by the $(document).ready() bootstrap in the script. -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
<div id="content">
<!-- Visualiser canvas; the <audio> element below supplies the stream. -->
<canvas id="canvas"></canvas>
<audio id="audio" controls controlsList="nodownload" src="https://pineprojectmusic.s3-us-west-2.amazonaws.com/StreamingSongs/Radio.mp3">
</audio>
</div>
看起来路由有误。
规范很好地描述了 Modular Routing 主题。
所以重点是:
在 AudioContext 中,信号从源到目的地,可选择通过其他音频节点。
因此我们必须以正确的顺序连接它们:
源 --> 分析器 --> 目标
参见下面的示例:
// Build the audio graph: <audio> element --> analyser --> speakers.
const context = new (window.AudioContext || window.webkitAudioContext)();

// Grab the media element and make sure the context is running once
// playback starts (autoplay policies suspend it until a user gesture).
const player = document.querySelector('audio');
player.onplay = () => context.resume();

const mediaSource = context.createMediaElementSource(player);

// Analyser with a 256-point FFT; frequencyBinCount is half the fftSize.
const analyserNode = context.createAnalyser();
analyserNode.fftSize = 256;
const binCount = analyserNode.frequencyBinCount;
const samples = new Uint8Array(binCount);

// Wire the nodes in order: source --> analyser --> destination.
mediaSource.connect(analyserNode);
analyserNode.connect(context.destination);

// Visualisation: one <i> bar per frequency bin, appended to the <section>.
const host = document.querySelector('section');
const bars = [];
for (let i = 0; i < binCount; i += 1) {
  const bar = document.createElement('i');
  host.appendChild(bar);
  bars.push(bar);
}

// Poll the waveform and map each sample's distance from the midline (128)
// onto the 0-360 `--c` custom property the CSS uses for height and hue.
setInterval(() => {
  analyserNode.getByteTimeDomainData(samples);
  samples.forEach((sample, i) => {
    bars[i].style.setProperty('--c', Math.abs(128 - sample) * 2.8125 | 0);
  });
}, 15);
/* Fill the viewport, drop default margins, black backdrop for the bars. */
html,
body {
width: 100vw;
height: 100vh;
margin: 0;
padding: 0;
background: #000
}
/* Flex row of bars, vertically centred, one per analyser bin. */
section {
display: flex;
align-items: center;
height: 100vh
}
/* Each bar: height and hue are driven by the --c custom property that the
   JS sets from the analyser's time-domain data. */
section i {
flex: 1 1 100%;
height: calc(100vh * var(--c)/360);
border-radius: 55%;
background: hsl(var(--c), 95%, 45%);
}
/* Semi-transparent audio controls pinned along the bottom edge. */
audio {
position: fixed;
width: calc(100% - 2em);
bottom: 1em;
left: 1em;
opacity: .3
}
<section></section>
<!-- crossorigin="anonymous" lets the Web Audio API read the CORS-enabled
     stream; without it the analyser receives only silence.
     FIX: the <audio> element was left unclosed. -->
<audio controls src="https://ice3.somafm.com/defcon-128-mp3" crossorigin="anonymous"></audio>
希望对您有所帮助。
我发现有两个问题为我解决了:
- 我忘记在我的 S3 CORS 策略 (derp) 中添加
<AllowedHeader>
条目。
- 由于网络浏览器新的自动播放策略,您必须在用户交互之后才实例化新的 AudioContext(我把它放在了音频标签的 "play" 事件监听器中)。
音频现在可以正常通过了,一切准备就绪!希望这对将来遇到同样问题的人有帮助! :)
代码(希望以后能帮助到同一条船上的人):
// Declare variables
// URL of the MP3 streamed from the S3 bucket.
const url = "https://pineprojectmusic.s3-us-west-2.amazonaws.com/StreamingSongs/Radio.mp3";
// Cross-browser AudioContext constructor (webkit prefix for older Safari).
var AudioContext = (window.AudioContext || window.webkitAudioContext);
// if (audioCtx) { audioCtx = new (audioCtx) };
// Audio-graph nodes, created lazily on the first "play" event.
var source, analyser, ctx;
// Housekeeping when DOM loads
document.addEventListener("DOMContentLoaded", function ()
{
// Make Audio File
const audio = new Audio();
audio.id = "audio";
audio.src = url;
audio.controls = true;
audio.controlsList = "nodownload";
audio.crossOrigin = "anonymous";
audio.autoload = "auto";
// Create Canvas
const canvas = document.createElement("CANVAS");
canvas.id = "canvas";
canvasCtx = canvas.getContext("2d");
// Insert it into HTML
document.getElementById("audio_container").appendChild(audio);
document.getElementById("audio_container").appendChild(canvas);
audio.addEventListener("play", playAudio);
function playAudio()
{
// Set up routes
const audioCtx = new(AudioContext);
if (!analyser)
{
analyser = audioCtx.createAnalyser();
analyser.fftSize = 256;
};
if (!source)
{
source = audioCtx.createMediaElementSource(audio);
source.connect(audioCtx.destination);
};
audioAnalyser();
};
我有一个基本的 HTML 音频源,我只是想将一个分析器节点连接到它的输出,以便在您播放文件时显示可视化工具。有什么想法吗?
当我尝试连接这些节点时也卡住了。当我创建一个 AudioContext 并将其附加到源时,没有得到任何输出。我确定只是路由/操作方式有误。有什么帮助吗?提前致谢!
js:
$(document).ready(function ()
{
  const audio = document.getElementById("audio");
  const audioCtx = new AudioContext();
  const canvas = document.getElementById("canvas");
  const canvasCtx = canvas.getContext("2d");
  const co = "https://cors-anywhere.herokuapp.com/";
  const audioSrc = "https://pineprojectmusic.s3-us-west-2.amazonaws.com/StreamingSongs/Radio.mp3";
  var track;
  // Fetch mp3
  audio.addEventListener("play", function ()
  {
    // Circumvent Chrome autoplay AudioContext issues: resume the context
    // inside a user-initiated "play" event.
    if (audioCtx.state === "suspended")
    {
      audioCtx.resume();
    }
    // Create the media-element source once and route it to the speakers.
    // createMediaElementSource() throws if called twice on the same element,
    // so guard with the `track` variable.
    if (!track)
    {
      track = audioCtx.createMediaElementSource(audio);
      track.connect(audioCtx.destination);
    }
    // BUG FIX: the original second branch repeated the `=== "false"` test and
    // both branches set `playing` to true, so the pause path never executed.
    if (this.dataset.playing === "false")
    {
      alert("Playing");
      audio.play();
      this.dataset.playing = "true";
    } else if (this.dataset.playing === "true")
    {
      alert("Stopped");
      audio.pause();
      this.dataset.playing = "false";
    }
  }, false);

  // Resume a previously-suspended AudioContext (helper, currently unused by
  // the listener above which resumes inline).
  function setUpContext()
  {
    if (typeof audioCtx != 'undefined')
    {
      audioCtx.resume();
    }
  }

  // Where we fetch the mp3 file from S3 Bucket
  /*
  fetch(co + audioSrc)
      .then(response => response.arrayBuffer())
      .then(data => loadSong(data)); */

  // Decode a fetched ArrayBuffer and play it through the context.
  // BUG FIX: decodeAudioData() is a method of the AudioContext, not of the
  // ArrayBuffer; MediaSource has no createMediaElementSource(); and
  // `audio.src` is a string, which cannot be connect()-ed to a node.
  function loadSong(data)
  {
    console.log(data);
    audioCtx.decodeAudioData(data, function (decodedAudio)
    {
      const bufferSrc = audioCtx.createBufferSource();
      bufferSrc.buffer = decodedAudio;
      bufferSrc.connect(audioCtx.destination);
      bufferSrc.start();
    });
  }
}); // end of DOM event
/* Hide the default focus outline around the audio element. */
#audio:focus {
outline: none;
}
/* #thefile element (not shown in this snippet) pinned to the top-left
   corner, layered above the canvas. */
#thefile {
position: fixed;
top: 10px;
left: 10px;
z-index: 100;
}
/* Full-viewport canvas for the visualiser. */
#canvas {
position: fixed;
left: 0;
top: 0;
width: 100%;
height: 100%;
}
/* Audio controls pinned along the bottom edge with 10px side margins. */
audio {
position: fixed;
left: 10px;
bottom: 10px;
width: calc(100% - 20px);
}
<!-- jQuery is required by the $(document).ready() bootstrap in the script. -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
<div id="content">
<!-- Visualiser canvas; the <audio> element below supplies the stream. -->
<canvas id="canvas"></canvas>
<audio id="audio" controls controlsList="nodownload" src="https://pineprojectmusic.s3-us-west-2.amazonaws.com/StreamingSongs/Radio.mp3">
</audio>
</div>
看起来路由有误。
规范很好地描述了 Modular Routing 主题。
所以重点是:
在 AudioContext 中,信号从源到目的地,可选择通过其他音频节点。
因此我们必须以正确的顺序连接它们:
源 --> 分析器 --> 目标
参见下面的示例:
// Build the audio graph: <audio> element --> analyser --> speakers.
const context = new (window.AudioContext || window.webkitAudioContext)();

// Grab the media element and make sure the context is running once
// playback starts (autoplay policies suspend it until a user gesture).
const player = document.querySelector('audio');
player.onplay = () => context.resume();

const mediaSource = context.createMediaElementSource(player);

// Analyser with a 256-point FFT; frequencyBinCount is half the fftSize.
const analyserNode = context.createAnalyser();
analyserNode.fftSize = 256;
const binCount = analyserNode.frequencyBinCount;
const samples = new Uint8Array(binCount);

// Wire the nodes in order: source --> analyser --> destination.
mediaSource.connect(analyserNode);
analyserNode.connect(context.destination);

// Visualisation: one <i> bar per frequency bin, appended to the <section>.
const host = document.querySelector('section');
const bars = [];
for (let i = 0; i < binCount; i += 1) {
  const bar = document.createElement('i');
  host.appendChild(bar);
  bars.push(bar);
}

// Poll the waveform and map each sample's distance from the midline (128)
// onto the 0-360 `--c` custom property the CSS uses for height and hue.
setInterval(() => {
  analyserNode.getByteTimeDomainData(samples);
  samples.forEach((sample, i) => {
    bars[i].style.setProperty('--c', Math.abs(128 - sample) * 2.8125 | 0);
  });
}, 15);
/* Fill the viewport, drop default margins, black backdrop for the bars. */
html,
body {
width: 100vw;
height: 100vh;
margin: 0;
padding: 0;
background: #000
}
/* Flex row of bars, vertically centred, one per analyser bin. */
section {
display: flex;
align-items: center;
height: 100vh
}
/* Each bar: height and hue are driven by the --c custom property that the
   JS sets from the analyser's time-domain data. */
section i {
flex: 1 1 100%;
height: calc(100vh * var(--c)/360);
border-radius: 55%;
background: hsl(var(--c), 95%, 45%);
}
/* Semi-transparent audio controls pinned along the bottom edge. */
audio {
position: fixed;
width: calc(100% - 2em);
bottom: 1em;
left: 1em;
opacity: .3
}
<section></section>
<!-- crossorigin="anonymous" lets the Web Audio API read the CORS-enabled
     stream; without it the analyser receives only silence.
     FIX: the <audio> element was left unclosed. -->
<audio controls src="https://ice3.somafm.com/defcon-128-mp3" crossorigin="anonymous"></audio>
希望对您有所帮助。
我发现有两个问题为我解决了:
- 我忘记在我的 S3 CORS 策略 (derp) 中添加
<AllowedHeader>
条目。 - 由于网络浏览器新的自动播放策略,您必须在用户交互之后才实例化新的 AudioContext(我把它放在了音频标签的 "play" 事件监听器中)。
音频现在可以正常通过了,一切准备就绪!希望这对将来遇到同样问题的人有帮助! :)
代码(希望以后能帮助到同一条船上的人):
// Declare variables
// URL of the MP3 streamed from the S3 bucket.
const url = "https://pineprojectmusic.s3-us-west-2.amazonaws.com/StreamingSongs/Radio.mp3";
// Cross-browser AudioContext constructor (webkit prefix for older Safari).
var AudioContext = (window.AudioContext || window.webkitAudioContext);
// if (audioCtx) { audioCtx = new (audioCtx) };
// Audio-graph nodes, created lazily on the first "play" event.
var source, analyser, ctx;
// Housekeeping when DOM loads
document.addEventListener("DOMContentLoaded", function ()
{
// Make Audio File
const audio = new Audio();
audio.id = "audio";
audio.src = url;
audio.controls = true;
audio.controlsList = "nodownload";
audio.crossOrigin = "anonymous";
audio.autoload = "auto";
// Create Canvas
const canvas = document.createElement("CANVAS");
canvas.id = "canvas";
canvasCtx = canvas.getContext("2d");
// Insert it into HTML
document.getElementById("audio_container").appendChild(audio);
document.getElementById("audio_container").appendChild(canvas);
audio.addEventListener("play", playAudio);
function playAudio()
{
// Set up routes
const audioCtx = new(AudioContext);
if (!analyser)
{
analyser = audioCtx.createAnalyser();
analyser.fftSize = 256;
};
if (!source)
{
source = audioCtx.createMediaElementSource(audio);
source.connect(audioCtx.destination);
};
audioAnalyser();
};