Google Chrome Javascript 获取用户音频时出现问题 - 不允许启动 AudioContext
Google Chrome Javascript issue in getting user audio - The AudioContext was not allowed to start
我有这个 Javascript 代码,当用户点击麦克风按钮时,我用它来捕获用户的音频输入。此代码在 Mozilla Firefox 中有效,但当我在 Google Chrome 中使用它时,它不起作用并在控制台中显示此 warning/error - The AudioContext was not allowed to start. It must be resumed (or created) after a user gesture on the page.
var r = function() {
    // Voice-chat audio module: streams microphone input over a socket as
    // 16-bit PCM and plays back base64 WAV TTS responses.
    // Returns the API object ({toggleRecording, onAudioTTS, initAudio}),
    // or undefined on IE (no Web Audio support there, as in the original).
    var api = {};
    var socket;                                     // emitter captured by initAudio()
    var botConfig = getBotConfig();
    var ttsPlayer = new Audio("data:audio/wav;base64,");
    var isRecording = false;

    if (!botConfig.isIE()) {
        window.AudioContext = window.AudioContext || window.webkitAudioContext;

        // Created at page load, i.e. BEFORE any user gesture, so Chrome's
        // autoplay policy puts the context in the "suspended" state. It is
        // resumed inside toggleRecording() below, which runs from the mic
        // button's click handler (a user gesture).
        var audioCtx = new AudioContext;

        // Duplicate a mono source onto both channels of a stereo merger.
        var monoToStereo = function(sourceNode) {
            var splitter = audioCtx.createChannelSplitter(2);
            var merger = audioCtx.createChannelMerger(2);
            sourceNode.connect(splitter);
            splitter.connect(merger, 0, 0);
            splitter.connect(merger, 0, 1);
            return merger;
        };

        // getUserMedia success callback: wire the mic stream into a
        // ScriptProcessor that converts Float32 samples to little-endian
        // signed 16-bit PCM and emits each buffer while recording is on.
        var onMicStream = function(stream) {
            var inputGain = audioCtx.createGain();
            var micSource = audioCtx.createMediaStreamSource(stream);
            monoToStereo(micSource).connect(inputGain);

            var processor = (audioCtx.createScriptProcessor || audioCtx.createJavaScriptNode)
                .call(audioCtx, 1024, 1, 1);
            processor.onaudioprocess = function(event) {
                if (!isRecording) {
                    return;
                }
                var samples = event.inputBuffer.getChannelData(0);
                var pcm = new ArrayBuffer(2 * samples.length);
                var view = new DataView(pcm);
                for (var i = 0, offset = 0; i < samples.length; i++, offset += 2) {
                    // Clamp to [-1, 1]; asymmetric scaling keeps -1 at -32768.
                    var sample = Math.max(-1, Math.min(1, samples[i]));
                    view.setInt16(offset, sample < 0 ? 32768 * sample : 32767 * sample, true);
                }
                socket.emit("write-audio", pcm);
            };
            inputGain.connect(processor);
            processor.connect(audioCtx.destination);

            // Muted monitoring path so the user does not hear themselves.
            var muteGain = audioCtx.createGain();
            muteGain.gain.value = 0;
            inputGain.connect(muteGain);
            muteGain.connect(audioCtx.destination);
        };

        // Play a base64-encoded WAV payload through the shared <audio> element.
        var playTTS = function(base64Wav) {
            ttsPlayer.src = "data:audio/wav;base64," + base64Wav;
            ttsPlayer.play();
        };

        // Start/stop recording. Called from the mic button's click handler.
        // (button, socket, recordingBg, recordingMicColor, idleMicColor, sessionId, botId)
        api.toggleRecording = function(button, sock, recordingBg, recordingMicColor, idleMicColor, sessionId, botId) {
            // FIX: this runs on a user gesture, so resume the AudioContext
            // that Chrome created in the "suspended" state at page load.
            // Without this, no audio is processed and Chrome logs
            // "The AudioContext was not allowed to start".
            if (audioCtx.state === "suspended") {
                audioCtx.resume();
            }
            var micIcon = document.getElementsByClassName("fa-microphone")[0];
            if (button.classList.contains("recording")) {
                button.classList.remove("recording");
                isRecording = false;
                sock.emit("end-recording", {
                    session_id: sessionId,
                    bot_id: botId
                });
                document.getElementById("btnToggle").setAttribute("style", "background-color:transparent");
                if (micIcon) {
                    micIcon.setAttribute("style", "color:" + idleMicColor);
                }
            } else {
                button.classList.add("recording");
                isRecording = true;
                sock.emit("start-recording", {
                    numChannels: 1,
                    bps: 16,
                    fps: parseInt(audioCtx.sampleRate),
                    session_id: sessionId,
                    bot_id: botId
                });
                document.getElementById("btnToggle").setAttribute("style", "background-color:" + recordingBg);
                if (micIcon) {
                    micIcon.setAttribute("style", "color:" + recordingMicColor);
                }
            }
        };

        // Stop any TTS currently playing, then play the new payload; if
        // pause() throws (e.g. nothing loaded yet) still play the new clip.
        api.onAudioTTS = function(base64Wav) {
            try {
                ttsPlayer.pause();
                playTTS(base64Wav);
            } catch (err) {
                playTTS(base64Wav);
            }
        };

        // Capture the socket and request microphone access. Uses the legacy
        // navigator.getUserMedia with vendor-prefix fallbacks, as the
        // original did; 2nd/3rd parameters are accepted but unused.
        api.initAudio = function(sock, _recordingMicColor, _idleMicColor) {
            console.log("audio initiated");
            socket = sock;
            navigator.getUserMedia || (navigator.getUserMedia = navigator.webkitGetUserMedia || navigator.mozGetUserMedia);
            navigator.cancelAnimationFrame || (navigator.cancelAnimationFrame = navigator.webkitCancelAnimationFrame || navigator.mozCancelAnimationFrame);
            navigator.requestAnimationFrame || (navigator.requestAnimationFrame = navigator.webkitRequestAnimationFrame || navigator.mozRequestAnimationFrame);
            navigator.getUserMedia({
                audio: true
            }, onMicStream, function(err) {
                alert("Error getting audio");
                console.log(err);
            });
        };

        return api;
    }
};
warning/error 出现在第 var i = new AudioContext;
行。它以前也可以在 Google Chrome 浏览器上工作,但现在不工作了。 Google 开发者页面上的描述说 resume()
必须用于但我不确定我应该如何以及在何处执行此操作。
您应该可以在调用 play()
之前的某处调用 resume()
。重要的是在用户中调用它 action/event - 就像点击麦克风按钮一样。
Key Point: If an AudioContext is created prior to the document
receiving a user gesture, it will be created in the "suspended" state,
and you will need to call resume() after a user gesture is received.
来自 https://developers.google.com/web/updates/2017/09/autoplay-policy-changes
It used to work before on Google Chrome browser as well but now it's
not working.
最近在 chrome 更新中实施了新政策。
你应该在某处调用 getAudioContext().resume();。
如果您的问题与访问麦克风时的 p5.js 有关,请在 setup
中执行此操作
function setup() {
// p5.js setup hook: create and start the microphone input, then resume the
// shared AudioContext. Chrome creates the context "suspended" until it is
// resumed after a user gesture — resuming here is the workaround the answer
// proposes (see touchStarted below for the gesture-driven alternative).
mic = new p5.AudioIn();
mic.start();
getAudioContext().resume();
}
或在文件中添加 touchStarted 函数。您必须点击网页才能触发此函数。
function touchStarted() {
// p5.js hook fired on the first touch/click — a user gesture, which is when
// Chrome's autoplay policy allows a suspended AudioContext to be resumed.
getAudioContext().resume();
}
我有这个 Javascript 代码,当用户点击麦克风按钮时,我用它来捕获用户的音频输入。此代码在 Mozilla Firefox 中有效,但当我在 Google Chrome 中使用它时,它不起作用并在控制台中显示此 warning/error - The AudioContext was not allowed to start. It must be resumed (or created) after a user gesture on the page.
var r = function() {
    // Voice-chat audio module: streams microphone input over a socket as
    // 16-bit PCM and plays back base64 WAV TTS responses.
    // Returns the API object ({toggleRecording, onAudioTTS, initAudio}),
    // or undefined on IE (no Web Audio support there, as in the original).
    var api = {};
    var socket;                                     // emitter captured by initAudio()
    var botConfig = getBotConfig();
    var ttsPlayer = new Audio("data:audio/wav;base64,");
    var isRecording = false;

    if (!botConfig.isIE()) {
        window.AudioContext = window.AudioContext || window.webkitAudioContext;

        // Created at page load, i.e. BEFORE any user gesture, so Chrome's
        // autoplay policy puts the context in the "suspended" state. It is
        // resumed inside toggleRecording() below, which runs from the mic
        // button's click handler (a user gesture).
        var audioCtx = new AudioContext;

        // Duplicate a mono source onto both channels of a stereo merger.
        var monoToStereo = function(sourceNode) {
            var splitter = audioCtx.createChannelSplitter(2);
            var merger = audioCtx.createChannelMerger(2);
            sourceNode.connect(splitter);
            splitter.connect(merger, 0, 0);
            splitter.connect(merger, 0, 1);
            return merger;
        };

        // getUserMedia success callback: wire the mic stream into a
        // ScriptProcessor that converts Float32 samples to little-endian
        // signed 16-bit PCM and emits each buffer while recording is on.
        var onMicStream = function(stream) {
            var inputGain = audioCtx.createGain();
            var micSource = audioCtx.createMediaStreamSource(stream);
            monoToStereo(micSource).connect(inputGain);

            var processor = (audioCtx.createScriptProcessor || audioCtx.createJavaScriptNode)
                .call(audioCtx, 1024, 1, 1);
            processor.onaudioprocess = function(event) {
                if (!isRecording) {
                    return;
                }
                var samples = event.inputBuffer.getChannelData(0);
                var pcm = new ArrayBuffer(2 * samples.length);
                var view = new DataView(pcm);
                for (var i = 0, offset = 0; i < samples.length; i++, offset += 2) {
                    // Clamp to [-1, 1]; asymmetric scaling keeps -1 at -32768.
                    var sample = Math.max(-1, Math.min(1, samples[i]));
                    view.setInt16(offset, sample < 0 ? 32768 * sample : 32767 * sample, true);
                }
                socket.emit("write-audio", pcm);
            };
            inputGain.connect(processor);
            processor.connect(audioCtx.destination);

            // Muted monitoring path so the user does not hear themselves.
            var muteGain = audioCtx.createGain();
            muteGain.gain.value = 0;
            inputGain.connect(muteGain);
            muteGain.connect(audioCtx.destination);
        };

        // Play a base64-encoded WAV payload through the shared <audio> element.
        var playTTS = function(base64Wav) {
            ttsPlayer.src = "data:audio/wav;base64," + base64Wav;
            ttsPlayer.play();
        };

        // Start/stop recording. Called from the mic button's click handler.
        // (button, socket, recordingBg, recordingMicColor, idleMicColor, sessionId, botId)
        api.toggleRecording = function(button, sock, recordingBg, recordingMicColor, idleMicColor, sessionId, botId) {
            // FIX: this runs on a user gesture, so resume the AudioContext
            // that Chrome created in the "suspended" state at page load.
            // Without this, no audio is processed and Chrome logs
            // "The AudioContext was not allowed to start".
            if (audioCtx.state === "suspended") {
                audioCtx.resume();
            }
            var micIcon = document.getElementsByClassName("fa-microphone")[0];
            if (button.classList.contains("recording")) {
                button.classList.remove("recording");
                isRecording = false;
                sock.emit("end-recording", {
                    session_id: sessionId,
                    bot_id: botId
                });
                document.getElementById("btnToggle").setAttribute("style", "background-color:transparent");
                if (micIcon) {
                    micIcon.setAttribute("style", "color:" + idleMicColor);
                }
            } else {
                button.classList.add("recording");
                isRecording = true;
                sock.emit("start-recording", {
                    numChannels: 1,
                    bps: 16,
                    fps: parseInt(audioCtx.sampleRate),
                    session_id: sessionId,
                    bot_id: botId
                });
                document.getElementById("btnToggle").setAttribute("style", "background-color:" + recordingBg);
                if (micIcon) {
                    micIcon.setAttribute("style", "color:" + recordingMicColor);
                }
            }
        };

        // Stop any TTS currently playing, then play the new payload; if
        // pause() throws (e.g. nothing loaded yet) still play the new clip.
        api.onAudioTTS = function(base64Wav) {
            try {
                ttsPlayer.pause();
                playTTS(base64Wav);
            } catch (err) {
                playTTS(base64Wav);
            }
        };

        // Capture the socket and request microphone access. Uses the legacy
        // navigator.getUserMedia with vendor-prefix fallbacks, as the
        // original did; 2nd/3rd parameters are accepted but unused.
        api.initAudio = function(sock, _recordingMicColor, _idleMicColor) {
            console.log("audio initiated");
            socket = sock;
            navigator.getUserMedia || (navigator.getUserMedia = navigator.webkitGetUserMedia || navigator.mozGetUserMedia);
            navigator.cancelAnimationFrame || (navigator.cancelAnimationFrame = navigator.webkitCancelAnimationFrame || navigator.mozCancelAnimationFrame);
            navigator.requestAnimationFrame || (navigator.requestAnimationFrame = navigator.webkitRequestAnimationFrame || navigator.mozRequestAnimationFrame);
            navigator.getUserMedia({
                audio: true
            }, onMicStream, function(err) {
                alert("Error getting audio");
                console.log(err);
            });
        };

        return api;
    }
};
warning/error 出现在第 var i = new AudioContext;
行。它以前也可以在 Google Chrome 浏览器上工作,但现在不工作了。 Google 开发者页面上的描述说 resume()
必须用于但我不确定我应该如何以及在何处执行此操作。
您应该可以在调用 play()
之前的某处调用 resume()
。重要的是在用户中调用它 action/event - 就像点击麦克风按钮一样。
Key Point: If an AudioContext is created prior to the document receiving a user gesture, it will be created in the "suspended" state, and you will need to call resume() after a user gesture is received.
来自 https://developers.google.com/web/updates/2017/09/autoplay-policy-changes
It used to work before on Google Chrome browser as well but now it's not working.
最近在 chrome 更新中实施了新政策。
你应该在某处调用 getAudioContext().resume();。
如果您的问题与访问麦克风时的 p5.js 有关,请在 setup 中执行此操作:
function setup() {
// p5.js setup hook: create and start the microphone input, then resume the
// shared AudioContext. Chrome creates the context "suspended" until it is
// resumed after a user gesture — resuming here is the workaround the answer
// proposes (see touchStarted below for the gesture-driven alternative).
mic = new p5.AudioIn();
mic.start();
getAudioContext().resume();
}
或在文件中添加 touchStarted 函数。您必须点击网页才能触发此函数。
function touchStarted() {
// p5.js hook fired on the first touch/click — a user gesture, which is when
// Chrome's autoplay policy allows a suspended AudioContext to be resumed.
getAudioContext().resume();
}