Correct handling of React Hooks for microphone audio
I'm trying to write a React hook that streams microphone audio into an AudioContext for analysis with Meyda.
I've managed to get the stream working and can extract the data I want. However, I'm having trouble tearing the audio down again.
I'd be grateful if someone could give me some guidance on setting up this hook correctly.
Currently, when I navigate away from a page that uses this hook, I get the following warning:
Warning: Can't perform a React state update on an unmounted component. This is a no-op, but it indicates a memory leak in your application. To fix, cancel all subscriptions and asynchronous tasks in a useEffect cleanup function.
I've tried adding a cleanup function at the end of the hook, but my attempts usually ended with the audio cutting out immediately or with other strange errors.
Microphone audio hook with Meyda analyzer
import { useState, useEffect, useRef } from 'react';
import Meyda from 'meyda';
// Deep-equality helper; assumed here to be lodash's isEqual.
import isEqual from 'lodash/isEqual';

export const useMeydaAnalyser = () => {
    const [running, setRunning] = useState(false);
    const [features, setFeatures] = useState(null);
    const featuresRef = useRef(features);
    const audioContext = useRef(new AudioContext());

    const getMedia = async () => {
        try {
            return await navigator
                .mediaDevices
                .getUserMedia({audio: true, video: false});
        } catch (err) {
            console.log('Error:', err);
        }
    };

    useEffect(
        () => {
            const audio = audioContext.current;
            let unmounted = false;
            if (!running) {
                getMedia().then(stream => {
                    if (unmounted) return;
                    setRunning(true);
                    const source = audio.createMediaStreamSource(stream);
                    const analyser = Meyda.createMeydaAnalyzer({
                        audioContext: audio,
                        source: source,
                        bufferSize: 1024,
                        featureExtractors: [
                            'amplitudeSpectrum',
                            'mfcc',
                            'rms',
                        ],
                        callback: nextFeatures => {
                            // Only push new state when the features actually changed.
                            if (!isEqual(featuresRef.current, nextFeatures)) {
                                setFeatures(nextFeatures);
                            }
                        },
                    });
                    analyser.start();
                });
            }
            return () => {
                unmounted = true;
            };
        },
        [running, audioContext],
    );

    // Keep a ref in sync with the latest features so the Meyda callback can compare against them.
    useEffect(
        () => {
            featuresRef.current = features;
        },
        [features],
    );

    return features;
};
Audio view
import React, {useEffect} from 'react';
import { useMeydaAnalyser } from '../hooks/use-meyda-audio';

const AudioViewDemo = () => {
    const audioContext = new AudioContext();
    const features = useMeydaAnalyser(audioContext);

    useEffect(() => {
        // Todo: Handle audio features
        console.log(features);
        // setAudioData(features);
    }, [features]);

    return (
        <div>
            RMS: {features && features.rms}
        </div>
    );
};

export default AudioViewDemo;
The warning is most likely caused by not closing the AudioContext. You need to close the AudioContext in the cleanup function.
Note that you should check whether the AudioContext is already closed before using it: getMedia is asynchronous, so if the component unmounts shortly after mounting, the AudioContext may already be closed by the time it is used.
import { useState, useEffect } from 'react'
import Meyda from 'meyda'

const getMedia = async () => {
    try {
        return await navigator.mediaDevices.getUserMedia({
            audio: true,
            video: false,
        })
    } catch (err) {
        console.log('Error:', err)
    }
}

const useMeydaAnalyser = () => {
    const [analyser, setAnalyser] = useState(null)
    const [running, setRunning] = useState(false)
    const [features, setFeatures] = useState(null)

    useEffect(() => {
        const audioContext = new AudioContext()

        let newAnalyser
        getMedia().then(stream => {
            // getMedia is async, so the component may already have unmounted
            // (and the context been closed) by the time the stream arrives.
            if (audioContext.state === 'closed') {
                return
            }
            const source = audioContext.createMediaStreamSource(stream)
            newAnalyser = Meyda.createMeydaAnalyzer({
                audioContext: audioContext,
                source: source,
                bufferSize: 1024,
                featureExtractors: ['amplitudeSpectrum', 'mfcc', 'rms'],
                callback: features => {
                    console.log(features)
                    setFeatures(features)
                },
            })
            setAnalyser(newAnalyser)
        })

        return () => {
            // Stop the analyzer and close the AudioContext on unmount.
            if (newAnalyser) {
                newAnalyser.stop()
            }
            if (audioContext) {
                audioContext.close()
            }
        }
    }, [])

    useEffect(() => {
        // Start or stop the analyzer whenever `running` changes.
        if (analyser) {
            if (running) {
                analyser.start()
            } else {
                analyser.stop()
            }
        }
    }, [running, analyser])

    return [running, setRunning, features]
}
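Since the revised hook now returns [running, setRunning, features] rather than just features, the original AudioViewDemo would need a small change. Here is a minimal sketch of how a component might consume it; the export of useMeydaAnalyser from '../hooks/use-meyda-audio' and the start/stop button are illustrative assumptions, not part of the answer above.

import React from 'react'
import { useMeydaAnalyser } from '../hooks/use-meyda-audio' // assumed export path

const AudioViewDemo = () => {
    const [running, setRunning, features] = useMeydaAnalyser()

    return (
        <div>
            {/* Toggling `running` starts/stops the Meyda analyzer via the hook's second effect. */}
            <button onClick={() => setRunning(!running)}>
                {running ? 'Stop' : 'Start'}
            </button>
            <div>RMS: {features && features.rms}</div>
        </div>
    )
}

export default AudioViewDemo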