Chromium Audio Worklet 音频丢失(dropout)问题:开始时出现若干次,之后偶尔以 30 秒为间隔出现

Chromium Audio Worklet dropouts. Some initially, then occasionally in 30s intervals

我注意到在将 Chromium Audio worklet 与 MediaDevices.getUserMedia 媒体流(麦克风)一起使用时,会出现频繁且可重复的丢失。这不是 100% 可重现的,但是当它们确实发生时它们确实倾向于遵循模式:

(每次尝试的时间范围略有不同)

  1. 0:00 -> 0:00.2:没有收到样本。 (能够 100% 重现,但这感觉像是一个单独的问题,我目前不一定要追踪)
  2. 0:00.2 -> 0:00.5:样本已收到
  3. 0:00.5 -> 0:00.6 :发生丢失,没有收到样本(能够重现约 20% 的时间)。
  4. 0:00.6 -> 0:30.0:样本已收到
  5. 从此以后每隔 30 秒,偶尔会发生丢失(dropout),往往最常发生在第一个 30 秒标记处。(第一个 30 秒标记处我也能重现大约 20% 的情况)。

这是一个说明行为的代码笔: https://codepen.io/GJStevenson/pen/GRErPbm

// Toggle button plus module-level mutable state shared by the demo functions below.
const startRecordingButton = document.getElementById('startRecordingButton');
let mediaStreamSourceNode; // MediaStreamAudioSourceNode for the active mic stream; set in createRecordingPipeline, cleared in stopRecording
let isRecording = false;   // flipped by toggleRecording()
let timer;                 // setInterval handle for the once-per-second elapsed-time logger

// Source for the AudioWorkletProcessor, inlined as a string so it can be
// loaded from a Blob URL (no separate worklet file needed on CodePen).
const workletString = `
// Format a time in seconds as "h:m:s.mmm".
// Uses Math.floor instead of toFixed: toFixed(2) ROUNDS (and returns a
// string), so the previous version displayed an extra minute/hour near unit
// boundaries (e.g. 59.7s rendered as "0:1:59.700"). Milliseconds are
// zero-padded to three digits so "0:0:1.050" is unambiguous.
const formatTimeString = s => {
    const ms = String(Math.trunc(s * 1000) % 1000).padStart(3, '0');
    const ss = Math.trunc(s) % 60;
    const mm = Math.floor(s / 60) % 60;
    const hh = Math.floor(s / 3600);
    return hh + ":" + mm + ":" + ss + "." + ms;
};

class RecorderWorklet extends AudioWorkletProcessor {
    constructor(options) {
        super(options);
        // Sample rate is supplied by the main thread via an 'init' message.
        this.sampleRate = 0;
        this.sampleCount = 0;

        this.port.onmessage = event => {
            if (event.data.message === 'init') {
                this.sampleRate = event.data.sampleRate;
            }
        };
    }

    // Called once per render quantum; this demo treats any exact-zero sample
    // in channel 0 of input 0 as evidence of a dropout and logs its timestamp.
    process(inputs) {
        if (inputs.length > 0 && inputs[0].length > 0) {
            this.sampleCount += inputs[0][0].length;

            if (inputs[0][0].includes(0)) {
                console.log('Dropped Samples at: ', formatTimeString(this.sampleCount/this.sampleRate), ' : ', ...inputs[0][0])
            }
        }
        // Returning true keeps the processor alive even with no active inputs.
        return true;
    }
}

registerProcessor('recorder-worklet', RecorderWorklet);
`;

// Resolve to the subset of enumerated media devices that are audio inputs
// (microphones).
async function listAudioInputs() {
    const allDevices = await navigator.mediaDevices.enumerateDevices();
    const inputs = [];
    for (const candidate of allDevices) {
        if (candidate.kind === 'audioinput') {
            inputs.push(candidate);
        }
    }
    return inputs;
}

// Pick the audio input whose deviceId is the literal 'default'; otherwise
// optionally fall back to the first available input. Resolves to undefined
// when nothing matches.
async function getDefaultInput(fallbackToFirstInput = true) {
    const inputs = await listAudioInputs();
    const byDefaultId = inputs.find((input) => input.deviceId === 'default');
    if (byDefaultId !== undefined) {
        return byDefaultId;
    }
    if (fallbackToFirstInput && inputs.length > 0) {
        return inputs[0];
    }
    return undefined;
}

// Open a microphone capture stream constrained to the given input device.
async function getAudioStream(device) {
    return navigator.mediaDevices.getUserMedia({
        audio: { deviceId: device.deviceId },
    });
}

// Build the mic -> AudioWorklet pipeline for the given input device.
// Sets the module-level mediaStreamSourceNode as a side effect.
async function createRecordingPipeline(device) {
    const stream = await getAudioStream(device);
    const audioTracks = stream.getAudioTracks();

    // Match the context rate to the capture rate to avoid resampling.
    const sampleRate = audioTracks[0].getSettings().sampleRate;
    console.log('Sample Rate: ', sampleRate);
    const context = new AudioContext({ sampleRate, latencyHint: 'interactive' });

    const blob = new Blob([workletString], { type: 'text/javascript' });
    const workletUrl = URL.createObjectURL(blob);
    try {
        await context.audioWorklet.addModule(workletUrl);
    } finally {
        // Release the Blob URL once the module load settles; the previous
        // version leaked one object URL per pipeline.
        URL.revokeObjectURL(workletUrl);
    }
    const workletNode = new AudioWorkletNode(context, 'recorder-worklet');

    workletNode.port.postMessage({
        message: 'init',
        sampleRate: sampleRate
    });

    // Do NOT connect the worklet to context.destination. Feeding the unused
    // destination 30 seconds of silence makes Chromium demote the AudioWorklet
    // thread from realtime to normal priority, which caused the periodic
    // dropouts reported above (crbug.com/1248169). The worklet runs fine as a
    // leaf node.
    mediaStreamSourceNode = context.createMediaStreamSource(stream);
    mediaStreamSourceNode.connect(workletNode);
}

/**
 * Format an elapsed time in seconds as "h:m:s.mmm".
 *
 * Fixes two defects in the previous version:
 *  - toFixed(2) ROUNDS (and returns a string), so times near a minute/hour
 *    boundary gained a unit: 59.7s rendered as "0:1:59.700".
 *  - milliseconds were not zero-padded, so "0:0:1.50" (50 ms) was ambiguous.
 *
 * @param {number} s - elapsed time in seconds (non-negative)
 * @returns {string} formatted "h:m:s.mmm" string
 */
function formatTimeString(s) {
    const ms = String(Math.trunc(s * 1000) % 1000).padStart(3, '0');
    const ss = Math.trunc(s) % 60;
    const mm = Math.floor(s / 60) % 60;
    const hh = Math.floor(s / 3600);
    return hh + ":" + mm + ":" + ss + "." + ms;
}

// Start capture: build the pipeline for the default input, then begin a
// once-per-second elapsed-time log and switch the button label.
async function startRecording() {
    const input = await getDefaultInput();
    await createRecordingPipeline(input);

    let secondsElapsed = 0;
    timer = setInterval(() => {
        secondsElapsed += 1;
        console.log('Time: ', formatTimeString(secondsElapsed));
    }, 1000);

    startRecordingButton.innerText = "Stop Recording";
}

// Stop capture: end every mic track, detach the source node, clear the
// elapsed-time logger, and restore the button label.
async function stopRecording() {
    if (mediaStreamSourceNode) {
        for (const track of mediaStreamSourceNode.mediaStream.getAudioTracks()) {
            track.stop();
        }
        mediaStreamSourceNode.disconnect();
    }
    mediaStreamSourceNode = null;
    clearInterval(timer);

    startRecordingButton.innerText = "Start Recording";
}

// Button click handler: start when idle, stop when recording. The flag only
// updates after the awaited call completes, matching the original flow (an
// exception from start/stop skips the flip in both versions).
async function toggleRecording() {
    const next = !isRecording;
    if (next) {
        await startRecording();
    } else {
        await stopRecording();
    }
    isRecording = next;
}
<button onclick="toggleRecording()" id="startRecordingButton">Start Recording</button>

丢弃的样本在控制台中看起来像这样:

有什么问题吗?

编辑:运行 chrome://tracing 以捕获掉落样本的踪迹。 https://www.dropbox.com/s/veg1vgsg9nn03ty/trace_dropped-sample-trace.json.gz?dl=0。丢弃的样本发生在 ~.53s -> .61s

在打开 Chromium 问题后得到了一些答案。总结来自 https://bugs.chromium.org/p/chromium/issues/detail?id=1248169 的一些回复:

0:00 -> 0:00.2 : No samples are received. (Able to reproduce 100% of the time, but this feels like a separate issue that I wasn't necessarily tracking down at the moment)

这些前几个零样本是底层缓冲区的初始值,并且是预期的。

0:00.5 -> 0:00.6 : Dropout occurs, no samples received (Able to reproduce ~20% of the time).

如果 AudioWorklet 线程以 RT 优先级运行,则不应发生这种情况。这很难重现,所以暂时搁置。

Every 30s from here on out, occasionally dropouts will occur. Tends to happen the most often at the first 30s mark. (First 30s mark I can reproduce around 20% of the time as well).

此丢失是连接了目标节点(destination)但从未向其输出有效音频的结果。静默 30 秒后,AudioWorklet 线程被切换到低优先级,从而导致 dropout。

因此,将:

mediaStreamSourceNode.connect(workletNode)
                     .connect(context.destination);

mediaStreamSourceNode.connect(workletNode);

已解决问题。