从浏览器中的网络摄像头获取 ReadableStream
Get ReadableStream from Webcam in Browser
我想在浏览器中将网络摄像头输入作为 ReadableStream 以管道传输到 WritableStream。我曾尝试使用 MediaRecorder API,但该流被分成单独的 blob,而我想要一个连续的流。我在想解决方案可能是将 MediaRecorder 块通过管道传输到统一缓冲区,并作为连续流从中读取,但我不确定如何让中间缓冲区工作。
// Record the webcam MediaStream in 1-second timeslices; each slice is
// delivered as a separate *encoded* Blob, which is exactly the chunked
// (non-continuous) behavior the question is trying to work around.
// NOTE(review): `stream`, `recorderOptions` and `writable` are assumed to
// be defined elsewhere — not visible in this snippet.
mediaRecorder = new MediaRecorder(stream, recorderOptions);
mediaRecorder.ondataavailable = handleDataAvailable;
mediaRecorder.start(1000);
// Fires once per timeslice with the recorded chunk in event.data.
async function handleDataAvailable(event) {
if (event.data.size > 0) {
const data: Blob = event.data;
// I think I need to pipe to an intermediate stream? Not sure how tho
// NOTE(review): this pipeTo promise is not awaited, and piping several
// independent Blob streams into one writable will fail after the first
// pipe closes the destination — part of the problem being asked about.
data.stream().pipeTo(writable);
}
}
目前我们无法真正访问 MediaStream 的原始数据。对于视频而言，我们所能得到的最接近的手段是 MediaRecorder API，但它会对数据进行编码，并且是按块（chunk）而不是按流的方式工作的。
但是，新的 MediaCapture Transform W3C 工作组正在制定一个 MediaStreamTrackProcessor 接口，它完全可以满足您的需求，并且在 Chrome 中已经可以通过 chrome://flags/#enable-experimental-web-platform-features
标志启用。
读取生成的流时，根据您传入的轨道（track）类型，您将可以访问由新的 WebCodecs API 引入的 VideoFrame 或 AudioFrame 对象。
if (window.MediaStreamTrackProcessor) {
  const track = getCanvasTrack();
  const processor = new MediaStreamTrackProcessor(track);
  const reader = processor.readable.getReader();
  consumeNext();
  // Pull one VideoFrame from the track's ReadableStream, dump its plane
  // data into the log element, then recurse until the stream reports done.
  function consumeNext() {
    reader.read().then(({ done, value }) => {
      // value is a VideoFrame; copy each of its planes into a Uint8Array
      const planeBuffers = value.planes.map((plane) => {
        const bytes = new Uint8Array(plane.length);
        plane.readInto(bytes);
        return bytes;
      });
      // release the frame as soon as we've copied what we need
      value.close();
      log.textContent = "planes data (15 first values):\n" +
        planeBuffers
          .map((bytes) => JSON.stringify([...bytes.subarray(0, 15)]))
          .join("\n");
      if (!done) {
        consumeNext();
      }
    });
  }
} else {
  console.error("your browser doesn't support this API yet");
}
// Capture a video track from the #canvas element and keep feeding it
// with random-noise frames while the track is live.
function getCanvasTrack() {
  // just some noise...
  const canvas = document.getElementById("canvas");
  const ctx = canvas.getContext("2d");
  const img = new ImageData(300, 150);
  // 32-bit view over the RGBA pixel buffer: one write per pixel
  const pixels = new Uint32Array(img.data.buffer);
  const track = canvas.captureStream().getVideoTracks()[0];
  paintNoise();
  return track;
  // Repaint the canvas with random opaque pixels on every animation
  // frame for as long as the captured track stays live.
  function paintNoise() {
    let i = 0;
    while (i < pixels.length) {
      pixels[i] = Math.random() * 0xFFFFFF + 0xFF000000;
      i++;
    }
    ctx.putImageData(img, 0, 0);
    if (track.readyState === "live") {
      requestAnimationFrame(paintNoise);
    }
  }
}
<pre id="log"></pre>
<p>
Source<br>
<canvas id="canvas"></canvas>
</p>
我想在浏览器中将网络摄像头输入作为 ReadableStream 以管道传输到 WritableStream。我曾尝试使用 MediaRecorder API,但该流被分成单独的 blob,而我想要一个连续的流。我在想解决方案可能是将 MediaRecorder 块通过管道传输到统一缓冲区,并作为连续流从中读取,但我不确定如何让中间缓冲区工作。
// Record the webcam MediaStream in 1-second timeslices; each slice is
// delivered as a separate *encoded* Blob, which is exactly the chunked
// (non-continuous) behavior the question is trying to work around.
// NOTE(review): `stream`, `recorderOptions` and `writable` are assumed to
// be defined elsewhere — not visible in this snippet.
mediaRecorder = new MediaRecorder(stream, recorderOptions);
mediaRecorder.ondataavailable = handleDataAvailable;
mediaRecorder.start(1000);
// Fires once per timeslice with the recorded chunk in event.data.
async function handleDataAvailable(event) {
if (event.data.size > 0) {
const data: Blob = event.data;
// I think I need to pipe to an intermediate stream? Not sure how tho
// NOTE(review): this pipeTo promise is not awaited, and piping several
// independent Blob streams into one writable will fail after the first
// pipe closes the destination — part of the problem being asked about.
data.stream().pipeTo(writable);
}
}
目前我们无法真正访问 MediaStream 的原始数据。对于视频而言，我们所能得到的最接近的手段是 MediaRecorder API，但它会对数据进行编码，并且是按块（chunk）而不是按流的方式工作的。
但是，新的 MediaCapture Transform W3C 工作组正在制定一个 MediaStreamTrackProcessor 接口，它完全可以满足您的需求，并且在 Chrome 中已经可以通过 chrome://flags/#enable-experimental-web-platform-features
标志启用。
读取生成的流时，根据您传入的轨道（track）类型，您将可以访问由新的 WebCodecs API 引入的 VideoFrame 或 AudioFrame 对象。
if (window.MediaStreamTrackProcessor) {
  const track = getCanvasTrack();
  const processor = new MediaStreamTrackProcessor(track);
  const reader = processor.readable.getReader();
  consumeNext();
  // Pull one VideoFrame from the track's ReadableStream, dump its plane
  // data into the log element, then recurse until the stream reports done.
  function consumeNext() {
    reader.read().then(({ done, value }) => {
      // value is a VideoFrame; copy each of its planes into a Uint8Array
      const planeBuffers = value.planes.map((plane) => {
        const bytes = new Uint8Array(plane.length);
        plane.readInto(bytes);
        return bytes;
      });
      // release the frame as soon as we've copied what we need
      value.close();
      log.textContent = "planes data (15 first values):\n" +
        planeBuffers
          .map((bytes) => JSON.stringify([...bytes.subarray(0, 15)]))
          .join("\n");
      if (!done) {
        consumeNext();
      }
    });
  }
} else {
  console.error("your browser doesn't support this API yet");
}
// Capture a video track from the #canvas element and keep feeding it
// with random-noise frames while the track is live.
function getCanvasTrack() {
  // just some noise...
  const canvas = document.getElementById("canvas");
  const ctx = canvas.getContext("2d");
  const img = new ImageData(300, 150);
  // 32-bit view over the RGBA pixel buffer: one write per pixel
  const pixels = new Uint32Array(img.data.buffer);
  const track = canvas.captureStream().getVideoTracks()[0];
  paintNoise();
  return track;
  // Repaint the canvas with random opaque pixels on every animation
  // frame for as long as the captured track stays live.
  function paintNoise() {
    let i = 0;
    while (i < pixels.length) {
      pixels[i] = Math.random() * 0xFFFFFF + 0xFF000000;
      i++;
    }
    ctx.putImageData(img, 0, 0);
    if (track.readyState === "live") {
      requestAnimationFrame(paintNoise);
    }
  }
}
<pre id="log"></pre>
<p>
Source<br>
<canvas id="canvas"></canvas>
</p>