Decode images in web worker

In our WebGL application I'm trying to load and decode texture images in a Web Worker to avoid rendering hiccups on the main thread. Using createImageBitmap in the worker and transferring the resulting image bitmap back to the main thread works well, but in Chrome this spins up three or more (perhaps depending on the number of cores?) separate worker threads (ThreadPoolForegroundWorker), which together with the main thread and my own worker makes five threads.

I'm guessing this causes the rendering disturbances I still see on my quad core, since I can see some inexplicably long times in the Performance panel of Chrome's DevTools.

So, can I somehow limit the number of workers used by createImageBitmap? Even if I transfer the images to the main thread as blobs or array buffers and invoke createImageBitmap from there, its workers still compete with my own worker and the main thread.

I've tried creating regular Images in the worker instead, rather than explicitly decoding them there, but Image is not defined in the worker context, and neither is document if I wanted to create them as elements. Regular Images aren't transferable either, so creating them on the main thread and transferring them to the worker doesn't seem feasible.
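For illustration, here's a minimal sketch of what is and isn't available in a worker context ('texture.png' is just a placeholder URL):

// worker.js -- sketch only
self.onmessage = async () => {
  // new Image();                   // ReferenceError: Image is not defined in a worker
  // document.createElement('img'); // ReferenceError: document is not defined either
  const blob = await (await fetch('texture.png')).blob();
  const bitmap = await createImageBitmap(blob); // the one built-in decode path workers do have
  postMessage(bitmap, [bitmap]);                // ImageBitmap is transferable
};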

Looking forward to any suggestions...

There's no reason to use createImageBitmap in a worker (well, see the bottom). The browser already decodes images in a separate thread; doing it in a worker gains you nothing. The bigger issue is that ImageBitmap has no way to know how you're going to use the image when you finally pass it to WebGL. If you ask for a format that's different from what ImageBitmap decoded into, then WebGL has to convert and/or decode again, and you can't give ImageBitmap enough information to tell it which format you want it decoded into.
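To make that concrete, here's a sketch of the mismatch: the bitmap will have been decoded into some browser-chosen format (typically RGBA or BGRA), so asking WebGL for anything else forces a conversion at upload time:

// `bitmap` is an ImageBitmap; requesting LUMINANCE here means the browser
// must convert whatever format the bitmap was decoded into during texImage2D
gl.texImage2D(gl.TEXTURE_2D, 0, gl.LUMINANCE, gl.LUMINANCE, gl.UNSIGNED_BYTE, bitmap);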

On top of that, WebGL in Chrome has to transfer the image data from the render process to the GPU process, which for a large image is a relatively big copy (1024×1024 in RGBA is 1024 × 1024 × 4 bytes = 4 MB).

A better API, IMO, would let you tell ImageBitmap which format you want and where you want it (CPU or GPU). That way the browser could prepare the image asynchronously, and no heavy work would be needed once it's done.
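For reference, here's a sketch of the options createImageBitmap does accept today; note there's nothing resembling a target-format or target-memory option (the commented-out line names a hypothetical option that does not exist):

// inside an async function; `blob` is an image Blob
const bitmap = await createImageBitmap(blob, {
  premultiplyAlpha: 'none',      // avoids one possible conversion at upload time
  colorSpaceConversion: 'none',  // avoids another
  imageOrientation: 'none',      // orientation handling is also controllable
  // format: 'RGB565',           // <-- no such option exists
});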

In any case, here's a test. If you uncheck "Update Texture" it still downloads and decodes the textures, it just never calls gl.texImage2D to upload them. In that case I see no jank (not proof that's the issue, but that's where I think it is).

const m4 = twgl.m4;
const gl = document.querySelector('#webgl').getContext('webgl');
const ctx = document.querySelector('#graph').getContext('2d');

let update = true;
document.querySelector('#update').addEventListener('change', function() {
  update = this.checked;
});

const vs = `
attribute vec4 position;
uniform mat4 matrix;
varying vec2 v_texcoord;
void main() {
  gl_Position = matrix * position;
  v_texcoord = position.xy;
}
`;

const fs = `
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D tex;
void main() {
  gl_FragColor = texture2D(tex, v_texcoord);
}
`;

const program = twgl.createProgram(gl, [vs, fs]);
const posLoc = gl.getAttribLocation(program, 'position');
const matLoc = gl.getUniformLocation(program, 'matrix');

const buf = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, buf);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
  0, 0,
  1, 0,
  0, 1,
  0, 1,
  1, 0,
  1, 1,
]), gl.STATIC_DRAW);

gl.enableVertexAttribArray(posLoc);
gl.vertexAttribPointer(posLoc, 2, gl.FLOAT, false, 0, 0);

const tex = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, tex);
gl.texImage2D(
    gl.TEXTURE_2D, 0, gl.RGBA, 1, 1, 0, gl.RGBA, gl.UNSIGNED_BYTE,
    new Uint8Array([0, 0, 255, 255]));
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);

const m = m4.identity();
let frameCount = 0;
let previousTime = 0;
let imgNdx = 0;
let imgAspect = 1;

const imageUrls = [
  'https://i.imgur.com/KjUybBD.png',
  'https://i.imgur.com/AyOufBk.jpg',
  'https://i.imgur.com/UKBsvV0.jpg',
  'https://i.imgur.com/TSiyiJv.jpg',
];

async function loadNextImage() {
  const url = `${imageUrls[imgNdx]}?cachebust=${performance.now()}`;
  imgNdx = (imgNdx + 1) % imageUrls.length;
  const res = await fetch(url, {mode: 'cors'});
  const blob = await res.blob();
  const bitmap = await createImageBitmap(blob, {
    premultiplyAlpha: 'none',
    colorSpaceConversion: 'none',
  });
  if (update) {
    gl.bindTexture(gl.TEXTURE_2D, tex);
    gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, bitmap);
    imgAspect = bitmap.width / bitmap.height;
  }
  setTimeout(loadNextImage, 1000);
}
loadNextImage();

function render(currentTime) {
  const deltaTime = currentTime - previousTime;
  previousTime = currentTime;
  
  {
    const {width, height} = ctx.canvas;
    const x = frameCount % width;
    const y = 1000 / deltaTime / 60 * height / 2;
    ctx.fillStyle = frameCount % (width * 2) < width ? 'red' : 'blue';
    ctx.clearRect(x, 0, 1, height);
    ctx.fillRect(x, y, 1, height);
    ctx.clearRect(0, 0, 30, 15);
    ctx.fillText((1000 / deltaTime).toFixed(1), 2, 10);
  }

  gl.useProgram(program);
  const dispAspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
  m4.scaling([1 / dispAspect, 1, 1], m);
  m4.rotateZ(m, currentTime * 0.001, m);
  m4.scale(m, [imgAspect, 1, 1], m);
  m4.translate(m, [-0.5, -0.5, 0], m);
  gl.uniformMatrix4fv(matLoc, false, m);
  gl.drawArrays(gl.TRIANGLES, 0, 6);
  
  ++frameCount;
  requestAnimationFrame(render);
}
requestAnimationFrame(render);
canvas { border: 1px solid black; margin: 2px; }
#ui { position: absolute; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<div id="ui"><input type="checkbox" id="update" checked><label for="update">Update Texture</label></div>
<canvas id="webgl"></canvas>
<canvas id="graph"></canvas>

I'm pretty sure the only way you can guarantee no jank is to decode the images yourself in a worker, transfer the pixels to the main thread as an ArrayBuffer, and upload them to WebGL a few rows per frame with gl.texSubImage2D.

const m4 = twgl.m4;
const gl = document.querySelector('#webgl').getContext('webgl');
const ctx = document.querySelector('#graph').getContext('2d');

const vs = `
attribute vec4 position;
uniform mat4 matrix;
varying vec2 v_texcoord;
void main() {
  gl_Position = matrix * position;
  v_texcoord = position.xy;
}
`;

const fs = `
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D tex;
void main() {
  gl_FragColor = texture2D(tex, v_texcoord);
}
`;

const program = twgl.createProgram(gl, [vs, fs]);
const posLoc = gl.getAttribLocation(program, 'position');
const matLoc = gl.getUniformLocation(program, 'matrix');

const buf = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, buf);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
  0, 0,
  1, 0,
  0, 1,
  0, 1,
  1, 0,
  1, 1,
]), gl.STATIC_DRAW);

gl.enableVertexAttribArray(posLoc);
gl.vertexAttribPointer(posLoc, 2, gl.FLOAT, false, 0, 0);

function createTexture(gl) {
  const tex = gl.createTexture();
  gl.bindTexture(gl.TEXTURE_2D, tex);
  gl.texImage2D(
      gl.TEXTURE_2D, 0, gl.RGBA, 1, 1, 0, gl.RGBA, gl.UNSIGNED_BYTE,
      new Uint8Array([0, 0, 255, 255]));
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
  return tex;
}

let drawingTex = createTexture(gl);
let loadingTex = createTexture(gl);

const m = m4.identity();
let frameCount = 0;
let previousTime = 0;

const workerScript = `
const ctx = new OffscreenCanvas(1, 1).getContext('2d');
let imgNdx = 0;
let imgAspect = 1;

const imageUrls = [
  'https://i.imgur.com/KjUybBD.png',
  'https://i.imgur.com/AyOufBk.jpg',
  'https://i.imgur.com/UKBsvV0.jpg',
  'https://i.imgur.com/TSiyiJv.jpg',
];

async function loadNextImage() {
  // note the \${...} escapes: these must interpolate inside the worker, not in this outer template literal
  const url = \`\${imageUrls[imgNdx]}?cachebust=\${performance.now()}\`;
  imgNdx = (imgNdx + 1) % imageUrls.length;
  const res = await fetch(url, {mode: 'cors'});
  const blob = await res.blob();
  const bitmap = await createImageBitmap(blob, {
    premultiplyAlpha: 'none',
    colorSpaceConversion: 'none',
  });
  ctx.canvas.width = bitmap.width;
  ctx.canvas.height = bitmap.height;
  ctx.drawImage(bitmap, 0, 0);
  const imgData = ctx.getImageData(0, 0, ctx.canvas.width, ctx.canvas.height);
  const data = new Uint8Array(imgData.data);
  postMessage({
    width: imgData.width,
    height: imgData.height,
    data: data.buffer,
  }, [data.buffer]);
}

onmessage = loadNextImage;
`;
const blob = new Blob([workerScript], {type: 'application/javascript'});
const worker = new Worker(URL.createObjectURL(blob));
let imgAspect = 1;
worker.onmessage = async(e) => {
  const {width, height, data} = e.data;
  
  gl.bindTexture(gl.TEXTURE_2D, loadingTex);
  // allocate the full-size texture once; the loop below fills it a few rows at a time
  gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, width, height, 0, gl.RGBA, gl.UNSIGNED_BYTE, null);
  
  const maxRows = 20;
  for (let y = 0; y < height; y += maxRows) {
    const rows = Math.min(maxRows, height - y);
    gl.bindTexture(gl.TEXTURE_2D, loadingTex);
    gl.texSubImage2D(gl.TEXTURE_2D, 0, 0, y, width, rows, gl.RGBA, gl.UNSIGNED_BYTE, new Uint8Array(data, y * width * 4, rows * width * 4));  
    await waitRAF();
  }
  const temp = loadingTex;
  loadingTex = drawingTex;
  drawingTex = temp;
  imgAspect = width / height;
  await waitMS(1000);
  worker.postMessage('');
};
worker.postMessage('');

function waitRAF() {
  return new Promise(resolve => requestAnimationFrame(resolve));
}

function waitMS(ms = 0) {
  return new Promise(resolve => setTimeout(resolve, ms));
}

function render(currentTime) {
  const deltaTime = currentTime - previousTime;
  previousTime = currentTime;
  
  {
    const {width, height} = ctx.canvas;
    const x = frameCount % width;
    const y = 1000 / deltaTime / 60 * height / 2;
    ctx.fillStyle = frameCount % (width * 2) < width ? 'red' : 'blue';
    ctx.clearRect(x, 0, 1, height);
    ctx.fillRect(x, y, 1, height);
    ctx.clearRect(0, 0, 30, 15);
    ctx.fillText((1000 / deltaTime).toFixed(1), 2, 10);
  }

  gl.useProgram(program);
  const dispAspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
  m4.scaling([1 / dispAspect, 1, 1], m);
  m4.rotateZ(m, currentTime * 0.001, m);
  m4.scale(m, [imgAspect, 1, 1], m);
  m4.translate(m, [-0.5, -0.5, 0], m);
  gl.bindTexture(gl.TEXTURE_2D, drawingTex);
  gl.uniformMatrix4fv(matLoc, false, m);
  gl.drawArrays(gl.TRIANGLES, 0, 6);
  
  ++frameCount;
  requestAnimationFrame(render);
}
requestAnimationFrame(render);
canvas { border: 1px solid black; margin: 2px; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas id="webgl"></canvas>
<canvas id="graph"></canvas>

Note: I have no idea if this will work either. There are a few scary parts, and things that are browser-implementation-defined:

  1. What is the performance cost of resizing a canvas? The code resizes the OffscreenCanvas in the worker. That could be a heavy operation with GPU repercussions.

  2. What is the performance of drawing a bitmap into a canvas? Again, potentially big GPU performance issues, since the browser has to transfer the image to the GPU in order to draw it into a GPU-backed 2D canvas.

  3. What is the performance of getImageData? Again, the browser potentially has to stall the GPU and read back GPU memory to get the image data.

  4. Resizing the texture might hurt performance.

  5. Only Chrome currently supports OffscreenCanvas.

Points 1, 2, 3, and 5 could all be solved by decoding the JPG/PNG yourself, although it really sucks that the browser already contains code to decode images; you just can't access that decoding code in any useful way.

As for 4, if it turns out to be an issue, it could be solved by allocating a texture at the maximum image size up front and then copying each smaller image into a rectangular sub-region (sketched below). Assuming that's an issue at all.
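Here's a minimal sketch of that allocate-once idea, separate from the examples; MAX_W and MAX_H are hypothetical app-chosen limits:

// allocate storage once at the maximum size so the texture is never reallocated
const MAX_W = 2048;  // hypothetical limits
const MAX_H = 2048;
const bigTex = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, bigTex);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, MAX_W, MAX_H, 0, gl.RGBA, gl.UNSIGNED_BYTE, null);

function uploadInto(gl, data, width, height) {
  // copy the smaller image into the top-left corner; no reallocation happens
  gl.bindTexture(gl.TEXTURE_2D, bigTex);
  gl.texSubImage2D(gl.TEXTURE_2D, 0, 0, 0, width, height, gl.RGBA, gl.UNSIGNED_BYTE, data);
  // the drawing code then scales texcoords by (width / MAX_W, height / MAX_H)
}

The full example below decodes the JPG/PNG in the worker itself: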

const m4 = twgl.m4;
const gl = document.querySelector('#webgl').getContext('webgl');
const ctx = document.querySelector('#graph').getContext('2d');

const vs = `
attribute vec4 position;
uniform mat4 matrix;
varying vec2 v_texcoord;
void main() {
  gl_Position = matrix * position;
  v_texcoord = position.xy;
}
`;

const fs = `
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D tex;
void main() {
  gl_FragColor = texture2D(tex, v_texcoord);
}
`;

const program = twgl.createProgram(gl, [vs, fs]);
const posLoc = gl.getAttribLocation(program, 'position');
const matLoc = gl.getUniformLocation(program, 'matrix');

const buf = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, buf);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
  0, 0,
  1, 0,
  0, 1,
  0, 1,
  1, 0,
  1, 1,
]), gl.STATIC_DRAW);

gl.enableVertexAttribArray(posLoc);
gl.vertexAttribPointer(posLoc, 2, gl.FLOAT, false, 0, 0);

function createTexture(gl) {
  const tex = gl.createTexture();
  gl.bindTexture(gl.TEXTURE_2D, tex);
  gl.texImage2D(
      gl.TEXTURE_2D, 0, gl.RGBA, 1, 1, 0, gl.RGBA, gl.UNSIGNED_BYTE,
      new Uint8Array([0, 0, 255, 255]));
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
  gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
  return tex;
}

let drawingTex = createTexture(gl);
let loadingTex = createTexture(gl);

const m = m4.identity();
let frameCount = 0;
let previousTime = 0;

const workerScript = `
importScripts(
    // from https://github.com/eugeneware/jpeg-js
    'https://greggman.github.io/doodles/js/JPG-decoder.js',
    // from https://github.com/photopea/UPNG.js
    'https://greggman.github.io/doodles/js/UPNG.js',
);

let imgNdx = 0;
let imgAspect = 1;

const imageUrls = [
  'https://i.imgur.com/KjUybBD.png',
  'https://i.imgur.com/AyOufBk.jpg',
  'https://i.imgur.com/UKBsvV0.jpg',
  'https://i.imgur.com/TSiyiJv.jpg',
];

function decodePNG(arraybuffer) {
  return UPNG.decode(arraybuffer)
}

function decodeJPG(arrayBuffer) {
  return decode(new Uint8Array(arrayBuffer), true);
}

const decoders = {
  'image/png': decodePNG,
  'image/jpeg': decodeJPG,
  'image/jpg': decodeJPG,
};

async function loadNextImage() {
  // note the \${...} escapes: these must interpolate inside the worker, not in this outer template literal
  const url = \`\${imageUrls[imgNdx]}?cachebust=\${performance.now()}\`;
  imgNdx = (imgNdx + 1) % imageUrls.length;
  const res = await fetch(url, {mode: 'cors'});
  const arrayBuffer = await res.arrayBuffer();
  const type = res.headers.get('Content-Type');
  let decoder = decoders[type];
  if (!decoder) {
    console.error('unknown image type:', type);
    return;  // bail out rather than calling an undefined decoder
  }
  const imgData = decoder(arrayBuffer);
  postMessage({
    width: imgData.width,
    height: imgData.height,
    arrayBuffer: imgData.data.buffer,
  }, [imgData.data.buffer]);
}

onmessage = loadNextImage;
`;
const blob = new Blob([workerScript], {type: 'application/javascript'});
const worker = new Worker(URL.createObjectURL(blob));
let imgAspect = 1;
worker.onmessage = async(e) => {
  const {width, height, arrayBuffer} = e.data;
  
  gl.bindTexture(gl.TEXTURE_2D, loadingTex);
  // allocate the full-size texture once; the loop below fills it a few rows at a time
  gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, width, height, 0, gl.RGBA, gl.UNSIGNED_BYTE, null);
  
  const maxRows = 20;
  for (let y = 0; y < height; y += maxRows) {
    const rows = Math.min(maxRows, height - y);
    gl.bindTexture(gl.TEXTURE_2D, loadingTex);
    gl.texSubImage2D(gl.TEXTURE_2D, 0, 0, y, width, rows, gl.RGBA, gl.UNSIGNED_BYTE, new Uint8Array(arrayBuffer, y * width * 4, rows * width * 4));  
    await waitRAF();
  }
  const temp = loadingTex;
  loadingTex = drawingTex;
  drawingTex = temp;
  imgAspect = width / height;
  await waitMS(1000);
  worker.postMessage('');
};
worker.postMessage('');

function waitRAF() {
  return new Promise(resolve => requestAnimationFrame(resolve));
}

function waitMS(ms = 0) {
  return new Promise(resolve => setTimeout(resolve, ms));
}

function render(currentTime) {
  const deltaTime = currentTime - previousTime;
  previousTime = currentTime;
  
  {
    const {width, height} = ctx.canvas;
    const x = frameCount % width;
    const y = 1000 / deltaTime / 60 * height / 2;
    ctx.fillStyle = frameCount % (width * 2) < width ? 'red' : 'blue';
    ctx.clearRect(x, 0, 1, height);
    ctx.fillRect(x, y, 1, height);
    ctx.clearRect(0, 0, 30, 15);
    ctx.fillText((1000 / deltaTime).toFixed(1), 2, 10);
  }

  gl.useProgram(program);
  const dispAspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
  m4.scaling([1 / dispAspect, 1, 1], m);
  m4.rotateZ(m, currentTime * 0.001, m);
  m4.scale(m, [imgAspect, 1, 1], m);
  m4.translate(m, [-0.5, -0.5, 0], m);
  gl.bindTexture(gl.TEXTURE_2D, drawingTex);
  gl.uniformMatrix4fv(matLoc, false, m);
  gl.drawArrays(gl.TRIANGLES, 0, 6);
  
  ++frameCount;
  requestAnimationFrame(render);
}
requestAnimationFrame(render);
canvas { border: 1px solid black; margin: 2px; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas id="webgl"></canvas>
<canvas id="graph"></canvas>

Note: the JPEG decoder is slow. If you find or make a faster one, please post a comment.


Update

I just want to say that ImageBitmap should be fast enough, and that some of my comments above about it not having enough information may not be 100% correct.

My current understanding is that the entire point of ImageBitmap is to make uploads fast. It's supposed to work by you handing it a blob, and it asynchronously loads that image into the GPU. When you call texImage2D with it, the browser can "blit" (render with the GPU) that image into your texture. I have no idea why there's jank in the first example given that, but I see jank every 6 or so images.

另一方面,虽然将图像上传到 GPU 是 ImageBitmap 的重点,但浏览器不需要上传到 GPU。即使用户没有 GPU,ImageBitmap 仍然应该工作。关键是浏览器决定如何实现该功能以及它是快还是慢或无卡顿完全取决于浏览器。