使用 2 个 renderTargets 在 three.js 中进行持久性后处理

Persistence postprocessing in three.js with 2 renderTargets

我正在尝试实现这个效果(this effect,见链接视频)。如视频中所述,我必须创建 2 个额外的 renderTarget,把当前图像与 renderTarget #1 混合后输出到 renderTarget #2,但我在 three.js 中实现它时遇到了困难。你可以在这里查看我的代码:

// --- Basic three.js setup: main scene, perspective camera, renderer, clock ---
let w = window.innerWidth
let h = window.innerHeight

const scene = new THREE.Scene()
const camera = new THREE.PerspectiveCamera(60, w / h, 0.1, 1000)
const renderer = new THREE.WebGLRenderer()
const clock = new THREE.Clock()

// Ping-pong pair of float render targets: one is read while the other is
// written, then the roles are exchanged each frame by swapBuffers().
let frontBuffer = createRenderTarget()
let backBuffer  = frontBuffer.clone()
let readBuffer  = frontBuffer
let writeBuffer = backBuffer

// Fullscreen-quad scene used to put a render-target texture on screen.
// NOTE(review): top/bottom are passed as (-h/2, h/2), i.e. vertically
// flipped relative to the usual (h/2, -h/2); the mesh's PI rotation
// below compensates for that.
const renderScene = new THREE.Scene()
const renderCamera = new THREE.OrthographicCamera(-w / 2, w / 2, -h / 2, h / 2, -1000, 1000)
const renderMaterial = new THREE.ShaderMaterial({
  uniforms: {
    // Texture of the buffer the main scene is rendered into. NOTE(review):
    // this captures the texture reference once, at material-creation time.
    tDiffuse: { value: writeBuffer.texture }
  },
  vertexShader: `
    varying vec2 vUv;

    void main () {
      gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);

      vUv = uv;
    }
  `,
  fragmentShader: `
    uniform sampler2D tDiffuse;

    varying vec2 vUv;

    void main () {
      gl_FragColor = texture2D(tDiffuse, vUv);
    }
  `
})
// Screen-sized plane carrying the pass-through shader above.
const renderMesh = new THREE.Mesh(
  new THREE.PlaneBufferGeometry(w, h),
  renderMaterial
)
renderMesh.rotation.x += Math.PI
renderScene.add(renderMesh)
  
let timeElapsed = 0
let shape

setMainScene()
renderFrame()

// Creates a screen-sized WebGLRenderTarget for postprocessing: float (or
// half-float) RGBA, nearest filtering, clamped wrapping, no mipmaps.
function createRenderTarget () {
  // Prefer full-float textures; fall back to half float when the
  // OES_texture_float_linear extension is unavailable.
  let type = THREE.FloatType
  if( renderer.extensions.get( 'OES_texture_float_linear' ) === null ) type = THREE.HalfFloatType

  let renderTarget = new THREE.WebGLRenderTarget( 1, 1, {
    type,
    wrapS: THREE.ClampToEdgeWrapping,
    wrapT: THREE.ClampToEdgeWrapping,
    format: THREE.RGBAFormat,
    minFilter: THREE.NearestFilter,
    magFilter: THREE.NearestFilter,
    stencilBuffer: false,
    depthBuffer: true
  })
  
  renderTarget.texture.generateMipmaps = false
  // Created at 1x1 above, resized here to the actual viewport size.
  renderTarget.setSize(w, h)

  return renderTarget
}

// Exchange the roles of the ping-pong buffers: whichever target was the
// read source becomes the write destination for the next frame, and
// vice versa.
function swapBuffers () {
  const readWasFront = (readBuffer === frontBuffer)
  readBuffer  = readWasFront ? backBuffer  : frontBuffer
  writeBuffer = readWasFront ? frontBuffer : backBuffer
}

// Configures the renderer and its canvas, positions the camera, and adds
// the animated red sphere to the main scene.
function setMainScene () {
  renderer.setSize(w, h)
  renderer.setClearColor(0x111111)
  renderer.setPixelRatio(window.devicePixelRatio || 1)
  document.body.appendChild(renderer.domElement)

  camera.position.set(0, 20, 100)
  camera.lookAt(new THREE.Vector3())

  shape = new THREE.Mesh(
    new THREE.SphereBufferGeometry(10, 20, 20),
    new THREE.MeshBasicMaterial({ color: 0xFF0000 })
  )
  scene.add(shape)
}

// Per-frame loop: render the main scene into writeBuffer, draw the
// fullscreen quad to the canvas, then swap the ping-pong buffers.
// NOTE(review): renderMaterial's tDiffuse uniform is never re-assigned
// after swapBuffers(), so the quad keeps sampling the same texture and no
// blending with the previous frame occurs — this is the problem the
// question describes.
function renderFrame () {
  requestAnimationFrame(renderFrame)

  // Off-screen pass (three.js r88 render-to-target signature).
  renderer.render(scene, camera, writeBuffer)
  // On-screen pass: fullscreen quad.
  renderer.render(renderScene, renderCamera)
  swapBuffers()
  
  timeElapsed += clock.getDelta()
  
  // Animate the sphere along a Lissajous-like path.
  shape.position.x = Math.sin(timeElapsed) * 20.0
  shape.position.y = Math.cos(timeElapsed * Math.PI) * 20.0
}
* { margin: 0; padding: 0; }
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/88/three.min.js"></script>

首先,我创建了两个额外的帧缓冲区:

// Excerpt: the ping-pong framebuffer pair and the factory that creates a
// screen-sized float render target.
let frontBuffer = createRenderTarget()
let backBuffer  = frontBuffer.clone()
let readBuffer  = frontBuffer
let writeBuffer = backBuffer

function createRenderTarget () {
  // Full float when OES_texture_float_linear is available, else half float.
  let type = THREE.FloatType
  if( renderer.extensions.get( 'OES_texture_float_linear' ) === null ) type = THREE.HalfFloatType

  let renderTarget = new THREE.WebGLRenderTarget( 1, 1, {
    type,
    wrapS: THREE.ClampToEdgeWrapping,
    wrapT: THREE.ClampToEdgeWrapping,
    format: THREE.RGBAFormat,
    minFilter: THREE.NearestFilter,
    magFilter: THREE.NearestFilter,
    stencilBuffer: false,
    depthBuffer: true
  })

  renderTarget.texture.generateMipmaps = false
  // Resize from the 1x1 placeholder to the viewport size.
  renderTarget.setSize(w, h)

  return renderTarget
}

然后我创建了一个额外的场景、一个覆盖屏幕的平面(我会把主场景的渲染结果贴到它上面)和一个正交相机。我把主场景渲染得到的图像作为 uniform 传递给我的后处理平面:

// Excerpt: fullscreen-quad scene, orthographic camera, and the
// pass-through shader material used to display a render-target texture.
const renderScene = new THREE.Scene()
const renderCamera = new THREE.OrthographicCamera(-w / 2, w / 2, -h / 2, h / 2, -1000, 1000)
const renderMaterial = new THREE.ShaderMaterial({
  uniforms: {
    // Captured once at creation time — never updated per frame.
    tDiffuse: { value: writeBuffer.texture }
  },
  vertexShader: `
    varying vec2 vUv;

    void main () {
      gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);

      vUv = uv;
    }
  `,
  fragmentShader: `
    uniform sampler2D tDiffuse;

    varying vec2 vUv;

    void main () {
      gl_FragColor = texture2D(tDiffuse, vUv);
    }
  `
})

最后,在我的动画循环中,我首先将主场景渲染到当前 fbo,然后渲染我的 post 处理平面并交换缓冲区:

// Excerpt: buffer swap plus the animation loop as originally written.
function swapBuffers () {
  if (readBuffer === frontBuffer) {
    readBuffer  = backBuffer
    writeBuffer = frontBuffer
  } else {
    readBuffer  = frontBuffer
    writeBuffer = backBuffer
  }
}

// NOTE(review): tDiffuse still points at the texture captured when the
// material was created; it is not re-assigned after swapBuffers(), which
// is why no blending with the previous frame shows up.
function renderFrame () {
  requestAnimationFrame(renderFrame)

  renderer.render(scene, camera, writeBuffer)
  renderer.render(renderScene, renderCamera)
  swapBuffers()

  timeElapsed += clock.getDelta()

  shape.position.x = Math.sin(timeElapsed) * 20.0
  shape.position.y = Math.cos(timeElapsed * Math.PI) * 20.0
}

一切都很好,我可以在 post 处理平面上看到我的主场景渲染,但我不明白如何将它与之前的帧缓冲区混合。我想我目前的实现是非常错误的,但是信息很少,我根本无法理解如何实现这种混合。

我尝试将两个缓冲区作为纹理传递,然后在 GLSL 中将它们混合,如下所示:

// js
uniforms: {
    tDiffuse1: { value: writeBuffer.texture },
    tDiffuse2: { value: readBuffer.texture }
  }

// glsl
gl_FragColor = mix(texture2D(tDiffuse1, vUv), texture2D(tDiffuse2, vUv), 0.5);

但在视觉上我看不到任何混合。

您需要 3 个渲染目标。我们称它们为 sceneTarget、previousTarget 和 resultTarget。

第 1 步:将场景渲染到 sceneTarget

您现在在 sceneTarget.texture 中有了您的场景。

第 2 步:将 sceneTarget.texture 和 previousTarget.texture 混合到 resultTarget 中。

这一步你需要 2 个纹理作为输入,就像你在问题底部提到的那样。您需要每帧更新材质的 uniform,让它们指向正确的纹理:

renderMaterial.uniforms.tDiffuse1.value = previousTarget.texture;
renderMaterial.uniforms.tDiffuse2.value = sceneTarget.texture;

现在您在 resultTarget.texture 中得到了混合结果。

第 3 步:将 resultTarget.texture 渲染到 canvas。

现在你可以真正看到结果了。

第 4 步:交换 resultTarget 和 previousTarget。

// --- Basic three.js setup ---
let w = window.innerWidth
let h = window.innerHeight

const scene = new THREE.Scene()
const camera = new THREE.PerspectiveCamera(60, w / h, 0.1, 1000)
const renderer = new THREE.WebGLRenderer()
const clock = new THREE.Clock()

// Three targets: the fresh scene render, the previous blended frame, and
// the blend result (resultTarget/previousTarget are swapped each frame).
let sceneTarget = createRenderTarget()
let previousTarget  = sceneTarget.clone();
let resultTarget  = sceneTarget.clone();

// Fullscreen-quad scene that mixes the previous frame with the current
// scene render (mix factor 0.25 toward the current frame).
const blendScene = new THREE.Scene();
const blendCamera = new THREE.OrthographicCamera(-w/2, w/2, -h/2, h/2, -1000, 1000);
const blendMaterial = new THREE.ShaderMaterial({
  uniforms: {
    // Re-pointed every frame in renderFrame() after the swap.
    tDiffuse1: { value: previousTarget.texture },
    tDiffuse2: { value: sceneTarget.texture },
  },
  vertexShader: `
    varying vec2 vUv;

    void main () {
      gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);

      vUv = uv;
    }
  `,
  fragmentShader: `
    uniform sampler2D tDiffuse1;
    uniform sampler2D tDiffuse2;

    varying vec2 vUv;

    void main () {
      gl_FragColor = mix(texture2D(tDiffuse1, vUv), texture2D(tDiffuse2, vUv), 0.25);
    }
  `,
});
const blendMesh = new THREE.Mesh(
  new THREE.PlaneBufferGeometry(w, h),
  blendMaterial
);
// Compensates for the flipped (-h/2, h/2) ortho top/bottom above.
blendMesh.rotation.x = Math.PI;
blendScene.add(blendMesh);

// Second fullscreen-quad scene that draws the blend result to the canvas.
const resultScene = new THREE.Scene();
const resultCamera = new THREE.OrthographicCamera(-w/2, w/2, -h/2, h/2, -1000, 1000);
const resultMaterial = new THREE.MeshBasicMaterial({
  map: resultTarget.texture,
});
const resultMesh = new THREE.Mesh(
  new THREE.PlaneBufferGeometry(w, h),
  resultMaterial
);
resultMesh.rotation.x = Math.PI;
resultScene.add(resultMesh);

let shape

setMainScene()
renderFrame(0)

// Creates a screen-sized float (or half-float) RGBA render target with
// nearest filtering, clamped wrapping and no mipmaps.
function createRenderTarget () {
  // Fall back to half float when OES_texture_float_linear is missing.
  let type = THREE.FloatType
  if( renderer.extensions.get( 'OES_texture_float_linear' ) === null ) type = THREE.HalfFloatType

  let renderTarget = new THREE.WebGLRenderTarget( 1, 1, {
    type,
    wrapS: THREE.ClampToEdgeWrapping,
    wrapT: THREE.ClampToEdgeWrapping,
    format: THREE.RGBAFormat,
    minFilter: THREE.NearestFilter,
    magFilter: THREE.NearestFilter,
    stencilBuffer: false,
    depthBuffer: true
  })
  
  renderTarget.texture.generateMipmaps = false
  // Resize from the 1x1 placeholder to the viewport size.
  renderTarget.setSize(w, h)

  return renderTarget
}

// Swap the accumulation targets: this frame's blend result becomes the
// "previous frame" input for the next iteration.
function swapBuffers () {
  [previousTarget, resultTarget] = [resultTarget, previousTarget];
}

// Configures the renderer and its canvas, positions the camera, and adds
// the animated red sphere to the main scene.
function setMainScene () {
  renderer.setSize(w, h)
  renderer.setClearColor(0x111111)
  renderer.setPixelRatio(window.devicePixelRatio || 1)
  document.body.appendChild(renderer.domElement)

  camera.position.set(0, 20, 100);
  camera.lookAt(new THREE.Vector3());

  shape = new THREE.Mesh(
    new THREE.SphereBufferGeometry(10, 20, 20),
    new THREE.MeshBasicMaterial({ color: 0xFF0000 })
  );
  scene.add(shape);
}

// Per-frame loop (timeElapsed: milliseconds from requestAnimationFrame).
// 1) render the scene into sceneTarget,
// 2) blend sceneTarget with previousTarget into resultTarget,
// 3) draw resultTarget to the canvas,
// 4) swap resultTarget/previousTarget for the next frame.
function renderFrame (timeElapsed) {
  timeElapsed *= 0.001;
  
  renderer.render(scene, camera, sceneTarget);
  
  // Re-point the blend inputs each frame — this is the key step the
  // question's code was missing.
  blendMaterial.uniforms.tDiffuse1.value = previousTarget.texture;
  blendMaterial.uniforms.tDiffuse2.value = sceneTarget.texture;
  renderer.render(blendScene, blendCamera, resultTarget);
  
  resultMaterial.map = resultTarget.texture;
  renderer.render(resultScene, resultCamera);
  swapBuffers();

  shape.position.x = Math.sin(timeElapsed) * 20.0;
  shape.position.y = Math.cos(timeElapsed * Math.PI) * 20.0;
  
  requestAnimationFrame(renderFrame);
  
}
* { margin: 0; padding: 0; }
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/88/three.min.js"></script>

我还要补充一点,这并不是一个特别好的持久性效果。我也不确定最好的做法是什么。上面做法的问题是:你设置的持久性越高,当前帧在画面中的占比就越小。

更好的是下面这种做法,虽然它需要选定一种淡出颜色。它只需要 2 个目标:previousTarget 和 currentTarget。

  1. 使用着色器将 previousTarget.texture 渲染到 currentTarget,同时向某种颜色褪色。例如 mix(tex, color, 0.05) 或类似的东西。

  2. 也将场景渲染到 currentTarget

  3. 渲染 currentTarget.texture 到 canvas

  4. 交换 currentTargetpreviousTarget

// --- Basic three.js setup ---
let w = window.innerWidth
let h = window.innerHeight

const scene = new THREE.Scene()
const camera = new THREE.PerspectiveCamera(60, w / h, 0.1, 1000)
const renderer = new THREE.WebGLRenderer()
const clock = new THREE.Clock()

// Only two targets: the frame being built and the previous frame.
let currentTarget = createRenderTarget()
let previousTarget  = currentTarget.clone();

// Fullscreen-quad scene that copies the previous frame while fading it
// 5% toward a fixed fade color (black) — the persistence trail.
const fadeScene = new THREE.Scene();
const fadeCamera = new THREE.OrthographicCamera(-w/2, w/2, -h/2, h/2, -1000, 1000);
const fadeMaterial = new THREE.ShaderMaterial({
  uniforms: {
    // Re-pointed every frame in renderFrame() after the swap.
    tDiffuse: { value: previousTarget.texture },
  },
  vertexShader: `
    varying vec2 vUv;

    void main () {
      gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);

      vUv = uv;
    }
  `,
  fragmentShader: `
    uniform sampler2D tDiffuse;

    varying vec2 vUv;

    void main () {
      vec4 fadeColor = vec4(0,0,0,1);
      gl_FragColor = mix(texture2D(tDiffuse, vUv), fadeColor, 0.05);
    }
  `,
});
const fadeMesh = new THREE.Mesh(
  new THREE.PlaneBufferGeometry(w, h),
  fadeMaterial
);
// Compensates for the flipped (-h/2, h/2) ortho top/bottom above.
fadeMesh.rotation.x = Math.PI;
fadeScene.add(fadeMesh);

// Fullscreen-quad scene that draws the finished frame to the canvas.
const resultScene = new THREE.Scene();
const resultCamera = new THREE.OrthographicCamera(-w/2, w/2, -h/2, h/2, -1000, 1000);
const resultMaterial = new THREE.MeshBasicMaterial({
  map: currentTarget.texture,
});
const resultMesh = new THREE.Mesh(
  new THREE.PlaneBufferGeometry(w, h),
  resultMaterial
);
resultMesh.rotation.x = Math.PI;
resultScene.add(resultMesh);

let shape

setMainScene()
renderFrame(0)

// Creates a screen-sized float (or half-float) RGBA render target with
// nearest filtering, clamped wrapping and no mipmaps.
function createRenderTarget () {
  // Fall back to half float when OES_texture_float_linear is missing.
  let type = THREE.FloatType
  if( renderer.extensions.get( 'OES_texture_float_linear' ) === null ) type = THREE.HalfFloatType

  let renderTarget = new THREE.WebGLRenderTarget( 1, 1, {
    type,
    wrapS: THREE.ClampToEdgeWrapping,
    wrapT: THREE.ClampToEdgeWrapping,
    format: THREE.RGBAFormat,
    minFilter: THREE.NearestFilter,
    magFilter: THREE.NearestFilter,
    stencilBuffer: false,
    depthBuffer: true
  })
  
  renderTarget.texture.generateMipmaps = false
  // Resize from the 1x1 placeholder to the viewport size.
  renderTarget.setSize(w, h)

  return renderTarget
}

// Swap the two accumulation targets so the frame just produced becomes
// the "previous frame" input on the next iteration.
function swapBuffers () {
  [previousTarget, currentTarget] = [currentTarget, previousTarget];
}

// Configures the renderer and its canvas, positions the camera, and adds
// the animated red sphere to the main scene.
function setMainScene () {
  renderer.setSize(w, h)
  renderer.setClearColor(0x111111)
  renderer.setPixelRatio(window.devicePixelRatio || 1)
  // Don't clear color between passes: the scene is rendered on top of the
  // faded previous frame already sitting in currentTarget.
  renderer.autoClearColor = false;
  document.body.appendChild(renderer.domElement)

  camera.position.set(0, 20, 100);
  camera.lookAt(new THREE.Vector3());

  shape = new THREE.Mesh(
    new THREE.SphereBufferGeometry(10, 20, 20),
    new THREE.MeshBasicMaterial({ color: 0xFF0000 })
  );
  scene.add(shape);
}

// Per-frame loop (timeElapsed: milliseconds from requestAnimationFrame).
// 1) fade the previous frame into currentTarget,
// 2) render the scene on top (autoClearColor is false, see setMainScene),
// 3) draw currentTarget to the canvas,
// 4) swap currentTarget/previousTarget.
function renderFrame (timeElapsed) {
  timeElapsed *= 0.001;
  
  fadeMaterial.uniforms.tDiffuse.value = previousTarget.texture;
  renderer.render(fadeScene, fadeCamera, currentTarget);
  
  // Scene is composited over the faded copy instead of a cleared buffer.
  renderer.render(scene, camera, currentTarget);
    
  resultMaterial.map = currentTarget.texture;
  renderer.render(resultScene, resultCamera);
  swapBuffers();

  shape.position.x = Math.sin(timeElapsed) * 20.0;
  shape.position.y = Math.cos(timeElapsed * Math.PI) * 20.0;
  
  requestAnimationFrame(renderFrame);
  
}
* { margin: 0; padding: 0; }
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/88/three.min.js"></script>