Incorrect depthTexture with SSAO
I've been stumped recently trying to get THREE.DepthTexture working with an ambient occlusion shader. I previously had this working with RGBA unpacking, but after reading about Matt Deslauriers' project Audiograph, I decided to try the method he describes for a potential performance boost:
Historically in ThreeJS, you would render your scene with
MeshDepthMaterial to a WebGLRenderTarget, and then unpack to a linear
depth value when sampling from the depth target. This is fairly
expensive and often unnecessary, since many environments support the
WEBGL_depth_texture extension.
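For context, the RGBA-unpacking approach I had working before looks roughly like this (a minimal sketch; `width` and `height` are placeholders for the real render size):

// Render scene depth packed into an ordinary RGBA8 target.
const depthTarget = new THREE.WebGLRenderTarget( width, height );
const depthMaterial = new THREE.MeshDepthMaterial();
depthMaterial.depthPacking = THREE.RGBADepthPacking; // spread depth across all four channels

scene.overrideMaterial = depthMaterial;
renderer.render( scene, camera, depthTarget ); // r86 signature: render target as third argument
scene.overrideMaterial = null;

// The SSAO shader must then call unpackRGBAToDepth() (from three.js's packing
// shader chunk) for every depth sample; that per-sample unpack is the cost
// the WEBGL_depth_texture extension avoids.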
After attempting this method, I somehow get this strange, unwanted effect where lines run all across the terrain:
I have set up a small example below in which I reproduce the issue. I have a feeling it's something quite obvious that I'm simply glossing over.
I'm hoping somebody here can point out what I'm missing, so that I can get ambient occlusion working in this more performant way!
Many thanks in advance.
const scene = new THREE.Scene();
const camera = new THREE.PerspectiveCamera(75, window.innerWidth/window.innerHeight, 0.1, 2000);
const pivot = new THREE.Object3D();
pivot.add(camera);
scene.add(pivot);
camera.position.set(0, 250, 500);
camera.lookAt(pivot.position);
const renderer = new THREE.WebGLRenderer();
renderer.setSize(window.innerWidth, window.innerHeight);
renderer.gammaInput = true;
renderer.gammaOutput = true;
renderer.gammaFactor = 2.2;
let supportsExtension = false;
if (renderer.extensions.get('WEBGL_depth_texture')) {
  supportsExtension = true;
}
document.body.appendChild(renderer.domElement);
const createCube = () => {
  const geo = new THREE.BoxGeometry(500, 500, 500);
  const mat = new THREE.MeshBasicMaterial({ color: 0x00ff00 });
  const obj = new THREE.Mesh(geo, mat);
  obj.position.y = -(obj.geometry.parameters.height / 2);
  scene.add(obj);
};
const createSphere = () => {
  const geo = new THREE.SphereGeometry(100, 12, 8);
  const mat = new THREE.MeshBasicMaterial({ color: 0xff00ff });
  const obj = new THREE.Mesh(geo, mat);
  obj.position.y = obj.geometry.parameters.radius;
  scene.add(obj);
};
// Create objects
createCube();
createSphere();
const composer = new THREE.EffectComposer(renderer);
const target = new THREE.WebGLRenderTarget( window.innerWidth, window.innerHeight );
target.texture.format = THREE.RGBFormat;
target.texture.minFilter = THREE.NearestFilter;
target.texture.magFilter = THREE.NearestFilter;
target.texture.generateMipmaps = false;
target.stencilBuffer = false;
target.depthBuffer = true;
target.depthTexture = new THREE.DepthTexture();
target.depthTexture.type = THREE.UnsignedShortType;
function initPostProcessing() {
  composer.addPass(new THREE.RenderPass( scene, camera ));
  const pass = new THREE.ShaderPass({
    uniforms: {
      "tDiffuse": { value: null },
      "tDepth": { value: target.depthTexture },
      "resolution": { value: new THREE.Vector2( 512, 512 ) },
      "cameraNear": { value: 1 },
      "cameraFar": { value: 100 },
      "onlyAO": { value: 0 },
      "aoClamp": { value: 0.5 },
      "lumInfluence": { value: 0.5 }
    },
    vertexShader: document.getElementById('vertexShader').textContent,
    fragmentShader: document.getElementById('fragmentShader').textContent,
  });
  pass.material.precision = 'highp';
  composer.addPass(pass);
  pass.uniforms.tDepth.value = target.depthTexture;
  pass.uniforms.cameraNear.value = camera.near;
  pass.uniforms.cameraFar.value = camera.far;
  composer.passes[composer.passes.length - 1].renderToScreen = true;
}
initPostProcessing();
const animate = () => {
  requestAnimationFrame( animate );
  pivot.rotation.y += 0.01;
  renderer.render( scene, camera, target );
  composer.render();
};
animate();
html, body { margin: 0; }
canvas { display: block; width: 100%; height: 100%; }
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/86/three.js"></script>
<script src="https://cdn.rawgit.com/mrdoob/three.js/dev/examples/js/postprocessing/EffectComposer.js"></script>
<script src="https://cdn.rawgit.com/mrdoob/three.js/dev/examples/js/postprocessing/RenderPass.js"></script>
<script src="https://cdn.rawgit.com/mrdoob/three.js/dev/examples/js/postprocessing/ShaderPass.js"></script>
<script src="https://cdn.rawgit.com/mrdoob/three.js/dev/examples/js/shaders/CopyShader.js"></script>
<script id="vertexShader" type="x-shader/x-vertex">
  varying vec2 vUv;
  void main() {
    vUv = uv;
    gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
  }
</script>
<script id="fragmentShader" type="x-shader/x-fragment">
uniform float cameraNear;
uniform float cameraFar;
uniform bool onlyAO; // use only ambient occlusion pass?
uniform vec2 resolution; // texture width, height
uniform float aoClamp; // depth clamp - reduces haloing at screen edges
uniform float lumInfluence; // how much luminance affects occlusion
uniform sampler2D tDiffuse;
uniform highp sampler2D tDepth;
varying vec2 vUv;
// #define PI 3.14159265
#define DL 2.399963229728653 // PI * ( 3.0 - sqrt( 5.0 ) )
#define EULER 2.718281828459045
// user variables
const int samples = 4; // ao sample count
const float radius = 5.0; // ao radius
const bool useNoise = false; // use noise instead of pattern for sample dithering
const float noiseAmount = 0.0003; // dithering amount
const float diffArea = 0.4; // self-shadowing reduction
const float gDisplace = 0.4; // gauss bell center
highp vec2 rand( const vec2 coord ) {
  highp vec2 noise;
  if ( useNoise ) {
    float nx = dot( coord, vec2( 12.9898, 78.233 ) );
    float ny = dot( coord, vec2( 12.9898, 78.233 ) * 2.0 );
    noise = clamp( fract( 43758.5453 * sin( vec2( nx, ny ) ) ), 0.0, 1.0 );
  } else {
    highp float ff = fract( 1.0 - coord.s * ( resolution.x / 2.0 ) );
    highp float gg = fract( coord.t * ( resolution.y / 2.0 ) );
    noise = vec2( 0.25, 0.75 ) * vec2( ff ) + vec2( 0.75, 0.25 ) * gg;
  }
  return ( noise * 2.0 - 1.0 ) * noiseAmount;
}
float readDepth( const in vec2 coord ) {
  float cameraFarPlusNear = cameraFar + cameraNear;
  float cameraFarMinusNear = cameraFar - cameraNear;
  float cameraCoef = 2.0 * cameraNear;
  return cameraCoef / ( cameraFarPlusNear - texture2D( tDepth, coord ).x * cameraFarMinusNear );
}
float compareDepths( const in float depth1, const in float depth2, inout int far ) {
  float garea = 2.0; // gauss bell width
  float diff = ( depth1 - depth2 ) * 100.0; // depth difference (0-100)
  // reduce left bell width to avoid self-shadowing
  if ( diff < gDisplace ) {
    garea = diffArea;
  } else {
    far = 1;
  }
  float dd = diff - gDisplace;
  float gauss = pow( EULER, -2.0 * dd * dd / ( garea * garea ) );
  return gauss;
}
float calcAO( float depth, float dw, float dh ) {
  float dd = radius - depth * radius;
  vec2 vv = vec2( dw, dh );
  vec2 coord1 = vUv + dd * vv;
  vec2 coord2 = vUv - dd * vv;
  float temp1 = 0.0;
  float temp2 = 0.0;
  int far = 0;
  temp1 = compareDepths( depth, readDepth( coord1 ), far );
  // DEPTH EXTRAPOLATION
  if ( far > 0 ) {
    temp2 = compareDepths( readDepth( coord2 ), depth, far );
    temp1 += ( 1.0 - temp1 ) * temp2;
  }
  return temp1;
}
void main() {
  highp vec2 noise = rand( vUv );
  float depth = readDepth( vUv );
  float tt = clamp( depth, aoClamp, 1.0 );
  float w = ( 1.0 / resolution.x ) / tt + ( noise.x * ( 1.0 - noise.x ) );
  float h = ( 1.0 / resolution.y ) / tt + ( noise.y * ( 1.0 - noise.y ) );
  float ao = 0.0;
  float dz = 1.0 / float( samples );
  float z = 1.0 - dz / 2.0;
  float l = 0.0;
  for ( int i = 0; i <= samples; i ++ ) {
    float r = sqrt( 1.0 - z );
    float pw = cos( l ) * r;
    float ph = sin( l ) * r;
    ao += calcAO( depth, pw * w, ph * h );
    z = z - dz;
    l = l + DL;
  }
  ao /= float( samples );
  ao = 1.0 - ao;
  vec3 color = texture2D( tDiffuse, vUv ).rgb;
  vec3 lumcoeff = vec3( 0.299, 0.587, 0.114 );
  float lum = dot( color.rgb, lumcoeff );
  vec3 luminance = vec3( lum );
  vec3 final = vec3( color * mix( vec3( ao ), vec3( 1.0 ), luminance * lumInfluence ) ); // mix( color * ao, white, luminance )
  if ( onlyAO ) {
    final = vec3( mix( vec3( ao ), vec3( 1.0 ), luminance * lumInfluence ) ); // ambient occlusion only
  }
  // gl_FragColor = vec4( vec3( readDepth( vUv ) ), 1.0 ); // Depth
  gl_FragColor = vec4( final, 1.0 );
}
</script>
I would love to know what is causing my ambient occlusion to render incorrectly!
If you are using a perspective camera and relying on the depth map for any purpose (that includes SSAO and shadows), be careful how you choose camera.near and camera.far, especially near. (If you are dealing with shadows, it would be shadow.camera.near.)
Push the near plane out as far as is reasonable for your use case. You will get the best results if your scene is positioned near the front of the frustum.
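As a minimal sketch of that change against the code in the question (the value 10 is purely illustrative; choose whatever your scene can tolerate):

// near = 0.1 concentrates almost all of the 16-bit (UnsignedShortType) depth
// precision within a few units of the camera, so depth at terrain distances
// quantizes coarsely; that is a likely source of the lines across the terrain.
const camera = new THREE.PerspectiveCamera(
  75,
  window.innerWidth / window.innerHeight,
  10,   // near: push out as far as your use case allows
  2000  // far: pull in as close as your use case allows
);

The cameraNear and cameraFar uniforms are already copied from the camera in initPostProcessing(), so no shader change is needed.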
three.js r.86