Camera-space Normals from depth texture

I want to use the (non-linear) depth texture stored in pass 1 to generate screen-space normals. In the second pass I can render out depth, diffuse, IDs, etc., but I can't seem to get normals-from-depth working.

My current understanding of how to get normals from depth (sketched in GLSL just below this list):

  1. texture() / texelFetch() p at the current tex coord, p + (1, 0) = p1, and p + (0, 1) = p2
  2. From these, reconstruct the camera-space positions
  3. Get the vector v1 = p1 - p between the current position and its screen/texture-space x neighbor
  4. Get the vector v2 = p2 - p between the current position and its screen/texture-space y neighbor
  5. Take the cross product of these two and normalize() it to get the surface normal for the current texel
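
In GLSL terms, the intent is roughly the following sketch; positionFromDepth() here is a hypothetical placeholder for whatever camera-space reconstruction step 2 ends up using:

vec3 normalFromDepth(vec2 texcoord, vec2 texelSize)
{
    vec3 p  = positionFromDepth(texcoord);                          // current texel (hypothetical helper)
    vec3 p1 = positionFromDepth(texcoord + vec2(texelSize.x, 0.0)); // x neighbor
    vec3 p2 = positionFromDepth(texcoord + vec2(0.0, texelSize.y)); // y neighbor

    vec3 v1 = p1 - p;
    vec3 v2 = p2 - p;
    return normalize(cross(v1, v2)); // surface normal at the current texel
}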

Shaders (pass 2! rendering to a fullscreen quad)

Vertex:

#version 330 core

layout (location = 0) in vec2 position;
layout (location = 2) in vec2 texcoord;

out vec2 texcoordFrag;

void main()
{
    gl_Position = vec4(position.x, position.y, 0, 1);
    texcoordFrag = texcoord;
}

Fragment:

#version 330 core

uniform sampler2D tex;
uniform mat4 vpInv;

in vec2 texcoordFrag;
layout(location = 0) out vec4 fragmentColor;


float near = 0.1;
float far = 100.0;

float linearizeDepth(float depth)
{
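    // Remaps the non-linear depth-buffer value into a roughly linear 0..1 range (per the articles below)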
    float nearToFarDistance = far - near;
    return (2.0 * near) / (far + near - depth * nearToFarDistance);
    //http://www.ozone3d.net/blogs/lab/20090206/how-to-linearize-the-depth-value/ 
    //http://www.geeks3d.com/20091216/geexlab-how-to-visualize-the-depth-buffer-in-glsl/
} 

vec3 worldSpacePositionFromDepth(in float depth)
{
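    // Unproject the depth-buffer sample at texcoordFrag to clip space, apply the inverse
    // view-projection matrix (vpInv), then perspective-divide to get the world-space position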
    vec4 clipSpacePos;
    clipSpacePos.xy = texcoordFrag * 2.0 - 1.0;
    clipSpacePos.z = texture(tex, texcoordFrag).r * 2.0 - 1.0;
    clipSpacePos.w = 1.0;
    vec4 homogenousPos = vpInv * clipSpacePos;
    return homogenousPos.xyz / homogenousPos.w;
}

vec3 viewSpacePositionFromDepth(in float depth) //PositionFromDepth_DarkPhoton(): https://www.opengl.org/discussion_boards/showthread.php/176040-Render-depth-to-texture-issue
{
    vec2 ndc;             // Reconstructed NDC-space position
    vec3 eye;             // Reconstructed EYE-space position

    float top = 0.05463024898; // = near * tan(fov/2) for a 1 radian FoV and near = 0.1
    float bottom = -top;

    float right = top * 1024.0 / 768.0;
    float left = -right;

    float width = 1024.0;
    float height = 768.0;
    float widthInv = 1.0 / width;
    float heightInv = 1.0 / height;

    ndc.x = (texcoordFrag.x - 0.5) * 2.0;
    ndc.y = (texcoordFrag.y - 0.5) * 2.0;

    eye.z = linearizeDepth(depth); //eye.z = near * far / ((depth * (far - near)) - far); //original
    eye.x = (-ndc.x * eye.z) * right/near;
    eye.y = (-ndc.y * eye.z) * top/near;

    return eye;
}

vec3 getNormal(vec3 p1, vec3 p2)
{
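    // Cross the two edge vectors, flip z, then normalize and remap from [-1, 1] to [0, 1] for output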
    vec3 normal = cross(p2, p1);
    normal.z = -normal.z;
    return normalize(normal) * 0.5 + 0.5;
}   

void main()
{
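    // Sample depth at the current texel and at two nearby texels, reconstruct
    // view-space positions, and derive a normal from the position differences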
    float depth  = texture(tex, texcoordFrag.st).r;
    float depth1 = texture(tex, texcoordFrag.st + vec2(1.0/1024.0, 0)).r;
    float depth2 = texture(tex, texcoordFrag.st + vec2(1.0/768.0 , 0)).r;
    float depthLinear = linearizeDepth(depth);

    if (depthLinear > 1.0) discard;
    //fragmentColor = vec4(depth, depth, depth, 1.0f);
    fragmentColor = vec4(depthLinear, depthLinear, depthLinear, 1.0);

    //vec3 w = worldSpacePositionFromDepth(depth);
    //fragmentColor = vec4(w, 1.0);

    vec3 p  = viewSpacePositionFromDepth(depth);
    vec3 p1 = viewSpacePositionFromDepth(depth1);
    vec3 p2 = viewSpacePositionFromDepth(depth2);

    vec3 v1 = (p1-p);
    vec3 v2 = (p2-p);
    vec3 normal = getNormal(v1, v2);
    fragmentColor = vec4(normal, 1.0);
}

Result:

Question: What am I doing wrong? Explain it like I'm five. :)

This will be used as a basis for SSAO and directional lighting.

As far as I can tell, dFdx/dFdy are not the answer here, since I'm working with texels, not fragments.

Answer:

The way you compute p, p1 and p2 via viewSpacePositionFromDepth is certainly wrong. That function uses the same texcoordFrag for all three points, only with different depths, so all three points lie on a single line. The cross product in getNormal should therefore yield exactly 0 for every pixel; what you see here is just numerical instability, amplified by normalizing the result vector to unit length.
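
A minimal sketch of the fix along those lines, reusing the posted linearizeDepth(), near, tex, texcoordFrag and fragmentColor, and keeping the posted frustum constants and sign conventions (an illustration, not a verified drop-in): the reconstruction now takes each sample's own texcoord instead of always using texcoordFrag. Note that the posted main() also offsets the y neighbor in x (vec2(1.0/768.0, 0)); the y step has to move in y.

vec3 viewSpacePositionFromDepth(in vec2 texcoord, in float depth)
{
    vec2 ndc = texcoord * 2.0 - 1.0;   // this sample's own NDC xy, not the fragment's

    float top   = 0.05463024898;       // frustum constants as in the posted shader
    float right = top * 1024.0 / 768.0;

    vec3 eye;
    eye.z = linearizeDepth(depth);
    eye.x = (-ndc.x * eye.z) * right / near;
    eye.y = (-ndc.y * eye.z) * top / near;
    return eye;
}

void main()
{
    vec2 xStep = vec2(1.0 / 1024.0, 0.0);
    vec2 yStep = vec2(0.0, 1.0 / 768.0); // step in y for the y neighbor

    float depth  = texture(tex, texcoordFrag).r;
    float depth1 = texture(tex, texcoordFrag + xStep).r;
    float depth2 = texture(tex, texcoordFrag + yStep).r;

    vec3 p  = viewSpacePositionFromDepth(texcoordFrag,         depth);
    vec3 p1 = viewSpacePositionFromDepth(texcoordFrag + xStep, depth1);
    vec3 p2 = viewSpacePositionFromDepth(texcoordFrag + yStep, depth2);

    vec3 normal = normalize(cross(p1 - p, p2 - p));
    fragmentColor = vec4(normal * 0.5 + 0.5, 1.0);
}

With distinct texcoords the three reconstructed points span an actual surface patch, so the cross product yields a meaningful normal instead of noise.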