How to overcome differences between off-screen and on-screen framebuffer rendering?
I'm trying to use an off-screen framebuffer to replicate a scene that renders perfectly well to the default framebuffer. There seem to be differences in the rendering that I can't sort out.
For context, I'm visualizing the Earth with an atmospheric shader. I'm using a Qt QOpenGLWidget, but mostly raw GL calls because I'm not a fan of Qt's abstractions. I need to render this scene to an off-screen framebuffer because I want to implement some post-processing effects in my visualization, and for that I need to be able to sample the scene as a texture. I've gotten to the point where the framebuffer is created successfully and its color texture is rendered onto a quad on screen (a sketch of that composite pass follows the framebuffer code below).
My understanding is that alpha blending behaves differently when rendering to an off-screen framebuffer compared to the default one. I haven't been able to find any online resources that point to a way of producing identical results without a major refactor; the approaches I've seen involve manually rendering objects in back-to-front order, or baking the alpha values into the colors sent to the framebuffer. I did try the often-suggested alternative of using glBlendFuncSeparate to control things more manually:
glEnable(GL_BLEND);
glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
But that did not lead to any noticeable improvement in my results (nor did I expect it to, since the math there can't fix the blending problem I'm seeing).
Enough rambling; on to some actual code. My codebase is quite large, so unfortunately I can't share all of it because there are many proprietary drawing routines, but I can start with how I generate the framebuffer:
// Create the framebuffer object
glGenFramebuffers(1, &m_fbo);
// Bind the framebuffer to the current context
glBindFramebuffer(GL_FRAMEBUFFER, m_fbo);
// generate texture to attach as a color attachment to the current frame buffer
m_texColorUnit = 4;
// Set to width and height of window, and leave data uninitialized
glGenTextures(1, &m_texColorBuffer);
glActiveTexture(GL_TEXTURE0 + m_texColorUnit);
glBindTexture(GL_TEXTURE_2D, m_texColorBuffer);
glTexImage2D(GL_TEXTURE_2D,
             0,
             GL_RGB8_OES,
             m_navigation->renderContext()->getWidth(),
             m_navigation->renderContext()->getHeight(),
             0,
             GL_RGB8_OES,
             GL_UNSIGNED_BYTE,
             NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
// attach texture to currently bound framebuffer object
glFramebufferTexture2D(GL_FRAMEBUFFER,
                       GL_COLOR_ATTACHMENT0,
                       GL_TEXTURE_2D,
                       m_texColorBuffer,
                       0);
glBindTexture(GL_TEXTURE_2D, 0); //unbind the texture
glActiveTexture(GL_TEXTURE0); // Reset active texture to default
// Create renderBuffer object for depth and stencil checking
glGenRenderbuffers(1, &m_rbo);
glBindRenderbuffer(GL_RENDERBUFFER, m_rbo); // bind rbo
glRenderbufferStorage(GL_RENDERBUFFER,
                      GL_DEPTH24_STENCIL8_OES,
                      m_navigation->renderContext()->getWidth(),
                      m_navigation->renderContext()->getHeight()
                      ); // allocate memory
// Attach rbo to the depth and stencil attachment of the fbo
glFramebufferRenderbuffer(GL_FRAMEBUFFER,
                          GL_DEPTH_STENCIL_OES,
                          GL_RENDERBUFFER,
                          m_rbo);
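For completeness, here is roughly how the color texture then gets drawn onto a quad on the default framebuffer (a trimmed-down sketch; m_screenQuadProgram and drawScreenQuad() stand in for my actual post-processing program and quad-drawing routine):
// Switch back to the QOpenGLWidget's default framebuffer for the final pass
glBindFramebuffer(GL_FRAMEBUFFER, defaultFramebufferObject());
glDisable(GL_DEPTH_TEST); // the quad covers the whole viewport
glDisable(GL_BLEND);      // a single opaque quad needs no blending
// Expose the FBO's color texture to the screen-quad shader
glActiveTexture(GL_TEXTURE0 + m_texColorUnit);
glBindTexture(GL_TEXTURE_2D, m_texColorBuffer);
glUseProgram(m_screenQuadProgram); // stand-in for my post-processing program
glUniform1i(glGetUniformLocation(m_screenQuadProgram, "screenTexture"), m_texColorUnit);
drawScreenQuad();                  // stand-in for my quad-drawing routine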
And the atmosphere shaders:
// vert
#ifndef GL_ES
precision mediump int;
precision highp float;
#endif
attribute vec3 posAttr;
uniform highp mat4 matrix;
uniform highp mat4 modelMatrix;
uniform vec3 v3CameraPos; // The camera's current position
uniform vec3 v3LightPos; // The direction vector to the light source
uniform vec3 v3InvWavelength; // 1 / pow(wavelength, 4) for the red, green, and blue channels
uniform float fCameraHeight; // The camera's current height
uniform float fCameraHeight2; // fCameraHeight^2
uniform float fOuterRadius; // The outer (atmosphere) radius
uniform float fOuterRadius2; // fOuterRadius^2
uniform float fInnerRadius; // The inner (planetary) radius
uniform float fInnerRadius2; // fInnerRadius^2
uniform float fKrESun; // Kr * ESun
uniform float fKmESun; // Km * ESun
uniform float fKr4PI; // Kr * 4 * PI
uniform float fKm4PI; // Km * 4 * PI
uniform float fScale; // 1 / (fOuterRadius - fInnerRadius)
uniform float fScaleDepth; // The scale depth (i.e. the altitude at which the atmosphere's average density is found)
uniform float fScaleOverScaleDepth; // fScale / fScaleDepth
const int nSamples = 5;
const float fSamples = 5.0;
varying vec3 col;
varying vec3 colatten;
varying vec3 v3Direction;
varying vec3 vertexWorld;
float scale(float fCos)
{
float x = 1.0 - fCos;
return fScaleDepth * exp(-0.00287 + x*(0.459 + x*(3.83 + x*(-6.80 + x*5.25))));
}
void main(void)
{
// Get the ray from the camera to the vertex and its length (which is the far point of the ray passing through the atmosphere)
vec3 v3Pos = posAttr;
vec3 vertexWorld = posAttr;
vec3 v3Ray = v3Pos - v3CameraPos;
float fFar = length(v3Ray);
v3Ray /= fFar;
// Calculate the closest intersection of the ray with the outer atmosphere (which is the near point of the ray passing through the atmosphere)
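// With |v3Ray| == 1, solving |v3CameraPos + t*v3Ray|^2 == fOuterRadius2 gives
// t^2 + B*t + C == 0, so the near root is t = (-B - sqrt(B*B - 4.0*C)) / 2.0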
float B = 2.0 * dot(v3CameraPos, v3Ray);
float C = fCameraHeight2 - fOuterRadius2;
float fDet = max(0.0, B*B - 4.0 * C);
float fNear = 0.5 * (-B - sqrt(fDet));
// Calculate the ray's starting position, then calculate its scattering offset
vec3 v3Start = v3CameraPos + v3Ray*fNear;
fFar -= fNear;
float fStartAngle = dot(v3Ray, v3Start) / fOuterRadius;
float fStartDepth = exp(-1.0 / fScaleDepth);
float fStartOffset = fStartDepth*scale(fStartAngle);
// Initialize the scattering loop variables
float fSampleLength = fFar / fSamples;
float fScaledLength = fSampleLength * fScale;
vec3 v3SampleRay = v3Ray * fSampleLength;
vec3 v3SamplePoint = v3Start + v3SampleRay * 0.5;
// Now loop through the sample rays
vec3 v3FrontColor = vec3(0.0, 0.0, 0.0);
for(int i=0; i<nSamples; i++)
{
float fHeight = length(v3SamplePoint);
float fDepth = exp(fScaleOverScaleDepth * (fInnerRadius - fHeight));
float fLightAngle = dot(v3LightPos, v3SamplePoint) / fHeight;
float fCameraAngle = dot(v3Ray, v3SamplePoint) / fHeight;
float fScatter = (fStartOffset + fDepth*(scale(fLightAngle) - scale(fCameraAngle)));
vec3 v3Attenuate = exp(-fScatter * (v3InvWavelength * fKr4PI + fKm4PI));
v3FrontColor += v3Attenuate * (fDepth * fScaledLength);
v3SamplePoint += v3SampleRay;
}
// Finally, scale the Mie and Rayleigh colors and set up the varying variables for the pixel shader
colatten = v3FrontColor * fKmESun;
col = v3FrontColor * (v3InvWavelength*fKrESun);
v3Direction = v3CameraPos - v3Pos;
gl_Position = matrix * modelMatrix * vec4(posAttr,1);
}
// frag
#ifdef GL_ES
precision highp float;
precision mediump int;
#endif
varying vec3 col;
varying vec3 colatten;
varying vec3 v3Direction;
varying vec3 vertexWorld;
uniform vec3 v3LightPos;
uniform float g;
uniform float g2;
uniform float fExposure;
void main (void)
{
//float fCos = dot(normalize(lPos), normalize(v3Direction));
float fCos = dot(v3LightPos, v3Direction) / length(v3Direction);
float fRayleighPhase = 0.75 * (1.0 + fCos*fCos);
float fMiePhase = 1.5 * ((1.0 - g2) / (2.0 + g2)) * (1.0 + fCos*fCos) / pow(1.0 + g2 - 2.0*g*fCos, 1.5);
//vec3 result = clamp(col + fMiePhase * colatten, vec3(0,0,0), vec3(1,1,1));
//gl_FragColor = vec4(result, result.b);
gl_FragColor.rgb = 1.0 - exp(-fExposure * (fRayleighPhase * col + fMiePhase * colatten));
//gl_FragColor.a = 1.0;
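// Reuse the blue channel as the fragment's opacity, so the atmosphere is opaque where scattering is bright and fades out where it is dim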
gl_FragColor.a = gl_FragColor.b;
}
As I said, my results aren't great. The first image is what I get when rendering to the off-screen framebuffer, and the second is what I get when rendering directly to the screen. Any ideas on how to reconcile the two?
The depth render buffer is not attached to the framebuffer. The second parameter of glFramebufferRenderbuffer has to be an attachment point. GL_DEPTH_STENCIL_OES is not a valid value for an attachment point, so
glFramebufferRenderbuffer(GL_FRAMEBUFFER,
                          GL_DEPTH_STENCIL_OES,
                          GL_RENDERBUFFER,
                          m_rbo);
generates a GL_INVALID_ENUM error, which can be retrieved with glGetError.
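A quick check right after the call makes the failure visible (a minimal sketch):
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_OES, GL_RENDERBUFFER, m_rbo);
GLenum err = glGetError(); // returns GL_INVALID_ENUM (0x0500) here
// the attachment silently fails, so the FBO ends up with no depth/stencil buffer at all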
The enumerator constant that specifies the depth and stencil buffer attachment is GL_DEPTH_STENCIL_ATTACHMENT:
glFramebufferRenderbuffer(GL_FRAMEBUFFER,
                          GL_DEPTH_STENCIL_ATTACHMENT,
                          GL_RENDERBUFFER,
                          m_rbo);
Note that even though the depth/stencil buffer was never attached, the framebuffer was still complete; it simply had no depth and stencil buffer.
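You can confirm the state of the FBO with glCheckFramebufferStatus while it is bound (sketch):
glBindFramebuffer(GL_FRAMEBUFFER, m_fbo);
GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (status != GL_FRAMEBUFFER_COMPLETE)
{
    // e.g. GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT or GL_FRAMEBUFFER_UNSUPPORTED
}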
Alternatively, you can use a depth-buffer-only attachment: create a depth render buffer (GL_DEPTH_COMPONENT) and attach it with the attachment type GL_DEPTH_ATTACHMENT.
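A minimal sketch of that variant, reusing the names from the question (in OpenGL ES 2.0 the renderbuffer format guaranteed for a depth-only buffer is the sized GL_DEPTH_COMPONENT16):
glGenRenderbuffers(1, &m_rbo);
glBindRenderbuffer(GL_RENDERBUFFER, m_rbo);
glRenderbufferStorage(GL_RENDERBUFFER,
                      GL_DEPTH_COMPONENT16, // depth only, no stencil bits
                      m_navigation->renderContext()->getWidth(),
                      m_navigation->renderContext()->getHeight());
glFramebufferRenderbuffer(GL_FRAMEBUFFER,
                          GL_DEPTH_ATTACHMENT, // depth attachment point
                          GL_RENDERBUFFER,
                          m_rbo);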
The problem you are seeing arises because the texture attached to the color plane of the framebuffer has no alpha channel. The format GL_RGB8_OES provides three color channels (RGB) but no alpha channel.
glTexImage2D(GL_TEXTURE_2D,
             0,
             GL_RGB8_OES,
             m_navigation->renderContext()->getWidth(),
             m_navigation->renderContext()->getHeight(),
             0,
             GL_RGB8_OES,
             GL_UNSIGNED_BYTE,
             NULL);
You have to use the format and internal format GL_RGBA8_OES instead of GL_RGB8_OES; it is likewise provided by the OES_required_internalformat extension. See also __gles2_gl2ext_h_:
glTexImage2D(GL_TEXTURE_2D,
             0,
             GL_RGBA8_OES,
             m_navigation->renderContext()->getWidth(),
             m_navigation->renderContext()->getHeight(),
             0,
             GL_RGBA8_OES,
             GL_UNSIGNED_BYTE,
             NULL);