OSX + offscreen rendering + CoreGL + framebuffers + shaders = headache?

The context is exactly the same as in this question: Apple OSX OpenGL offline rendering with CoreGL

I wrote a very small C++/OpenGL program (it is hard to imagine anything simpler) that is supposed to draw a white triangle offscreen with the help of shaders... but it doesn't. Basically, I use a framebuffer/renderbuffer pair linked to a buffer allocated in a CoreGL context, inspired by this: http://renderingpipeline.com/2012/05/windowless-opengl-on-macos-x/. The contents of the buffer are then written to a bitmap image.

The result is strange. The picture has the correct background color, set with the glClearColor command, which makes me think the CoreGL context was initialized successfully... but unfortunately I get nothing else (no triangle).

There are no shader compilation/link problems, and nothing wrong with my framebuffer/VBO/VAO/vertex attribute bindings... yet it is as if my shaders did not exist in the CoreGL context.

Here is the function that defines and compiles my shaders:

int loadShader()
{
  const GLchar* vertexSource = 
    "#version 150\n"
    "in vec3 position;"
    "void main(void)"
    "{ gl_Position = vec4(position, 1.); }";

  const GLchar* fragmentSource =
    "#version 150\n"
    "out vec4 fragColor;"
    "void main(void)"
    "{ fragColor = vec4(1.); }";

  vertexID = glCreateShader(GL_VERTEX_SHADER);
  fragmentID = glCreateShader(GL_FRAGMENT_SHADER);

  glShaderSource(vertexID, 1, &vertexSource, NULL);
  glShaderSource(fragmentID, 1, &fragmentSource, NULL);

  glCompileShader(vertexID);
  glCompileShader(fragmentID);

  if (!checkShaderCompilation(vertexID) || !checkShaderCompilation(fragmentID))
    throw std::runtime_error("bad shader compilation");

  programID = glCreateProgram();
  if (programID == 0)
    throw std::runtime_error("unable to create shader program");

  glAttachShader(programID, vertexID);
  glAttachShader(programID, fragmentID);
  glLinkProgram(programID);

  GLint programState = 0;
  glGetProgramiv(programID, GL_LINK_STATUS, &programState);
  if (programState != GL_TRUE)
    throw std::runtime_error("bad shader link");

  glUseProgram(programID);

  return 0;
}
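
checkShaderCompilation is not shown above; a minimal sketch of what such a helper might look like (assuming it simply queries GL_COMPILE_STATUS and dumps the driver's info log; names and error handling are illustrative):

bool checkShaderCompilation(GLuint shaderID)
{
  // Query the compile status of the shader object.
  GLint status = GL_FALSE;
  glGetShaderiv(shaderID, GL_COMPILE_STATUS, &status);
  if (status == GL_TRUE)
    return true;

  // On failure, fetch and print the info log (assumes <vector> and <iostream> are included).
  GLint logLength = 0;
  glGetShaderiv(shaderID, GL_INFO_LOG_LENGTH, &logLength);
  std::vector<GLchar> log(logLength > 0 ? logLength : 1);
  glGetShaderInfoLog(shaderID, (GLsizei)log.size(), NULL, log.data());
  std::cerr << "shader compilation failed: " << log.data() << std::endl;
  return false;
}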

Here is the rendering function:

GLfloat triangleVertices[] =
  {
      0.0f , 0.5f, 0.0f,    
      0.5f, -0.5f, 0.0f,
     -0.5f, -0.5f, 0.0f
  };

void displayTriangle()
{
  glClearColor(0.3, 0.1, 0.1, 1.);
  glPointSize(40.f);

  glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

  loadShader();

  GLint fragmentGPUProcessing, vertexGPUProcessing;
  CGLGetParameter(CGLGetCurrentContext(), kCGLCPGPUFragmentProcessing, &fragmentGPUProcessing);
  CGLGetParameter(CGLGetCurrentContext(), kCGLCPGPUVertexProcessing, &vertexGPUProcessing);

  if (!fragmentGPUProcessing || !vertexGPUProcessing)
    throw std::runtime_error("vertex and fragment shaders seem not correctly bound!");

  GLuint vaoID, vboID;
  glGenVertexArrays(1, &vaoID);
  glBindVertexArray(vaoID);

  glGenBuffers(1, &vboID);
  glBindBuffer(GL_ARRAY_BUFFER, vboID);
  glBufferData(GL_ARRAY_BUFFER, sizeof(triangleVertices), triangleVertices, GL_STATIC_DRAW);

  positionID = glGetAttribLocation(programID, "position");
  if (positionID < 0)
    throw std::runtime_error("unable to find 'position' name in vertex shader!");

  glVertexAttribPointer(positionID, 3, GL_FLOAT, GL_TRUE, 0, NULL);
  glEnableVertexAttribArray(positionID);

  glDrawArrays(GL_TRIANGLES, 0, 3);

  glFinish();
}

And main(), which sets up the CoreGL context and the framebuffer object (note that the OpenGL 3.2 core profile is requested):

int main(int argc, char** argv)
{
  CGLContextObj context;

  CGLPixelFormatAttribute attributes[4] = { kCGLPFAOpenGLProfile, (CGLPixelFormatAttribute)kCGLOGLPVersion_3_2_Core,
                        kCGLPFAAccelerated,  // no software rendering
                        (CGLPixelFormatAttribute)0
  };

  CGLPixelFormatObj pix;
  GLint num;
  CGLError errorCode = CGLChoosePixelFormat(attributes, &pix, &num);
  if (errorCode != kCGLNoError)
    throw std::runtime_error("CGLChoosePixelFormat failure");

  errorCode = CGLCreateContext(pix, NULL, &context);
  if (errorCode != kCGLNoError)
    throw std::runtime_error("CGLCreateContext failure");

  CGLDestroyPixelFormat(pix);

  errorCode = CGLSetCurrentContext(context);
  if (errorCode != kCGLNoError)
    throw std::runtime_error("CGLSetCurrentContext failure");

  GLuint framebufferID, renderbufferID;
  glGenFramebuffers(1, &framebufferID);
  glBindFramebuffer(GL_FRAMEBUFFER, framebufferID);

  glGenRenderbuffers(1, &renderbufferID);
  glBindRenderbuffer(GL_RENDERBUFFER, renderbufferID);
  glRenderbufferStorage(GL_RENDERBUFFER, GL_RGB8, WIDTH, HEIGHT);

  glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, renderbufferID);

  if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
    throw std::runtime_error("framebuffer is not complete!");

  displayTriangle();

  GLubyte* buffer = new GLubyte[WIDTH * HEIGHT * 3 * sizeof(GLubyte)];
  glReadPixels(0, 0, WIDTH, HEIGHT, GL_RGB, GL_UNSIGNED_BYTE, buffer);

  writeBitmap("path/to/the/bitmap/file", buffer, WIDTH, HEIGHT);

  glDeleteRenderbuffers(1, &renderbufferID);
  glDeleteFramebuffers(1, &framebufferID);

  CGLSetCurrentContext(NULL);
  CGLDestroyContext(context);

  delete[] buffer;

  return 0;
}
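
writeBitmap is just a small helper that dumps the pixel buffer to an image file. A minimal stand-in sketch (hypothetical: it writes a binary PPM rather than a real BMP, and does not flip the bottom-up rows that glReadPixels returns) could look like this:

void writeBitmap(const char* path, const GLubyte* pixels, int width, int height)
{
  // Binary PPM (P6): text header followed by raw RGB bytes (assumes <fstream> is included).
  std::ofstream file(path, std::ios::binary);
  file << "P6\n" << width << " " << height << "\n255\n";
  file.write(reinterpret_cast<const char*>(pixels), width * height * 3);
}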

What is wrong?

I finally managed to solve the problem: I had simply forgotten to set the viewport with this command:

glViewport(0, 0, WIDTH, HEIGHT);

Now everything looks fine and my shaders are used correctly.
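
For reference, one natural place for the call in the main() above is right after the framebuffer completeness check, before displayTriangle() runs:

  if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
    throw std::runtime_error("framebuffer is not complete!");

  // With no window attached, the initial viewport does not match the render
  // target's size, so it has to be set explicitly before drawing.
  glViewport(0, 0, WIDTH, HEIGHT);

  displayTriangle();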

The code sample shown here, reduced to its simplest form, is actually part of a cross-platform offscreen rendering project. The thing is, I had already gotten it working on Linux with the help of the OSMesa library, which means that, unlike CoreGL, OSMesa does not require the viewport to be initialized explicitly to render correctly. Good to know!
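
For comparison, here is roughly what the OSMesa setup on Linux looks like (a simplified sketch, assuming a Mesa build that provides OSMesaCreateContextAttribs for core profile contexts). OSMesaMakeCurrent already receives the buffer dimensions, which is presumably why no explicit glViewport call was needed on that path:

#include <GL/osmesa.h>

int osmesaMain()
{
  // Request a 3.2 core profile context rendering into an RGBA byte buffer.
  const int attribs[] = {
    OSMESA_FORMAT, OSMESA_RGBA,
    OSMESA_PROFILE, OSMESA_CORE_PROFILE,
    OSMESA_CONTEXT_MAJOR_VERSION, 3,
    OSMESA_CONTEXT_MINOR_VERSION, 2,
    0
  };
  OSMesaContext context = OSMesaCreateContextAttribs(attribs, NULL);
  if (!context)
    throw std::runtime_error("OSMesaCreateContextAttribs failure");

  // The target buffer and its dimensions are passed directly to MakeCurrent.
  std::vector<GLubyte> buffer(WIDTH * HEIGHT * 4);
  if (!OSMesaMakeCurrent(context, buffer.data(), GL_UNSIGNED_BYTE, WIDTH, HEIGHT))
    throw std::runtime_error("OSMesaMakeCurrent failure");

  displayTriangle();
  // ... read back / write the bitmap as before, then:
  OSMesaDestroyContext(context);
  return 0;
}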