How to implement camera pan like in 3dsMax?
What math is required to implement the camera pan effect used in 3ds max?
In 3ds max, the distance between the cursor and the mesh remains constant over the whole movement (mouse_down + mouse_motion + mouse_up).
My naive and failed attempts have been trying to move the camera on the XY plane by multiplying dt (frame time) by some hard-coded constant; the results are very ugly and unintuitive.
The code I've got so far is:
def glut_mouse(self, button, state, x, y):
    self.last_mouse_pos = vec2(x, y)
    self.mouse_down_pos = vec2(x, y)

def glut_motion(self, x, y):
    pos = vec2(x, y)
    move = self.last_mouse_pos - pos
    self.last_mouse_pos = pos
    self.pan(move)

def pan(self, delta):
    forward = vec3.normalize(self.target - self.eye)
    right = vec3.normalize(vec3.cross(forward, self.up))
    up = vec3.normalize(vec3.cross(forward, right))

    if delta.x:
        right = right * delta.x
    if delta.y:
        up = up * delta.y

    self.eye += (right + up)
    self.target += (right + up)
Could you explain the math behind the camera pan in 3dsmax?
Edit:
@Rabbid76's answer solved my question initially, but there is still one case where his algorithm doesn't work properly. It doesn't correctly handle the case where the panning is started from empty space (in other words, when the depth buffer holds the far value = 1.0). In 3dsmax the camera pan is handled correctly in all cases, no matter what values the depth buffer contains.
Your solution works for an orthographic projection, but not for a perspective projection. Note that in a perspective projection the projection matrix describes the mapping from 3D points in the world, as they are seen from a pinhole camera, to 2D points on the viewport.
The amount by which the eye and target positions have to be displaced depends on the depth of the object that is dragged across the viewport.
If the object is close to the eye position, a pan across the viewport results in a small displacement of the eye and target positions.
If the object is far away from the eye, the same pan across the viewport results in a large displacement of the eye and target positions.
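To see this quantitatively, the same cursor offset can be unprojected at two different depth-buffer values. A minimal sketch (assuming PyGLM and an arbitrary camera, not taken from the question) prints a much larger world-space step for the larger depth:

import glm

width, height = 800, 600
proj = glm.perspective(glm.radians(90.0), width / height, 0.1, 100.0)
view = glm.lookAt(glm.vec3(0, -10, 0), glm.vec3(0, 0, 0), glm.vec3(0, 0, 1))
vp   = glm.vec4(0, 0, width, height)

# the same 10 pixel horizontal drag, unprojected at a near and a far depth value
for depth in (0.2, 0.9):
    a = glm.unProject(glm.vec3(400, 300, depth), view, proj, vp)
    b = glm.unProject(glm.vec3(410, 300, depth), view, proj, vp)
    print(depth, glm.length(b - a))  # the larger depth gives the larger world-space displacement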
To do what you want, you have to know the size of the viewport, the view matrix and the projection matrix:
self.width  # width of the viewport
self.height # height of the viewport
self.view   # view matrix
self.proj   # projection matrix
Change the pan method so that it receives the old and the new mouse position. Note that the y axis has to be flipped (self.height-y). Get the depth of the hit point (object) with glReadPixels, using the format type GL_DEPTH_COMPONENT:
def glut_mouse(self, button, state, x, y):
    self.drag = state == GLUT_DOWN
    self.last_mouse_pos = glm.vec2(x, self.height-y)
    self.mouse_down_pos = glm.vec2(x, self.height-y)

    if self.drag:
        depth_buffer = glReadPixels(x, self.height-y, 1, 1, GL_DEPTH_COMPONENT, GL_FLOAT)
        self.last_depth = depth_buffer[0][0]
        print(self.last_depth)

def glut_motion(self, x, y):
    if not self.drag:
        return
    old_pos = self.last_mouse_pos
    new_pos = glm.vec2(x, self.__vp_size[1]-y)
    self.last_mouse_pos = new_pos
    self.pan(self.last_depth, old_pos, new_pos)

def pan(self, depth, old_pos, new_pos):
    # .....
The mouse positions, together with the depth, give a position in window space, where the z coordinate is the depth of the hit object:
wnd_from = glm.vec3(old_pos[0], old_pos[1], float(depth))
wnd_to = glm.vec3(new_pos[0], new_pos[1], float(depth))
This position can be transformed to world space with glm.unProject:
vp_rect = glm.vec4(0, 0, self.width, self.height)
world_from = glm.unProject(wnd_from, self.view, self.proj, vp_rect)
world_to = glm.unProject(wnd_to, self.view, self.proj, vp_rect)
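For reference, glm.unProject is essentially the inverse of the projection pipeline. A hand-rolled equivalent (a sketch for the default [0, 1] depth range, not the library source) looks like this:

def unproject(wnd, view, proj, vp):
    # window coordinates -> normalized device coordinates in [-1, 1]
    ndc = glm.vec3((wnd.x - vp[0]) / vp[2] * 2 - 1,
                   (wnd.y - vp[1]) / vp[3] * 2 - 1,
                   wnd.z * 2 - 1)
    # NDC -> world space via the inverse of projection * view, then the perspective divide
    p = glm.inverse(proj * view) * glm.vec4(ndc, 1)
    return glm.vec3(p) / p.w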
The displacement of the eye and target positions in world space is the vector from the old world position to the new one:
world_vec = world_to - world_from
Finally, compute the new eye and target positions and update the view matrix:
self.eye = self.eye - world_vec
self.target = self.target - world_vec
self.view = glm.lookAt(self.eye, self.target, self.up)
See also Python OpenGL 4.6, GLM navigation.
I tested the code with the following example:
Preview:
Full python code:
import os
import math
import numpy as np
import glm
from OpenGL.GLUT import *
from OpenGL.GL import *
from OpenGL.GL.shaders import *
from OpenGL.arrays import *
from ctypes import c_void_p
class MyWindow:

    __caption = 'OpenGL Window'
    __vp_size = [800, 600]
    __vp_valid = False
    __glut_wnd = None

    __glsl_vert = """
        #version 450 core

        layout (location = 0) in vec3 a_pos;
        layout (location = 1) in vec3 a_nv;
        layout (location = 2) in vec4 a_col;

        out vec3 v_pos;
        out vec3 v_nv;
        out vec4 v_color;

        uniform mat4 u_proj;
        uniform mat4 u_view;
        uniform mat4 u_model;

        void main()
        {
            mat4 model_view = u_view * u_model;
            mat3 normal = transpose(inverse(mat3(model_view)));

            vec4 view_pos = model_view * vec4(a_pos.xyz, 1.0);

            v_pos = view_pos.xyz;
            v_nv = normal * a_nv;
            v_color = a_col;
            gl_Position = u_proj * view_pos;
        }
    """

    __glsl_frag = """
        #version 450 core

        out vec4 frag_color;
        in vec3 v_pos;
        in vec3 v_nv;
        in vec4 v_color;

        void main()
        {
            vec3 N = normalize(v_nv);
            vec3 V = -normalize(v_pos);
            float ka = 0.1;
            float kd = max(0.0, dot(N, V)) * 0.9;
            frag_color = vec4(v_color.rgb * (ka + kd), v_color.a);
        }
    """

    __program = None
    __vao = None
    __vbo = None
    __no_vert = 0
    def __init__(self, w, h):

        self.__vp_size = [w, h]

        glutInit()
        glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH)
        glutInitWindowSize(self.__vp_size[0], self.__vp_size[1])
        __glut_wnd = glutCreateWindow(self.__caption)

        self.__program = compileProgram(
            compileShader(self.__glsl_vert, GL_VERTEX_SHADER),
            compileShader(self.__glsl_frag, GL_FRAGMENT_SHADER),
        )
        self.___attrib = { a : glGetAttribLocation(self.__program, a) for a in ['a_pos', 'a_nv', 'a_col'] }
        print(self.___attrib)
        self.___uniform = { u : glGetUniformLocation(self.__program, u) for u in ['u_model', 'u_view', 'u_proj'] }
        print(self.___uniform)

        v = [ -1,-1,1, 1,-1,1, 1,1,1, -1,1,1, -1,-1,-1, 1,-1,-1, 1,1,-1, -1,1,-1 ]
        c = [ 1.0, 0.0, 0.0, 1.0, 0.5, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0 ]
        n = [ 0,0,1, 1,0,0, 0,0,-1, -1,0,0, 0,1,0, 0,-1,0 ]
        e = [ 0,1,2,3, 1,5,6,2, 5,4,7,6, 4,0,3,7, 3,2,6,7, 1,0,4,5 ]

        attr_array = []
        for si in range(6):
            for vi in range(6):
                ci = [0, 1, 2, 0, 2, 3][vi]
                i = si*4 + ci
                attr_array.extend( [ v[e[i]*3], v[e[i]*3+1], v[e[i]*3+2] ] )
                attr_array.extend( [ n[si*3], n[si*3+1], n[si*3+2] ] )
                attr_array.extend( [ c[si*3], c[si*3+1], c[si*3+2], 1 ] )

        self.__no_vert = len(attr_array) // 10
        vertex_attributes = np.array(attr_array, dtype=np.float32)

        self.__vbo = glGenBuffers(1)
        glBindBuffer(GL_ARRAY_BUFFER, self.__vbo)
        glBufferData(GL_ARRAY_BUFFER, vertex_attributes, GL_STATIC_DRAW)

        self.__vao = glGenVertexArrays(1)
        glBindVertexArray(self.__vao)
        glVertexAttribPointer(0, 3, GL_FLOAT, False, 10*vertex_attributes.itemsize, None)
        glEnableVertexAttribArray(0)
        glVertexAttribPointer(1, 3, GL_FLOAT, False, 10*vertex_attributes.itemsize, c_void_p(3*vertex_attributes.itemsize))
        glEnableVertexAttribArray(1)
        glVertexAttribPointer(2, 4, GL_FLOAT, False, 10*vertex_attributes.itemsize, c_void_p(6*vertex_attributes.itemsize))
        glEnableVertexAttribArray(2)

        glEnable(GL_DEPTH_TEST)
        glUseProgram(self.__program)

        glutReshapeFunc(self.__reshape)
        glutDisplayFunc(self.__mainloop)
        glutMouseFunc(self.glut_mouse)
        glutMotionFunc(self.glut_motion)

        self.drag = False
        self.eye = glm.vec3(-3, -7, 6)
        self.target = glm.vec3(0, 0, 0)
        self.up = glm.vec3(0, 0, 1)
        self.near = 0.1
        self.far = 100.0
        aspect = self.__vp_size[0]/self.__vp_size[1]
        self.proj = glm.perspective(glm.radians(90.0), aspect, self.near, self.far)
        self.view = glm.lookAt(self.eye, self.target, self.up)
        self.model = glm.mat4(1)
    def run(self):
        self.__starttime = 0
        self.__starttime = self.elapsed_ms()
        glutMainLoop()

    def elapsed_ms(self):
        return glutGet(GLUT_ELAPSED_TIME) - self.__starttime

    def __reshape(self, w, h):
        self.__vp_valid = False

    def __mainloop(self):

        if not self.__vp_valid:
            self.width = glutGet(GLUT_WINDOW_WIDTH)
            self.height = glutGet(GLUT_WINDOW_HEIGHT)
            self.__vp_size = [self.width, self.height]
            self.__vp_valid = True
            aspect = self.width / self.height
            self.proj = glm.perspective(glm.radians(90.0), aspect, self.near, self.far)

        glUniformMatrix4fv(self.___uniform['u_proj'], 1, GL_FALSE, glm.value_ptr(self.proj))
        glUniformMatrix4fv(self.___uniform['u_view'], 1, GL_FALSE, glm.value_ptr(self.view))
        glUniformMatrix4fv(self.___uniform['u_model'], 1, GL_FALSE, glm.value_ptr(self.model))

        glClearColor(0.2, 0.3, 0.3, 1.0)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glDrawArrays(GL_TRIANGLES, 0, self.__no_vert)

        glutSwapBuffers()
        glutPostRedisplay()
    def glut_mouse(self, button, state, x, y):
        self.drag = state == GLUT_DOWN
        self.last_mouse_pos = glm.vec2(x, self.height-y)
        self.mouse_down_pos = glm.vec2(x, self.height-y)

        if self.drag:
            # read the depth of the clicked fragment from the depth buffer
            depth_buffer = glReadPixels(x, self.height-y, 1, 1, GL_DEPTH_COMPONENT, GL_FLOAT)
            self.last_depth = depth_buffer[0][0]
            print(self.last_depth)

    def glut_motion(self, x, y):
        if not self.drag:
            return
        old_pos = self.last_mouse_pos
        new_pos = glm.vec2(x, self.__vp_size[1]-y)
        self.last_mouse_pos = new_pos
        self.pan(self.last_depth, old_pos, new_pos)

    def pan(self, depth, old_pos, new_pos):
        # window space positions of the drag, at the depth of the hit object
        wnd_from = glm.vec3(old_pos[0], old_pos[1], float(depth))
        wnd_to = glm.vec3(new_pos[0], new_pos[1], float(depth))

        # un-project both window positions to world space
        vp_rect = glm.vec4(0, 0, self.width, self.height)
        world_from = glm.unProject(wnd_from, self.view, self.proj, vp_rect)
        world_to = glm.unProject(wnd_to, self.view, self.proj, vp_rect)

        # shift eye and target by the world space drag vector and update the view matrix
        world_vec = world_to - world_from
        self.eye = self.eye - world_vec
        self.target = self.target - world_vec
        self.view = glm.lookAt(self.eye, self.target, self.up)
window = MyWindow(800, 600)
window.run()
[...] but there's still one case where his algorithm won't work properly. It doesn't properly handle the case where the panning is started from empty space [...]
In this solution the depth of the object is taken from the depth buffer at the position where the mouse click occurred. If that is "empty space", a position where nothing was drawn, the depth is the maximum of the depth range (usually 1.0). That results in a very rapid pan.
A solution or workaround would be to use the depth of a representative position in the scene, for instance the origin of the world:
pt_drag = glm.vec3(0, 0, 0)
Of course this may not lead to proper results in every case. If the objects of the scene are not located around the world origin, this approach fails. I recommend computing the center of the axis aligned bounding box of the scene and using that point as the representative "depth":
box_min = ... # glm.vec3
box_max = ... # glm.vec3
pt_drag = (box_min + box_max) / 2
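If the scene is just the cube from the full example above, the bounding box could be derived from the flat vertex position list v (an illustrative assumption; the answer deliberately leaves box_min and box_max open):

xs, ys, zs = v[0::3], v[1::3], v[2::3]           # x, y, z components of the cube corners
box_min = glm.vec3(min(xs), min(ys), min(zs))
box_max = glm.vec3(max(xs), max(ys), max(zs))
pt_drag = (box_min + box_max) / 2                # center of the axis aligned bounding box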
The depth of the point can be computed by transforming it with the view and projection matrices, followed by the perspective divide:
o_clip = self.proj * self.view * glm.vec4(pt_drag, 1)
o_ndc = glm.vec3(o_clip) / o_clip.w
This can be applied in the glut_mouse function:
def glut_mouse(self, button, state, x, y):
    self.drag = state == GLUT_DOWN
    self.last_mouse_pos = glm.vec2(x, self.height-y)
    self.mouse_down_pos = glm.vec2(x, self.height-y)

    if self.drag:
        depth_buffer = glReadPixels(x, self.height-y, 1, 1, GL_DEPTH_COMPONENT, GL_FLOAT)
        self.last_depth = depth_buffer[0][0]

        # fall back to a representative depth when the click hits "empty space" (depth == 1)
        if self.last_depth == 1:
            pt_drag = glm.vec3(0, 0, 0)
            o_clip = self.proj * self.view * glm.vec4(pt_drag, 1)
            o_ndc = glm.vec3(o_clip) / o_clip.w
            if o_ndc.z > -1 and o_ndc.z < 1:
                self.last_depth = o_ndc.z * 0.5 + 0.5
Preview:
The key to a solution that feels right is finding the "proper" depth. In a perspective projection a mouse movement maps 1:1 onto an object projected on the viewport only at one well-defined depth; objects at different depths are displaced by different amounts for the same movement on the viewport. That is the "nature" of perspective.
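For a symmetric perspective frustum this relationship can be written explicitly (a derivation sketch, not part of the original answer): a drag of $\Delta x_{ndc}$, $\Delta y_{ndc}$ in normalized device coordinates moves a point at eye-space depth $z_{eye}$ by

\[
\Delta x = \Delta x_{ndc} \cdot |z_{eye}| \cdot \tan\!\left(\tfrac{fov_x}{2}\right), \qquad
\Delta y = \Delta y_{ndc} \cdot |z_{eye}| \cdot \tan\!\left(\tfrac{fov_y}{2}\right)
\]

so the displacement grows linearly with the depth of the dragged point.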
There are different possibilities for finding the "proper" depth, depending on your needs:
- Read the depth from the depth buffer at the current mouse position:
depth_buffer = glReadPixels(x, self.height-y, 1, 1, GL_DEPTH_COMPONENT, GL_FLOAT)
self.last_depth = depth_buffer[0][0]
- Take the minimum and maximum depth from the depth buffer (excluding the far-plane value, 1.0) and compute the average depth. Of course the entire depth buffer has to be inspected in this case:
d_buf = glReadPixels(0, 0, self.width, self.height, GL_DEPTH_COMPONENT, GL_FLOAT)
d_vals = [float(d_buf[i][j]) for i in range(self.width) for j in range(self.height) if d_buf[i][j] != 1]
if len(d_vals) > 0:
    self.last_depth = (min(d_vals) + max(d_vals)) / 2
- Use the origin of the world:
pt_drag = glm.vec3(0, 0, 0)
o_clip = self.proj * self.view * glm.vec4(pt_drag, 1)
o_ndc = glm.vec3(o_clip) / o_clip.w
if o_ndc.z > -1 and o_ndc.z < 1:
    self.last_depth = o_ndc.z * 0.5 + 0.5
- Compute the center of the bounding box of the scene.
- Implement a ray cast that identifies the object hit by a ray which starts at the point of view and passes through the cursor (mouse) position. When no object is hit at all, the algorithm can be improved by identifying the object "closest" to the ray; see the sketch after this list.
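As an illustration of the last point, here is a minimal sketch (an assumed helper, not part of the original answer) that builds a world-space ray through the cursor and derives a drag depth from the point on the ray closest to a representative scene point:

def depth_from_ray(self, x, y):
    # world space ray through the cursor: unproject the pixel at the near and far plane
    vp_rect = glm.vec4(0, 0, self.width, self.height)
    p_near = glm.unProject(glm.vec3(x, self.height - y, 0.0), self.view, self.proj, vp_rect)
    p_far  = glm.unProject(glm.vec3(x, self.height - y, 1.0), self.view, self.proj, vp_rect)
    ray_dir = glm.normalize(p_far - p_near)

    # point on the ray closest to a representative scene point (here: the world origin)
    scene_pt = glm.vec3(0, 0, 0)
    closest = p_near + ray_dir * glm.dot(scene_pt - p_near, ray_dir)

    # project that point back to get a window space depth usable by pan()
    o_clip = self.proj * self.view * glm.vec4(closest, 1)
    o_ndc = glm.vec3(o_clip) / o_clip.w
    return glm.clamp(o_ndc.z, -1.0, 1.0) * 0.5 + 0.5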