OpenGL C++ 鼠标光线拾取 glm::unProject
OpenGL C++ mouse ray picking glm::unProject
我目前正在开发 C++ 游戏引擎,我想在应用程序中构建鼠标交互。我以前通过光线拾取来完成此操作,但当时我使用了固定的鼠标位置,现在我想不用它了。我读到您可以使用 glm::unProject 函数来执行此操作,但我的方法不起作用。这个函数给出的坐标是不对的。我做错了什么?
// Question code: build the camera matrices and unproject the mouse cursor.
// NOTE(review): if this GLM build uses radians (default since 0.9.6 /
// GLM_FORCE_RADIANS), the 45.0f FOV must be glm::radians(45.0f) — TODO confirm.
rscore_projection_matrix = glm::perspective(45.0f, (float)(windowWidth)/(float)(windowHeight), 0.1f, 1000.0f);
// Orbit-style camera: eye placed 16 units from the target (rscam_x, rscam_y)
// along rscam_direction, at height rscam_z, looking back at the target, Y-up.
rscore_view_matrix = glm::lookAt(glm::vec3(lengthdir_x(16, rscam_direction)+rscam_x, rscam_z, lengthdir_y(16, rscam_direction)+rscam_y), glm::vec3(rscam_x, 0, rscam_y), glm::vec3(0,1,0));
rscore_model_matrix = glm::mat4(1.0f); // identity — no per-object transform
// Window-space input; z is the depth value in [0, 1] (0.1 = near the near plane).
// NOTE(review): unProject expects y measured from the BOTTOM of the viewport —
// if the window system reports y from the top, use windowHeight - rscore_mouse_y.
glm::vec3 screenPos = glm::vec3(rscore_mouse_x, rscore_mouse_y, 0.1f);
glm::vec4 viewport = glm::vec4(0.0f, 0.0f, windowWidth, windowHeight);
// BUG (the question's problem): the second argument must be the full modelview
// matrix, i.e. rscore_view_matrix * rscore_model_matrix. Passing only the model
// matrix leaves the camera transform out, so the returned coordinates are wrong.
glm::vec3 worldPos = glm::unProject(screenPos, rscore_model_matrix, rscore_projection_matrix, viewport);
我使用 vec3 worldPos 位置来绘制对象。
不确定这是否对您有帮助,但我是这样实现光线拾取(计算光线方向)的:
glm::vec3 CFreeCamera::CreateRay() {
// Map the cursor from window pixels into NDC, i.e. the [-1, 1] range
// (NOT [0, width] / [0, height]); y is negated below because window
// coordinates grow downwards while NDC grows upwards.
float ndcX = getMousePositionX() / (getWindowWidth() * 0.5f) - 1.0f;
float ndcY = getMousePositionY() / (getWindowHeight() * 0.5f) - 1.0f;
glm::vec4 clipPos = glm::vec4(ndcX, -ndcY, 1.0f, 1.0f);

// Rotation-only view (eye fixed at the origin): only the ray DIRECTION is
// needed, so the camera translation is intentionally left out.
glm::mat4 projection = glm::perspective(FoV, AspectRatio, Near, Far);
glm::mat4 rotationOnlyView = glm::lookAt(glm::vec3(0.0f), CameraDirection, CameraUpVector);

// Undo projection * view in a single inverse, then reduce the result to a
// unit-length world-space direction.
glm::mat4 inverseViewProj = glm::inverse(projection * rotationOnlyView);
glm::vec4 unprojected = inverseViewProj * clipPos;
return glm::normalize(glm::vec3(unprojected));
}
// Values you might be interested in:
glm::vec3 cameraPosition;                           // camera position, supplied by you
glm::vec3 rayDirection = CFreeCamera::CreateRay();  // unit direction from CreateRay()
// FIX: was declared "rayStartPositon" (typo) but read as "rayStartPosition"
// on the next line, which does not compile — spelling unified here.
glm::vec3 rayStartPosition = cameraPosition;        // the ray starts at the camera
glm::vec3 rayEndPosition = rayStartPosition + rayDirection * someDistance; // point someDistance along the ray
解释:
当你将顶点的位置与视图和投影矩阵相乘时,你得到的是裁剪空间(屏幕)位置。反过来,把该位置乘以 Projection × View 矩阵的逆矩阵,就得到世界空间的位置。
虽然计算逆矩阵很昂贵,但我不确定 glm::unProject 是如何工作的,它可能做同样的事情。
这只会给你光线的世界导向方向(你应该已经有了相机的位置)。此代码不对对象执行 'collision'。
相机 class 的其余代码是 here。
可以找到更多信息 - 例如 - here.
您可以在下面看到 gluUnProject 的工作原理。这突出了您忘记使用视图矩阵而只使用模型矩阵这一事实。
// Reference implementation behind gluUnProject (from the OpenGL.org wiki):
// maps a window-space point (winx, winy, winz) back to object space by
// inverting projection * modelview. Returns 1 on success, 0 if the combined
// matrix is singular or the unprojected w is zero.
int glhUnProjectf(float winx, float winy, float winz,
float* modelview, float* projection, int* viewport, float* objectCoordinate)
{
// Transformation matrices
float m[16], A[16];
float in[4], out[4];
// Calculation for inverting a matrix, compute projection x modelview
// and store in A[16]
MultiplyMatrices4by4OpenGL_FLOAT(A, projection, modelview);
// Now compute the inverse of matrix A
if(glhInvertMatrixf2(A, m)==0)
return 0;
// Transformation of normalized coordinates between -1 and 1
// (window x/y via the viewport, depth winz from [0,1] to [-1,1])
in[0]=(winx-(float) viewport[0])/(float) viewport[2]*2.0-1.0;
in[1]=(winy-(float) viewport[1])/(float) viewport[3]*2.0-1.0;
in[2]=2.0* winz-1.0;
in[3]=1.0;
// Objects coordinates
MultiplyMatrixByVector4by4OpenGL_FLOAT(out, m, in);
if(out[3]==0.0)
return 0;
// Perspective divide: scale x, y, z by the reciprocal of w
out[3]=1.0/out[3];
objectCoordinate[0]=out[0]*out[3];
objectCoordinate[1]=out[1]*out[3];
objectCoordinate[2]=out[2]*out[3];
return 1;
}
代码取自here。
glm 实现此功能 (documentation):
// glm source: unProject for a [0, 1] clip-space depth range ("ZO" = zero to
// one, e.g. GLM_FORCE_DEPTH_ZERO_TO_ONE / Vulkan). Note: despite its name,
// the `model` parameter is the full MODELVIEW matrix (view * model) — passing
// only a model matrix yields view-relative, not world, coordinates.
template<typename T, typename U, qualifier Q>
GLM_FUNC_QUALIFIER vec<3, T, Q> unProjectZO(vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport)
{
// Invert the combined projection * modelview transform once.
mat<4, 4, T, Q> Inverse = inverse(proj * model);
vec<4, T, Q> tmp = vec<4, T, Q>(win, T(1));
// Window x/y -> [0, 1] via the viewport, then -> [-1, 1] NDC.
// Depth (tmp.z) is left untouched: it is already in the [0, 1] range.
tmp.x = (tmp.x - T(viewport[0])) / T(viewport[2]);
tmp.y = (tmp.y - T(viewport[1])) / T(viewport[3]);
tmp.x = tmp.x * static_cast<T>(2) - static_cast<T>(1);
tmp.y = tmp.y * static_cast<T>(2) - static_cast<T>(1);
vec<4, T, Q> obj = Inverse * tmp;
// Perspective divide back to a 3D position.
obj /= obj.w;
return vec<3, T, Q>(obj);
}
// glm source: unProject for the OpenGL-default [-1, 1] clip-space depth range
// ("NO" = negative one to one). As above, `model` must be the full modelview
// matrix (view * model) to obtain world-space coordinates.
template<typename T, typename U, qualifier Q>
GLM_FUNC_QUALIFIER vec<3, T, Q> unProjectNO(vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport)
{
mat<4, 4, T, Q> Inverse = inverse(proj * model);
vec<4, T, Q> tmp = vec<4, T, Q>(win, T(1));
tmp.x = (tmp.x - T(viewport[0])) / T(viewport[2]);
tmp.y = (tmp.y - T(viewport[1])) / T(viewport[3]);
// Whole-vector remap to [-1, 1]: also converts depth; w goes 1 -> 2*1-1 = 1,
// so the homogeneous coordinate is unaffected.
tmp = tmp * static_cast<T>(2) - static_cast<T>(1);
vec<4, T, Q> obj = Inverse * tmp;
// Perspective divide back to a 3D position.
obj /= obj.w;
return vec<3, T, Q>(obj);
}
我目前正在开发 C++ 游戏引擎,我想在应用程序中构建鼠标交互。我以前通过光线拾取来完成此操作,但当时我使用了固定的鼠标位置,现在我想不用它了。我读到您可以使用 glm::unProject 函数来执行此操作,但我的方法不起作用。这个函数给出的坐标是不对的。我做错了什么?
// Question code: build the camera matrices and unproject the mouse cursor.
// NOTE(review): if this GLM build uses radians (default since 0.9.6 /
// GLM_FORCE_RADIANS), the 45.0f FOV must be glm::radians(45.0f) — TODO confirm.
rscore_projection_matrix = glm::perspective(45.0f, (float)(windowWidth)/(float)(windowHeight), 0.1f, 1000.0f);
// Orbit-style camera: eye placed 16 units from the target (rscam_x, rscam_y)
// along rscam_direction, at height rscam_z, looking back at the target, Y-up.
rscore_view_matrix = glm::lookAt(glm::vec3(lengthdir_x(16, rscam_direction)+rscam_x, rscam_z, lengthdir_y(16, rscam_direction)+rscam_y), glm::vec3(rscam_x, 0, rscam_y), glm::vec3(0,1,0));
rscore_model_matrix = glm::mat4(1.0f); // identity — no per-object transform
// Window-space input; z is the depth value in [0, 1] (0.1 = near the near plane).
// NOTE(review): unProject expects y measured from the BOTTOM of the viewport —
// if the window system reports y from the top, use windowHeight - rscore_mouse_y.
glm::vec3 screenPos = glm::vec3(rscore_mouse_x, rscore_mouse_y, 0.1f);
glm::vec4 viewport = glm::vec4(0.0f, 0.0f, windowWidth, windowHeight);
// BUG (the question's problem): the second argument must be the full modelview
// matrix, i.e. rscore_view_matrix * rscore_model_matrix. Passing only the model
// matrix leaves the camera transform out, so the returned coordinates are wrong.
glm::vec3 worldPos = glm::unProject(screenPos, rscore_model_matrix, rscore_projection_matrix, viewport);
我使用 vec3 worldPos 位置来绘制对象。
不确定这是否对您有帮助,但我是这样实现光线拾取(计算光线方向)的:
glm::vec3 CFreeCamera::CreateRay() {
// Map the cursor from window pixels into NDC, i.e. the [-1, 1] range
// (NOT [0, width] / [0, height]); y is negated below because window
// coordinates grow downwards while NDC grows upwards.
float ndcX = getMousePositionX() / (getWindowWidth() * 0.5f) - 1.0f;
float ndcY = getMousePositionY() / (getWindowHeight() * 0.5f) - 1.0f;
glm::vec4 clipPos = glm::vec4(ndcX, -ndcY, 1.0f, 1.0f);

// Rotation-only view (eye fixed at the origin): only the ray DIRECTION is
// needed, so the camera translation is intentionally left out.
glm::mat4 projection = glm::perspective(FoV, AspectRatio, Near, Far);
glm::mat4 rotationOnlyView = glm::lookAt(glm::vec3(0.0f), CameraDirection, CameraUpVector);

// Undo projection * view in a single inverse, then reduce the result to a
// unit-length world-space direction.
glm::mat4 inverseViewProj = glm::inverse(projection * rotationOnlyView);
glm::vec4 unprojected = inverseViewProj * clipPos;
return glm::normalize(glm::vec3(unprojected));
}
// Values you might be interested in:
glm::vec3 cameraPosition;                           // camera position, supplied by you
glm::vec3 rayDirection = CFreeCamera::CreateRay();  // unit direction from CreateRay()
// FIX: was declared "rayStartPositon" (typo) but read as "rayStartPosition"
// on the next line, which does not compile — spelling unified here.
glm::vec3 rayStartPosition = cameraPosition;        // the ray starts at the camera
glm::vec3 rayEndPosition = rayStartPosition + rayDirection * someDistance; // point someDistance along the ray
解释:
当你将顶点的位置与视图和投影矩阵相乘时,你得到的是裁剪空间(屏幕)位置。反过来,把该位置乘以 Projection × View 矩阵的逆矩阵,就得到世界空间的位置。
虽然计算逆矩阵很昂贵,但我不确定 glm::unProject 是如何工作的,它可能做同样的事情。
这只会给你光线的世界导向方向(你应该已经有了相机的位置)。此代码不对对象执行 'collision'。
相机 class 的其余代码是 here。
可以找到更多信息 - 例如 - here.
您可以在下面看到 gluUnProject 的工作原理。这突出了您忘记使用视图矩阵而只使用模型矩阵这一事实。
// Reference implementation behind gluUnProject (from the OpenGL.org wiki):
// maps a window-space point (winx, winy, winz) back to object space by
// inverting projection * modelview. Returns 1 on success, 0 if the combined
// matrix is singular or the unprojected w is zero.
int glhUnProjectf(float winx, float winy, float winz,
float* modelview, float* projection, int* viewport, float* objectCoordinate)
{
// Transformation matrices
float m[16], A[16];
float in[4], out[4];
// Calculation for inverting a matrix, compute projection x modelview
// and store in A[16]
MultiplyMatrices4by4OpenGL_FLOAT(A, projection, modelview);
// Now compute the inverse of matrix A
if(glhInvertMatrixf2(A, m)==0)
return 0;
// Transformation of normalized coordinates between -1 and 1
// (window x/y via the viewport, depth winz from [0,1] to [-1,1])
in[0]=(winx-(float) viewport[0])/(float) viewport[2]*2.0-1.0;
in[1]=(winy-(float) viewport[1])/(float) viewport[3]*2.0-1.0;
in[2]=2.0* winz-1.0;
in[3]=1.0;
// Objects coordinates
MultiplyMatrixByVector4by4OpenGL_FLOAT(out, m, in);
if(out[3]==0.0)
return 0;
// Perspective divide: scale x, y, z by the reciprocal of w
out[3]=1.0/out[3];
objectCoordinate[0]=out[0]*out[3];
objectCoordinate[1]=out[1]*out[3];
objectCoordinate[2]=out[2]*out[3];
return 1;
}
代码取自here。
glm 实现此功能 (documentation):
// glm source: unProject for a [0, 1] clip-space depth range ("ZO" = zero to
// one, e.g. GLM_FORCE_DEPTH_ZERO_TO_ONE / Vulkan). Note: despite its name,
// the `model` parameter is the full MODELVIEW matrix (view * model) — passing
// only a model matrix yields view-relative, not world, coordinates.
template<typename T, typename U, qualifier Q>
GLM_FUNC_QUALIFIER vec<3, T, Q> unProjectZO(vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport)
{
// Invert the combined projection * modelview transform once.
mat<4, 4, T, Q> Inverse = inverse(proj * model);
vec<4, T, Q> tmp = vec<4, T, Q>(win, T(1));
// Window x/y -> [0, 1] via the viewport, then -> [-1, 1] NDC.
// Depth (tmp.z) is left untouched: it is already in the [0, 1] range.
tmp.x = (tmp.x - T(viewport[0])) / T(viewport[2]);
tmp.y = (tmp.y - T(viewport[1])) / T(viewport[3]);
tmp.x = tmp.x * static_cast<T>(2) - static_cast<T>(1);
tmp.y = tmp.y * static_cast<T>(2) - static_cast<T>(1);
vec<4, T, Q> obj = Inverse * tmp;
// Perspective divide back to a 3D position.
obj /= obj.w;
return vec<3, T, Q>(obj);
}
// glm source: unProject for the OpenGL-default [-1, 1] clip-space depth range
// ("NO" = negative one to one). As above, `model` must be the full modelview
// matrix (view * model) to obtain world-space coordinates.
template<typename T, typename U, qualifier Q>
GLM_FUNC_QUALIFIER vec<3, T, Q> unProjectNO(vec<3, T, Q> const& win, mat<4, 4, T, Q> const& model, mat<4, 4, T, Q> const& proj, vec<4, U, Q> const& viewport)
{
mat<4, 4, T, Q> Inverse = inverse(proj * model);
vec<4, T, Q> tmp = vec<4, T, Q>(win, T(1));
tmp.x = (tmp.x - T(viewport[0])) / T(viewport[2]);
tmp.y = (tmp.y - T(viewport[1])) / T(viewport[3]);
// Whole-vector remap to [-1, 1]: also converts depth; w goes 1 -> 2*1-1 = 1,
// so the homogeneous coordinate is unaffected.
tmp = tmp * static_cast<T>(2) - static_cast<T>(1);
vec<4, T, Q> obj = Inverse * tmp;
// Perspective divide back to a 3D position.
obj /= obj.w;
return vec<3, T, Q>(obj);
}