Kinect v2 mapping color coordinates to camera space
I am trying to map coordinates from color space to camera space. The code I am using is the following:
HRESULT ModelRecognizer::MapColorToCameraCoordinates(const std::vector<ColorSpacePoint>& colorsps, std::vector<CameraSpacePoint>& camerasps)
{
    // Access frame
    HRESULT hr = GetDepthFrame();
    if (SUCCEEDED(hr))
    {
        ICoordinateMapper* pMapper;
        hr = m_pKinectSensor->get_CoordinateMapper(&pMapper);
        if (SUCCEEDED(hr))
        {
            CameraSpacePoint* cameraSpacePoints = new CameraSpacePoint[cColorWidth * cColorHeight];
            hr = pMapper->MapColorFrameToCameraSpace(nDepthWidth * nDepthHeight, depthImageBuffer, cColorWidth * cColorHeight, cameraSpacePoints);
            if (SUCCEEDED(hr))
            {
                for (ColorSpacePoint colorsp : colorsps)
                {
                    long colorIndex = (long)(colorsp.Y * cColorWidth + colorsp.X);
                    CameraSpacePoint csp = cameraSpacePoints[colorIndex];
                    camerasps.push_back(csp);
                }
            }
            delete[] cameraSpacePoints;
        }
    }
    ReleaseDepthFrame();
    return hr;
}
I do not get any errors, however, the result seems to be rotated 180 degrees and offset. Does anyone have a suggestion as to what I am doing wrong? Any help is appreciated.
Just to give a more complete picture of why I need this:
I am using OpenCV to track colored tape stuck on a table in the color image. I then create walls in a 3D mesh at the positions of the tape. Additionally, I am using KinectFusion to generate a mesh of the other objects on the table. However, when I open both meshes in MeshLab, the misalignment is clearly visible. Since I assume the KinectFusion mesh is created correctly in camera space, and I create the wall mesh at the CameraSpacePoints returned by the function above, I am fairly certain the error lies in the coordinate-mapping step.
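In case it helps to see the shape of that input, here is a minimal sketch of how tape pixels found with OpenCV could be collected into the ColorSpacePoint list the function above expects. The HSV threshold values and the helper name FindTapePixels are illustrative assumptions, not my actual tracking code:

#include <Kinect.h>
#include <opencv2/opencv.hpp>
#include <vector>

// Hypothetical helper: thresholds the color frame for the tape color and
// returns the matching pixel coordinates as ColorSpacePoints.
std::vector<ColorSpacePoint> FindTapePixels(const cv::Mat& colorBgr)
{
    cv::Mat hsv, mask;
    cv::cvtColor(colorBgr, hsv, cv::COLOR_BGR2HSV);
    // Example range for blue tape; the actual bounds depend on the tape color.
    cv::inRange(hsv, cv::Scalar(100, 150, 50), cv::Scalar(130, 255, 255), mask);

    std::vector<cv::Point> pixels;
    cv::findNonZero(mask, pixels);

    std::vector<ColorSpacePoint> colorsps;
    colorsps.reserve(pixels.size());
    for (const cv::Point& p : pixels)
    {
        ColorSpacePoint csp;
        csp.X = static_cast<float>(p.x);
        csp.Y = static_cast<float>(p.y);
        colorsps.push_back(csp);
    }
    return colorsps;
}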
Pictures showing the misalignment can be found at http://imgur.com/UsrEdZb,ZseN2br#0 and http://imgur.com/UsrEdZb,ZseN2br#1
I finally figured it out: for whatever reason, the returned CameraSpacePoints were mirrored at the origin in X and Y, but not in Z. If anyone has an explanation for this, I am still interested.
The following code works now:
/// <summary>
/// Maps coordinates from ColorSpace to CameraSpace.
/// Expects that the points in ColorSpace are mirrored at X (as the Kinect returns them by default).
/// </summary>
HRESULT ModelRecognizer::MapColorToCameraCoordinates(const std::vector<ColorSpacePoint>& colorsps, std::vector<CameraSpacePoint>& camerasps)
{
    // Access frame
    HRESULT hr = GetDepthFrame();
    if (SUCCEEDED(hr))
    {
        ICoordinateMapper* pMapper;
        hr = m_pKinectSensor->get_CoordinateMapper(&pMapper);
        if (SUCCEEDED(hr))
        {
            CameraSpacePoint* cameraSpacePoints = new CameraSpacePoint[cColorWidth * cColorHeight];
            hr = pMapper->MapColorFrameToCameraSpace(nDepthWidth * nDepthHeight, depthImageBuffer, cColorWidth * cColorHeight, cameraSpacePoints);
            if (SUCCEEDED(hr))
            {
                for (ColorSpacePoint colorsp : colorsps)
                {
                    // Round to the nearest pixel instead of truncating.
                    int colorX = static_cast<int>(colorsp.X + 0.5f);
                    int colorY = static_cast<int>(colorsp.Y + 0.5f);
                    // Guard against out-of-range indices from pixels at the image edge.
                    if (colorX < 0 || colorX >= cColorWidth || colorY < 0 || colorY >= cColorHeight)
                        continue;
                    long colorIndex = (long)(colorY * cColorWidth + colorX);
                    CameraSpacePoint csp = cameraSpacePoints[colorIndex];
                    // Undo the mirroring at the origin in X and Y (but not Z).
                    camerasps.push_back(CameraSpacePoint{ -csp.X, -csp.Y, csp.Z });
                }
            }
            delete[] cameraSpacePoints;
            pMapper->Release(); // release the mapper obtained from get_CoordinateMapper
        }
    }
    ReleaseDepthFrame();
    return hr;
}
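For completeness, a sketch of how this could be called from the tracking side; recognizer, colorMat, and AddWallVertex are placeholder names for illustration, not my real code:

#include <cmath>

// Hypothetical call site: map tracked tape pixels into camera space and
// feed the valid 3D points to the mesh builder.
void BuildTapeWalls(ModelRecognizer& recognizer, const cv::Mat& colorMat)
{
    std::vector<ColorSpacePoint> tapePixels = FindTapePixels(colorMat); // see sketch above
    std::vector<CameraSpacePoint> tapePoints;
    if (FAILED(recognizer.MapColorToCameraCoordinates(tapePixels, tapePoints)))
        return;
    for (const CameraSpacePoint& p : tapePoints)
    {
        // Color pixels without depth data map to -infinity; skip those.
        if (std::isinf(p.X) || std::isinf(p.Y) || std::isinf(p.Z))
            continue;
        AddWallVertex(p); // placeholder for the mesh-building step
    }
}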