Intel RealSense 深度摄像头 D435i 的噪点和凸起

Intel RealSense Depth Camera D435i noises and mounds

英特尔实感深度摄像头 D435i。

我尝试捕捉图像并将其保存为 stl 格式。

我使用英特尔提供的这个项目来完成这个任务。

https://github.com/IntelRealSense/librealsense/releases/download/v2.29.0/Intel.RealSense.SDK.exe

解决方案中有一个名为 PointCloud 的应用程序。 我稍微修改了应用程序以获得清晰的图像。

但即使使用基本代码,结果也不是很令人满意。 我捕获了一个光滑的表面,但结果有很多小颠簸。

不知道是SDK的问题还是摄像头的问题。 我在 MeshLab 中检查结果,这是一个很棒的 3D 工具。

有什么想法吗?

结果(光滑的桌子表面):

这是我的 C++ 代码(我只添加了一些过滤器但没有过滤器我有同样的问题):

#include <librealsense2/rs.hpp> // Include RealSense Cross Platform API
#include "example.hpp"          // Include short list of convenience functions for rendering

#include <algorithm>            // std::min, std::max
#include <iostream>
#include <Windows.h>

#include <imgui.h>
#include "imgui_impl_glfw.h"


#include <stdio.h>
#include <windows.h>
#include <conio.h>
#include "tchar.h"
// Helper functions
void register_glfw_callbacks(window& app, glfw_state& app_state);

// Captures a point cloud from an Intel RealSense depth camera, renders it in
// an ImGui/OpenGL window, and exports it to a PLY file when the user clicks
// the "Capturer" button.
//
// Returns EXIT_SUCCESS on normal shutdown or after a successful export,
// EXIT_FAILURE if a RealSense or standard-library exception escapes.
int main(int argc, char * argv[]) try
{
    // Hide the console window: this is a GUI-only application.
    ::ShowWindow(::GetConsoleWindow(), SW_HIDE);

    // Create a simple OpenGL window for rendering:
    window app(1280, 720, "Capron 3D");
    ImGui_ImplGlfw_Init(app, false);
    bool capture = false;

    // Maximize our own top-level window, located by its title.
    HWND hWnd = FindWindow(NULL, _T("Capron 3D"));
    ShowWindow(hWnd, SW_MAXIMIZE);

    // Construct an object to manage view state
    glfw_state app_state;
    // Register callbacks to allow mouse manipulation of the pointcloud
    register_glfw_callbacks(app, app_state);
    app_state.yaw = 3.29;
    app_state.pitch = 0;

    // Declare pointcloud object, for calculating pointclouds and texture mappings
    rs2::pointcloud pc;
    // We want the points object to be persistent so we can display the last
    // cloud when a frame drops
    rs2::points points;

    // Declare RealSense pipeline, encapsulating the actual device and sensors,
    // and start streaming with the default recommended configuration.
    rs2::pipeline pipe;
    pipe.start();

    // Post-processing filters (see librealsense post-processing docs).
    rs2::decimation_filter dec_filter;   // declared but currently unused
    rs2::spatial_filter spat_filter;
    rs2::threshold_filter thres_filter;
    rs2::temporal_filter temp_filter;    // declared but currently unused

    const float w = static_cast<float>(app.width());
    const float h = static_cast<float>(app.height());

    while (app) // Application still alive?
    {
        static const int flags = ImGuiWindowFlags_NoCollapse
            | ImGuiWindowFlags_NoScrollbar
            | ImGuiWindowFlags_NoSavedSettings
            | ImGuiWindowFlags_NoTitleBar
            | ImGuiWindowFlags_NoResize
            | ImGuiWindowFlags_NoMove;

        ImGui_ImplGlfw_NewFrame(1);
        ImGui::SetNextWindowSize({ app.width(), app.height() });
        ImGui::Begin("app", nullptr, flags);

        // Styling for the capture button.
        ImGui::PushStyleColor(ImGuiCol_TextSelectedBg, { 1, 1, 1, 1 });
        ImGui::PushStyleColor(ImGuiCol_Button, { 36 / 255.f, 44 / 255.f, 51 / 255.f, 1 });
        ImGui::PushStyleColor(ImGuiCol_ButtonHovered, { 40 / 255.f, 170 / 255.f, 90 / 255.f, 1 });
        ImGui::PushStyleColor(ImGuiCol_ButtonActive, { 36 / 255.f, 44 / 255.f, 51 / 255.f, 1 });
        ImGui::PushStyleVar(ImGuiStyleVar_FrameRounding, 12);

        ImGui::SetCursorPos({ 10, 10 });
        if (ImGui::Button("Capturer", { 100, 50 }))
        {
            capture = true;
        }

        // Wait for the next set of frames from the camera
        auto frames = pipe.wait_for_frames();

        auto color = frames.get_color_frame();

        // For cameras that don't have an RGB sensor, map the pointcloud to
        // infrared instead of color
        if (!color)
            color = frames.get_infrared_frame();

        // Tell the pointcloud object to map to this color frame
        pc.map_to(color);

        auto depth = frames.get_depth_frame();

        // Spatial edge-preserving smoothing; ALPHA = 1 is maximum smoothing.
        spat_filter.set_option(RS2_OPTION_FILTER_SMOOTH_ALPHA, 1);
        depth = spat_filter.process(depth);

        // NOTE(review): hole filling *invents* depth values whose coordinates
        // are only approximate - higher values (valid range 0-5) produce the
        // noisy "mounds" observed on flat surfaces. Remove these two lines
        // (or lower the value) for a cleaner, but holey, result.
        spat_filter.set_option(RS2_OPTION_HOLES_FILL, 2);
        depth = spat_filter.process(depth);

        // Generate the pointcloud and texture mappings
        points = pc.calculate(depth);

        // Upload the color frame to OpenGL
        app_state.tex.upload(color);

        // NOTE(review): this threshold runs AFTER pc.calculate(), so it only
        // affects the frame handed to export_to_ply below, not the rendered
        // cloud - and MIN_DISTANCE = 0 makes it a no-op anyway. Confirm intent.
        thres_filter.set_option(RS2_OPTION_MIN_DISTANCE, 0);
        depth = thres_filter.process(depth);

        // Draw the pointcloud
        draw_pointcloud(int(w) / 2, int(h) / 2, app_state, points);

        if (capture)
        {
            // BUG FIX: the original literal "My3DFolder\new.ply" contained the
            // escape sequence \n (a newline), so the export silently targeted
            // the bogus path "My3DFolder<LF>ew.ply". Backslashes in Windows
            // paths must be escaped (or use forward slashes).
            points.export_to_ply("My3DFolder\\new.ply", depth);
            return EXIT_SUCCESS;
        }

        ImGui::PopStyleColor(4);
        ImGui::PopStyleVar();

        ImGui::End();
        ImGui::Render();
    }

    return EXIT_SUCCESS;
}
catch (const rs2::error & e)
{
    // librealsense errors carry the name and arguments of the failing call.
    std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n    " << e.what() << std::endl;
    MessageBox(0, "Erreur connexion RealSense. Veuillez vérifier votre caméra 3D.", "Capron Podologie", 0);
    return EXIT_FAILURE;
}
catch (const std::exception & e)
{
    std::cerr << e.what() << std::endl;
    return EXIT_FAILURE;
}

我找到了答案,

我正在使用 HolesFill 过滤器。

    //These lines
    spat_filter.set_option(RS2_OPTION_HOLES_FILL, 2);
    depth = spat_filter.process(depth);

并且孔填充算法是预测性的(predictive):它会凭空创建新的点,而这些点的坐标并不完全准确。spat_filter.set_option 的第二个参数取值介于 1 和 5 之间,这个参数设得越大,结果就越嘈杂。如果删除这些行,我会得到更干净的结果,但这样一来结果中就会出现很多孔洞。