D435深度圖片對齊到彩色圖片
開發環境:
- ubuntu 16.04LTS
- Intel Realsense SDK2.0
- C++
- Opencv3.4
- CMake
Intel Realsense SDK2.0 貌似已經有了紅外攝像頭和RGB攝像頭的標定數據,本例參照example裏面的例程Align進行簡化和修改,註釋翻譯。這裏是直接用SDK的對齊了,遲點會出自己實現的對齊方案。
程序流程:
(1) 創建窗口
(2)創建數據管道以及配置信息
(3)打開數據管道,並傳入配置信息,打開的函數返回一個profile對象
(4)選擇要將深度圖對齊到什麼數據流(這裏選擇對齊到RS2_STREAM_COLOR流)
(5)定義一個align對象
(6)取新的一幀,通過align對象即可獲得對齊後的幀(再重複一遍,這個例子是將深度圖對齊到彩色圖,即彩色圖不變,深度圖發生變化)
(7)將對應的幀輸出顯示
(8)注意:這裏雖然有rs2::pipeline::wait_for_frames()等待捕獲下一幀,但是opencv的waitKey()函數不能少,否則不能顯示
全部代碼及原版英文註釋和翻譯註釋:
#include <iostream>
using namespace std;
#include <sstream>
#include <iostream>
#include <fstream>
#include <algorithm>
#include <cstring>
#include<opencv2/imgproc/imgproc.hpp>
#include<opencv2/core/core.hpp>
#include<opencv2/highgui/highgui.hpp>
using namespace cv;
#include<librealsense2/rs.hpp>
//獲取深度像素對應長度單位轉換
float get_depth_scale(rs2::device dev);
//檢查攝像頭數據管道設置是否改變
bool profile_changed(const std::vector<rs2::stream_profile>& current, const std::vector<rs2::stream_profile>& prev);
int main(int argc, char * argv[]) try
{
// Create the GUI windows used to display the streams.
const char* depth_win="depth_Image";
namedWindow(depth_win,WINDOW_AUTOSIZE);
const char* color_win="color_Image";
namedWindow(color_win,WINDOW_AUTOSIZE);
// Helper that colorizes 16-bit depth frames into an RGB visualization.
rs2::colorizer c;
// Configure and start the camera pipeline:
// 640x480 depth (Z16) and color (BGR8, which matches OpenCV's channel
// order) streams, both at 30 fps.
rs2::pipeline pipe;
rs2::config pipe_config;
pipe_config.enable_stream(RS2_STREAM_DEPTH,640,480,RS2_FORMAT_Z16,30);
pipe_config.enable_stream(RS2_STREAM_COLOR,640,480,RS2_FORMAT_BGR8,30);
// start() returns the profile the pipeline actually used to open the device.
rs2::pipeline_profile profile = pipe.start(pipe_config);
// Each depth camera may use a different depth unit; fetch the
// meters-per-depth-unit scale from the device.
float depth_scale = get_depth_scale(profile.get_device());
// Align depth frames to the color stream: the color image stays
// unchanged while the depth image is re-projected into the color
// camera's viewpoint.
rs2_stream align_to = RS2_STREAM_COLOR;
rs2::align align(align_to);
while (cvGetWindowHandle(depth_win)&&cvGetWindowHandle(color_win)) // Application still alive?
{
// Block until a new frameset is available.
rs2::frameset frameset = pipe.wait_for_frames();
// wait_for_frames() may replace the device after an error or a
// disconnection. Since rs2::align is bound to a specific stream
// layout, rebuild it (and refresh the depth scale) whenever the
// active profile changes.
if (profile_changed(pipe.get_active_profile().get_streams(), profile.get_streams()))
{
profile = pipe.get_active_profile();
align = rs2::align(align_to);
depth_scale = get_depth_scale(profile.get_device());
}
// Run the alignment, then pick out the frames we want to show.
auto processed = align.process(frameset);
rs2::frame aligned_color_frame = processed.get_color_frame();
// Colorize the aligned depth frame for display.
rs2::frame aligned_depth_frame = processed.get_depth_frame().apply_filter(c);
// Depth frame BEFORE alignment (colorized), kept for comparison.
rs2::frame before_depth_frame=frameset.get_depth_frame().apply_filter(c);
// Validate the frames BEFORE querying their dimensions: calling
// get_width()/get_height() on an invalid frame is an error.
// (The original code queried the sizes first — a latent bug.)
if (!aligned_depth_frame || !aligned_color_frame)
{
continue;
}
// Query frame dimensions.
const int depth_w=aligned_depth_frame.as<rs2::video_frame>().get_width();
const int depth_h=aligned_depth_frame.as<rs2::video_frame>().get_height();
const int color_w=aligned_color_frame.as<rs2::video_frame>().get_width();
const int color_h=aligned_color_frame.as<rs2::video_frame>().get_height();
const int b_color_w=before_depth_frame.as<rs2::video_frame>().get_width();
const int b_color_h=before_depth_frame.as<rs2::video_frame>().get_height();
// Wrap the frame buffers in cv::Mat headers (no copy; the Mats
// borrow the frame memory, which stays valid for this iteration).
Mat aligned_depth_image(Size(depth_w,depth_h),CV_8UC3,(void*)aligned_depth_frame.get_data(),Mat::AUTO_STEP);
Mat aligned_color_image(Size(color_w,color_h),CV_8UC3,(void*)aligned_color_frame.get_data(),Mat::AUTO_STEP);
Mat before_color_image(Size(b_color_w,b_color_h),CV_8UC3,(void*)before_depth_frame.get_data(),Mat::AUTO_STEP);
// Display. waitKey() is mandatory: without it OpenCV's HighGUI
// event loop never runs and nothing is rendered.
imshow(depth_win,aligned_depth_image);
imshow(color_win,aligned_color_image);
imshow("before aligned",before_color_image);
waitKey(10);
}
return EXIT_SUCCESS;
}
catch (const rs2::error & e)
{
// librealsense errors carry the failed call and its arguments.
std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n " << e.what() << std::endl;
return EXIT_FAILURE;
}
catch (const std::exception & e)
{
std::cerr << e.what() << std::endl;
return EXIT_FAILURE;
}
float get_depth_scale(rs2::device dev)
{
    // Return the depth scale (meters per depth unit) of the device's
    // depth sensor; throws if the device exposes no depth sensor.
    const auto sensors = dev.query_sensors();
    for (const auto& s : sensors)
    {
        // Only a depth sensor converts to a non-empty rs2::depth_sensor.
        if (auto depth = s.as<rs2::depth_sensor>())
        {
            return depth.get_depth_scale();
        }
    }
    throw std::runtime_error("Device does not have a depth sensor");
}
bool profile_changed(const std::vector<rs2::stream_profile>& current, const std::vector<rs2::stream_profile>& prev)
{
    // The profile is considered changed when any previously-active stream
    // (matched by unique id) is no longer present in the current list.
    for (const auto& old_sp : prev)
    {
        const bool still_present = std::any_of(std::begin(current), std::end(current),
            [&old_sp](const rs2::stream_profile& sp)
            {
                return sp.unique_id() == old_sp.unique_id();
            });
        if (!still_present)
        {
            return true;
        }
    }
    return false;
}
// NOTE(review): an exact duplicate definition of get_depth_scale() appeared
// here (the function is already defined above). A second definition of the
// same function in one translation unit is a redefinition compile error,
// so the duplicate has been removed.
CMakeLists.txt
# cmake_minimum_required must be the first command, before project().
cmake_minimum_required(VERSION 2.8)
project(align_test)

# Compiler settings: librealsense2 requires C++11.
# Append instead of overwrite so user-provided flags survive.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
# Debug build so the example can be stepped through in a debugger.
set(CMAKE_BUILD_TYPE Debug)

# Locate OpenCV and add its headers BEFORE the target is created;
# include_directories only affects targets defined after the call.
find_package(OpenCV REQUIRED)
#message(STATUS ${OpenCV_INCLUDE_DIRS})
include_directories(${OpenCV_INCLUDE_DIRS})

# Collect the sources and build the executable.
aux_source_directory(. SRC_LIST)
add_executable(${PROJECT_NAME} ${SRC_LIST})

# Link against OpenCV and librealsense2.
target_link_libraries(${PROJECT_NAME} ${OpenCV_LIBS} realsense2)
實現效果:
左上爲對齊之前的深度圖,
左下爲對齊後的深度圖