You first need to have the Kinect SDK and the PCL library built and ready.
The approach: use the Kinect to grab the camera-space 3D coordinates and the color-space rgb image of the scene. Since both are mapped from the same depth frame, the two sets of points correspond one-to-one, and combining each 3D coordinate with its color gives the PointXYZRGB data for the point cloud.
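As a minimal standalone sketch of that pairing (the function name makeColoredPoint is just an illustrative placeholder): given one mapped camera-space coordinate and the bgra pixel it lands on, the PCL point is assembled like this:
#include <pcl/point_types.h>
// A minimal sketch: combine one camera-space coordinate with its mapped rgb pixel.
// X/Y/Z and b/g/r stand in for one depth pixel's mapping results.
pcl::PointXYZRGB makeColoredPoint(float X, float Y, float Z,
                                  unsigned char b, unsigned char g, unsigned char r)
{
    pcl::PointXYZRGB point;
    point.x = X; // camera-space coordinates, in meters
    point.y = Y;
    point.z = Z;
    point.b = b; // color from the rgb pixel this depth pixel maps to
    point.g = g;
    point.r = r;
    return point;
}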
The CMakeLists.txt file is as follows (KINECT_INC and KINECT_LIB point at the Kinect SDK v2.0 install location, so adjust them to match your machine):
# cmake needs this line
cmake_minimum_required(VERSION 3.1)
# Enable C++11
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED TRUE)
# Define project name
project(robotCalibrate)
set(KINECT_INC "C:\\Program Files\\Microsoft SDKs\\Kinect\\v2.0_1409\\inc")
set(KINECT_LIB "C:\\Program Files\\Microsoft SDKs\\Kinect\\v2.0_1409\\Lib\\x64")
find_package(OpenCV REQUIRED)
find_package(PCL REQUIRED)
include_directories(${PCL_INCLUDE_DIRS} ${KINECT_INC} )
message("${PCL_LIBRARIES}")
link_directories(${PCL_LIBRARY_DIRS} ${KINECT_LIB})
add_definitions(${PCL_DEFINITIONS})
add_executable(robotCalibrate main.cpp lib.cpp definition.h KinectPointcloud.cpp)
target_link_libraries(robotCalibrate ${PCL_LIBRARIES} ${OpenCV_LIBS} kinect20.lib)
The definition.h file is as follows:
#pragma once
#include <iostream>
#include <cstring>
#include <cmath>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <pcl/io/pcd_io.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <kinect.h>
using namespace std;
typedef pcl::PointXYZRGB PointRGB;
typedef pcl::PointCloud<pcl::PointXYZ> PointCloudXYZ;
typedef pcl::PointCloud<pcl::PointXYZRGB> PointCloudRGB;
#define depthWidth 512
#define depthHeight 424
#define rgbWidth 1920
#define rgbHeight 1080
#define depthSize (depthWidth * depthHeight)
#define rgbSize (rgbWidth * rgbHeight)
class KinectPointcloud
{
public:
template<class Interface>
inline void SafeRelease(Interface *& pInterfaceToRelease);
KinectPointcloud();
void getPointcloudData();
PointCloudRGB::Ptr getPointcloud();
ColorSpacePoint* colorSpacePts = new ColorSpacePoint[depthSize]; // Maps depth pixels to rgb pixels
CameraSpacePoint* cameraSpacePts = new CameraSpacePoint[depthSize]; // Maps depth pixels to 3d coordinates
cv::Mat getRGBImg();
cv::Mat getDepthImg();
~KinectPointcloud();
private:
IKinectSensor* _sensor = nullptr;
std::string _filepath;
PointCloudRGB::Ptr _cloud = PointCloudRGB::Ptr(new PointCloudRGB);
cv::Mat _rgbImg;
cv::Mat _depthImg;
PointRGB _neckPoint;
};
The KinectPointcloud.cpp file is as follows; it contains quite a few details that are worth reading carefully.
#include "definition.h"
// This program uses a Kinect v2 sensor to capture and save the color image (1920*1080)
// and the depth image (512*424), then uses the Kinect SDK to map the depth frame to rgb pixels and 3D coordinates, and assembles and saves the resulting point cloud data.
KinectPointcloud::KinectPointcloud()
{
while (1) // retry until the default Kinect sensor is found and opened
{
HRESULT hr = GetDefaultKinectSensor(&_sensor);
if (hr == S_OK)
hr = _sensor->Open();
if (hr == S_OK)
break;
}
_rgbImg = cv::Mat(rgbHeight, rgbWidth, CV_8UC4);
_depthImg = cv::Mat(depthHeight, depthWidth, CV_16UC1);
std::cout << "Kinect sensor ready"<<std::endl;
std::cout <<"KinectPointcloud class is initialized"<< std::endl;
}
KinectPointcloud::~KinectPointcloud()
{
std::cout << "KinectPointcloud class is deconstructed." << std::endl;
delete cameraSpacePts;
delete colorSpacePts;
_sensor->Close();
};
template<class Interface>
inline void KinectPointcloud::SafeRelease(Interface *& pInterfaceToRelease)
{
if (pInterfaceToRelease != NULL)
{
pInterfaceToRelease->Release();
pInterfaceToRelease = NULL;
}
}
PointCloudRGB::Ptr KinectPointcloud::getPointcloud()
{
return _cloud;
}
cv::Mat KinectPointcloud::getRGBImg()
{
return _rgbImg;
}
cv::Mat KinectPointcloud::getDepthImg()
{
return _depthImg;
}
void KinectPointcloud::getPointcloudData()
{
std::cout << "Start getting pointcloud data..."<< std::endl;
ICoordinateMapper* mapper = nullptr;
HRESULT hr;
IMultiSourceFrameReader* multiFrameReader = nullptr;
IMultiSourceFrame* multiFrame = nullptr;
IColorFrameReference* colorFrameReference = nullptr;
IDepthFrameReference* depthFrameReference = nullptr;
IColorFrame* colorFrame = nullptr;
IDepthFrame* depthFrame = nullptr;
IBodyFrameReference* bodyFrameReference = nullptr;
IBodyFrame* bodyFrame = nullptr;
IBodyFrameSource * bodySource = nullptr;
IBodyFrameReader * bodyReader = nullptr;
const int bodyCount = 6;
unsigned int sz;
unsigned short* buf;
int i;
IBody** myBodyArr = new IBody *[bodyCount]; // array prepared for body-tracking data (declared but not used in this program)
float(*cameraPointZ)[rgbHeight] = new float[rgbWidth][rgbHeight];
int(*pointID)[rgbHeight] = new int[rgbWidth][rgbHeight];
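// Note added for clarity: cameraPointZ acts as a z-buffer over a coarse rgb grid
// (only indices up to rgbWidth / 4 and rgbHeight / 4 are used, because colorP.X and
// colorP.Y are divided by 4 below), and pointID records which cloud point currently
// occupies each cell.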
hr = _sensor->get_CoordinateMapper(&mapper);
if (SUCCEEDED(hr))
{
hr = _sensor->OpenMultiSourceFrameReader(
FrameSourceTypes::FrameSourceTypes_Color |
FrameSourceTypes::FrameSourceTypes_Depth,
&multiFrameReader);
}
while (1) // keep capturing until a non-empty cloud has been assembled
{
while (true) // inner loop: retry until one frame set is acquired and mapped
{
hr = multiFrameReader->AcquireLatestFrame(&multiFrame);
if (FAILED(hr)) {
continue; // no complete multi-source frame available yet; try again
}
else
{
if (SUCCEEDED(hr))
{
hr = multiFrame->get_DepthFrameReference(&depthFrameReference);
//cout << hr << ", " << "hr = multiFrame->get_DepthFrameReference(&depthFrameReference);" << endl;
}
if (SUCCEEDED(hr))
{
hr = depthFrameReference->AcquireFrame(&depthFrame);
//cout << hr << ", " << "hr = depthFrameReference->AcquireFrame(&depthFrame);" << endl;
}
if (SUCCEEDED(hr))
{
hr = depthFrame->CopyFrameDataToArray(depthHeight * depthWidth, reinterpret_cast<UINT16*>(_depthImg.data));
//cout << hr << ", " << "hr = depthFrame->CopyFrameDataToArray(424 * 512, reinterpret_cast<UINT16*>(depthImg.data));" << endl;
}
if (SUCCEEDED(hr))
{
hr = multiFrame->get_ColorFrameReference(&colorFrameReference);
//cout << hr << ", " << "hr = multiFrame->get_ColorFrameReference(&colorFrameReference);" << endl;
}
if (SUCCEEDED(hr))
{
hr = colorFrameReference->AcquireFrame(&colorFrame);
//cout << hr << ", " << "colorFrameReference->AcquireFrame(&colorFrame);" << endl;
}
if (SUCCEEDED(hr))
{
/*hr = colorFrame->CopyConvertedFrameDataToArray(1920 * 1080 * 4, reinterpret_cast<BYTE*>(rgbImg.data), ColorImageFormat::ColorImageFormat_Bgra);*/
hr = colorFrame->CopyConvertedFrameDataToArray(rgbWidth * rgbHeight * 4, _rgbImg.data, ColorImageFormat_Bgra);
//cout << hr << ", " << "hr = colorFrame->CopyConvertedFrameDataToArray(1920 * 1080 * 4, rgbImg.data, ColorImageFormat_Bgra);" << endl;
}
if (SUCCEEDED(hr))
{
hr = depthFrame->AccessUnderlyingBuffer(&sz, &buf);
//cout << hr << ", " << "depthFrame->AccessUnderlyingBuffer(&sz, &buf);" << endl;
}
bool mapdata = false;
//std::cout <<"here"<< std::endl;
if (SUCCEEDED(hr))
{
//std::cout <<"here"<< std::endl;
if (mapper->MapDepthFrameToCameraSpace(depthSize, buf, depthSize, cameraSpacePts) == S_OK &&
mapper->MapDepthFrameToColorSpace(depthSize, buf, depthSize, colorSpacePts) == S_OK)
{
//cout << hr << ", " << "mapper->MapDepthFrameToCameraSpace(depthSize, buf, depthSize, cameraSpacePts)" << endl;
mapdata= true;
}
}
// Release all frame objects
SafeRelease(multiFrame);
SafeRelease(colorFrame);
SafeRelease(bodyFrame);
SafeRelease(depthFrame);
SafeRelease(colorFrameReference);
SafeRelease(depthFrameReference);
SafeRelease(bodyFrameReference);
if (mapdata)
{
break;
}
}
}
std::cout << "Kinect data ready"<<std::endl;
std::memset(cameraPointZ, 0, rgbSize * sizeof(float)); // mind the element size: the buffer holds rgbSize floats
int num_sub(0), num_discard(0);
_cloud->clear();
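// For each depth pixel, keep only the nearest point per coarse rgb cell:
// an empty cell (or a depth within 5 cm of the stored one) accepts the point,
// a strictly nearer point replaces the stored point in place,
// and anything farther is discarded as occluded.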
for (i = 0; i < depthSize; i++)
{
ColorSpacePoint colorP = colorSpacePts[i];
CameraSpacePoint cameraP = cameraSpacePts[i];
PointRGB point;
if (colorP.X > 0 && colorP.X < rgbWidth && colorP.Y > 0 && colorP.Y < rgbHeight)
{
if (cameraP.Z == INFINITY)
std::cout << i << std::endl; // debug: report depth pixels with no valid 3d mapping
//cout << cameraP.X << cameraP.Y << cameraP.Z << endl;
int x = (int)colorP.X / 4; // downsample rgb coordinates to a coarse grid:
int y = (int)colorP.Y / 4; // nearby points then compete for the same cell
if (cameraPointZ[x][y] == 0 || fabs(cameraP.Z - cameraPointZ[x][y]) < 0.05)
{
cameraPointZ[x][y] = cameraP.Z;
point.x = cameraP.X;
point.y = cameraP.Y;
point.z = cameraP.Z;
pointID[x][y] = _cloud->points.size();
//cout << "size:"<< cloud->size() << endl;
cv::Vec4b bgra = _rgbImg.at<cv::Vec4b>((int)colorP.Y, (int)colorP.X); // row = Y, col = X; truncate the float rgb coordinates
point.b = bgra[0];
point.g = bgra[1];
point.r = bgra[2];
_cloud->push_back(point);
}
else
{
//cout << i << ": " << cameraP.Z << ", " << cameraPointZ[x][y] << endl;
if (cameraP.Z < cameraPointZ[x][y])
{
num_sub++;
cameraPointZ[x][y] = cameraP.Z;
int id = pointID[x][y];
/*cout << id << " " << cloud->at(id).z << endl;*/
_cloud->at(id).x = cameraP.X;
_cloud->at(id).y = cameraP.Y;
_cloud->at(id).z = cameraP.Z;
cv::Vec4b bgra = _rgbImg.at<cv::Vec4b>((int)colorP.Y, (int)colorP.X);
_cloud->at(id).b = bgra[0];
_cloud->at(id).g = bgra[1];
_cloud->at(id).r = bgra[2];
}
else
num_discard++;
}
}
}
_cloud->height = 1;
_cloud->width = _cloud->points.size();
/*cout << "point cloud size = " << cloud->points.size() << endl;
cout << "Num substituted: " << num_sub << ", " << "num discarded: " << num_discard << endl;*/
_cloud->is_dense = false;
if (_cloud->size() == 0)
continue;
else
{
std::cout <<"cloud data has been collected successfully"<< std::endl;
break;
}
}
// colorSpacePts and cameraSpacePts are class members; they are released in the destructor.
delete[] cameraPointZ;
delete[] pointID;
delete[] myBodyArr;
}
main.cpp is as follows:
#include <iostream>
#include "definition.h"
// This program uses a Kinect v2 sensor to capture and save the color image (1920*1080)
// and the depth image (512*424), and uses the Kinect SDK to obtain and save the point cloud for the frame.
// Given the Kinect camera intrinsics, you could also generate the point cloud yourself from the depth and color images.
// The main topics are opening the Kinect sensor, capturing frames, and mapping between the depth and color images.
int main()
{
KinectPointcloud kinectHandler;
cv::Mat rgbImg;
PointCloudRGB::Ptr cloud(new PointCloudRGB);
pcl::PCDWriter writer;
cv::Mat depthImg;
cv::Mat resized_rgbImg;
cv::Mat tmp;
while (1)
{
kinectHandler.getPointcloudData();
rgbImg = kinectHandler.getRGBImg();
depthImg = kinectHandler.getDepthImg();
cloud = kinectHandler.getPointcloud();
depthImg.convertTo(tmp, CV_8U, -255.0f / 8000.0f, 255.0f); // map 0-8000 mm to 255-0: nearer shows brighter
cv::equalizeHist(tmp, tmp); // histogram equalization to improve the displayed contrast
cv::imshow("depth", tmp);
// shrink the image for display
cv::resize(rgbImg, resized_rgbImg, cv::Size(rgbWidth / 2, rgbHeight / 2));
cv::imshow("rgb", resized_rgbImg);
if (cv::waitKey(30) >= 0)
{
break;
}
}
cv::destroyAllWindows();
return 0;
}
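Note that the pcl::PCDWriter writer declared in main is never actually used. If you also want to save the captured cloud to disk, a minimal sketch (with cloud.pcd as an arbitrary filename) inserted before the break would be:
// A minimal sketch of saving the captured cloud; "cloud.pcd" is an arbitrary filename.
if (!cloud->empty())
    writer.write<pcl::PointXYZRGB>("cloud.pcd", *cloud, false); // false = ASCII .pcd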
After that you can happily capture point cloud data and watch the images coming off the Kinect~
For reading pcd files in Python, see: https://blog.csdn.net/qq_35565669/article/details/106687208
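If you would rather stay in C++, loading the saved file back with PCL is also straightforward; a minimal sketch, assuming the cloud was saved as cloud.pcd above:
#include <iostream>
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
int main()
{
    // A minimal sketch: load the cloud saved by the capture program.
    pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZRGB>);
    if (pcl::io::loadPCDFile<pcl::PointXYZRGB>("cloud.pcd", *cloud) == -1)
    {
        std::cerr << "Failed to load cloud.pcd" << std::endl;
        return -1;
    }
    std::cout << "Loaded " << cloud->size() << " points" << std::endl;
    return 0;
}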