事先需要先编译好Kinect和PCL的库
先用Kinect获取视野里cameraSpace的三维坐标点和colorSpace的rgb图像。因为这两组数据都是从depthFrame映射(map)过去的,所以可以看做是一一对应的;把三维坐标和颜色结合,就能得到点云的pointxyzrgb数据了。
CMakeLists.txt文件如下:
# cmake needs this line
cmake_minimum_required(VERSION 3.1)
# Enable C++11 (required by the PCL/OpenCV code in this project)
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED TRUE)
# Define project name
project(robotCalibrate)
# Kinect for Windows SDK v2 paths — adjust if the SDK is installed elsewhere.
set(KINECT_INC "C:\\Program Files\\Microsoft SDKs\\Kinect\\v2.0_1409\\inc")
set(KINECT_LIB "C:\\Program Files\\Microsoft SDKs\\Kinect\\v2.0_1409\\Lib\\x64")
find_package(OpenCV REQUIRED)
find_package(PCL REQUIRED)
# Fix: OpenCV headers were missing from the include path (only PCL and Kinect
# were listed), which only builds when OpenCV headers happen to be installed
# in a system-default location.
include_directories(${PCL_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS} ${KINECT_INC})
message("${PCL_LIBRARIES}")
link_directories(${PCL_LIBRARY_DIRS} ${KINECT_LIB})
add_definitions(${PCL_DEFINITIONS})
add_executable(robotCalibrate main.cpp lib.cpp definition.h KinectPointcloud.cpp)
target_link_libraries(robotCalibrate ${PCL_LIBRARIES} ${OpenCV_LIBS} kinect20.lib)
definition.h文件里如下
#pragma once
// definition.h — shared includes, type aliases and frame-size constants for
// the Kinect v2 point-cloud demo.
// Fixes: added an include guard (#pragma once), removed duplicate OpenCV
// includes (core/highgui/imgproc were each listed twice), and parenthesized
// the value macros so they expand safely inside larger expressions.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <pcl/io/pcd_io.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <kinect.h>
// NOTE(review): `using namespace std;` in a header pollutes every includer;
// kept for backward compatibility with the existing .cpp files.
using namespace std;
typedef pcl::PointXYZRGB PointRGB;
typedef pcl::PointCloud<pcl::PointXYZ> PointCloudXYZ;
typedef pcl::PointCloud<pcl::PointXYZRGB> PointCloudRGB;
// Kinect v2 stream dimensions: depth 512x424, color 1920x1080.
#define depthWidth 512
#define depthHeight 424
#define rgbWidth 1920
#define rgbHeight 1080
#define depthSize (424 * 512)
#define rgbSize (1920 * 1080)
// Wraps a Kinect v2 sensor: grabs one synchronized color+depth frame pair and
// fuses the depth-to-camera-space and depth-to-color-space mappings into a
// colored PCL point cloud (pcl::PointXYZRGB).
class KinectPointcloud
{
public:
// Releases a COM interface pointer and nulls it out; no-op when already null.
template<class Interface>
inline void SafeRelease(Interface *& pInterfaceToRelease);
// Opens the default Kinect sensor; blocks (retries) until the sensor opens.
KinectPointcloud();
// Blocks until one valid frame pair has been captured and assembled into
// _cloud, _rgbImg and _depthImg.
void getPointcloudData();
// Returns the most recently assembled cloud (shared pointer, not a deep copy).
PointCloudRGB::Ptr getPointcloud();
// NOTE(review): raw owning new[] buffers exposed as public data members —
// one slot per depth pixel (depthSize = 512*424).
ColorSpacePoint* colorSpacePts = new ColorSpacePoint[depthSize]; // Maps depth pixels to rgb pixels
CameraSpacePoint* cameraSpacePts = new CameraSpacePoint[depthSize]; // Maps depth pixels to 3d coordinates
// Accessors return shallow cv::Mat headers sharing the internal buffers.
cv::Mat getRGBImg();
cv::Mat getDepthImg();
~KinectPointcloud();
private:
IKinectSensor* _sensor = nullptr;
// Reserved for a save path; not used in the visible code.
std::string _filepath;
PointCloudRGB::Ptr _cloud = PointCloudRGB::Ptr(new PointCloudRGB);
cv::Mat _rgbImg;   // BGRA color frame, 1920x1080 (CV_8UC4)
cv::Mat _depthImg; // raw depth frame, 512x424 (CV_16UC1)
// Reserved for body tracking; not used in the visible code.
PointRGB _neckPoint;
};
// NOTE(review): free-function forward declaration of SafeRelease; it is never
// defined or called in the visible sources — KinectPointcloud.cpp defines the
// member version instead. Kept to preserve the original interface.
template<class Interface>
inline void SafeRelease(Interface *& pInterfaceToRelease);
KinectPointcloud.cpp文件如下,里面有很多细节,需要认真理解。
#include "definition.h"
// 本程序使用Kinect v2传感器采集并保存彩色图与深度图,彩色图分辨率为1920*1080,
// 深度图分辨率为512*424,并使用Kinect SDK得到深度图映射的rgb图像与三维座标,处理得到点云数据并保存。
// Acquires and opens the default Kinect v2 sensor, retrying until both steps
// report S_OK, then allocates the color (BGRA) and depth (16-bit) buffers.
KinectPointcloud::KinectPointcloud()
{
for (;;)
{
HRESULT status = GetDefaultKinectSensor(&_sensor);
if (status == S_OK)
status = _sensor->Open();
if (status == S_OK)
break; // sensor found and opened
}
_rgbImg = cv::Mat(rgbHeight, rgbWidth, CV_8UC4);       // 1920x1080 BGRA
_depthImg = cv::Mat(depthHeight, depthWidth, CV_16UC1); // 512x424 depth
std::cout << "Kinect sensor ready" << std::endl;
std::cout << "KinectPointcloud class is initialized" << std::endl;
}
// Frees the mapping buffers and closes the sensor.
// Fix: colorSpacePts/cameraSpacePts are allocated with new[] in the class
// definition, so releasing them with scalar delete was undefined behavior —
// delete[] is required.
KinectPointcloud::~KinectPointcloud()
{
std::cout << "KinectPointcloud class is deconstructed." << std::endl;
delete[] cameraSpacePts;
delete[] colorSpacePts;
if (_sensor != nullptr) // guard against a sensor that was never acquired
_sensor->Close();
}
// Safely releases a COM interface: calls Release() once and nulls the
// caller's pointer so a double release is impossible. No-op on null.
// Fix: use nullptr instead of the NULL macro (modern C++ idiom).
template<class Interface>
inline void KinectPointcloud::SafeRelease(Interface *& pInterfaceToRelease)
{
if (pInterfaceToRelease != nullptr)
{
pInterfaceToRelease->Release();
pInterfaceToRelease = nullptr;
}
}
// Accessor: hands back a shared pointer to the internal cloud (no deep copy).
PointCloudRGB::Ptr KinectPointcloud::getPointcloud()
{
return this->_cloud;
}
// Accessor: returns a shallow cv::Mat header; pixel data is shared with
// this object's internal color buffer.
cv::Mat KinectPointcloud::getRGBImg()
{
return this->_rgbImg;
}
// Accessor: returns a shallow cv::Mat header; pixel data is shared with
// this object's internal depth buffer.
cv::Mat KinectPointcloud::getDepthImg()
{
return this->_depthImg;
}
void KinectPointcloud::getPointcloudData()
{
std::cout << "Start getting pointcloud data..."<< std::endl;
ICoordinateMapper* mapper = nullptr;
HRESULT hr;
IMultiSourceFrameReader* multiFrameReader = nullptr;
IMultiSourceFrame* multiFrame = nullptr;
IColorFrameReference* colorFrameReference = nullptr;
IDepthFrameReference* depthFrameReference = nullptr;
IColorFrame* colorFrame = nullptr;
IDepthFrame* depthFrame = nullptr;
IBodyFrameReference* bodyFrameReference = nullptr;
IBodyFrame* bodyFrame = nullptr;
IBodyFrameSource * bodySource = nullptr;
IBodyFrameReader * bodyReader = nullptr;
const int bodyCount = 6;
unsigned int sz;
unsigned short* buf;
int i;
IBody** myBodyArr = new IBody *[bodyCount]; //为存身体数据的数组做准备
//ColorSpacePoint* colorSpacePts = new ColorSpacePoint[depthSize]; // Maps depth pixels to rgb pixels
//CameraSpacePoint* cameraSpacePts = new CameraSpacePoint[depthSize]; // Maps depth pixels to 3d coordinates
float(*cameraPointZ)[rgbHeight] = new float[rgbWidth][rgbHeight];
int(*pointID)[rgbHeight] = new int[rgbWidth][rgbHeight];
hr = _sensor->get_CoordinateMapper(&mapper);
if (SUCCEEDED(hr))
{
hr = _sensor->OpenMultiSourceFrameReader(
FrameSourceTypes::FrameSourceTypes_Color |
FrameSourceTypes::FrameSourceTypes_Depth,
&multiFrameReader);
}
while (1)
{
while (true)
{
hr = multiFrameReader->AcquireLatestFrame(&multiFrame);
if (FAILED(hr)) {
continue;
}
else
{
if (SUCCEEDED(hr))
{
hr = multiFrame->get_DepthFrameReference(&depthFrameReference);
//cout << hr << ", " << "hr = multiFrame->get_DepthFrameReference(&depthFrameReference);" << endl;
}
if (SUCCEEDED(hr))
{
hr = depthFrameReference->AcquireFrame(&depthFrame);
//cout << hr << ", " << "hr = depthFrameReference->AcquireFrame(&depthFrame);" << endl;
}
if (SUCCEEDED(hr))
{
hr = depthFrame->CopyFrameDataToArray(depthHeight * depthWidth, reinterpret_cast<UINT16*>(_depthImg.data));
//cout << hr << ", " << "hr = depthFrame->CopyFrameDataToArray(424 * 512, reinterpret_cast<UINT16*>(depthImg.data));" << endl;
}
if (SUCCEEDED(hr))
{
hr = multiFrame->get_ColorFrameReference(&colorFrameReference);
//cout << hr << ", " << "hr = multiFrame->get_ColorFrameReference(&colorFrameReference);" << endl;
}
if (SUCCEEDED(hr))
{
hr = colorFrameReference->AcquireFrame(&colorFrame);
//cout << hr << ", " << "colorFrameReference->AcquireFrame(&colorFrame);" << endl;
}
if (SUCCEEDED(hr))
{
/*hr = colorFrame->CopyConvertedFrameDataToArray(1920 * 1080 * 4, reinterpret_cast<BYTE*>(rgbImg.data), ColorImageFormat::ColorImageFormat_Bgra);*/
hr = colorFrame->CopyConvertedFrameDataToArray(rgbWidth * rgbHeight * 4, _rgbImg.data, ColorImageFormat_Bgra);
//cout << hr << ", " << "hr = colorFrame->CopyConvertedFrameDataToArray(1920 * 1080 * 4, rgbImg.data, ColorImageFormat_Bgra);" << endl;
}
if (SUCCEEDED(hr))
{
hr = depthFrame->AccessUnderlyingBuffer(&sz, &buf);
//cout << hr << ", " << "depthFrame->AccessUnderlyingBuffer(&sz, &buf);" << endl;
}
bool mapdata = false;
//std::cout <<"here"<< std::endl;
if (SUCCEEDED(hr))
{
//std::cout <<"here"<< std::endl;
if (mapper->MapDepthFrameToCameraSpace(depthSize, buf, depthSize, cameraSpacePts) == S_OK &&
mapper->MapDepthFrameToColorSpace(depthSize, buf, depthSize, colorSpacePts) == S_OK)
{
//cout << hr << ", " << "mapper->MapDepthFrameToCameraSpace(depthSize, buf, depthSize, cameraSpacePts)" << endl;
mapdata= true;
}
}
//释放所有的frame
SafeRelease(multiFrame);
SafeRelease(colorFrame);
SafeRelease(bodyFrame);
SafeRelease(depthFrame);
SafeRelease(colorFrameReference);
SafeRelease(depthFrameReference);
SafeRelease(bodyFrameReference);
if (mapdata)
{
break;
}
}
}
std::cout << "Kinect data ready"<<std::endl;
std::memset(cameraPointZ, 0, rgbSize * sizeof(float));//注意这里的typesize
int num_sub(0), num_discard(0);
_cloud->clear();
for (i = 0; i < depthSize; i++)
{
ColorSpacePoint colorP = colorSpacePts[i];
CameraSpacePoint cameraP = cameraSpacePts[i];
PointRGB point;
if (colorP.X > 0 && colorP.X < rgbWidth && colorP.Y>0 && colorP.Y < rgbHeight)
{
if (cameraP.Z == INFINITY)
std::cout << i << std::endl;
//cout << cameraP.X << cameraP.Y << cameraP.Z << endl;
int x = (int)colorP.X / 4;
int y = (int)colorP.Y / 4;
if (cameraPointZ[x][y] == 0 || fabs(cameraP.Z - cameraPointZ[x][y]) < 0.05)
{
cameraPointZ[x][y] = cameraP.Z;
point.x = cameraP.X;
point.y = cameraP.Y;
point.z = cameraP.Z;
pointID[x][y] = _cloud->points.size();
//cout << "size:"<< cloud->size() << endl;
cv::Vec4b bgra = _rgbImg.at<cv::Vec4b>(colorP.Y, colorP.X);
point.b = bgra[0];
point.g = bgra[1];
point.r = bgra[2];
_cloud->push_back(point);
}
else
{
//cout << i << ": " << cameraP.Z << ", " << cameraPointZ[x][y] << endl;
if (cameraP.Z < cameraPointZ[x][y])
{
num_sub++;
cameraPointZ[x][y] = cameraP.Z;
int id = pointID[x][y];
/*cout << id << " " << cloud->at(id).z << endl;*/
_cloud->at(id).x = cameraP.X;
_cloud->at(id).y = cameraP.Y;
_cloud->at(id).z = cameraP.Z;
cv::Vec4b bgra = _rgbImg.at<cv::Vec4b>(colorP.Y, colorP.X);
_cloud->at(id).b = bgra[0];
_cloud->at(id).g = bgra[1];
_cloud->at(id).r = bgra[2];
}
else
num_discard++;
}
}
}
_cloud->height = 1;
_cloud->width = _cloud->points.size();
/*cout << "point cloud size = " << cloud->points.size() << endl;
cout << "Num substituted: " << num_sub << ", " << "num discarded: " << num_discard << endl;*/
_cloud->is_dense = false;
if (_cloud->size() == 0)
continue;
else
{
std::cout <<"cloud data has been collected successfully"<< std::endl;
break;
}
}
//ColorSpacePoint* colorSpacePts = new ColorSpacePoint[depthSize]; // Maps depth pixels to rgb pixels
//CameraSpacePoint* cameraSpacePts = new CameraSpacePoint[depthSize]; // Maps depth pixels to 3d coordinates
//delete[] colorSpacePts;
//delete[] cameraSpacePts;
delete[] cameraPointZ;
delete[] pointID;
delete[] myBodyArr;
}
main.cpp如下
#include <iostream>
#include "definition.h"
// 本程序使用Kinect v2传感器采集并保存彩色图与深度图,彩色图分辨率为1920*1080,
// 深度图分辨率为512*424,并使用Kinect SDK得到该帧点云数据并保存。
// 使用者可以根据Kinect相机内参,从得到的深度图与彩色图中自己生成点云数据。
// 主要涉及Kinect传感器的打开、采集与深度图彩色图映射。
int main()
{
KinectPointcloud kinectHandler=KinectPointcloud();
//缩小图像大小方便显示
cv::Mat rgbImg;
PointCloudRGB::Ptr cloud(new PointCloudRGB);
pcl::PCDWriter writer;
cv::Mat depthImg;
cv::Mat resized_rgbImg;
cv::Mat tmp;
//PointCloudRGB::Ptr cloud(new PointCloudRGB);
while (1)
{
kinectHandler.getPointcloudData();
rgbImg = kinectHandler.getRGBImg();
depthImg = kinectHandler.getDepthImg();
cloud = kinectHandler.getPointcloud();
depthImg.convertTo(tmp, CV_8U, -255.0f / 8000.0f, 255.0f);
cv::equalizeHist(tmp, tmp);//均衡化,为了提高显示效果
cv::imshow("depth", tmp);
//缩小图像大小方便显示
cv::resize(rgbImg, resized_rgbImg, cv::Size(rgbWidth / 2, rgbHeight / 2));
cv::imshow("rgb", resized_rgbImg);
if (cv::waitKey(30) >= 0)
{
break;
}
}
cv::destroyAllWindows();
return 0;
}
然后就可以愉快地采集点云数据,看到Kinect采集的图像了~
在python里读取pcd文件,参考:https://blog.csdn.net/qq_35565669/article/details/106687208