Coordinate Calibration Between the Kinect Depth Image and Color Image

The Kinect's depth stream and color stream differ in both resolution and field of view, so their pixels cannot be matched up directly; fusing depth with color therefore takes some extra work.

Reading the introduction to the Kinect SDK on MSDN, I found an ICoordinateMapper interface whose member function MapDepthFrameToColorSpace is described as mapping depth-image coordinates into color space, so I wanted to give it a try. When I jumped to its definition, however, I found that the member function is pure virtual, so my excitement was short-lived. After some thought I could not come up with a better way to use it, so I decided to do the calibration by hand.
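(For reference, MSDN obtains an ICoordinateMapper instance from the sensor itself via IKinectSensor::get_CoordinateMapper rather than constructing one. A rough sketch of calling the SDK-side mapping that way might look like the following; it is not the approach used in this post, error handling is omitted, and pDepthBuffer is a placeholder for the current 512x424 depth frame, e.g. obtained from IDepthFrame::AccessUnderlyingBuffer.)

// Sketch only: map a whole depth frame into color space with the SDK mapper.
// m_pKinectSensor is assumed to be an opened IKinectSensor*.
ICoordinateMapper* pCoordinateMapper = NULL;
HRESULT hr = m_pKinectSensor->get_CoordinateMapper(&pCoordinateMapper);
if (SUCCEEDED(hr))
{
	std::vector<ColorSpacePoint> colorPoints(512 * 424);
	hr = pCoordinateMapper->MapDepthFrameToColorSpace(
		512 * 424, pDepthBuffer,                         // depth frame data
		(UINT)colorPoints.size(), colorPoints.data());   // one ColorSpacePoint per depth pixel
	// On success, colorPoints[i].X / .Y give the color-image position of depth
	// pixel i (negative infinity where no mapping exists).
}
if (pCoordinateMapper) pCoordinateMapper->Release();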

My first step was to use a box with regular edges to measure the ratio between the length of the same edge in the depth image and in the color image. To make the measurement more accurate, I captured screenshots of the depth image and the color image at the same instant, displayed them in MATLAB to read off pixel coordinates, and measured the same edge of the box at the same position in both images. Averaging several such measurements gives the depth-to-color scale ratio, so OpenCV's resize function can bring the two images to a 1:1 scale. Next, comparing the coordinates of the same point in the resized depth image and in the color image gives the offset between the two, which fixes the coordinate correspondence. Finally, an OpenCV ROI crops out the region where the two images overlap and displays it in a single window.
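As a rough sketch of this manual procedure, the snippet below averages a few edge-length measurements into a scale factor, resizes the depth image with it, and then applies a hand-measured offset. The edge lengths and offset here are placeholder numbers, not my measured values; in the full program further down, the measured ratio works out to 1.437 and the offset to (33, -145).

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
	// Edge lengths of the same box edge, read off in MATLAB from simultaneous
	// screenshots; the numbers below are illustrative placeholders.
	double depthLen[] = { 100.0, 152.0,  98.0 };   // in depth-image pixels
	double colorLen[] = { 144.0, 218.0, 141.0 };   // in (half-size) color-image pixels

	double scale = 0.0;
	for (int i = 0; i < 3; ++i)
		scale += colorLen[i] / depthLen[i];
	scale /= 3.0;                                  // average depth-to-color ratio

	Mat depth(424, 512, CV_8UC4, Scalar::all(0));  // stand-in for the 8-bit depth visualization
	Mat depthResized;
	resize(depth, depthResized, Size(), scale, scale);   // bring depth to 1:1 with the color image

	// After resizing, the offset between the same reference point in the two
	// images (also measured by hand) aligns them; placeholder values:
	int dx = 33, dy = -145;
	// depthResized.at<Vec4b>(row + dx, col + dy) would then overlay color pixel
	// (row, col) inside the overlapping region, which an OpenCV ROI can crop out.
	return 0;
}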


Here is a screenshot of the result, followed by the full source code:



/*****    Measurement of height by Kinect          ******/
/*****    Visual Studio 2012 (IDE)
          OpenCV 2.4.8 (display library, vc11 binaries)
          KinectSDK-v2.0-PublicPreview1409-Setup (Kinect SDK version)
          Windows 8.1 (operating system)           ******/
/*****    shihz                                    ******/
/*****    2015-2-7                                 ******/

#include"stdafx.h"
#include "opencv2/opencv.hpp"

#define Y 160
using namespace cv;

// Safe release for interfaces
template<class Interface>
inline void SafeRelease(Interface *& pInterfaceToRelease)
{
	if (pInterfaceToRelease != NULL)
	{
		pInterfaceToRelease->Release();
		pInterfaceToRelease = NULL;
	}
}

// Kinect wrapper class
class Kinect
{  
public:
	static const int        cDepthWidth  = 512;    // depth image size
	static const int        cDepthHeight = 424;

	static const int        cColorWidth  = 1920;   // color image size
	static const int        cColorHeight = 1080;
	Mat showImageDepth;

	HRESULT					InitKinect();   // initialize the Kinect
	void					UpdateDepth();  // fetch and process the latest depth frame
	void					UpdateColor();  // fetch and process the latest color frame
	void					ProcessDepth(const UINT16* pBuffer, int nWidth, int nHeight, USHORT nMinDepth, USHORT nMaxDepth);   // handle the depth frame data
	void					ProcessColor(RGBQUAD* pBuffer, int nWidth, int nHeight);   // handle the color frame data

	Kinect();                                      // constructor
	~Kinect();                                     // destructor

private:
	
	IKinectSensor*          m_pKinectSensor;// Current Kinect
	IDepthFrameReader*      m_pDepthFrameReader;// Depth reader (an IColorFrameReader can be added in the same way when a color reader is needed)
	RGBQUAD*                m_pDepthRGBX;        
    IColorFrameReader*      m_pColorFrameReader;// Color reader
	RGBQUAD*                m_pColorRGBX;
};

int main()
{
	Kinect kinect;
	if (FAILED(kinect.InitKinect()))
	{
		return -1;
	}
	while (1)
	{
		kinect.UpdateColor();        // core loop: grab the latest frames and display them
		kinect.UpdateDepth();
		if (waitKey(1) >= 0)         // press any key to exit
		{
			break;
		}
	}

	return 0;
}

Kinect::Kinect()
{
	m_pKinectSensor = NULL;
	m_pColorFrameReader = NULL;
	m_pDepthFrameReader = NULL;
	
	m_pDepthRGBX = new RGBQUAD[cDepthWidth * cDepthHeight];// create heap storage for the depth visualization in RGBX format
	m_pColorRGBX = new RGBQUAD[cColorWidth * cColorHeight];// create heap storage for color pixel data in RGBX format
}

Kinect::~Kinect()                        // destructor
{
	if (m_pDepthRGBX)
	{
		delete [] m_pDepthRGBX;            // free the heap storage
		m_pDepthRGBX = NULL;
	}

	SafeRelease(m_pDepthFrameReader);// done with depth frame reader

	if (m_pColorRGBX)
	{
		delete [] m_pColorRGBX;
		m_pColorRGBX = NULL;
	}

	SafeRelease(m_pColorFrameReader);// done with color frame reader
	
	if (m_pKinectSensor)
	{
		m_pKinectSensor->Close();// close the Kinect Sensor
	}
	SafeRelease(m_pKinectSensor);
}

HRESULT	Kinect::InitKinect()            // initialize the Kinect and open the frame readers
{
	HRESULT hr;                           //typedef long HRESULT

	hr = GetDefaultKinectSensor(&m_pKinectSensor);      // get the default Kinect; normally only one is connected, so the default one is the only one
	if (FAILED(hr))                                     // FAILED() is true when the HRESULT is negative, false otherwise
	{
		return hr;
	}

	if (m_pKinectSensor)
	{
		// Initialize the Kinect and get the depth reader
		IDepthFrameSource* pDepthFrameSource = NULL;
        IColorFrameSource* pColorFrameSource = NULL;

		hr = m_pKinectSensor->Open();

		if (SUCCEEDED(hr))
		{
			hr = m_pKinectSensor->get_ColorFrameSource(&pColorFrameSource);
			hr = m_pKinectSensor->get_DepthFrameSource(&pDepthFrameSource);
		}

		if (SUCCEEDED(hr))
		{
			hr = pColorFrameSource->OpenReader(&m_pColorFrameReader);
			hr = pDepthFrameSource->OpenReader(&m_pDepthFrameReader);      // open the depth reader so the pointer receives frames from the depth stream
		}

		SafeRelease(pColorFrameSource);
		SafeRelease(pDepthFrameSource);                                   
	}

	if (!m_pKinectSensor || FAILED(hr))
	{
		printf("No ready Kinect found! \n");
		return E_FAIL;
	}

	return hr;
}

void Kinect::UpdateDepth()
{
	if (!m_pDepthFrameReader)
	{
		return;
	}

	IDepthFrame* pDepthFrame = NULL;

	HRESULT hr = m_pDepthFrameReader->AcquireLatestFrame(&pDepthFrame);

	if (SUCCEEDED(hr))
	{
		IFrameDescription* pFrameDescription = NULL;
		int nWidth = 0;
		int nHeight = 0;
		USHORT nDepthMinReliableDistance = 0;
		USHORT nDepthMaxDistance = 0;
		UINT nBufferSize = 0;
        UINT16 *pBuffer = NULL;

		if (SUCCEEDED(hr))
		{
			hr = pDepthFrame->get_FrameDescription(&pFrameDescription);
		}

		if (SUCCEEDED(hr))
		{
			hr = pFrameDescription->get_Width(&nWidth);
		}

		if (SUCCEEDED(hr))
		{
			hr = pFrameDescription->get_Height(&nHeight);
		}

		if (SUCCEEDED(hr))
		{
			hr = pDepthFrame->get_DepthMinReliableDistance(&nDepthMinReliableDistance);
		}

		if (SUCCEEDED(hr))
		{
			// In order to see the full range of depth (including the less reliable far field depth)
			// we are setting nDepthMaxDistance to the extreme potential depth threshold
			nDepthMaxDistance = USHRT_MAX;

			// Note:  If you wish to filter by reliable depth distance, uncomment the following line.
			// hr = pDepthFrame->get_DepthMaxReliableDistance(&nDepthMaxDistance);
		}

		if (SUCCEEDED(hr))
		{
			hr = pDepthFrame->AccessUnderlyingBuffer(&nBufferSize, &pBuffer);            
		}

		if (SUCCEEDED(hr))
		{
			ProcessDepth( pBuffer, nWidth, nHeight, nDepthMinReliableDistance, nDepthMaxDistance);
		}

		SafeRelease(pFrameDescription);
	}

	SafeRelease(pDepthFrame);
}

void Kinect::UpdateColor()
{
	if (!m_pColorFrameReader)
	{
		return;
	}

	IColorFrame* pColorFrame = NULL;

	HRESULT hr = m_pColorFrameReader->AcquireLatestFrame(&pColorFrame);

	if (SUCCEEDED(hr))
	{
		IFrameDescription* pFrameDescription = NULL;
		int nWidth = 0;
		int nHeight = 0;
		ColorImageFormat imageFormat = ColorImageFormat_None;
		UINT nBufferSize = 0;
		RGBQUAD *pBuffer = NULL;

		if (SUCCEEDED(hr))
		{
			hr = pColorFrame->get_FrameDescription(&pFrameDescription);
		}

		if (SUCCEEDED(hr))
		{
			hr = pFrameDescription->get_Width(&nWidth);
		}

		if (SUCCEEDED(hr))
		{
			hr = pFrameDescription->get_Height(&nHeight);
		}

		if (SUCCEEDED(hr))
		{
			hr = pColorFrame->get_RawColorImageFormat(&imageFormat);
		}

		if (SUCCEEDED(hr))
		{
			if (imageFormat == ColorImageFormat_Bgra)
			{
				hr = pColorFrame->AccessRawUnderlyingBuffer(&nBufferSize, reinterpret_cast<BYTE**>(&pBuffer));
			}
			else if (m_pColorRGBX)
			{
				pBuffer = m_pColorRGBX;
				nBufferSize = cColorWidth * cColorHeight * sizeof(RGBQUAD);
				hr = pColorFrame->CopyConvertedFrameDataToArray(nBufferSize, reinterpret_cast<BYTE*>(pBuffer), ColorImageFormat_Bgra);            
			}
			else
			{
				hr = E_FAIL;
			}
		}

		if (SUCCEEDED(hr))
		{
			ProcessColor(pBuffer, nWidth, nHeight);
		}

		SafeRelease(pFrameDescription);
	}

	SafeRelease(pColorFrame);
}

void Kinect::ProcessDepth(const UINT16* pBuffer, int nWidth, int nHeight, USHORT nMinDepth, USHORT nMaxDepth  )
{
	// Make sure we've received valid data
	if (m_pDepthRGBX && pBuffer && (nWidth == cDepthWidth) && (nHeight == cDepthHeight))
	{
		RGBQUAD* pRGBX = m_pDepthRGBX;

		// end pixel is start + width*height - 1
		const UINT16* pBufferEnd = pBuffer + (nWidth * nHeight);

		while (pBuffer < pBufferEnd)
		{
			USHORT depth = *pBuffer;

			// To convert to a byte, we're discarding the most-significant
			// rather than least-significant bits.
			// We're preserving detail, although the intensity will "wrap."
			// Values outside the reliable depth range are mapped to 0 (black).

			// Note: Using conditionals in this loop could degrade performance.
			// Consider using a lookup table instead when writing production code.
			BYTE intensity = static_cast<BYTE>((depth >= nMinDepth) && (depth <= nMaxDepth) ? (depth % 256) : 0);   // depth rendered as grayscale; every 256 depth units is one black-to-white cycle

			pRGBX->rgbRed   = intensity;
			pRGBX->rgbGreen = intensity;
			pRGBX->rgbBlue  = intensity;

			++pRGBX;
			++pBuffer;
		}
			
		// Draw the data with OpenCV
		Mat DepthImage(nHeight, nWidth, CV_8UC4, m_pDepthRGBX);
		Mat show = DepthImage.clone();

		// 1.437 is the depth-to-color scale ratio measured with the MATLAB procedure described above
		resize(DepthImage, show, Size(cDepthWidth*1.437, cDepthHeight*1.437));
		showImageDepth = show.clone();

		imshow("DepthImage", show);
	}
}

void Kinect::ProcessColor(RGBQUAD* pBuffer, int nWidth, int nHeight)
{ 
	// Make sure we've received valid data   
	if (pBuffer && (nWidth == cColorWidth) && (nHeight == cColorHeight))
	{
		// Draw the data with OpenCV
		Mat ColorImage(nHeight, nWidth, CV_8UC4, pBuffer);
		Mat showImage = ColorImage.clone();
		resize(ColorImage, showImage, Size(nWidth/2, nHeight/2));   // half-size color image (960 x 540)
		Rect rect(145, 0, 702, 538);     // overlapping region of the two images
		int x = 33, y = -145;            // measured row/column offset between the resized depth image and the color image

		// Blend the aligned depth pixels (60%) into the color pixels (40%) inside the overlap
		if (!showImageDepth.empty())     // the first depth frame may not have arrived yet
		{
			for (int i = 0; i < 540; i++)
				for (int j = 145; j < 960 - 114; j++)
					showImage.at<Vec4b>(i, j) = showImageDepth.at<Vec4b>(i + x, j + y) * 0.6 + showImage.at<Vec4b>(i, j) * 0.4;
		}

		Mat image_roi = showImage(rect);
		//imshow("ColorImage", showImage);
		imshow("Image_Roi", image_roi);
	}
}

