Calibrating a stereo camera pair with OpenCV using MATLAB calibration data (source code)

I referred to these experts' blogs and benefited a lot from them:

http://blog.csdn.net/sunanger_wang/article/details/7744025

http://www.opencv.org.cn/forum.php?mod=viewthread&tid=12963

http://blog.csdn.net/scyscyao/article/details/5562024

http://blog.csdn.net/chenyusiyuan/article/details/5967291

The last author wrote a very detailed three-part series on this topic, which is excellent.


I noticed that some of the questions under those blogs never got answered. They are just details, but as everyone knows, it is exactly these little things that can keep you stuck for a long time.

No explanation beats source code and example images, so I am posting what I have figured out over the past few days.

My setup: two Logitech C170 webcams


Heh... it is pretty basic.



MATLAB calibration:

If you have read the theory and the blogs above carefully, you should be able to compute the stereo results with MATLAB. One thing to watch out for: to compute the stereo parameters, the left and right cameras must shoot the chessboard at the same time, 20 image pairs in all. I captured the views separately at first; the monocular calibrations worked, but the stereo calibration kept failing. Thinking about the theory, that makes sense: stereo calibration needs simultaneously captured pairs. Below is a small program I wrote that captures from both cameras at once; press 'c' to save the current pair.

#include <opencv2/opencv.hpp>
#include <cassert>
#include <cstdio>
#include <iostream>
using namespace std;

int pic_num = 13;      // index of the next image pair to save
char name_l[200];
char name_r[200];

int main()
{
	cout << "Capturing from two cameras simultaneously" << endl;

	CvCapture* cap_left;
	CvCapture* cap_right;

	IplImage *img0, *img1;
	IplImage *img_left, *img_right;
	cvNamedWindow("camera_left");
	cvNamedWindow("camera_right");

	cap_left = cvCreateCameraCapture(1);
	assert(cap_left != NULL);
	cap_right = cvCreateCameraCapture(0);
	assert(cap_right != NULL);

	while(1)
	{
		// Grab both frames first so the pair is captured as close in time as possible
		cvGrabFrame(cap_left);
		cvGrabFrame(cap_right);

		img0 = cvRetrieveFrame(cap_left);   // img0/img1 are only pointers into the capture buffers;
		img1 = cvRetrieveFrame(cap_right);  // they could also be refreshed in a separate capture thread

		if( !img0 || !img1)
		{
			cout << "camera error" << endl;
			break;
		}

		img_left = cvCloneImage(img0);
		img_right = cvCloneImage(img1);

		cvShowImage("camera_left", img_left);
		cvShowImage("camera_right", img_right);

		char c = cvWaitKey(33);

		if(c == 27)        // ESC quits
			break;
		if(c == 'c')       // 'c' saves the current left/right pair
		{
			sprintf(name_l, "leftPic%d.jpg", pic_num);
			sprintf(name_r, "rightPic%d.jpg", pic_num);
			pic_num++;

			cvSaveImage(name_l, img_left);
			cvSaveImage(name_r, img_right);
		}
		// The clones must be released every iteration, otherwise memory leaks
		cvReleaseImage(&img_left);
		cvReleaseImage(&img_right);
	}
	cvReleaseCapture(&cap_left);
	cvReleaseCapture(&cap_right);

	cvDestroyWindow("camera_left");
	cvDestroyWindow("camera_right");

	return 0;
}
One more note: cvReleaseImage(&img_left); and cvReleaseImage(&img_right); inside the loop are mandatory, otherwise the program leaks memory. If you do not believe it, open the task manager, comment out those two lines, and you will see the memory usage climb steadily while the program runs!
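
For anyone on a newer OpenCV, here is a minimal sketch of the same capture tool written with the C++ API (cv::VideoCapture / cv::imwrite) instead of the legacy C API. The camera indices 0 and 1 are an assumption and may need to be swapped on your machine; with cv::Mat there is no cvReleaseImage bookkeeping to forget.

#include <opencv2/opencv.hpp>
#include <cstdio>
#include <iostream>

int main()
{
	// Camera indices are an assumption: swap them if left/right come out reversed
	cv::VideoCapture cap_left(1), cap_right(0);
	if (!cap_left.isOpened() || !cap_right.isOpened())
	{
		std::cout << "failed to open one of the cameras" << std::endl;
		return -1;
	}

	int pic_num = 13;   // same starting index as the C-API version above
	cv::Mat frame_left, frame_right;
	while (true)
	{
		// Grab both frames first so the pair is as close in time as possible
		cap_left.grab();
		cap_right.grab();
		cap_left.retrieve(frame_left);
		cap_right.retrieve(frame_right);
		if (frame_left.empty() || frame_right.empty())
			break;

		cv::imshow("camera_left", frame_left);
		cv::imshow("camera_right", frame_right);

		char c = (char)cv::waitKey(33);
		if (c == 27)        // ESC quits
			break;
		if (c == 'c')       // 'c' saves the current pair
		{
			char name_l[200], name_r[200];
			sprintf(name_l, "leftPic%d.jpg", pic_num);
			sprintf(name_r, "rightPic%d.jpg", pic_num);
			cv::imwrite(name_l, frame_left);
			cv::imwrite(name_r, frame_right);
			pic_num++;
		}
	}
	return 0;   // cv::Mat and cv::VideoCapture clean up after themselves
}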


Now for OpenCV:

Since most people recommend the MATLAB calibration toolbox, the next step is to store the calibration data in XML files. If XML is unfamiliar, look it up first.

Since people keep asking how to write these files, here are all the files you need to write yourself:

Intrinsics_Camera_Left.xml:

<?xml version="1.0"?>
<opencv_storage>
<Intrinsics_Camera_Left type_id="opencv-matrix">
  <rows>3</rows>
  <cols>3</cols>
  <dt>d</dt>
  <data>
  690.09102   0.         325.38988 
  0.          686.25294  286.9196 
  0.          0.         1.
</data>
</Intrinsics_Camera_Left>
</opencv_storage>

Intrinsics_Camera_Right.xml:

<?xml version="1.0"?>
<opencv_storage>
<Intrinsics_Camera_Right type_id="opencv-matrix">
  <rows>3</rows>
  <cols>3</cols>
  <dt>d</dt>
  <data>
  691.94410   0.          342.74569 
  0.          685.94161   231.16984 
  0.          0.          1.
  </data>
</Intrinsics_Camera_Right>
</opencv_storage>

Distortion_Camera_Left.xml:

<?xml version="1.0"?>
<opencv_storage>
<Distortion type_id="opencv-matrix">
  <rows>5</rows>
  <cols>1</cols>
  <dt>d</dt>
  <data>
    -0.02240   -0.05900   0.00894   -0.00590  0.00000
</data>
</Distortion>
</opencv_storage>

Distortion_Camera_Right.xml:

<?xml version="1.0"?>
<opencv_storage>
<Distortion type_id="opencv-matrix">
  <rows>5</rows>
  <cols>1</cols>
  <dt>d</dt>
  <data>
    0.05543   -0.29862   -0.00669   0.01307  0.00000
</data>
</Distortion>
</opencv_storage>

Translation.xml:

<?xml version="1.0"?>
<opencv_storage>
<Translation type_id="opencv-matrix">
  <rows>3</rows>
  <cols>1</cols>
  <dt>d</dt>
  <data>
   -194.10083   6.39147  -11.45062
</data>
</Translation>
</opencv_storage>

RotRodrigues.xml:

<?xml version="1.0"?>
<opencv_storage>
<RotRodrigues type_id="opencv-matrix">
  <rows>3</rows>
  <cols>1</cols>
  <dt>d</dt>
  <data>
   -0.06416   -0.11879  -0.07601
</data>
</RotRodrigues>
</opencv_storage>


Save each of these files; the numbers inside come straight from the MATLAB calibration results.
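
If you would rather not hand-edit the XML, a short sketch like the one below (legacy C API, using the left-camera numbers from my MATLAB results above) writes the same kind of file with cvSave; when no node name is given, cvSave derives one from the file name, and cvLoad in the program below only goes by the file name anyway.

#include <opencv2/opencv.hpp>

int main()
{
	// Left camera intrinsic matrix, copied from the MATLAB output above
	double kl[9] = { 690.09102, 0.,        325.38988,
	                 0.,        686.25294, 286.9196,
	                 0.,        0.,        1. };
	CvMat intrinsics_left = cvMat(3, 3, CV_64F, kl);
	cvSave("Intrinsics_Camera_Left.xml", &intrinsics_left);

	// Left camera distortion coefficients; the MATLAB kc order (k1, k2, p1, p2, k3)
	// is the same order OpenCV expects
	double dl[5] = { -0.02240, -0.05900, 0.00894, -0.00590, 0.00000 };
	CvMat distortion_left = cvMat(5, 1, CV_64F, dl);
	cvSave("Distortion_Camera_Left.xml", &distortion_left);

	// The right camera, Translation and RotRodrigues files are written the same way
	return 0;
}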


The next step is stereo rectification. For the underlying theory, see the blogs linked above; they explain it in great detail!

Here is the code, to make it easier to follow:

#include <opencv2/opencv.hpp>
#include <cassert>
#include <iostream>

using namespace std;

int main()
{
	cout << "Stereo rectification test with both cameras" << endl;

	// Load the intrinsic and extrinsic parameters produced by MATLAB
	CvMat *Intrinsics_Camera_Left  = (CvMat *)cvLoad("Intrinsics_Camera_Left.xml");
	CvMat *Intrinsics_Camera_Right = (CvMat *)cvLoad("Intrinsics_Camera_Right.xml");
	CvMat *Distortion_Camera_Left  = (CvMat *)cvLoad("Distortion_Camera_Left.xml");
	CvMat *Distortion_Camera_Right = (CvMat *)cvLoad("Distortion_Camera_Right.xml");
	CvMat *Translation_matlab  = (CvMat *)cvLoad("Translation.xml");
	CvMat *RotRodrigues_matlab = (CvMat *)cvLoad("RotRodrigues.xml");
	// MATLAB exports the rotation as a Rodrigues vector; convert it to a 3x3 matrix
	CvMat *R_opencv = cvCreateMat(3, 3, CV_64F);
	cvRodrigues2(RotRodrigues_matlab, R_opencv);

	// Create the rectification maps
	IplImage *Left_Mapx  = cvCreateImage(cvSize(640,480), IPL_DEPTH_32F, 1);
	IplImage *Left_Mapy  = cvCreateImage(cvSize(640,480), IPL_DEPTH_32F, 1);
	IplImage *Right_Mapx = cvCreateImage(cvSize(640,480), IPL_DEPTH_32F, 1);
	IplImage *Right_Mapy = cvCreateImage(cvSize(640,480), IPL_DEPTH_32F, 1);
	CvMat *Rl = cvCreateMat(3, 3, CV_64F);
	CvMat *Rr = cvCreateMat(3, 3, CV_64F);
	CvMat *Pl = cvCreateMat(3, 4, CV_64F);
	CvMat *Pr = cvCreateMat(3, 4, CV_64F);
	// The plain call would be:
	// cvStereoRectify(Intrinsics_Camera_Left, Intrinsics_Camera_Right,
	//                 Distortion_Camera_Left, Distortion_Camera_Right,
	//                 cvSize(640, 480), R_opencv, Translation_matlab,
	//                 Rl, Rr, Pl, Pr);
	// The extra arguments Q=0, flags=1024 (CV_CALIB_ZERO_DISPARITY) and alpha=0
	// scale the rectified images so the black dead zones are removed
	cvStereoRectify(Intrinsics_Camera_Left, Intrinsics_Camera_Right,
		Distortion_Camera_Left, Distortion_Camera_Right,
		cvSize(640, 480), R_opencv, Translation_matlab,
		Rl, Rr, Pl, Pr, 0, 1024, 0);
	cvInitUndistortRectifyMap(Intrinsics_Camera_Left, Distortion_Camera_Left, Rl, Pl,
		                      Left_Mapx, Left_Mapy);
	cvInitUndistortRectifyMap(Intrinsics_Camera_Right, Distortion_Camera_Right, Rr, Pr,
		                      Right_Mapx, Right_Mapy);

	CvCapture* cap_left;
	CvCapture* cap_right;

	IplImage *img0, *img1;
	IplImage *img_left, *img_right;
	IplImage *img_left_Change, *img_right_Change;
	cvNamedWindow("camera_left");
	cvNamedWindow("camera_right");

	cap_left = cvCreateCameraCapture(1);
	assert(cap_left != NULL);
	cap_right = cvCreateCameraCapture(0);
	assert(cap_right != NULL);

	while(1)
	{
		cvGrabFrame(cap_left);
		cvGrabFrame(cap_right);

		img0 = cvRetrieveFrame(cap_left);
		img1 = cvRetrieveFrame(cap_right);

		if( !img0 || !img1)
		{
			cout << "camera error" << endl;
			break;
		}

		img_left  = cvCloneImage(img0);
		img_right = cvCloneImage(img1);
		img_left_Change  = cvCloneImage(img0);
		img_right_Change = cvCloneImage(img1);
		// Apply the rectification maps
		cvRemap(img_left, img_left_Change, Left_Mapx, Left_Mapy);
		cvRemap(img_right, img_right_Change, Right_Mapx, Right_Mapy);

		// Draw horizontal lines every 48 pixels: after rectification, corresponding
		// points should lie on the same row in the left and right images
		for (int y = 48; y < 480; y += 48)
		{
			cvLine(img_left_Change,  cvPoint(0, y), cvPoint(640-1, y), cvScalar(255, 0, 0));
			cvLine(img_right_Change, cvPoint(0, y), cvPoint(640-1, y), cvScalar(255, 0, 0));
		}

		cvShowImage("camera_left", img_left);
		cvShowImage("camera_right", img_right);
		cvShowImage("camera_left_Change", img_left_Change);
		cvShowImage("camera_right_Change", img_right_Change);

		char c = cvWaitKey(33);
		if(c == 27)
			break;

		cvReleaseImage(&img_left);
		cvReleaseImage(&img_right);
		cvReleaseImage(&img_left_Change);
		cvReleaseImage(&img_right_Change);
	}

	cvReleaseCapture(&cap_left);
	cvReleaseCapture(&cap_right);
	cvDestroyWindow("camera_left");
	cvDestroyWindow("camera_right");

	return 0;
}


cvStereoRectify accepts extra arguments at the end that scale the rectified images and remove the black dead zones.
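
For readability, the same call can be written with the named constant instead of the magic number 1024, and with a Q matrix added in case you want to reproject disparities to 3D later. This is meant as a drop-in replacement for the cvStereoRectify call in the program above, assuming an OpenCV 2.x C API where these optional arguments exist:

	// Q is the 4x4 reprojection matrix (useful later with cvReprojectImageTo3D)
	// CV_CALIB_ZERO_DISPARITY == 1024 makes the principal points of the two
	// rectified views land on the same pixel coordinates
	// alpha = 0 scales/crops so only valid pixels remain (no black dead zone);
	// alpha = 1 would keep every source pixel and leave the dead zones visible
	CvMat *Q = cvCreateMat(4, 4, CV_64F);
	cvStereoRectify(Intrinsics_Camera_Left, Intrinsics_Camera_Right,
	                Distortion_Camera_Left, Distortion_Camera_Right,
	                cvSize(640, 480), R_opencv, Translation_matlab,
	                Rl, Rr, Pl, Pr,
	                Q, CV_CALIB_ZERO_DISPARITY, 0);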





Some blogs ask why the translation vector comes out negative and say it should be changed to a positive value. In my case the vector is -194.10083   6.39147  -11.45062, and supposedly the first entry should be changed to 194.10083. I do not quite understand that; I left it unchanged in my program and the result was still correct.

Finally, thanks again to all the bloggers mentioned above!

