Stereo camera calibration with OpenCV, using MATLAB calibration data (source code)

I referred to these experts' blog posts and learned a lot from them:

http://blog.csdn.net/sunanger_wang/article/details/7744025

http://www.opencv.org.cn/forum.php?mod=viewthread&tid=12963

http://blog.csdn.net/scyscyao/article/details/5562024

http://blog.csdn.net/chenyusiyuan/article/details/5967291

The last author wrote a detailed three-part series on the topic, which is excellent.


I noticed that some of the questions under those blog posts never got answered. They are details, but as everyone knows, a tiny detail like that can sometimes block you for a long time.

No explanation beats source code and example images, so here is what I figured out over the past few days.

My setup: two Logitech C170 cameras


Heh... it's a pretty basic setup.



MATLAB calibration:

    If you have read the theory and the blog posts above carefully, you should have no trouble computing the stereo parameters in MATLAB. One thing to watch out for: to compute the stereo parameters, the left and right cameras must photograph the chessboard at the same time, 20 pairs in total. I started out capturing the two views separately; the monocular calibrations worked fine, but the stereo calibration kept failing. Thinking about the theory, that makes sense: stereo calibration only works with simultaneously captured image pairs. Below is a small program I wrote that grabs from both cameras at once; press 'c' to save an image pair.

#include <opencv2/opencv.hpp>
#include <cstdio>    // sprintf
#include <cstdlib>   // malloc
#include <iostream>
using namespace std;

int pic_num = 13;    // index appended to the saved file names
char* name_l = (char*)malloc(sizeof(char)*200);   // buffers for the file names
char* name_r = (char*)malloc(sizeof(char)*200);
int main()  
{  
	cout << "测试两个摄像头同时读取数据" << endl;  

	CvCapture* cap_left;  
	CvCapture* cap_right;  

	IplImage *img0, *img1;            // raw frame pointers owned by the captures
	IplImage *img_left, *img_right;   // our own copies, released every iteration
	cvNamedWindow("camera_left");  
	cvNamedWindow("camera_right");  


	cap_left = cvCreateCameraCapture(1);  
	assert(cap_left != NULL);  
	cap_right = cvCreateCameraCapture(0);  
	assert(cap_right != NULL);  

	while(1)  
	{  
		cvGrabFrame(cap_left);
		cvGrabFrame(cap_right);

		img0 = cvRetrieveFrame(cap_left);   // img0/img1 are just pointers into the capture's buffer; they could be refreshed in a separate grabbing thread
		img1 = cvRetrieveFrame(cap_right);

		if( !img0 || !img1)
		{  
			cout << "camera0 error" << endl;  
			break;  
		}  

		img_left = cvCloneImage(img0);
		img_right = cvCloneImage(img1);

		cvShowImage("camera_left", img_left);  
		cvShowImage("camera_right", img_right);  

		char c = cvWaitKey(33);  

		if(c == 27)   
			break;  
		if(c == 'c')
		{
			sprintf(name_l, "leftPic%d.jpg", pic_num);
			sprintf(name_r, "rightPic%d.jpg", pic_num);
			pic_num++;

			cvSaveImage(name_l, img_left);
			cvSaveImage(name_r, img_right);

		}
		cvReleaseImage(&img_left);
		cvReleaseImage(&img_right);
	}  
	cvReleaseCapture(&cap_left);  
	cvReleaseCapture(&cap_right);  
 
	cvDestroyWindow("camera_left");  
	cvDestroyWindow("camera_right");  
 
	return 0;  

}  
One more remark: the cvReleaseImage(&img_left); and cvReleaseImage(&img_right); calls inside the loop are mandatory, otherwise the program leaks memory, because cvCloneImage allocates a fresh image on every iteration. If you don't believe it, open the task manager, comment out those two lines, and watch the memory usage climb while the program runs!
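
Incidentally, with OpenCV's C++ API this release bookkeeping disappears, because cv::Mat is reference counted. Here is a minimal sketch of the same capture-and-save loop using cv::VideoCapture (the device indices 1 and 0 are assumptions that depend on your machine):

#include <opencv2/opencv.hpp>
#include <cstdio>
#include <iostream>

int main()
{
	// device indices are machine dependent; swap them if the views come out reversed
	cv::VideoCapture cap_left(1), cap_right(0);
	if (!cap_left.isOpened() || !cap_right.isOpened())
	{
		std::cout << "failed to open a camera" << std::endl;
		return -1;
	}

	int pic_num = 0;
	cv::Mat img_left, img_right;
	while (true)
	{
		// grab both frames first so the two views are as close in time as possible
		cap_left.grab();
		cap_right.grab();
		cap_left.retrieve(img_left);
		cap_right.retrieve(img_right);
		if (img_left.empty() || img_right.empty())
			break;

		cv::imshow("camera_left", img_left);
		cv::imshow("camera_right", img_right);

		char c = (char)cv::waitKey(33);
		if (c == 27)        // Esc quits
			break;
		if (c == 'c')       // 'c' saves the current pair
		{
			char name_l[200], name_r[200];
			sprintf(name_l, "leftPic%d.jpg", pic_num);
			sprintf(name_r, "rightPic%d.jpg", pic_num);
			pic_num++;
			cv::imwrite(name_l, img_left);
			cv::imwrite(name_r, img_right);
		}
		// cv::Mat is reference counted, so nothing needs to be released here
	}
	return 0;
}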


Now for OpenCV:

Since most people recommend the MATLAB calibration toolbox, the next step is to store the calibration results in XML files. If you are not familiar with XML, look it up first.

Because people keep asking how to write these files, here is every file you need to create yourself:

Intrinsics_Camera_Left.xml:

<?xml version="1.0"?>
<opencv_storage>
<Intrinsics_Camera_Left type_id="opencv-matrix">
  <rows>3</rows>
  <cols>3</cols>
  <dt>d</dt>
  <data>
  690.09102   0.         325.38988 
  0.          686.25294  286.9196 
  0.          0.         1.
</data>
</Intrinsics_Camera_Left>
</opencv_storage>

Intrinsics_Camera_Right.xml:

<?xml version="1.0"?>
<opencv_storage>
<Intrinsics_Camera_Right type_id="opencv-matrix">
  <rows>3</rows>
  <cols>3</cols>
  <dt>d</dt>
  <data>
  691.94410   0.          342.74569 
  0.          685.94161   231.16984 
  0.          0.          1.
  </data>
</Intrinsics_Camera_Right>
</opencv_storage>

Distortion_Camera_Left.xml:

<?xml version="1.0"?>
<opencv_storage>
<Distortion type_id="opencv-matrix">
  <rows>5</rows>
  <cols>1</cols>
  <dt>d</dt>
  <data>
    -0.02240   -0.05900   0.00894   -0.00590  0.00000
</data>
</Distortion>
</opencv_storage>

Distortion_Camera_Right.xml:

<?xml version="1.0"?>
<opencv_storage>
<Distortion type_id="opencv-matrix">
  <rows>5</rows>
  <cols>1</cols>
  <dt>d</dt>
  <data>
    0.05543   -0.29862   -0.00669   0.01307  0.00000
</data>
</Distortion>
</opencv_storage>

Translation.xml

<?xml version="1.0"?>
<opencv_storage>
<Translation type_id="opencv-matrix">
  <rows>3</rows>
  <cols>1</cols>
  <dt>d</dt>
  <data>
   -194.10083   6.39147  -11.45062
</data>
</Translation>
</opencv_storage>

RotRodrigues.xml

<?xml version="1.0"?>
<opencv_storage>
<RotRodrigues type_id="opencv-matrix">
  <rows>3</rows>
  <cols>1</cols>
  <dt>d</dt>
  <data>
   -0.06416   -0.11879  -0.07601
</data>
</RotRodrigues>
</opencv_storage>


Save each of these files; the numbers inside come from the MATLAB calibration results.
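
If you would rather not type the XML by hand, the same files can also be written from code with the legacy cvSave call, which produces the opencv_storage format that cvLoad reads back in the next step. A minimal sketch, shown for two of the six files with my MATLAB numbers plugged in:

#include <opencv2/opencv.hpp>

int main()
{
	// left intrinsic matrix, values taken from the MATLAB result above
	double kl[9] = { 690.09102, 0.,        325.38988,
	                 0.,        686.25294, 286.9196,
	                 0.,        0.,        1. };
	CvMat Intrinsics_Camera_Left = cvMat(3, 3, CV_64F, kl);
	// the third argument is the node name that cvLoad looks for in the file
	cvSave("Intrinsics_Camera_Left.xml", &Intrinsics_Camera_Left,
	       "Intrinsics_Camera_Left");

	// translation vector from the MATLAB stereo result
	double t[3] = { -194.10083, 6.39147, -11.45062 };
	CvMat Translation = cvMat(3, 1, CV_64F, t);
	cvSave("Translation.xml", &Translation, "Translation");

	// the other four files (right intrinsics, the two distortion vectors,
	// RotRodrigues) are written in exactly the same way
	return 0;
}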


The next step is stereo rectification. For the underlying theory, see the blog posts linked above; they explain it in great detail.

Here is my code, which should make things easier to follow:

#include <opencv2/opencv.hpp>  
#include <iostream>  

using namespace std;  

int main()  
{  
	cout << "测试两个摄像头同时读取数据" << endl;  

	// load the calibration parameters from the XML files
	CvMat *Intrinsics_Camera_Left = (CvMat *)cvLoad("Intrinsics_Camera_Left.xml");
	CvMat *Intrinsics_Camera_Right = (CvMat *)cvLoad("Intrinsics_Camera_Right.xml");
	CvMat *Distortion_Camera_Left = (CvMat *)cvLoad("Distortion_Camera_Left.xml");
	CvMat *Distortion_Camera_Right = (CvMat *)cvLoad("Distortion_Camera_Right.xml");
	CvMat *Translation_matlab = (CvMat *)cvLoad("Translation.xml");
	CvMat *RotRodrigues_matlab = (CvMat *)cvLoad("RotRodrigues.xml");
	CvMat *R_opencv = cvCreateMat(3, 3, CV_64F);
	cvRodrigues2(RotRodrigues_matlab, R_opencv);

	// create the rectification maps
	IplImage *Left_Mapx = cvCreateImage(cvSize(640,480), IPL_DEPTH_32F, 1);
	IplImage *Left_Mapy = cvCreateImage(cvSize(640,480), IPL_DEPTH_32F, 1);
	IplImage *Right_Mapx = cvCreateImage(cvSize(640,480), IPL_DEPTH_32F, 1);
	IplImage *Right_Mapy = cvCreateImage(cvSize(640,480), IPL_DEPTH_32F, 1);
	CvMat *Rl = cvCreateMat(3, 3, CV_64F);
	CvMat *Rr = cvCreateMat(3, 3, CV_64F);
	CvMat *Pl = cvCreateMat(3, 4, CV_64F);
	CvMat *Pr = cvCreateMat(3, 4, CV_64F);
	// Plain call with the default trailing arguments (leaves black borders
	// around the valid image area):
	// cvStereoRectify(Intrinsics_Camera_Left, Intrinsics_Camera_Right,
	//                 Distortion_Camera_Left, Distortion_Camera_Right,
	//                 cvSize(640, 480), R_opencv, Translation_matlab,
	//                 Rl, Rr, Pl, Pr);
	// Passing Q = 0, flags = CV_CALIB_ZERO_DISPARITY (1024) and alpha = 0
	// scales the rectified images so the black dead zones are cropped away.
	cvStereoRectify(Intrinsics_Camera_Left, Intrinsics_Camera_Right,
		Distortion_Camera_Left, Distortion_Camera_Right,
		cvSize(640, 480), R_opencv, Translation_matlab,
		Rl, Rr, Pl, Pr, 0, CV_CALIB_ZERO_DISPARITY, 0);
	cvInitUndistortRectifyMap(Intrinsics_Camera_Left, Distortion_Camera_Left, Rl, Pl,
		                      Left_Mapx, Left_Mapy);
	cvInitUndistortRectifyMap(Intrinsics_Camera_Right, Distortion_Camera_Right, Rr, Pr,
		                      Right_Mapx, Right_Mapy);

	CvCapture* cap_left;  
	CvCapture* cap_right;  

	IplImage *img0 , *img1;  
	IplImage *img_left, *img_right;  
	IplImage *img_left_Change, *img_right_Change;  
	cvNamedWindow("camera_left");  
	cvNamedWindow("camera_right");  

	cap_left = cvCreateCameraCapture(1);  
	assert(cap_left != NULL);  
	cap_right = cvCreateCameraCapture(0);  
	assert(cap_right != NULL);  

	while(1)  
	{  
		cvGrabFrame(cap_left);
		cvGrabFrame(cap_right);

		img0 = cvRetrieveFrame(cap_left);
		img1 = cvRetrieveFrame(cap_right);

		if( !img0 || !img1)
		{  
			cout << "camera0 error" << endl;  
			break;  
		}  

		img_left = cvCloneImage(img0);
		img_right = cvCloneImage(img1);
		img_left_Change = cvCloneImage(img0);
		img_right_Change = cvCloneImage(img1);
		cvRemap(img_left, img_left_Change, Left_Mapx, Left_Mapy);
		cvRemap(img_right, img_right_Change, Right_Mapx, Right_Mapy);

		// draw horizontal lines every 48 pixels; after rectification, corresponding
		// points in the left and right views should lie on the same horizontal line
		for (int y = 48; y <= 48 * 9; y += 48)
		{
			cvLine(img_left_Change,  cvPoint(0, y), cvPoint(640 - 1, y), cvScalar(255, 0, 0));
			cvLine(img_right_Change, cvPoint(0, y), cvPoint(640 - 1, y), cvScalar(255, 0, 0));
		}

		cvShowImage("camera_left", img_left);  
		cvShowImage("camera_right", img_right);  
		cvShowImage("camera_left_Change", img_left_Change);  
		cvShowImage("camera_right_Change", img_right_Change);  

		char c = cvWaitKey(33);  
		if(c == 27)   
			break;  

		cvReleaseImage(&img_left);
		cvReleaseImage(&img_right);
		cvReleaseImage(&img_left_Change);
		cvReleaseImage(&img_right_Change);

	}  

	cvReleaseCapture(&cap_left);  
	cvReleaseCapture(&cap_right);  
	cvDestroyWindow("camera_left");  
	cvDestroyWindow("camera_right");  
 
	return 0;  

}  


cvStereoRectify accepts optional trailing parameters that scale the rectified images so the black dead zones are cropped away (see the sketch below).
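
For reference, this is how I read those trailing arguments in the legacy C API (Q is left as 0 here, 1024 is simply the value of CV_CALIB_ZERO_DISPARITY, and alpha controls the cropping):

// alpha = 0  : zoom in so that only valid pixels remain (no black dead zone)
// alpha = 1  : keep every original pixel, the black dead zones stay visible
// alpha = -1 : let OpenCV choose a default scaling
cvStereoRectify(Intrinsics_Camera_Left, Intrinsics_Camera_Right,
                Distortion_Camera_Left, Distortion_Camera_Right,
                cvSize(640, 480), R_opencv, Translation_matlab,
                Rl, Rr, Pl, Pr,
                0 /* Q */, CV_CALIB_ZERO_DISPARITY /* flags */, 0 /* alpha */);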





Some blogs mention that the translation vector comes out negative and say it has to be changed to a positive value. In my case it is

 -194.10083   6.39147  -11.45062

so the first component would have to be changed to 194.10083. I don't quite understand why; I did not modify it in my program, and the result was still correct.

Finally, thanks again to all the bloggers mentioned above!

