Measuring the distance between two matched points to evaluate stereo camera calibration accuracy

Notes on camera calibration

After listening to a live-streamed camera calibration lecture by a PhD student from Shanghai Jiao Tong University, I noted the following points:

  1. Capture 15-20 images for calibration.
  2. Use a standard calibration board to reduce error; avoid calibrating with a chessboard simply printed on an A4 sheet. Zhang's method assumes the board lies in the plane z = 0, and an A4 sheet glued onto cardboard cannot guarantee z = 0, so error is introduced at the very start of calibration. A circle-grid board is more accurate than a chessboard.
  3. In the captured images the chessboard should appear both at the center and near the edges.
  4. Calibration accuracy can be evaluated with the reprojection error, or by projecting specific points from 2D to 3D and comparing the distances between the reconstructed points (a reprojection-error sketch follows this list).

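For point 4, the reprojection error can be computed from the outputs of calibrateCamera() by projecting the known board corners back into each image and comparing them with the detected corners. The sketch below is a minimal, illustrative version (the helper name meanReprojectionError and the variable names are assumptions, not part of the original post):

#include <opencv2/opencv.hpp>
#include <vector>
#include <cmath>
using namespace cv;
using namespace std;

// RMS reprojection error over all calibration views.
// objectPoints/imagePoints, rvecs/tvecs, cameraMatrix and distCoeffs are the
// inputs and outputs of a previous calibrateCamera() call.
double meanReprojectionError(const vector<vector<Point3f>>& objectPoints,
	const vector<vector<Point2f>>& imagePoints,
	const vector<Mat>& rvecs, const vector<Mat>& tvecs,
	const Mat& cameraMatrix, const Mat& distCoeffs)
{
	double totalErr = 0;
	size_t totalPoints = 0;
	for (size_t i = 0; i < objectPoints.size(); i++)
	{
		vector<Point2f> projected;
		// Project the known 3D board corners with the estimated pose and intrinsics,
		// then compare against the corners detected in this view.
		projectPoints(objectPoints[i], rvecs[i], tvecs[i], cameraMatrix, distCoeffs, projected);
		double err = norm(imagePoints[i], projected, NORM_L2);
		totalErr += err * err;
		totalPoints += objectPoints[i].size();
	}
	return sqrt(totalErr / totalPoints);	// RMS error in pixels
}
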
The cameras are calibrated with a standard 12*9 chessboard whose squares are 25 mm on a side. The calibrated parameters are then used to triangulate two pairs of matched points into 3D space; the distance between the two reconstructed points is computed and compared with the true 3D distance, which gives a measure of the calibration accuracy.
The program below picks the two chessboard corners that are farthest apart (the first and the last detected inner corners).
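
For reference, the "real distance" of 305.164 mm printed at the end of the program is simply the diagonal between those two corners: a 12*9-square board has 11*8 inner corners, so the first and last corners are 10 squares apart in one direction and 7 squares apart in the other, i.e. sqrt((10*25)^2 + (7*25)^2) = sqrt(250^2 + 175^2) ≈ 305.164 mm.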

#include <opencv2/opencv.hpp>
#include <iostream>
#include <fstream>
#include <string>
using namespace cv;
using namespace std;


// Stereo camera intrinsics (note: there is some error in the y direction)
Mat cameraMatrixL = (Mat_<float>(3, 3) << 2324.328, 0, 338.492,
	0, 2325.87799, 268.76668, 0, 0, 1);
Mat distCoeffL = (Mat_<float>(5, 1) << -0.603, 1.3469, 0.0, 0.0, 0.00000);

Mat cameraMatrixR = (Mat_<float>(3, 3) << 2324.518, 0, 303.5,
	0, 2324.6, 274.7977, 0, 0, 1);
Mat distCoeffR = (Mat_<float>(5, 1) << -0.6034, 1.6096, 0.0,
	0.0, 0.00000);
// R, t between the left and right cameras can be obtained with stereoCalibrate()
// or the MATLAB calibration toolbox (see the sketch after the program)
Mat T = (Mat_<float>(3, 1) << 0.2799, 23.91316, 7.2362);		// translation vector T
Mat R = (Mat_<float>(3, 3) << 1.0, 0.00064, 0.0024,
	-0.00064, 1.0, 0.0013, -0.0024,-0.0013, 1.0);


void reconstruct(Mat& K1, Mat& K2, Mat& R, Mat& T, vector<Point2f>& p1, vector<Point2f>& p2, Mat& structure)
{
	// Projection matrices [R|t] of the two cameras; triangulatePoints() only supports float
	// K1, K2 are the camera intrinsic matrices
	Mat proj1(3, 4, CV_32FC1);
	Mat proj2(3, 4, CV_32FC1);

	proj1(Range(0, 3), Range(0, 3)) = Mat::eye(3, 3, CV_32FC1);
	proj1.col(3) = Mat::zeros(3, 1, CV_32FC1);

	R.convertTo(proj2(Range(0, 3), Range(0, 3)), CV_32FC1);
	T.convertTo(proj2.col(3), CV_32FC1);

	Mat fK1,fK2;
	K1.convertTo(fK1, CV_32FC1);
	K2.convertTo(fK2, CV_32FC1);

	proj1 = fK1 * proj1;
	proj2 = fK2 * proj2;

	// Triangulate: the output is a 4xN matrix of homogeneous 3D points
	triangulatePoints(proj1, proj2, p1, p2, structure);
}



int main()
{
	char left_path[100], right_path[100];
	sprintf_s(left_path, "images/left/left.txt");
	sprintf_s(right_path, "images/right/right.txt");
	ifstream finL(left_path);
	ifstream finR(right_path);
	string left_img, right_img;
	float num= 0;
	float total_dis=0;
	while (getline(finL,left_img)&& getline(finR, right_img))
	{
		left_img = "images/left/"+left_img;
		right_img = "images/right/" + right_img;

		Mat f1 = imread(left_img);
		Mat f2 = imread(right_img);	// read the left and right images
		Mat undistorf1, undistorf2;
		undistort(f1, undistorf1, cameraMatrixL, distCoeffL);
		undistort(f2, undistorf2, cameraMatrixR, distCoeffR);	// undistort both images

		vector<Point2f> p1, p2;
		vector<Point2f> p3, p4;
		//vector<Point2f> undistorp3, undistorp4;

		bool foundL = findChessboardCorners(undistorf1, Size(11, 8), p1);
		bool foundR = findChessboardCorners(undistorf2, Size(11, 8), p2);
		if (!foundL || !foundR)
		{
			cout << "Chessboard not found in " << left_img << " or " << right_img << endl;
			continue;
		}
		p3.push_back(p1[0]);				// first inner corner
		p3.push_back(p1[p1.size() - 1]);	// last inner corner (farthest from the first)
		p4.push_back(p2[0]);
		p4.push_back(p2[p2.size() - 1]);
		Mat structure;
		reconstruct(cameraMatrixL, cameraMatrixR, R, T, p3, p4, structure);
		int i = 0;
		vector<float> calculat;
		for (i = 0; i < structure.cols; i++)
		{
			Mat_<float> col = structure.col(i);
			col /= col(3);	// normalize homogeneous coordinates (divide by w)
		//	cout << "x:" << col(0) << endl << "y:" << col(1) << endl << "z:" << col(2) << endl;
			calculat.push_back(col(0));
			calculat.push_back(col(1));
			calculat.push_back(col(2));
		}
		float cal_dis = sqrtf(powf(calculat[0] - calculat[3], 2) + powf(calculat[1] - calculat[4], 2) +
			powf(calculat[2] - calculat[5], 2));
		calculat.clear();
		cout << "The calculated distance is " << cal_dis << "mm" << endl;
		//cout << "The real distance is " << 305.164 << "mm" << endl;
		total_dis=total_dis+cal_dis;
		num++;
	}
	float mean_dis = total_dis / num;
	cout << "The real distance is " << 305.164 << "mm" << endl;
	cout << "The mean calculated distance is " <<mean_dis << "mm" << endl;
	return 0;
}
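
The R and T hard-coded at the top were obtained from a separate stereo-calibration step. A minimal sketch of how they can be computed with stereoCalibrate() is given below; the wrapper function, variable names and the CALIB_FIX_INTRINSIC flag are illustrative assumptions, not the exact call used to produce the numbers above.

#include <opencv2/opencv.hpp>
#include <vector>
#include <iostream>
using namespace cv;
using namespace std;

// objectPoints: per-view 3D chessboard corners (z = 0, 25 mm spacing)
// imagePointsL / imagePointsR: per-view detected corners in the left / right images
// cameraMatrixL/R, distCoeffL/R: intrinsics from the per-camera calibration
void calibrateStereoPair(const vector<vector<Point3f>>& objectPoints,
	const vector<vector<Point2f>>& imagePointsL,
	const vector<vector<Point2f>>& imagePointsR,
	Mat& cameraMatrixL, Mat& distCoeffL,
	Mat& cameraMatrixR, Mat& distCoeffR,
	Size imageSize, Mat& R, Mat& T)
{
	Mat E, F;
	// Keep the previously calibrated intrinsics fixed and estimate only the extrinsics R, T
	double rms = stereoCalibrate(objectPoints, imagePointsL, imagePointsR,
		cameraMatrixL, distCoeffL, cameraMatrixR, distCoeffR,
		imageSize, R, T, E, F, CALIB_FIX_INTRINSIC,
		TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, 1e-5));
	cout << "stereoCalibrate RMS reprojection error: " << rms << endl;
}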