基於opencv下的雙目視覺三維重建與重投影殘差計算

一、重投影殘差

1.1基本概念

重投影殘差,按照字面意思解釋就是在第二次重新投影的二維像素點與第一次投影得到的像素點之間的殘差(歐式距離)大小,殘差的大小反映的就是測量儀器的精度以及外界因素對測量結果的干擾,即測量誤差的大小。

如下圖所示:

假設P是世界座標系中的一個三維點(真實物點),第一次投影指的就是雙目相機C1、C2拍攝時,P點投影到二維像平面上所捕獲到的圖像點,即圖中的P1和P2點;然後利用雙目視覺原理,針對P1、P2這對匹配點以及標定求出的內外、畸變參數等等,可以求出一個三維點(理論值);然後利用求出來的三維點與相機的位姿關係進行第二次投影得到P’2點;一般用重投影點P’2與獲取的二維像點P2的歐式距離e值的大小反映的就是重投影誤差的大小。
在這裏插入圖片描述

二、關鍵座標系之間的轉換

2.1 像素座標系與圖像座標系

在這裏插入圖片描述
(u , v)表示的是像素座標,單位Pixel;
(x , y)表示的是圖像座標,單位mm;
(u0 , v0)表示的是光心位置,有時候表示爲(Cx,Cy);
dx , dy分別表示的是每個像素在橫縱座標的實際物理尺寸,單位mm;

2.2 圖像座標系與相機座標系

在這裏插入圖片描述
在這裏插入圖片描述
以上公式最後的f,根據標定數據,在x和y方向分別以fx和fy代入,注意這裏的fx和fy表示的是實際物理長度,單位mm,轉換過程(三維座標->像素座標)如下圖所示:
在這裏插入圖片描述

2.3 殘差計算

將獲取的二維圖像匹配點記爲(u1,v1);經過畸變矯正(undistortPoints())後得到的是歸一化圖像座標,設爲(x,y);再設(u,v)爲由歸一化座標換算回的矯正後像素座標,換算公式如下:
u = x * fx + Cx
v = y * fy + Cy
最後將得到的(u,v)與重投影得到的像素點(u′,v′)(即前文的重投影點P′2,由重建出的三維點再次投影到左相機像平面得到)計算歐式距離,該距離即爲重投影殘差的大小

code:

// NewPointClouds.cpp: 定義控制檯應用程序的入口點。
//
// MatchExperiment.cpp: 定義控制檯應用程序的入口點。
//

#include "stdafx.h"
#include"iostream"
#include"opencv.hpp"
#include"highgui.hpp"
#include"xfeatures2d.hpp"

#define La1 -0.0765583
#define La2 1.12963
#define La3 -0.000478566
#define La4 -0.00934149   //左相機的四個畸變參數
#define Ra1 -0.0564752
#define Ra2 0.598481
#define Ra3 0.00219212
#define Ra4 -0.000801994  //右相機的四個畸變參數
#define Lcx 563.737           
#define Lcy 449.899    //左相機的光心位置
#define Rcx 638.157
#define Rcy 478.782   //右相機的光心位置
#define fx1 3317.41
#define fy1 3318.89   //左相機的焦距
#define fx2 3346.03
#define fy2 3347.61   //右相機的焦距

using namespace cv;
using namespace std;

int main()
{
	Mat imageL0 = imread("Pattenimage_L.bmp");
	Mat imageR0 = imread("Pattenimage_R.bmp");
	//Mat imageL1, imageR1;
	//GaussianBlur(imageL0, imageL1, Size(3, 3), 0.5);
	//GaussianBlur(imageR0, imageR1, Size(3, 3), 0.5);
	ofstream file,debug;
	file.open("PointClouds.txt");
	debug.open("debug.txt");

	Mat dist1 = (Mat_<float>(5, 1) << La1, La2 ,La3 , La4 , 0); //左相機畸變參數(k1,k2,p1,p2,k3)
	Mat dist2 = (Mat_<float>(5, 1) << Ra1, Ra2, Ra3, Ra4, 0); //右相機畸變參數

	double m1[3][3] = { { fx1,0,Lcx },{ 0,fy1,Lcy },{ 0,0,1 } };
	Mat m1_matrix(Size(3, 3), CV_64F, m1);  //左相機的內參矩陣
								
	double m2[3][3] = { { fx2,0,Rcx },{ 0,fy2,Rcy },{ 0,0,1 } };
	Mat m2_matrix(Size(3, 3), CV_64F, m2);  //右相機的內參矩陣
	float r2[3][1] = { { 0.00778951 },{ -0.121633 },{ 0.0150494 } }; 
	Mat r2_src2(Size(1, 3), CV_32F, r2);
	Mat r2_matrix(Size(3, 3), CV_64F); //右相機旋轉向量
	Rodrigues(r2_src2, r2_matrix);   //旋轉向量轉化爲旋轉矩陣
	Mat t2_matrix = (Mat_<double>(3, 1) << 105.017, 2.22392, 19.288);   //右相機平移向量

	float M1[3][3] = { { fx1,0,Lcx },{ 0,fy1,Lcy },{ 0,0,1 } };
	Mat M1_matrix(Size(3, 3), CV_32F, M1);  //左相機的內參矩陣

	float M2[3][3] = { { fx2,0,Rcx },{ 0,fy2,Rcy },{ 0,0,1 } };
	Mat M2_matrix(Size(3, 3), CV_32F, M2);  //右相機的內參矩陣

	Mat R1 = Mat::eye(3, 3, CV_32F);     //左相機圖像畸變矯正
	Mat map1x = Mat(imageL0.size(), CV_32FC1);
	Mat map1y = Mat(imageL0.size(), CV_32FC1);
	initUndistortRectifyMap(M1_matrix, dist1, R1, M1_matrix, imageL0.size(), CV_32FC1, map1x, map1y);
	Mat picture1 = imageL0.clone();
	remap(imageL0, picture1, map1x, map1y, INTER_LINEAR);

	Mat R2 = Mat::eye(3, 3, CV_32F);     //右相機圖像畸變矯正
	Mat map2x = Mat(imageR0.size(), CV_32FC1);
	Mat map2y = Mat(imageR0.size(), CV_32FC1);
	initUndistortRectifyMap(M2_matrix, dist2, R2, M2_matrix, imageR0.size(), CV_32FC1, map2x, map2y);
	Mat picture2 = imageR0.clone();
	remap(imageR0, picture2, map2x, map2y, INTER_LINEAR);

	//imshow("矯正後左圖", picture1);
	//imshow("矯正後右圖", picture2);

	//SIFT找出特徵點
	Ptr<Feature2D>f2d = xfeatures2d::SIFT::create(0, 3, 0.04, 10); 
	vector<KeyPoint>keyPoint1, keyPoint2;
	Mat descriptors1, descriptors2;

	f2d->detectAndCompute(picture1, noArray(), keyPoint1, descriptors1);
	f2d->detectAndCompute(picture2, noArray(), keyPoint2, descriptors2);

	drawKeypoints(picture1, keyPoint1, picture1, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
	drawKeypoints(picture2, keyPoint2, picture2, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);

	vector<DMatch>matches1;
	BFMatcher matcher1(NORM_L2);
	matcher1.match(descriptors1, descriptors2, matches1);

	Mat img_before;
	drawMatches(picture1, keyPoint1, picture2, keyPoint2, matches1, img_before);
	imshow("誤匹配消除前", img_before);


	//用knn算法獲取與該特徵點最匹配的兩個特徵點
	vector<DMatch>matches;

	vector<vector<DMatch>>knn_matches;
	BFMatcher matcher(NORM_L2);
	
	//設置K = 2 ,即對每個匹配返回兩個最近鄰描述符,僅當第一個匹配與第二個匹配之間的距離足夠小時,才認爲這是一個匹配
	matcher.knnMatch(descriptors1, descriptors2, knn_matches, 2);
	
	//獲取滿足Ration Test的最小匹配距離
	float min_dist = FLT_MAX;
	for (int r = 0; r < knn_matches.size(); ++r) {
		//Ratio Test
		if (knn_matches[r][0].distance > 0.6*knn_matches[r][1].distance)
			continue;

		float dist = knn_matches[r][0].distance;
		if (dist < min_dist)
			min_dist = dist;
	}

	for (size_t r = 0; r < knn_matches.size(); r++) {
		//排除不滿足Ratio Test的點和匹配距離過大的點
		if (
			knn_matches[r][0].distance > 0.6*knn_matches[r][1].distance ||
			knn_matches[r][0].distance > 5 * max(min_dist, 10.0f)
			)
			continue;
		matches.push_back(knn_matches[r][0]);
	}
	Mat img_mid;
	drawMatches(picture1, keyPoint1, picture2, keyPoint2, matches, img_mid);
	imshow("誤匹配排除後", img_mid);

	vector<Point2f>points1, points2;
	for (vector<DMatch>::const_iterator it = matches.begin(); it != matches.end(); ++it) {     //容納兩個關鍵點匹配的結構
		points1.push_back(keyPoint1[it->queryIdx].pt);  //取得左側關鍵點的位置
		points2.push_back(keyPoint2[it->trainIdx].pt);  //取得右側關鍵點的位置
	}

	//RANSAC計算單應變換矩陣排除錯誤匹配點對
	double distance = 1;   //點到對極線的最大距離
	double confidence = 0.98;    //矩陣正確的可信度
	vector<uchar>inliers(points1.size(), 0);
	Mat fundaental = findFundamentalMat(points1, points2, inliers, CV_FM_RANSAC, distance, confidence);
	//RANSAC方法,到對極線的距離,可信度,匹配的點,匹配狀態(局內項或局外項)

	//提取存留的局內項
	vector<uchar>::const_iterator itIn = inliers.begin();
	vector<DMatch>::const_iterator itM = matches.begin();

	//遍歷全部匹配項
	vector<DMatch>Matches;
	for (; itIn != inliers.end(); ++itIn, ++itM) {
		if (*itIn) {  //有效的匹配項
			Matches.push_back(*itM);
		}
	}

	Mat imageOutput;
	drawMatches(picture1, keyPoint1, picture2, keyPoint2, Matches, imageOutput, Scalar::all(-1));

	namedWindow("picture of matching");
	imshow("picture of matching", imageOutput);
	Mat R2_matrix(Size(3, 3), CV_64F); //右相機旋轉向量
	Mat R2_src = (Mat_<double>(3, 1) << 0.00778951, -0.121633, 0.0150494);
	Rodrigues(R2_src, R2_matrix);   //旋轉向量轉化爲旋轉矩陣
	Mat T2_matrix = (Mat_<double>(3, 1) << 105.017, 2.22392, 19.288);   //右相機平移向量


	Mat _src1(points1.size(), 1, CV_32FC2);
	Mat _src2(points1.size(), 1, CV_32FC2);
	
	Vec2f dd;
	for (int i = 0; i < points1.size(); i++) {
		dd[0] = points1[i].x;
		dd[1] = points1[i].y;
		_src1.at<Vec2f>(i, 0) = dd;
	}
	for (int i = 0; i < points2.size(); i++) {
		dd[0] = points2[i].x;
		dd[1] = points2[i].y;
		_src2.at<Vec2f>(i, 0) = dd;
	}

	Mat _dst1;
	Mat _dst2;

	//畸變矯正(將像素座標轉換爲了圖像座標)
	undistortPoints(_src1, _dst1, m1_matrix, dist1); //校正後的座標需要乘以焦距+中心座標
	undistortPoints(_src2, _dst2, m2_matrix, dist2);
	
	//存儲二維像素點
	ofstream PointsL;
	PointsL.open("PointsL.txt");
	ofstream PointsR;
	PointsR.open("PointsR.txt");

	Vec2f ee,ef;
	for (size_t i = 0; i < points1.size(); i++) {
		ee = _dst1.at<Vec2f>(i, 0);
		PointsL << ee[0] << " " << ee[1] << endl;

		ef = _dst2.at<Vec2f>(i, 0);
		PointsR << ef[0] << " " << ef[1] << endl;

	}

	//立體校正,校正後的立體相機光軸平行,且行逐行對齊
	Mat Rl, Rr, Pl, Pr, Q;
	stereoRectify(m1_matrix, dist1, m2_matrix, dist2, picture1.size(), R2_matrix, T2_matrix, Rl, Rr, Pl, Pr, Q, 0, -1, picture1.size());

	debug << m1_matrix << endl << m2_matrix << endl << Pl << endl << Pr << endl;

	//分別投影原立體相機圖像座標到校正後立體相機圖像座標
	Mat iRl(3, 3, CV_64F);
	Mat iRr(3, 3, CV_64F);
	for (int i = 0; i<3; i++)
	{
		for (int j = 0; j<3; j++)
		{
			iRl.at<double>(i, j) = Pl.at<double>(i, j);//取Pl的-2列所構成的*3矩陣與Rl相乘獲得從原左相機平面圖像到矯正後左相機平面圖像的轉換矩陣
			iRr.at<double>(i, j) = Pr.at<double>(i, j);
		}
	}

	iRl = iRl * Rl;
	iRr = iRr * Rr;

	//根據Pl,PR,Q矩陣,計算像平面矯正後的特徵點對座標(x,y)和左右視差d
	Mat Prec3D(4, 1, CV_64F);
	Mat Pworld3D(4, 1, CV_64F);
	double fxl, fyl, cxl, cyl;
	double fxr, fyr, cxr, cyr;
	//投影相機中心
	//左相機
	fxl = m1_matrix.at<double>(0, 0);
	fyl = m1_matrix.at<double>(1, 1);
	cxl = m1_matrix.at<double>(0, 2);
	cyl = m1_matrix.at<double>(1, 2);
	//右相機
	fxr = m2_matrix.at<double>(0, 0);
	fyr = m2_matrix.at<double>(1, 1);
	cxr = m2_matrix.at<double>(0, 2);
	cyr = m2_matrix.at<double>(1, 2);

	//file << "Num : "<<points1.size() << endl;

	double rr[3][3] = { { 1,0,0 },{ 0,1,0 },{ 0,0,1 } };
	Mat RR1(3, 3, CV_64F, rr); //旋轉矩陣
	Mat RR(3, 1, CV_64F); //旋轉向量
	Rodrigues(RR1, RR);
	Mat TT = (Mat_<double>(3, 1) << 0, 0, 0);   //平移向量

    //三維點座標
	vector<Point3d>coord_points;

	//生成的重投影二維座標
	vector<Point2d>res_2d;

	ofstream dev_txt;
	dev_txt.open("dev.txt");

	ofstream of;
	of.open("trianglePoints.txt");
	Mat s;
	triangulatePoints(Pl, Pr, _dst1, _dst2, s);

	Vec2f pp1;
	for (int i = 0; i < points1.size(); i++) {

		dd = _dst1.at<Vec2f>(i, 0);
		double x1_ = dd[0];
		double y1_ = dd[1];
		Mat xx = (Mat_<double>(3, 1) << x1_, y1_, 1);
		Mat xx1(3, 1, CV_64F);
		xx1 = iRl * xx;
		double x1 = xx1.at<double>(0, 0) / xx1.at<double>(2, 0);
		double y1 = xx1.at<double>(1, 0) / xx1.at<double>(2, 0);

		dd = _dst2.at<Vec2f>(i, 0);
		double x2_ = dd[0];
		double y2_ = dd[1];
		Mat yy = (Mat_<double>(3, 1) << x2_, y2_, 1);
		Mat yy1(3, 1, CV_64F);
		yy1 = iRr * yy;
		double x2 = yy1.at<double>(0, 0) / yy1.at<double>(2, 0);
		double y2 = yy1.at<double>(1, 0) / yy1.at<double>(2, 0);

		//(x1,y1,d,1)構成視差矢量
		Prec3D.at<double>(0, 0) = x1;
		Prec3D.at<double>(1, 0) = y1;
		Prec3D.at<double>(2, 0) = x1 - x2;
		Prec3D.at<double>(3, 0) = 1;
		Pworld3D = Q * Prec3D;
		x1 = Pworld3D.at<double>(0, 0);
		y1 = Pworld3D.at<double>(1, 0);
		double z1 = Pworld3D.at<double>(2, 0);
		double w1 = Pworld3D.at<double>(3, 0);
		double wx = x1 / w1;
		double wy = -y1 / w1;
		double wz = -z1 / w1;

		//畸變後的像素座標
		double x_a = x1_ * fx1 + Lcx;
		double y_a = y1_ * fy1 + Lcy;

		//匹配點二維座標
		vector<Point2d>match_2d;
		Mat tem1 = (Mat_<double>(2, 1) << x_a,y_a);
		match_2d = Mat_<Point2d>(tem1);

		Mat cord = (Mat_<double>(3, 1) <<wx,wy,wz);
		coord_points = Mat_<Point3d>(cord);
		
		//生成的重投影二維座標
		vector<Point2d>res_2d;

		double u1 = (fx1*wx) / wz + Lcx;
		double v1 = (fy1*wy) / wz + Lcy;
		Mat ppp = (Mat_< double > (2, 1) << u1, v1);
		res_2d = Mat_<Point2d>(ppp);

		//projectPoints(coord_points, RR, TT, m1_matrix, dist1, res_2d);

		//res_2d[0].x = (res_2d[0].x - Lcx) / fx1;
		//res_2d[0].y = (res_2d[0].y - Lcy) / fy1;

		double dev;
		dev = norm(res_2d, match_2d, CV_L2);

		dev_txt << "coord_3D=" << coord_points << " ; " << " res_2d=" << res_2d << " ; " << "matcher=" << match_2d << " ; " << "dev=" << dev << endl;  //輸出偏差值

		file << wx << "," << wy << "," << wz << endl;
	}


	file,debug.close();
	waitKey(0);
	return 0;
}


發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章