一、讀取兩幀圖像,並獲得特徵點以及匹配後的特徵點
Mat img_1 = imread ( argv[1], CV_LOAD_IMAGE_COLOR );
Mat img_2 = imread ( argv[2], CV_LOAD_IMAGE_COLOR );
vector<KeyPoint> keypoints_1, keypoints_2;
vector<DMatch> matches;
find_feature_matches ( img_1, img_2, keypoints_1, keypoints_2, matches );
cout<<"一共找到了"<<matches.size() <<"組匹配點"<<endl;
二、深度圖+rgb建立3D點
- 讀取第一幀的深度圖
Mat d1 = imread ( argv[3], CV_LOAD_IMAGE_UNCHANGED );
- 輸入內參矩陣
// Pinhole camera intrinsic matrix K = [fx 0 cx; 0 fy cy; 0 0 1]
// with fx=520.9, fy=521.0, cx=325.1, cy=249.7 — presumably the TUM
// RGB-D dataset calibration; verify against the data source.
Mat K = ( Mat_<double> ( 3,3 ) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1 );
- 從圖1中找到匹配後的關鍵點座標(x,y),通過(x,y)索引到圖1的深度圖中,獲得該特徵點的深度d。
將關鍵點像素座標轉換爲相機歸一化座標記爲p1;
保存第一幀圖像的3D點(x, y, z)存入pts_3d;
保存第二幀對應匹配點的2D點(x, y)存入pts_2d;
此時就完成了3D-2D的配對。
代碼爲:
// Build the 3D-2D correspondences: back-project every matched keypoint of
// frame 1 to a 3D point using its depth reading, and pair it with the
// matching 2D pixel observed in frame 2.
vector<Point3f> pts_3d;
vector<Point2f> pts_2d;
for ( DMatch match : matches )
{
    const Point2f& px = keypoints_1[match.queryIdx].pt;
    // Depth lookup at the (truncated) pixel coordinates of the keypoint.
    ushort raw_depth = d1.ptr<unsigned short> ( int ( px.y ) )[ int ( px.x ) ];
    if ( raw_depth == 0 )   // bad depth
        continue;
    float z = raw_depth / 5000.0;               // sensor scale: 5000 units per metre
    Point2d cam = pixel2cam ( px, K );          // pixel -> normalized camera coordinates
    pts_3d.push_back ( Point3f ( cam.x * z, cam.y * z, z ) );
    pts_2d.push_back ( keypoints_2[match.trainIdx].pt );
}
- 通過以上3D-2D的配對點求解R,t, 該過程叫做求解PnP
代碼爲:Mat r, t; solvePnP ( pts_3d, pts_2d, K, Mat(), r, t, false ); // 調用OpenCV 的 PnP 求解,可選擇EPNP,DLS等方法 Mat R; cv::Rodrigues ( r, R ); // r爲旋轉向量形式,用Rodrigues公式轉換爲矩陣 cout<<"R="<<endl<<R<<endl; cout<<"t="<<endl<<t<<endl;
-
通過以上R,t以及3D點可以計算重投影誤差,然後以重投影誤差最小化爲目標構建最小二乘問題,再通過g2o方法求解,得到更加準確的R,t值。
// Refine R and t by minimizing the reprojection error (bundle adjustment);
// bundleAdjustment is defined elsewhere in this project — presumably a
// g2o-based optimizer, per the surrounding text.
bundleAdjustment ( pts_3d, pts_2d, K, R, t );