立體視覺匹配

題目

已知以下參數:
左相機內參(依次爲fx\cx\fy\cy):
double Ml[4] = { 1674.86, 658.302, 1674.76, 529.925 };

左相機畸變係數:(k1\k2\p1\p2\k3\k4\k5\k6)
double Dl[8] = { -0.0315542, - 0.0298582, 0.000139779, - 0.000262658, - 0.308588, 0.0312938 ,- 0.100438, - 0.376244 };

右相機內參(依次爲fx\cx\fy\cy):
double Mr[4] = { 1671.91, 686.756, 1671.78, 516.332 };

左右相機之間旋轉矩陣R:
double R[3][3] = { 0.888061, - 0.00125292, 0.459724,
0.00433001, 0.999975, - 0.0056391,
- 0.459705, 0.00699847, 0.888044 };

左右相機之間平移向量T:
double T[3] = { -204.265, - 0.252961, 50.1539 };

其中RT滿足關係:R*Pl+T=Pr。Pl爲左相機中某一三維點(xl,yl,zl),Pr爲右相機相機座標系中某一三維點(xr,yr,zr)。

問:現有左相機圖像像素座標系中一二維點uvL(900,615)(未進行畸變校正),和右相機座標系圖像像素座標系中一二維點uvR(939.325073,597.613770)(已進行畸變校正)。已知兩點爲正確匹配點對,根據以上信息,編程實現:
1、 重建以右相機爲世界座標系下的真實三維點座標(X,Y,Z)。
2、 求滿足關係:R’*Pr+T’=Pl的旋轉矩陣R’和平移向量T’。
3、 求重建出來的三維座標(X,Y,Z)重投影回左右相機後分別的圖像座標uvL’和uvR’,以及誤差值。
4、 求二維點uvR(939.325073,597.613770)與二維點uvL(900,615)在右相機中的極線L的距離。

解析

  1. 先將左邊的像素點去畸變,然後通過中垂線投影解算出三維座標
  2. R=RT,T=TR' = R^T,T=-T
  3. 已知三維座標投影到兩個camera中,求取像素座標

重點解析第1題:
使用異面直線公垂線中點去逼近物體空間點的模型如下(來自於參考文獻1的內容)
在這裏插入圖片描述
在這裏插入圖片描述
注意:要相使用上述模型,必須將所有點的座標轉換到以右camera爲世界座標系下的值

  • 左相機原點:Ol=R(0,0,0)+TO_l = R*(0,0,0)+T
  • 左像素點:
    • 第一步:像素座標轉換到左camera座標下的三維座標(歸一化平面座標)
      Pl=[x,y,1]T=k1[u,v,1]TPl'= [x,y,1]^T=k^-1*[u,v,1]^T
    • 第二步:將歸一化座標轉換到右camera下的座標
      Pl=RPl+TPl = R*Pl'+T
  • 右相機原點:Or=(0,0,0)O_r = (0,0,0)
  • 右像素點: Pr=[x,y,1]T=k1[u,v,1]TPr= [x,y,1]^T=k^-1*[u,v,1]^T

源代碼

#include <iostream>
#include <Eigen/Core>
#include <Eigen/Geometry>

#include <opencv2/core/eigen.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>

#include "sophus/so3.h"
#include "sophus/se3.h"

using namespace std;
using namespace Eigen;
using namespace cv;

void pixel2camera(const Mat &k , const Point2d &point_uv, Vector3d &point_cam){
    point_cam.x()=(point_uv.x-k.at<double>(0,2))/k.at<double>(0,0);
    point_cam.y()=(point_uv.y-k.at<double>(1,2))/k.at<double>(1,1);
    point_cam.z()=1;       
}
void pixel2camera(const Mat &k , const Vector2d &point_uv, Vector3d &point_cam){
    point_cam.x()=(point_uv.x()-k.at<double>(0,2))/k.at<double>(0,0);
    point_cam.y()=(point_uv.y()-k.at<double>(1,2))/k.at<double>(1,1);
    point_cam.z()=1;
}

void camera2pixel(const Mat &k ,  Vector2d &point_uv, const Point2d &point_cam){

    point_uv.x() = k.at<double>(0,0) * point_cam.x+ k.at<double>(0,2);
    point_uv.y() = k.at<double>(1,1) * point_cam.y+ k.at<double>(1,2);
}

int main(){
    Matrix3d R;
    Vector3d T;
    Vector3d PL,PR;                               //p點在左右相機下的三維座標
    Vector2d uvl;                                 //左邊去畸變的像素座標
    Point2d uvr= Point2d(939.325073,597.613770);   //右邊去畸變的像素座標
    Point2d uvl_undistorted = Point2d(900,615);  //左camera的帶畸變的像素座標
    Vector2d uvl_,uvr_;                              //重投影座標

    R<<0.888061, -0.00125292, 0.459724,
       0.00433001, 0.999975, -0.0056391,
       -0.459705, 0.00699847, 0.888044 ;

    T<<-204.265,-0.252961,50.1539;

    Mat k_left, k_right;         //左相機內參
    k_left=(Mat_<double>(3,3)<<1674.86, 0, 658.302,
                            0, 1674.76, 529.925,
                             0, 0, 1);
    k_right = (Mat_<double>(3,3)<<1671.91, 0, 686.756,
            0, 1617.78, 516.332,
            0, 0, 1);

    Matrix3d k_left_,k_right_;
    cv2eigen(k_left,k_left_);
    cv2eigen(k_right,k_right_);

    vector<Point2d> uvl_array;              //爲調用去畸變函數準備
    vector<Point2d> uvl_undistorted_array;  //爲調用去畸變函數準備
    uvl_undistorted_array.push_back(uvl_undistorted);
    //左camera的畸變參數
    vector<double> Distort_Coefficients_left;
    double k1= -0.0315542;double k2= -0.0298582;double p1= 0.000139779;double p2= -0.000262658;
    double k3= -0.308588;double k4= 0.0312938;double k5= -0.100438;double k6= -0.376244;
    Distort_Coefficients_left.push_back(k1);Distort_Coefficients_left.push_back(k2);
    Distort_Coefficients_left.push_back(p1);Distort_Coefficients_left.push_back(p2);
    Distort_Coefficients_left.push_back(k3);Distort_Coefficients_left.push_back(k4);
    Distort_Coefficients_left.push_back(k5);Distort_Coefficients_left.push_back(k6);

    undistortPoints(uvl_undistorted_array,uvl_array,k_left,Distort_Coefficients_left);   //矯正左邊的點
     //需要將變換後的點先變化爲齊次座標系,然後還需要乘以相機參數矩陣,纔是最終的變化的座標。
    Point2d _uvl_;                  //去畸變的歸一化平面座標
    _uvl_=uvl_array.front();
    camera2pixel(k_left,uvl,_uvl_);
    cout<<"左邊去畸變的像素座標uvl = \n"<<uvl<<endl;


    /***************公垂線中點座標解算三維點P座標*************/
    Vector3d Or = Vector3d(0,0,0);
    Vector3d Ol = T;

    Vector3d Pr = Vector3d(0,0,0);
    pixel2camera(k_right,uvr,Pr);      //得到在右camera座標下的uvr三維座標

    Vector3d Pl_ = Vector3d(0,0,0);    //左像素點的歸一化平面座標
    Vector3d Pl = Vector3d(0,0,0);     //左像素點在右camera下的三維座標
    pixel2camera(k_left,uvl,Pl_);
    Pl = R*Pl_+T;                      //得到在右camera座標下的uvl三維座標

    Vector3d Pl_Ol = Pl-Ol;
    Vector3d Pr_Or = Pr-Or;
    Vector3d ll = Pl_Ol/sqrt( pow(Pl_Ol.x(),2) + pow(Pl_Ol.y(),2) + pow(Pl_Ol.z(),2) );
    Vector3d lr = Pr_Or/sqrt( pow(Pr_Or.x(),2) + pow(Pr_Or.y(),2) + pow(Pr_Or.z(),2) );   //單位方向向量

    Vector3d Pr_Pl = Pr-Pl;
    double l1l2=ll.dot(lr);
    double a1 = ( Pr_Pl.dot(ll) - ((Pr_Pl.dot(lr)) * l1l2 ))/( 1 - pow(l1l2,2) );
    double a2 = ( Pr_Pl.dot(ll) * l1l2 - Pr_Pl.dot(lr) )/( 1- pow(l1l2,2) );

    Vector3d Ql = Pl+a1*ll;
    Vector3d Qr = Pr+a2*lr;
    Vector3d Ql_Qr = Ql-Qr;

    PR=(Ql+Qr)/2;

    cout<<"公垂線長度||Ql_Qr|| = \n"<<sqrt(pow(Ql_Qr.x(),2)+pow(Ql_Qr.y(),2)+pow(Ql_Qr.z(),2))<<endl;
    cout<<"以右camera爲世界座標系的三維座標 PR = \n"<<PR<<endl;

    cout<<"滿足關係:R’*Pr+T’=Pl的旋轉矩陣R' = \n"<<R.transpose()<<endl;  //右camera到左camera的轉換
    cout<<"滿足關係:R’*Pr+T’=Pl的平移向量T' = \n"<<-T<<endl;

    //計算重投影像素座標

    Sophus::SE3 SE3_Rt(R.transpose(), -T);  //注意旋轉矩陣的方向
    Vector4d p_=SE3_Rt.matrix() * Vector4d(PR.x(),PR.y(),PR.z(),1); //齊次座標化爲非齊次座標,注意維度
    Vector3d uvl_1 =  k_left_ * Vector3d( p_.x()/p_.z(),p_.y()/p_.z(),1);  //注意:先除z再與K相乘
    uvl_=Vector2d(uvl_1.x(),uvl_1.y());
    cout<<"左camera重投影的像素座標 uvl'=  \n"<<uvl_<<endl;

    uvr_.x()=k_right_(0,0)*PR.x()/PR.z()+k_right_(0,2);
    uvr_.y()=k_right_(1,1)*PR.y()/PR.z()+k_right_(1,2);
    cout<<"右camera重投影的像素座標 uvr' = \n"<<uvr_<<endl;

    //計算重投影座標誤差,兩點距離
    double error_left =sqrt(pow(uvl.x()-uvl_.x(),2)+pow(uvl.y()-uvl_.y(),2) );
    double error_right=sqrt(pow(uvr.x-uvr_.x(),2)+pow(uvr.y-uvr_.y(),2) );

    cout<<"左邊的重投影誤差爲:"<<error_left<<endl;
    cout<<"右邊的重投影誤差爲:"<<error_right<<endl;

   //計算F_matrix
   Matrix3d f_matrix;
   Matrix3d T_hat = Sophus::SO3::hat(T);
   f_matrix = (k_right_.inverse().transpose()) * T_hat * R * k_right_.inverse();
   cout<<"fundamental_matrix = \n"<<f_matrix<<endl;
   Mat fundamental;
   eigen2cv(f_matrix,fundamental);

   //計算極線L的距離
   Mat correspondent_lines;
   vector<Point2d> points;
    points.push_back(uvr);
   computeCorrespondEpilines(points,2,fundamental,correspondent_lines);   //極線計算函數
    //whichImage Index of the image (1 or 2) that contains the points .
   cout<<"correspondent_lines= \n"<<correspondent_lines<<endl;

   return 0;
}

結果顯示

左邊去畸變的像素座標uvl = 
900.373
615.122
公垂線長度||Ql_Qr|| = 
0.689768
以右camera爲世界座標系的三維座標 PR = 
 64.285
21.0348
425.536
滿足關係:R’*Pr+T’=Pl的旋轉矩陣R' = 
   0.888061  0.00433001   -0.459705
-0.00125292    0.999975  0.00699847
   0.459724  -0.0056391    0.888044
滿足關係:R’*Pr+T’=Pl的平移向量T' = 
 204.265
0.252961
-50.1539
左camera重投影的像素座標 uvl'=  
966.962
643.325
右camera重投影的像素座標 uvr' = 
939.328
596.301
左邊的重投影誤差爲:72.316
右邊的重投影誤差爲:1.31275
fundamental_matrix = 
-3.60892e-08 -1.85429e-05   0.00963386
-1.82499e-05  5.22198e-07     0.138642
  0.00905312    -0.113795     -11.4706
correspondent_lines= 
[-0.01441517678927577, -0.9998960959410401, 614.3981576164301]

參考文獻

[1] 羅世民, 李茂西. 雙目視覺測量中三維座標的求取方法研究[D]. , 2006.
[2] Gao X, Zhang T, Liu Y, et al. Visual SLAM fourteen lectures: From theory to practice[M].Beijing: Electronic Industry Press, 2017.高翔, 張濤, 劉毅, 等. 視覺SLAM十四講:從理論到實踐[M]. 北京: 電子工業出版社, 2017.
[3] Bradski G, Kaehler A, 於仕琪, 等. 學習 OpenCV (中文版)[J]. 2009.

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章