Mastering OpenCV ch4: SfM Explained (Part 2)

The previous part covered feature detection and matching; this part examines how the camera matrices are recovered.

1: Solving the Fundamental Matrix

First, two short helper functions that convert between KeyPoint and Point2f:

void KeyPointsToPoints(const vector<KeyPoint>& kps, vector<Point2f>& ps) {
    ps.clear();
    for (unsigned int i=0; i<kps.size(); i++) ps.push_back(kps[i].pt);
}

void PointsToKeyPoints(const vector<Point2f>& ps, vector<KeyPoint>& kps) {
    kps.clear();
    for (unsigned int i=0; i<ps.size(); i++) kps.push_back(KeyPoint(ps[i],1.0f));
}

These two conversions are used constantly in the fundamental-matrix code, so they are shown here up front.
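A quick round-trip shows what the conversion keeps and what it drops (a minimal sketch; the sample coordinates are made up):

vector<KeyPoint> kps;
kps.push_back(KeyPoint(Point2f(10.5f, 20.0f), 3.0f)); // keypoint with size 3

vector<Point2f> pts;
KeyPointsToPoints(kps, pts);       // pts[0] == (10.5, 20.0); only .pt survives

vector<KeyPoint> kps_back;
PointsToKeyPoints(pts, kps_back);  // kps_back[0].size == 1.0f -- size/angle/octave are lost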

Next, the fundamental matrix is estimated for every pair of views:

void MultiCameraPnP::PruneMatchesBasedOnF() {
    //prune the matches between view <_i> and every other view using the fundamental matrix
//#pragma omp parallel for
    //estimate F for every pair of views
    for (int _i=0; _i < imgs.size() - 1; _i++)
    {
        for (unsigned int _j=_i+1; _j < imgs.size(); _j++) {
            int older_view = _i, working_view = _j;

            GetFundamentalMat( imgpts[older_view], 
                imgpts[working_view], 
                imgpts_good[older_view],
                imgpts_good[working_view], 
                matches_matrix[std::make_pair(older_view,working_view)]
#ifdef __SFM__DEBUG__
                ,imgs_orig[older_view],imgs_orig[working_view]
#endif
            );
            //update flip matches as well
#pragma omp critical
            matches_matrix[std::make_pair(working_view,older_view)] = FlipMatches(matches_matrix[std::make_pair(older_view,working_view)]);
        }
    }
}
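FlipMatches is not listed in this excerpt; in the book's code it simply swaps queryIdx and trainIdx of every match so that the match list computed for (i, j) can be reused for (j, i). A sketch along those lines:

//swap query/train indices so matches computed for (i,j) also serve (j,i)
std::vector<cv::DMatch> FlipMatches(const std::vector<cv::DMatch>& matches) {
    std::vector<cv::DMatch> flip;
    flip.reserve(matches.size());
    for (unsigned int i=0; i<matches.size(); i++) {
        flip.push_back(matches[i]);
        std::swap(flip.back().queryIdx, flip.back().trainIdx);
    }
    return flip;
}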
The core function is GetFundamentalMat: it computes the fundamental matrix between the two images and prunes the match set, removing most of the false matches:
Mat GetFundamentalMat(const vector<KeyPoint>& imgpts1,
                       const vector<KeyPoint>& imgpts2,
                       vector<KeyPoint>& imgpts1_good,
                       vector<KeyPoint>& imgpts2_good,
                       vector<DMatch>& matches
#ifdef __SFM__DEBUG__
                      ,const Mat& img_1,
                      const Mat& img_2
#endif
                      ) 
{
    //Try to eliminate keypoints based on the fundamental matrix
    //(although this is not the proper way to do this)
    vector<uchar> status(imgpts1.size());

#ifdef __SFM__DEBUG__
    std::vector< DMatch > good_matches_;
    std::vector<KeyPoint> keypoints_1, keypoints_2;
#endif      
    //  undistortPoints(imgpts1, imgpts1, cam_matrix, distortion_coeff);
    //  undistortPoints(imgpts2, imgpts2, cam_matrix, distortion_coeff);
    //
    imgpts1_good.clear(); imgpts2_good.clear();

    vector<KeyPoint> imgpts1_tmp;
    vector<KeyPoint> imgpts2_tmp;
    if (matches.size() <= 0) { 
        //points already aligned...
        imgpts1_tmp = imgpts1;
        imgpts2_tmp = imgpts2;
    } else {
        GetAlignedPointsFromMatch(imgpts1, imgpts2, matches, imgpts1_tmp, imgpts2_tmp); //pull out only the keypoints that take part in the matches
    }

    Mat F;
    {
        vector<Point2f> pts1,pts2;
        KeyPointsToPoints(imgpts1_tmp, pts1); //convert keypoints to Point2f
        KeyPointsToPoints(imgpts2_tmp, pts2);
#ifdef __SFM__DEBUG__
        cout << "pts1 " << pts1.size() << " (orig pts " << imgpts1_tmp.size() << ")" << endl;
        cout << "pts2 " << pts2.size() << " (orig pts " << imgpts2_tmp.size() << ")" << endl;
#endif
        double minVal,maxVal;
        cv::minMaxIdx(pts1,&minVal,&maxVal);
        F = findFundamentalMat(pts1, pts2, FM_RANSAC, 0.006 * maxVal, 0.99, status); //threshold from [Snavely07 4.1]; computes F and fills the inlier mask status
    }

    vector<DMatch> new_matches;
    cout << "F keeping " << countNonZero(status) << " / " << status.size() << endl; 
    for (unsigned int i=0; i<status.size(); i++) {
        if (status[i]) //keep only the inliers flagged by the RANSAC mask; few false matches remain after this
        {
            imgpts1_good.push_back(imgpts1_tmp[i]);
            imgpts2_good.push_back(imgpts2_tmp[i]);

            if (matches.size() <= 0) { //points already aligned: the i-th entries correspond directly
                new_matches.push_back(DMatch(i, i, 1.0f));
            } else {
                new_matches.push_back(matches[i]);
            }

#ifdef __SFM__DEBUG__
            good_matches_.push_back(DMatch(imgpts1_good.size()-1,imgpts1_good.size()-1,1.0));
            keypoints_1.push_back(imgpts1_tmp[i]);
            keypoints_2.push_back(imgpts2_tmp[i]);
#endif
        }
    }   

    cout << matches.size() << " matches before, " << new_matches.size() << " new matches after Fundamental Matrix\n";
    matches = new_matches; //keep only those points who survived the fundamental matrix

#if 0
    //-- Draw only "good" matches
#ifdef __SFM__DEBUG__
    if(!img_1.empty() && !img_2.empty()) {      
        vector<Point2f> i_pts,j_pts;
        Mat img_orig_matches;
        { //draw original features in red
            vector<uchar> vstatus(imgpts1_tmp.size(),1);
            vector<float> verror(imgpts1_tmp.size(),1.0);
            img_1.copyTo(img_orig_matches);
            KeyPointsToPoints(imgpts1_tmp, i_pts);
            KeyPointsToPoints(imgpts2_tmp, j_pts);
            drawArrows(img_orig_matches, i_pts, j_pts, vstatus, verror, Scalar(0,0,255));
        }
        { //superimpose filtered features in green
            vector<uchar> vstatus(imgpts1_good.size(),1);
            vector<float> verror(imgpts1_good.size(),1.0);
            i_pts.resize(imgpts1_good.size());
            j_pts.resize(imgpts2_good.size());
            KeyPointsToPoints(imgpts1_good, i_pts);
            KeyPointsToPoints(imgpts2_good, j_pts);
            drawArrows(img_orig_matches, i_pts, j_pts, vstatus, verror, Scalar(0,255,0));
            imshow( "Filtered Matches", img_orig_matches );
        }
        int c = waitKey(0);
        if (c=='s') {
            imwrite("fundamental_mat_matches.png", img_orig_matches);
        }
        destroyWindow("Filtered Matches");
    }
#endif      
#endif

    return F;
}
The helper that extracts, from each image's full keypoint set, only the keypoints that participate in the matches:
void GetAlignedPointsFromMatch(const std::vector<cv::KeyPoint>& imgpts1,
                               const std::vector<cv::KeyPoint>& imgpts2,
                               const std::vector<cv::DMatch>& matches,
                               std::vector<cv::KeyPoint>& pt_set1,
                               std::vector<cv::KeyPoint>& pt_set2) 
{
    for (unsigned int i=0; i<matches.size(); i++) {
//      cout << "matches[i].queryIdx " << matches[i].queryIdx << " matches[i].trainIdx " << matches[i].trainIdx << endl;
        assert(matches[i].queryIdx < imgpts1.size());
        pt_set1.push_back(imgpts1[matches[i].queryIdx]);
        assert(matches[i].trainIdx < imgpts2.size());
        pt_set2.push_back(imgpts2[matches[i].trainIdx]);
    }   
}

At this point the best match set for every image pair has been computed; the next step processes these pairwise matches.

2: Counting the Homography Inliers Between Two Views
RANSAC filtering is used to count how many of the matched points between two views are consistent with a single homography.

//Following Snavely07 4.2 - find how many inliers are in the Homography between 2 views
int MultiCameraPnP::FindHomographyInliers2Views(int vi, int vj) 
{
    vector<cv::KeyPoint> ikpts,jkpts; vector<cv::Point2f> ipts,jpts;
    GetAlignedPointsFromMatch(imgpts[vi],imgpts[vj],matches_matrix[make_pair(vi,vj)],ikpts,jkpts);
    KeyPointsToPoints(ikpts,ipts); KeyPointsToPoints(jkpts,jpts);

    double minVal,maxVal; cv::minMaxIdx(ipts,&minVal,&maxVal); //TODO flatten point2d?? or it takes max of width and height

    vector<uchar> status;
    cv::Mat H = cv::findHomography(ipts,jpts,status,CV_RANSAC, 0.004 * maxVal); //threshold from Snavely07
    return cv::countNonZero(status); //number of inliers
}

Iterating over the matches_matrix map, each view pair's homography-inlier percentage is pushed into a list; the list is then sorted in ascending order, so the pairs with the fewest homography inliers (the widest baselines, per Snavely07 4.2) are tried first:

//sort pairwise matches to find the lowest Homography inliers [Snavely07 4.2]
    cout << "Find highest match...";
    list<pair<int,pair<int,int> > > matches_sizes;
    //TODO: parallelize!
    for(std::map<std::pair<int,int> ,std::vector<cv::DMatch> >::iterator i = matches_matrix.begin(); i != matches_matrix.end(); ++i) {
        if((*i).second.size() < 100)
            matches_sizes.push_back(make_pair(100,(*i).first));
        else {
            int Hinliers = FindHomographyInliers2Views((*i).first.first,(*i).first.second);
            int percent = (int)(((double)Hinliers) / ((double)(*i).second.size()) * 100.0);
            cout << "[" << (*i).first.first << "," << (*i).first.second << " = "<<percent<<"] ";
            matches_sizes.push_back(make_pair((int)percent,(*i).first));
        }
    }
    cout << endl;
    matches_sizes.sort(sort_by_first); //sort pairs by homography-inlier percentage, ascending (lowest first)
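The comparator sort_by_first is not shown above; in the book's code it orders entries ascending by their first element (the inlier percentage), which is what puts the widest-baseline pairs at the front. A sketch consistent with that call:

//order (percent, view-pair) entries by percentage, ascending
bool sort_by_first(pair<int,pair<int,int> > a, pair<int,pair<int,int> > b) {
    return a.first < b.first;
}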
3: Computing the Camera Matrices from the Sorted Match Pairs

    bool goodF = false;
    m_first_view = m_second_view = 0;
    //iterate pairs in ascending order of homography-inlier percentage
    for(list<pair<int,pair<int,int> > >::iterator highest_pair = matches_sizes.begin(); 
        highest_pair != matches_sizes.end() && !goodF; 
        ++highest_pair) 
    {
        m_second_view = (*highest_pair).second.second;
        m_first_view  = (*highest_pair).second.first;

        std::cout << " -------- " << imgs_names[m_first_view] << " and " << imgs_names[m_second_view] << " -------- " <<std::endl;
        //what if reconstruction of the first two views is bad? fall back to another pair
        //See if the Fundamental Matrix between these two views is good
        goodF = FindCameraMatrices(K, Kinv, distortion_coeff,
            imgpts[m_first_view], 
            imgpts[m_second_view], 
            imgpts_good[m_first_view],
            imgpts_good[m_second_view], 
            P, 
            P1,
            matches_matrix[std::make_pair(m_first_view,m_second_view)],
            tmp_pcloud
#ifdef __SFM__DEBUG__
            ,imgs[m_first_view],imgs[m_second_view]
#endif
        );
Here P and P1 are preset to the canonical camera [I | 0]:
cv::Matx34d P (1,0,0,0,
               0,1,0,0,
               0,0,1,0),
            P1(1,0,0,0,
               0,1,0,0,
               0,0,1,0);
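This is the canonical two-view configuration from HZ: the first camera is fixed at the world origin, so FindCameraMatrices only has to recover the pose of the second camera,

P = [I | 0],   P1 = [R | t],

where R and t are extracted from the essential matrix below.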
4: Computing the Fundamental and Essential Matrices
Mat F = GetFundamentalMat(imgpts1,imgpts2,imgpts1_good,imgpts2_good,matches
#ifdef __SFM__DEBUG__
                                  ,img_1,img_2
#endif
                                  );
        if(matches.size() < 100) { // || ((double)imgpts1_good.size() / (double)imgpts1.size()) < 0.25
            cerr << "not enough inliers after F matrix" << endl;
            return false;
        }

        //Essential matrix: compute then extract cameras [R|t]
        Mat_<double> E = K.t() * F * K; //according to HZ (9.12)

        //according to http://en.wikipedia.org/wiki/Essential_matrix#Properties_of_the_essential_matrix
        if(fabsf(determinant(E)) > 1e-07) {
            cout << "det(E) != 0 : " << determinant(E) << "\n";
            P1 = 0;
            return false;
        }

The essential matrix E is thus obtained directly from the fundamental matrix via the calibration matrix K.
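For reference, the relation used above (HZ 9.12) and the properties a valid essential matrix must satisfy are:

E = K^T F K,   det(E) = 0,   E = U diag(s, s, 0) V^T

The determinant check above enforces the rank-2 property; the singular-value-ratio check in DecomposeEtoRandT below enforces the two-equal-singular-values property.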

5: Next, the essential matrix is decomposed by SVD into a rotation part and a translation part. Three sanity checks apply. First, if fabsf(determinant(E)) > 1e-07 (the check above), E is not a valid essential matrix and cannot be decomposed. If the decomposition yields determinant(R1)+1.0 < 1e-09, i.e. det(R1) == -1, E has the wrong sign: it is negated and decomposed again. Finally, if fabsf(determinant(R))-1.0 > 1e-07, the resulting R1 is not a valid rotation and the pair must be rejected.

bool DecomposeEtoRandT(
    Mat_<double>& E,
    Mat_<double>& R1,
    Mat_<double>& R2,
    Mat_<double>& t1,
    Mat_<double>& t2) 
{
#ifdef DECOMPOSE_SVD
    //Using HZ E decomposition
    Mat svd_u, svd_vt, svd_w;
    TakeSVDOfE(E,svd_u,svd_vt,svd_w); //SVD of E

    //check if first and second singular values are the same (as they should be)
    double singular_values_ratio = fabsf(svd_w.at<double>(0) / svd_w.at<double>(1));
    if(singular_values_ratio>1.0) singular_values_ratio = 1.0/singular_values_ratio; // flip ratio to keep it [0,1]
    if (singular_values_ratio < 0.7) {
        cout << "singular values are too far apart\n";
        return false;
    }

    Matx33d W(0,-1,0,   //HZ 9.13
        1,0,0,
        0,0,1);
    Matx33d Wt(0,1,0,
        -1,0,0,
        0,0,1);
    R1 = svd_u * Mat(W) * svd_vt; //HZ 9.19
    R2 = svd_u * Mat(Wt) * svd_vt; //HZ 9.19
    t1 = svd_u.col(2); //u3
    t2 = -svd_u.col(2); //u3
#else
    //Using Horn90 E decomposition (an alternative to the SVD route)
    DecomposeEssentialUsingHorn90(E[0],R1[0],R2[0],t1[0],t2[0]);
#endif
    return true;
}
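The checks described in step 5 live in the caller, FindCameraMatrices. A sketch following the book's repository (the rotation-coherence test, CheckCoherentRotation in the book's code, is inlined here for brevity):

Mat_<double> R1(3,3), R2(3,3), t1(1,3), t2(1,3);
if (!DecomposeEtoRandT(E, R1, R2, t1, t2)) return false;

//det(R1) == -1 means the decomposition picked the wrong sign of E: flip and redo
if (determinant(R1) + 1.0 < 1e-09) {
    cout << "det(R) == -1 [" << determinant(R1) << "]: flip E's sign" << endl;
    E = -E;
    DecomposeEtoRandT(E, R1, R2, t1, t2);
}

//a valid rotation must satisfy |det(R)| == 1
if (fabs(determinant(R1)) - 1.0 > 1e-07) {
    cerr << "det(R) != +-1.0, this is not a rotation matrix" << endl;
    return false;
}

//one of the four candidate poses for the second camera, here [R1|t1]
P1 = Matx34d(R1(0,0), R1(0,1), R1(0,2), t1(0),
             R1(1,0), R1(1,1), R1(1,2), t1(1),
             R1(2,0), R1(2,1), R1(2,2), t1(2));

The other three candidates substitute R2 and/or t2 in the same layout.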
The TakeSVDOfE helper performs the SVD of E, producing u, vt and w:
void TakeSVDOfE(Mat_<double>& E, Mat& svd_u, Mat& svd_vt, Mat& svd_w) {
#if 1
    //Using OpenCV's SVD
    SVD svd(E,SVD::MODIFY_A);
    svd_u = svd.u;
    svd_vt = svd.vt;
    svd_w = svd.w;
#else
    //Using Eigen's SVD
    cout << "Eigen3 SVD..\n";
    Eigen::Matrix3f  e = Eigen::Map<Eigen::Matrix<double,3,3,Eigen::RowMajor> >((double*)E.data).cast<float>();
    Eigen::JacobiSVD<Eigen::MatrixXf> svd(e, Eigen::ComputeThinU | Eigen::ComputeThinV);
    Eigen::MatrixXf Esvd_u = svd.matrixU();
    Eigen::MatrixXf Esvd_v = svd.matrixV();
    svd_u = (Mat_<double>(3,3) << Esvd_u(0,0), Esvd_u(0,1), Esvd_u(0,2),
                          Esvd_u(1,0), Esvd_u(1,1), Esvd_u(1,2), 
                          Esvd_u(2,0), Esvd_u(2,1), Esvd_u(2,2)); 
    Mat_<double> svd_v = (Mat_<double>(3,3) << Esvd_v(0,0), Esvd_v(0,1), Esvd_v(0,2),
                          Esvd_v(1,0), Esvd_v(1,1), Esvd_v(1,2), 
                          Esvd_v(2,0), Esvd_v(2,1), Esvd_v(2,2));
    svd_vt = svd_v.t();
    svd_w = (Mat_<double>(1,3) << svd.singularValues()[0] , svd.singularValues()[1] , svd.singularValues()[2]);
#endif

    cout << "----------------------- SVD ------------------------\n";
    cout << "U:\n"<<svd_u<<"\nW:\n"<<svd_w<<"\nVt:\n"<<svd_vt<<endl;
    cout << "----------------------------------------------------\n";
}

Below is the alternative way of decomposing E (Horn's 1990 method), which likewise yields R1, R2, t1 and t2:

void DecomposeEssentialUsingHorn90(double _E[9], double _R1[9], double _R2[9], double _t1[3], double _t2[3]) {
    //from : http://people.csail.mit.edu/bkph/articles/Essential.pdf
#ifdef USE_EIGEN
    using namespace Eigen;

    Matrix3d E = Map<Matrix<double,3,3,RowMajor> >(_E);
    Matrix3d EEt = E * E.transpose();
    Vector3d e0e1 = E.col(0).cross(E.col(1)),e1e2 = E.col(1).cross(E.col(2)),e2e0 = E.col(2).cross(E.col(0));
    Vector3d b1,b2;

#if 1
    //Method 1
    Matrix3d bbt = 0.5 * EEt.trace() * Matrix3d::Identity() - EEt; //Horn90 (12)
    Vector3d bbt_diag = bbt.diagonal();
    if (bbt_diag(0) > bbt_diag(1) && bbt_diag(0) > bbt_diag(2)) {
        b1 = bbt.row(0) / sqrt(bbt_diag(0));
        b2 = -b1;
    } else if (bbt_diag(1) > bbt_diag(0) && bbt_diag(1) > bbt_diag(2)) {
        b1 = bbt.row(1) / sqrt(bbt_diag(1));
        b2 = -b1;
    } else {
        b1 = bbt.row(2) / sqrt(bbt_diag(2));
        b2 = -b1;
    }
#else
    //Method 2
    if (e0e1.norm() > e1e2.norm() && e0e1.norm() > e2e0.norm()) {
        b1 = e0e1.normalized() * sqrt(0.5 * EEt.trace()); //Horn90 (18)
        b2 = -b1;
    } else if (e1e2.norm() > e0e1.norm() && e1e2.norm() > e2e0.norm()) {
        b1 = e1e2.normalized() * sqrt(0.5 * EEt.trace()); //Horn90 (18)
        b2 = -b1;
    } else {
        b1 = e2e0.normalized() * sqrt(0.5 * EEt.trace()); //Horn90 (18)
        b2 = -b1;
    }
#endif

    //Horn90 (19)
    Matrix3d cofactors; cofactors.col(0) = e1e2; cofactors.col(1) = e2e0; cofactors.col(2) = e0e1;
    cofactors.transposeInPlace();

    //B = [b]_x , see Horn90 (6) and http://en.wikipedia.org/wiki/Cross_product#Conversion_to_matrix_multiplication
    Matrix3d B1; B1 <<  0,-b1(2),b1(1),
                        b1(2),0,-b1(0),
                        -b1(1),b1(0),0;
    Matrix3d B2; B2 <<  0,-b2(2),b2(1),
                        b2(2),0,-b2(0),
                        -b2(1),b2(0),0;

    Map<Matrix<double,3,3,RowMajor> > R1(_R1),R2(_R2);

    //Horn90 (24)
    R1 = (cofactors.transpose() - B1*E) / b1.dot(b1);
    R2 = (cofactors.transpose() - B2*E) / b2.dot(b2);
    Map<Vector3d> t1(_t1),t2(_t2); 
    t1 = b1; t2 = b2;

    cout << "Horn90 provided " << endl << R1 << endl << "and" << endl << R2 << endl;
#endif
}
At this point R1, R2, t1 and t2 have all been recovered. P remains the preset canonical matrix [I | 0], while P1 can be assembled from these four values in four different ways: [R1|t1], [R1|t2], [R2|t1], [R2|t2]. Which of the four to keep is analyzed in the next part.
