SLAM 14 Lectures --- Frontend 0.3 --- Basic VO --- Adding g2o on top of the two-frame visual odometry to refine the PnP result

Previously, PnP was solved with RANSAC; a single OpenCV call takes care of it.

However, that estimate is easily affected by noise. One option is to switch to optical flow (as in the second edition of the book), but using g2o is probably the better choice.

So here we add a g2o optimization step, using the RANSAC PnP result as the initial value of the graph optimization.

Concretely, it is a 3D-2D bundle adjustment with a unary edge.
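
The quantity each unary edge minimizes is the reprojection error of one inlier match. A quick sketch in the book's usual notation, where T_cr is the pose being optimized, P_i a 3D point in the reference frame, u_i the matched pixel in the current frame, and f_x, f_y, c_x, c_y the intrinsics:

    e_i = u_i - \pi( T_{cr} \, P_i )
    \pi( [X \; Y \; Z]^T ) = [\; f_x X / Z + c_x , \;\; f_y Y / Z + c_y \;]^T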

g2o_types.h

(1) Three kinds of error edges are defined:
(i) a 3D-3D binary edge
(ii) a 3D-3D unary edge
(iii) a 3D-2D unary edge
(2) The hand-written Jacobian functions are then commented out: deriving the Jacobians by hand is a thankless job......
This way, g2o simply falls back to its built-in numerical Jacobian.
(3) override is added to every overridden virtual function, e.g. virtual void computeError() override;
as a reminder.


#ifndef MYSLAM_G2O_TYPES_H
#define MYSLAM_G2O_TYPES_H

#include "myslam/common_include.h"
#include "camera.h"

#include <g2o/core/base_vertex.h>
#include <g2o/core/base_unary_edge.h>
#include <g2o/core/block_solver.h>
#include <g2o/core/optimization_algorithm_levenberg.h>
#include <g2o/types/sba/types_six_dof_expmap.h>
#include <g2o/solvers/dense/linear_solver_dense.h>
#include <g2o/core/robust_kernel.h>
#include <g2o/core/robust_kernel_impl.h>

namespace myslam
{
class EdgeProjectXYZRGBD : public g2o::BaseBinaryEdge<3, Eigen::Vector3d, g2o::VertexSBAPointXYZ, g2o::VertexSE3Expmap> // 3D-3D binary edge: optimizes both the point and the pose
{
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW
    virtual void computeError() override;
//     virtual void linearizeOplus() override;  // left unimplemented on purpose, so g2o uses its numerical Jacobian
    virtual bool read( std::istream& in ) override { return true; }
    virtual bool write( std::ostream& out ) const override { return true; }
};

// only to optimize the pose, no point
class EdgeProjectXYZRGBDPoseOnly: public g2o::BaseUnaryEdge<3, Eigen::Vector3d, g2o::VertexSE3Expmap >  // 3D-3D unary edge
{
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW
    // Error: measurement = R*point + t
    virtual void computeError() override;  // compute the error
//     virtual void linearizeOplus() override;  // analytic Jacobian, commented out

    virtual bool read( std::istream& in ) override { return true; }
    virtual bool write( std::ostream& out ) const override { return true; }

    Vector3d point_;
};

class EdgeProjectXYZ2UVPoseOnly: public g2o::BaseUnaryEdge<2, Eigen::Vector2d, g2o::VertexSE3Expmap >  // 3D-2D unary edge
{
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW

    virtual void computeError() override;
//     virtual void linearizeOplus() override;

    virtual bool read( std::istream& in ) override { return true; }
    virtual bool write( std::ostream& os ) const override { return true; }

    Vector3d point_;
    Camera* camera_;  // the projection goes through the intrinsics, so keep a pointer to the Camera class
};

}


#endif // MYSLAM_G2O_TYPES_H

g2o_types.cpp

(1) The g2o initialization has to be changed here (due to g2o API changes between versions, the first-edition code runs into some obscure bugs).
(2) The hand-written Jacobian implementations are commented out as well.

#include "myslam/g2o_types.h"

namespace myslam
{
void EdgeProjectXYZRGBD::computeError()  // binary edge, so two vertices; the error is a 3D vector
{
    const g2o::VertexSBAPointXYZ* point = static_cast<const g2o::VertexSBAPointXYZ*> ( _vertices[0] );  // the 3D point, expressed in the reference (left) frame
    const g2o::VertexSE3Expmap* pose = static_cast<const g2o::VertexSE3Expmap*> ( _vertices[1] );
    _error = _measurement - pose->estimate().map ( point->estimate() );  // pose is presumably T_c_r, mapping the reference frame into the current frame
}
}

// void EdgeProjectXYZRGBD::linearizeOplus()
// {
//     g2o::VertexSE3Expmap* pose = static_cast<g2o::VertexSE3Expmap *> ( _vertices[1] );
//     g2o::SE3Quat T ( pose->estimate() );  // this way of constructing SE3Quat is worth a closer look
//     g2o::VertexSBAPointXYZ* point = static_cast<g2o::VertexSBAPointXYZ*> ( _vertices[0] );
//     Eigen::Vector3d xyz = point->estimate();
//     Eigen::Vector3d xyz_trans = T.map ( xyz );  // the point transformed into the current (right) frame
//     double x = xyz_trans[0];
//     double y = xyz_trans[1];
//     double z = xyz_trans[2];
// 
//     _jacobianOplusXi = - T.rotation().toRotationMatrix();
// 
//     _jacobianOplusXj ( 0,0 ) = 0;
//     _jacobianOplusXj ( 0,1 ) = -z;
//     _jacobianOplusXj ( 0,2 ) = y;
//     _jacobianOplusXj ( 0,3 ) = -1;
//     _jacobianOplusXj ( 0,4 ) = 0;
//     _jacobianOplusXj ( 0,5 ) = 0;
// 
//     _jacobianOplusXj ( 1,0 ) = z;
//     _jacobianOplusXj ( 1,1 ) = 0;
//     _jacobianOplusXj ( 1,2 ) = -x;
//     _jacobianOplusXj ( 1,3 ) = 0;
//     _jacobianOplusXj ( 1,4 ) = -1;
//     _jacobianOplusXj ( 1,5 ) = 0;
// 
//     _jacobianOplusXj ( 2,0 ) = -y;
//     _jacobianOplusXj ( 2,1 ) = x;
//     _jacobianOplusXj ( 2,2 ) = 0;
//     _jacobianOplusXj ( 2,3 ) = 0;
//     _jacobianOplusXj ( 2,4 ) = 0;
//     _jacobianOplusXj ( 2,5 ) = -1;
// }

void EdgeProjectXYZRGBDPoseOnly::computeError()  // the error is a 3D vector
{
    const g2o::VertexSE3Expmap* pose = static_cast<const g2o::VertexSE3Expmap*> ( _vertices[0] );
    _error = _measurement - pose->estimate().map ( point_ );
}

// void EdgeProjectXYZRGBDPoseOnly::linearizeOplus()
// {
//     g2o::VertexSE3Expmap* pose = static_cast<g2o::VertexSE3Expmap*> ( _vertices[0] );
//     g2o::SE3Quat T ( pose->estimate() );
//     Vector3d xyz_trans = T.map ( point_ );
//     double x = xyz_trans[0];
//     double y = xyz_trans[1];
//     double z = xyz_trans[2];
// 
//     _jacobianOplusXi ( 0,0 ) = 0;
//     _jacobianOplusXi ( 0,1 ) = -z;
//     _jacobianOplusXi ( 0,2 ) = y;
//     _jacobianOplusXi ( 0,3 ) = -1;
//     _jacobianOplusXi ( 0,4 ) = 0;
//     _jacobianOplusXi ( 0,5 ) = 0;
// 
//     _jacobianOplusXi ( 1,0 ) = z;
//     _jacobianOplusXi ( 1,1 ) = 0;
//     _jacobianOplusXi ( 1,2 ) = -x;
//     _jacobianOplusXi ( 1,3 ) = 0;
//     _jacobianOplusXi ( 1,4 ) = -1;
//     _jacobianOplusXi ( 1,5 ) = 0;
// 
//     _jacobianOplusXi ( 2,0 ) = -y;
//     _jacobianOplusXi ( 2,1 ) = x;
//     _jacobianOplusXi ( 2,2 ) = 0;
//     _jacobianOplusXi ( 2,3 ) = 0;
//     _jacobianOplusXi ( 2,4 ) = 0;
//     _jacobianOplusXi ( 2,5 ) = -1;
// }

void EdgeProjectXYZ2UVPoseOnly::computeError()  // the error is a 2D vector
{
    const g2o::VertexSE3Expmap* pose = static_cast<const g2o::VertexSE3Expmap*> ( _vertices[0] );
    _error = _measurement - camera_->camera2pixel ( 
        pose->estimate().map(point_) );
}

// void EdgeProjectXYZ2UVPoseOnly::linearizeOplus()
// {
//     g2o::VertexSE3Expmap* pose = static_cast<g2o::VertexSE3Expmap*> ( _vertices[0] );
//     g2o::SE3Quat T ( pose->estimate() );
//     Vector3d xyz_trans = T.map ( point_ );
//     double x = xyz_trans[0];
//     double y = xyz_trans[1];
//     double z = xyz_trans[2];
//     double z_2 = z*z;
// 
//     _jacobianOplusXi ( 0,0 ) =  x*y/z_2 *camera_->fx_;
//     _jacobianOplusXi ( 0,1 ) = - ( 1+ ( x*x/z_2 ) ) *camera_->fx_;
//     _jacobianOplusXi ( 0,2 ) = y/z * camera_->fx_;
//     _jacobianOplusXi ( 0,3 ) = -1./z * camera_->fx_;
//     _jacobianOplusXi ( 0,4 ) = 0;
//     _jacobianOplusXi ( 0,5 ) = x/z_2 * camera_->fx_;
// 
//     _jacobianOplusXi ( 1,0 ) = ( 1+y*y/z_2 ) *camera_->fy_;
//     _jacobianOplusXi ( 1,1 ) = -x*y/z_2 *camera_->fy_;
//     _jacobianOplusXi ( 1,2 ) = -x/z *camera_->fy_;
//     _jacobianOplusXi ( 1,3 ) = 0;
//     _jacobianOplusXi ( 1,4 ) = -1./z *camera_->fy_;
//     _jacobianOplusXi ( 1,5 ) = y/z_2 *camera_->fy_;
// }

}
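
computeError of the 3D-2D edge calls Camera::camera2pixel, which lives in the project's Camera class (camera.h / camera.cpp, not shown here). For reference, a minimal sketch of what that pinhole projection presumably looks like, assuming Camera stores the intrinsics fx_, fy_, cx_, cy_ as used elsewhere in this post (the exact signature in the project may differ):

Vector2d Camera::camera2pixel ( const Vector3d& p_c )
{
    // project a point given in camera coordinates onto the pixel plane
    return Vector2d (
        fx_ * p_c ( 0,0 ) / p_c ( 2,0 ) + cx_,
        fy_ * p_c ( 1,0 ) / p_c ( 2,0 ) + cy_
    );
}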

Then rewrite the pose estimation function in visual_odometry.cpp:

void VisualOdometry::poseEstimationPnP()
{
    // construct the 3d 2d observations
    vector<cv::Point3f> pts3d;
    vector<cv::Point2f> pts2d;
    
    for ( cv::DMatch m:feature_matches_ )
    {
        pts3d.push_back( pts_3d_ref_[m.queryIdx] );
        pts2d.push_back( keypoints_curr_[m.trainIdx].pt );
    }
    
    Mat K = ( cv::Mat_<double>(3,3)<<
        ref_->camera_->fx_, 0, ref_->camera_->cx_,
        0, ref_->camera_->fy_, ref_->camera_->cy_,
        0,0,1
    );
    Mat rvec, tvec, inliers;
    cv::solvePnPRansac( pts3d, pts2d, K, Mat(), rvec, tvec, false, 100, 4.0, 0.99, inliers );  // rvec, tvec are the rotation and translation; BA with g2o's default vertices and a unary edge would also do the job
    num_inliers_ = inliers.rows;  // number of inliers, i.e. how many 3D-2D pairs were actually used
    cout<<"pnp inliers: "<<num_inliers_<<endl;

    T_c_r_estimated_ = SE3(
        SO3::exp(Vector3d(rvec.at<double>(0,0), rvec.at<double>(1,0), rvec.at<double>(2,0))),  // the old SO3(rx, ry, rz) constructor is replaced by SO3::exp
        Vector3d( tvec.at<double>(0,0), tvec.at<double>(1,0), tvec.at<double>(2,0))
    );
    
    // add g2o optimization *******************************************************************
    // using bundle adjustment to optimize the pose 
//     typedef g2o::BlockSolver<g2o::BlockSolverTraits<6,2>> Block;
//     Block::LinearSolverType* linearSolver = new g2o::LinearSolverDense<Block::PoseMatrixType>();
//     Block* solver_ptr = new Block( linearSolver );
//     g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg ( solver_ptr );
//     g2o::SparseOptimizer optimizer;
//     optimizer.setAlgorithm ( solver );
    
    // set up the graph optimization; configure g2o first
    typedef g2o::BlockSolver<g2o::BlockSolverTraits<6, 3>> BlockSolverType;  // pose dimension is 6, landmark dimension is 3 (the vertex dimensions; here the landmarks are not vertices, though)
    typedef g2o::LinearSolverDense<BlockSolverType::PoseMatrixType> LinearSolverType; // linear solver type
    // gradient descent method: choose among GN, LM and DogLeg
    auto solver = new g2o::OptimizationAlgorithmLevenberg(
      g2o::make_unique<BlockSolverType>(g2o::make_unique<LinearSolverType>()));
    g2o::SparseOptimizer optimizer;     // the graph model
    optimizer.setAlgorithm(solver);   // set the solver
    optimizer.setVerbose(true);       // enable debug output
    
    
    
    g2o::VertexSE3Expmap* pose = new g2o::VertexSE3Expmap();
    pose->setId ( 0 );
    pose->setEstimate ( g2o::SE3Quat (      // initialize with the RANSAC result
        T_c_r_estimated_.rotationMatrix(), 
        T_c_r_estimated_.translation()
    ) );
    optimizer.addVertex ( pose );

    // edges
    for ( int i=0; i<inliers.rows; i++ )
    {
        int index = inliers.at<int>(i,0);
        // 3D -> 2D projection
        EdgeProjectXYZ2UVPoseOnly* edge = new EdgeProjectXYZ2UVPoseOnly();  // 3D-2D unary edge, optimizing the pose only
        edge->setId(i);
        edge->setVertex(0, pose);
        edge->camera_ = curr_->camera_.get();
        edge->point_ = Vector3d( pts3d[index].x, pts3d[index].y, pts3d[index].z );
        edge->setMeasurement( Vector2d(pts2d[index].x, pts2d[index].y) );
        edge->setInformation( Eigen::Matrix2d::Identity() );
        optimizer.addEdge( edge );
    }
    
    optimizer.initializeOptimization();
    optimizer.optimize(10);
    
    T_c_r_estimated_ = SE3 (
        pose->estimate().rotation(),  // quaternion
        pose->estimate().translation()  // translation
    );
    
    
}
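
The information matrices above are plain identity matrices. Since robust_kernel_impl.h is already included in g2o_types.h, one optional refinement (not part of the code above, just a sketch) is to attach a Huber kernel to each edge so that any outliers that survived RANSAC are down-weighted:

        // inside the edge loop, before optimizer.addEdge( edge ):
        g2o::RobustKernelHuber* rk = new g2o::RobustKernelHuber();
        rk->setDelta( 1.0 );           // with identity information, residuals above ~1 pixel enter the robust region
        edge->setRobustKernel( rk );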


Finally, remember to add g2o in the CMakeLists. Note that because g2o's built-in vertex types are used, it has to look like this (linking g2o_types_sba is what makes the default SBA vertices available):

# G2O
LIST( APPEND CMAKE_MODULE_PATH /home/mjy/slambook2/3rdparty/g2o/g2o/cmake_modules ) 
SET( G2O_ROOT /usr/local/include/g2o ) 
find_package( G2O REQUIRED )
include_directories( ${G2O_INCLUDE_DIRS} )

set( THIRD_PARTY_LIBS 
    ${OpenCV_LIBS}
    ${Sophus_LIBRARIES}
    g2o_core g2o_stuff g2o_types_sba)
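
These libraries still have to be linked into the project's own targets; assuming the library target is called myslam as in the book's project layout (the actual target name may differ in your tree), something like:

target_link_libraries( myslam ${THIRD_PARTY_LIBS} )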

In addition, running the result directly may abort with "double free or corruption (out)". To fix it, edit CMakeLists.txt and change

set( CMAKE_CXX_FLAGS "-std=c++11 -march=native -O3" )

to

set( CMAKE_CXX_FLAGS "-std=c++11 -O3" )

Looking at the output, the error barely changes across iterations, probably because each iteration already uses many points, so the first iteration essentially converges; the RANSAC initial value is also already quite good.

There are still shortcomings, though. For example:

If a frame in the middle is lost, PnP may end up being computed between, say, frame 1 and frame 5. The matched feature points can then be rather few, so the RANSAC PnP result is not very good.

The plan is therefore to add a local map: local mapping,

i.e. the map_ maintained by the VO.

That is the topic of the next post.
