SLAM 14 Lectures: The Direct Method (8)

Overview: some code for the direct method

Sparse direct method on RGB-D data (g2o)

Brief description:
Assuming pixel intensities stay unchanged across frames, there is no need to compute descriptors, and only a few hundred pixels are used.

Only a sparse structure can be computed.

Parameters
Input: measurements (the grayscale values of the 3D points), the new gray image, and the camera intrinsics

Output: the camera pose
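Written out, each measurement contributes a scalar photometric error under the grayscale-invariance assumption (notation: T is the pose being estimated, p_w the 3D world point, π the pinhole projection, I_1 and I_2 the reference and current images):

$$ e = I_2\big( \pi( T \, p_w ) \big) - I_1( p_1 ) $$

This is what computeError below evaluates, with I_1(p_1) stored as _measurement.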

Computing the error:

virtual void computeError()
    {
        const VertexSE3Expmap* v  =static_cast<const VertexSE3Expmap*> ( _vertices[0] );
        Eigen::Vector3d x_local = v->estimate().map ( x_world_ );        // transform the world point into the current camera frame ("local" coordinates)
        float x = x_local[0]*fx_/x_local[2] + cx_;
        float y = x_local[1]*fy_/x_local[2] + cy_;
        // check that a 4-pixel neighborhood around (x,y) lies inside the image
        if ( x-4<0 || ( x+4 ) >image_->cols || ( y-4 ) <0 || ( y+4 ) >image_->rows )
        {
            _error ( 0,0 ) = 0.0;
            this->setLevel ( 1 );        // mark the edge so it is skipped during optimization
        }
        else
        {
            _error ( 0,0 ) = getPixelValue ( x,y ) - _measurement;        // photometric error: interpolated intensity in the current image minus the measured reference intensity
        }
    }

The point observed in I1 is transformed into the current camera frame by estimate().map(), projected onto the image with the pinhole formula, and the error is I2 minus I1.
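The projection used above is the standard pinhole model, with (X, Y, Z) the point in camera coordinates:

$$ u = f_x \frac{X}{Z} + c_x, \qquad v = f_y \frac{Y}{Z} + c_y $$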

Computing the Jacobian J

    virtual void linearizeOplus( )        // compute the Jacobian matrix
    {
        if ( level() == 1 )    // the projection was out of range (level was set to 1 in computeError), so contribute a zero 1x6 Jacobian
        {
            _jacobianOplusXi = Eigen::Matrix<double, 1, 6>::Zero();
            return;
        }
        VertexSE3Expmap* vtx = static_cast<VertexSE3Expmap*> ( _vertices[0] );
        Eigen::Vector3d xyz_trans = vtx->estimate().map ( x_world_ );   // q in the book: the world point transformed into the current camera frame

        double x = xyz_trans[0];
        double y = xyz_trans[1];
        double invz = 1.0/xyz_trans[2];
        double invz_2 = invz*invz;

        float u = x*fx_*invz + cx_;
        float v = y*fy_*invz + cy_;

        // jacobian from se3 to u,v    
        // NOTE that in g2o the Lie algebra is (\omega, \epsilon), where \omega is so(3) and \epsilon the translation
        Eigen::Matrix<double, 2, 6> jacobian_uv_ksai;

        jacobian_uv_ksai ( 0,0 ) = - x*y*invz_2 *fx_;
        jacobian_uv_ksai ( 0,1 ) = ( 1+ ( x*x*invz_2 ) ) *fx_;
        jacobian_uv_ksai ( 0,2 ) = - y*invz *fx_;
        jacobian_uv_ksai ( 0,3 ) = invz *fx_;
        jacobian_uv_ksai ( 0,4 ) = 0;
        jacobian_uv_ksai ( 0,5 ) = -x*invz_2 *fx_;

        jacobian_uv_ksai ( 1,0 ) = - ( 1+y*y*invz_2 ) *fy_;
        jacobian_uv_ksai ( 1,1 ) = x*y*invz_2 *fy_;
        jacobian_uv_ksai ( 1,2 ) = x*invz *fy_;
        jacobian_uv_ksai ( 1,3 ) = 0;
        jacobian_uv_ksai ( 1,4 ) = invz *fy_;
        jacobian_uv_ksai ( 1,5 ) = -y*invz_2 *fy_;

        Eigen::Matrix<double, 1, 2> jacobian_pixel_uv;

        jacobian_pixel_uv ( 0,0 ) = ( getPixelValue ( u+1,v )-getPixelValue ( u-1,v ) ) /2;
        jacobian_pixel_uv ( 0,1 ) = ( getPixelValue ( u,v+1 )-getPixelValue ( u,v-1 ) ) /2;

        _jacobianOplusXi = jacobian_pixel_uv*jacobian_uv_ksai;
    }

This Jacobian is what the optimizer uses when solving for the pose update.
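The code assembles J by the chain rule: a 1x2 image-gradient block times a 2x6 projection-versus-pose block,

$$ J = \frac{\partial I_2}{\partial \mathbf{u}} \, \frac{\partial \mathbf{u}}{\partial \delta \boldsymbol{\xi}} \in \mathbb{R}^{1 \times 6}, $$

which is exactly the product jacobian_pixel_uv * jacobian_uv_ksai on the last line above.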

Interpolation

    // bilinearly interpolated intensity at the sub-pixel position (x,y)
    inline float getPixelValue ( float x, float y )
    {
        // pointer to the top-left pixel of the 2x2 neighborhood
        uchar* data = & image_->data[ int ( y ) * image_->step + int ( x ) ];
        float xx = x - floor ( x );    // fractional part of x
        float yy = y - floor ( y );    // fractional part of y
        return float (
                   ( 1-xx ) * ( 1-yy ) * data[0] +
                   xx* ( 1-yy ) * data[1] +
                   ( 1-xx ) *yy*data[ image_->step ] +
                   xx*yy*data[image_->step+1]
               );
    }
   

To evaluate pixel intensities more accurately, simple bilinear interpolation is used (the routine above).
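In formula form, with xx and yy the fractional parts of x and y, and I_{00}, I_{10}, I_{01}, I_{11} the four surrounding pixels:

$$ I(x,y) \approx (1-xx)(1-yy)\,I_{00} + xx\,(1-yy)\,I_{10} + (1-xx)\,yy\,I_{01} + xx\,yy\,I_{11} $$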

The above gives the error term (a unary edge); next, g2o is used to add the optimization variable (the vertex).
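For reference, the edge class holding the three methods above can be sketched roughly as follows. This is a reconstruction rather than a verbatim listing: the member names follow the code above, and the constructor matches the call made when adding edges below.

// unary photometric edge: 1-D error, double measurement, pose vertex VertexSE3Expmap
class EdgeSE3ProjectDirect : public g2o::BaseUnaryEdge<1, double, g2o::VertexSE3Expmap>
{
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW

    EdgeSE3ProjectDirect() {}
    EdgeSE3ProjectDirect ( Eigen::Vector3d point, float fx, float fy, float cx, float cy, cv::Mat* image )
        : x_world_ ( point ), fx_ ( fx ), fy_ ( fy ), cx_ ( cx ), cy_ ( cy ), image_ ( image )
    {}

    virtual void computeError();                                      // as above
    virtual void linearizeOplus();                                    // as above
    virtual bool read ( std::istream& in ) { return false; }          // unused stubs,
    virtual bool write ( std::ostream& out ) const { return false; }  // required by g2o

protected:
    inline float getPixelValue ( float x, float y );   // bilinear interpolation, as above

    Eigen::Vector3d x_world_;                  // 3D point in world coordinates
    float cx_ = 0, cy_ = 0, fx_ = 0, fy_ = 0;  // camera intrinsics
    cv::Mat* image_ = nullptr;                 // pointer to the current gray image
};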

Initializing g2o:

typedef g2o::BlockSolver<g2o::BlockSolverTraits<6,1>> DirectBlock;  // the solved pose is 6-dimensional, the error 1-dimensional
// define the linear solver type
DirectBlock::LinearSolverType* linearSolver = new g2o::LinearSolverDense< DirectBlock::PoseMatrixType > ();
// build the block solver on top of the linear solver
DirectBlock* solver_ptr = new DirectBlock ( linearSolver );
// use Levenberg-Marquardt
g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg ( solver_ptr ); // L-M

g2o::SparseOptimizer optimizer;
optimizer.setAlgorithm ( solver );
optimizer.setVerbose( true );
// add the pose vertex, initialized with the current estimate Tcw
g2o::VertexSE3Expmap* pose = new g2o::VertexSE3Expmap();
pose->setEstimate ( g2o::SE3Quat ( Tcw.rotation(), Tcw.translation() ) );
pose->setId ( 0 );
optimizer.addVertex ( pose );

Direct pose estimation

Adding the edges

int id=1;
for ( Measurement m: measurements )
{
    // pass the 3D point, the intrinsics, and the gray image
  EdgeSE3ProjectDirect* edge = new EdgeSE3ProjectDirect (
      m.pos_world,
      K ( 0,0 ), K ( 1,1 ), K ( 0,2 ), K ( 1,2 ), gray
  );
  // standard edge setup: vertex, measurement, information matrix, id
  edge->setVertex ( 0, pose );
  edge->setMeasurement ( m.grayscale );
  edge->setInformation ( Eigen::Matrix<double,1,1>::Identity() );
  edge->setId ( id++ );
  optimizer.addEdge ( edge );
}
cout<<"edges in graph: "<<optimizer.edges().size() <<endl;
optimizer.initializeOptimization();
optimizer.optimize ( 30 );
Tcw = pose->estimate();
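In the sample program, the vertex and edge setup above lives inside a helper function that main calls. A plausible signature, consistent with the call poseEstimationDirect ( measurements, &gray, K, Tcw ) below (the exact declaration is an assumption here):

// optimize the camera pose with the direct method;
// the body is the g2o setup, vertex, and edge code shown above
bool poseEstimationDirect (
    const vector<Measurement>& measurements,  // 3D points with reference intensities
    cv::Mat* gray,                            // the current gray image
    Eigen::Matrix3f& K,                       // camera intrinsics
    Eigen::Isometry3d& Tcw );                 // in/out: the pose estimate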

Operations in the main function

Workflow:
Read the images and the camera intrinsics, extract FAST keypoints on the first frame, then track all subsequent frames with the direct method.
The code is as follows:

 for ( int index=0; index<10; index++ )
    {
        cout<<"*********** loop "<<index<<" ************"<<endl;
        // preparation: read the file names and load the RGB and depth images
        fin>>time_rgb>>rgb_file>>time_depth>>depth_file;
        color = cv::imread ( path_to_dataset+"/"+rgb_file );
        depth = cv::imread ( path_to_dataset+"/"+depth_file, -1 );
        if ( color.data==nullptr || depth.data==nullptr )
            continue;
        cv::cvtColor ( color, gray, cv::COLOR_BGR2GRAY );
        if ( index ==0 )
        {
            // extract FAST keypoints on the first frame
            vector<cv::KeyPoint> keypoints;
            cv::Ptr<cv::FastFeatureDetector> detector = cv::FastFeatureDetector::create();
            detector->detect ( color, keypoints );
            for ( auto kp:keypoints )
            {
                // discard points close to the image border
                if ( kp.pt.x < 20 || kp.pt.y < 20 || ( kp.pt.x+20 ) >color.cols || ( kp.pt.y+20 ) >color.rows )
                    continue;
                ushort d = depth.ptr<ushort> ( cvRound ( kp.pt.y ) ) [ cvRound ( kp.pt.x ) ];
                if ( d==0 )
                    continue;
                // back-project to a 3D world point
                Eigen::Vector3d p3d = project2Dto3D ( kp.pt.x, kp.pt.y, d, fx, fy, cx, cy, depth_scale );
                float grayscale = float ( gray.ptr<uchar> ( cvRound ( kp.pt.y ) ) [ cvRound ( kp.pt.x ) ] );
                measurements.push_back ( Measurement ( p3d, grayscale ) );
            }
            prev_color = color.clone();
            continue;
        }
        // estimate the camera motion with the direct method
        chrono::steady_clock::time_point t1 = chrono::steady_clock::now();

        poseEstimationDirect ( measurements, &gray, K, Tcw );

        chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
        chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>> ( t2-t1 );
        cout<<"direct method costs time: "<<time_used.count() <<" seconds."<<endl;
        cout<<"Tcw="<<Tcw.matrix() <<endl;
    }
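project2Dto3D is not shown in this excerpt. A minimal sketch of the usual back-projection it performs, assuming the call signature seen above (raw depth divided by depth_scale, then the inverse pinhole model):

// back-project pixel (x,y) with raw depth d into a 3D point
inline Eigen::Vector3d project2Dto3D ( int x, int y, int d,
                                       float fx, float fy, float cx, float cy, float scale )
{
    float zz = float ( d ) / scale;      // metric depth
    float xx = zz * ( x - cx ) / fx;
    float yy = zz * ( y - cy ) / fy;
    return Eigen::Vector3d ( xx, yy, zz );
}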

Semi-dense direct method

Code:

// select every pixel with a significant gradient, instead of FAST keypoints
for ( int x=10; x<gray.cols-10; x++ )
    for ( int y=10; y<gray.rows-10; y++ )
    {
        // central-difference image gradient at (x,y)
        Eigen::Vector2d delta (
            gray.ptr<uchar>(y)[x+1] - gray.ptr<uchar>(y)[x-1],
            gray.ptr<uchar>(y+1)[x] - gray.ptr<uchar>(y-1)[x]
        );
        if ( delta.norm() < 50 )    // skip low-gradient pixels
            continue;
        ushort d = depth.ptr<ushort> (y)[x];
        if ( d==0 )
            continue;
        Eigen::Vector3d p3d = project2Dto3D ( x, y, d, fx, fy, cx, cy, depth_scale );
        float grayscale = float ( gray.ptr<uchar> (y) [x] );
        measurements.push_back ( Measurement ( p3d, grayscale ) );
    }

The sparse feature points are replaced by pixels with significant gradients; each selected pixel becomes an edge in the graph optimization.

Advantages
1. The time spent computing feature points and descriptors is saved.
2. Only pixel gradients are needed; no feature points are required.
3. The direct method also works on images with only gradual intensity changes, where few distinct features can be extracted.

Disadvantages
1. The method follows image gradients, but the image is a strongly non-convex function of the pose; unless the motion is small, the optimization easily falls into a local minimum.
2. The grayscale-invariance assumption does not always hold.
3. A single pixel has no distinctiveness: each pixel "prefers" a different camera adjustment, so the method can only follow the majority, substituting quantity for quality.
