Using a Kinect V1 camera to acquire a point cloud, fit a plane, and recover the camera's angle

  1. Acquire a 2D depth image from the depth camera
  2. Convert the 2D image into a point cloud using the camera intrinsics
  3. Take a local patch of the point cloud and fit a plane to it (via SVD); the eigenvector corresponding to the smallest eigenvalue of the covariance matrix is the plane normal (see the formulas just below)
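
For reference, these are the two computations behind steps 2 and 3 (they match the code at the end of this post). A pixel (u, v) with raw depth d back-projects through the pinhole model as

  Z = d / s,   X = (u - cx) * Z / fx,   Y = (v - cy) * Z / fy

where fx, fy, cx, cy are the camera intrinsics and s is the depth scale. For the plane fit, stack the n centered points into an n x 3 matrix P, form the covariance C = P^T P, and take the SVD C = U S V^T; the column of V belonging to the smallest singular value is the plane normal n = (nx, ny, nz), and the camera-to-wall angle is atan(nx / nz).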

Acquiring the 2D depth image from the depth camera

  1. How a depth camera works (a brief introduction)
  2. Install the depth camera (Kinect V1) driver
    1. Following the 創客智造 tutorial: link
  3. Calibrate the Kinect V1 images: install the driver with the tutorial above, make sure both RGB and IR images can be acquired, then calibrate following the links
    1. RGB: link
      1. The files produced during the process:

         [figure: RGB calibration process files]

    2. IR: link
      1. Click save to store the results, then click commit to write the result information directly
      2. The files produced during the process:

         [figure: IR calibration process files]

    3. Calibrating RGB against IR
      1. Some good reference posts: zkl999999
      2. OpenCV's Stereo Calibration
      3. The approach used in this post (chosen because the later feature matching uses the same approach, and it only needs one shot per position)
        1. Directly use
  4. Convert the 2D image into a point cloud using the camera intrinsics
  5. Registering the depth image to the visible-light (RGB) image
    1. Recover the 3D coordinates of the IR point cloud (relative to the IR camera)
    2. Transfer the IR points into the RGB camera frame using the IR-to-RGB extrinsics
    3. Project them through the RGB intrinsic matrix (this associates each IR point with the RGB image data; the stored result is called RGB_with_Depth below)
    4. A sketch of this step is given after this list
  6. Estimating the relative camera pose from two Kinect V1 shots
    1. Photograph the same scene with the Kinect V1 from two different positions
    2. Compute RGB_with_Depth for both positions using step 5
    3. Match feature points between the two RGB images to obtain point correspondences
    4. Look up each matched feature point in its RGB_with_Depth to recover its 3D coordinates
    5. Fit an extrinsic (rigid-body) transform to these 3D point pairs to obtain the relative pose between the two positions (see the second sketch after this list)
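
The registration code for step 5 is not listed in this post, so here is a minimal sketch of it. It assumes the IR and RGB intrinsics, plus the IR-to-RGB extrinsics (R, t), have already been obtained from the calibration above; the names here (Intrinsics, registerDepthToRgb) are illustrative, not from the original code.

// Minimal sketch of step 5: register a depth (IR) image to the RGB image.
// Assumes the IR/RGB intrinsics and the IR->RGB extrinsics (R, t) are known
// from the calibration above; all names are illustrative.
#include <opencv2/core/core.hpp>
#include <Eigen/Dense>

struct Intrinsics { float fx, fy, cx, cy; };

// depth: 32FC1 image in millimetres; returns depth (m) indexed by RGB pixel
cv::Mat registerDepthToRgb(const cv::Mat& depth,
                           const Intrinsics& ir, const Intrinsics& rgb,
                           const Eigen::Matrix3f& R, const Eigen::Vector3f& t)
{
    cv::Mat rgbDepth(depth.rows, depth.cols, CV_32FC1, cv::Scalar(0));
    for (int v = 0; v < depth.rows; ++v) {          // v: row index
        for (int u = 0; u < depth.cols; ++u) {      // u: column index
            float z = depth.at<float>(v, u) / 1000.0f;   // mm -> m
            if (z <= 0) continue;
            // 5.1 back-project the IR pixel into the IR camera frame
            Eigen::Vector3f p_ir((u - ir.cx) * z / ir.fx,
                                 (v - ir.cy) * z / ir.fy,
                                 z);
            // 5.2 transfer the point into the RGB camera frame
            Eigen::Vector3f p_rgb = R * p_ir + t;
            if (p_rgb.z() <= 0) continue;
            // 5.3 project through the RGB intrinsics
            int u_rgb = static_cast<int>(rgb.fx * p_rgb.x() / p_rgb.z() + rgb.cx);
            int v_rgb = static_cast<int>(rgb.fy * p_rgb.y() / p_rgb.z() + rgb.cy);
            if (u_rgb < 0 || u_rgb >= depth.cols || v_rgb < 0 || v_rgb >= depth.rows)
                continue;
            rgbDepth.at<float>(v_rgb, u_rgb) = p_rgb.z();  // one RGB_with_Depth entry
        }
    }
    return rgbDepth;
}

Likewise, a minimal sketch of step 6.5, assuming the matched 3D point pairs have already been collected into two vectors. Eigen's umeyama() solves the least-squares rigid transform between two point sets in closed form via SVD; relativePose is an illustrative name.

// Minimal sketch of step 6.5: fit the rigid transform between two sets of
// matched 3D points (one set per camera position).
#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <vector>

Eigen::Matrix4f relativePose(const std::vector<Eigen::Vector3f>& src,
                             const std::vector<Eigen::Vector3f>& dst)
{
    Eigen::Matrix3Xf S(3, src.size()), D(3, dst.size());
    for (size_t i = 0; i < src.size(); ++i) { S.col(i) = src[i]; D.col(i) = dst[i]; }
    // last argument false: pure rigid transform (R, t), no scale
    return Eigen::umeyama(S, D, false);
}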

The full code of the plane-fitting node:

#include <ros/ros.h>                         // ROS core
#include <iostream>                          // C++ standard I/O
#include <math.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>
#include <image_transport/image_transport.h>
#include <pcl/point_cloud.h>
#include <pcl_conversions/pcl_conversions.h>
#include <sensor_msgs/PointCloud2.h>
#include <pcl/point_types.h>
#include <pcl/filters/voxel_grid.h>
#include <pcl/features/integral_image_normal.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <Eigen/SVD>
#include <Eigen/Dense>

using namespace Eigen;
using std::cout;
using std::endl;
using std::stringstream;
using std::string;

bool saveCloud(false);
unsigned int fileNum = 1;
#define PI 3.14159265358979
float fx = 525.0;  // focal length x
float fy = 525.0;  // focal length y
float cx = 319.5;  // optical center x
float cy = 239.5;  // optical center y

float factor = 1000.0;   // depth scale: raw depth values (assumed millimetres) -> metres
float angle_wall = 0.0;  // yaw of the fitted wall normal, updated by depth_callback


void rgb_callback(const sensor_msgs::ImageConstPtr& msg)
{
    cv::Mat image;
    try
    {
        image = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8)->image;
        char key = (char)cv::waitKey(33);
        if (key == 32)            // the ASCII code of the space key is 32
            saveCloud = true;
        if (saveCloud)
        {
            stringstream stream;
            stringstream stream1;
            stream << "Goal RgbImage" << fileNum << ".jpg";
            stream1 << "/home/leonsun/catkin_navi/imgs/" << fileNum << ".jpg";
            string filename = stream.str();
            string filename1 = stream1.str();
            cv::imwrite(filename1, cv_bridge::toCvShare(msg)->image);
            saveCloud = false;
            fileNum++;
            cout << filename << " has been saved." << endl;
        }
    }
    catch (cv_bridge::Exception& e)
    {
        ROS_ERROR("cv_bridge exception: %s", e.what());
        return;
    }

    int width = image.cols / 4;
    int height = image.rows / 4;

    // mark the image centre and the region used for plane fitting
    cv::circle(image, cv::Point(image.cols / 2, image.rows / 2), 2, cv::Scalar(255, 0, 0), -1);
    cv::Rect rect(image.cols / 2 - width / 2, image.rows / 2 - height / 2, width, height); // top-left corner (x, y), then width and height
    cv::rectangle(image, rect, cv::Scalar(255, 0, 0), 1, cv::LINE_8, 0);

    // draw the projected wall-normal direction from the image centre
    float x_col = 100 * sin(angle_wall) + image.cols / 2;
    float y_row = -100 * cos(angle_wall) + image.rows / 2;
    cv::line(image, cv::Point(image.cols / 2, image.rows / 2),
             cv::Point(cvRound(x_col), cvRound(y_row)), cv::Scalar(255, 0, 0));

    cv::imshow("image", image);
    cv::waitKey(5);
}

void depth_callback(const sensor_msgs::ImageConstPtr& msg)
{
    cv::Mat image;
    try
    {
        image = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::TYPE_32FC1)->image; // image.type() == CV_32FC1
    }
    catch (cv_bridge::Exception& e)
    {
        ROS_ERROR("cv_bridge exception: %s", e.what());
        return;
    }

    int width = image.cols / 4;
    int height = image.rows / 4;

    // back-project the central region of the depth image into a point cloud
    pcl::PointCloud<pcl::PointXYZ> cloud;
    cloud.width  = width;
    cloud.height = height;
    cloud.points.resize(width * height);

    int num = 0;
    int points_num = 0;   // number of pixels with a valid (non-zero) depth
    float X_sum = 0, Y_sum = 0, Z_sum = 0;
    for (int u = image.rows / 2 - height / 2; u < image.rows / 2 + height / 2; u++)     // u: row
    {
        for (int v = image.cols / 2 - width / 2; v < image.cols / 2 + width / 2; v++)   // v: column
        {
            float Z = image.at<float>(u, v) / factor;
            // pinhole back-projection: X comes from the column index, Y from the row index
            cloud.points[num].x = (v - cx) * Z / fx;
            cloud.points[num].y = (u - cy) * Z / fy;
            cloud.points[num].z = Z;
            X_sum += cloud.points[num].x;   // invalid pixels have Z == 0 and add nothing
            Y_sum += cloud.points[num].y;
            Z_sum += cloud.points[num].z;
            if (Z != 0)
                points_num++;
            num++;
        }
    }

    if (points_num != 0)
    {
        // centre the valid points, then fit the plane via SVD of the covariance
        float X_mean = X_sum / points_num;
        float Y_mean = Y_sum / points_num;
        float Z_mean = Z_sum / points_num;

        MatrixXf positions(points_num, 3);
        int b = 0;
        for (size_t number = 0; number < cloud.size(); number++)
        {
            if (cloud.points[number].z != 0)
            {
                positions(b, 0) = cloud.points[number].x - X_mean;
                positions(b, 1) = cloud.points[number].y - Y_mean;
                positions(b, 2) = cloud.points[number].z - Z_mean;
                b++;
            }
        }

        Matrix3f Covariance_matrix = positions.transpose() * positions;
        JacobiSVD<MatrixXf> svd(Covariance_matrix, ComputeThinU | ComputeThinV);
        Matrix3f V = svd.matrixV();

        // the singular values are sorted in decreasing order, so the eigenvector of
        // the smallest eigenvalue -- the plane normal -- is the last COLUMN of V
        Vector3f Fn = V.col(2);
        if (Fn(2) < 0)        // keep the normal pointing away from the camera (+Z)
            Fn = -Fn;

        angle_wall = atan(Fn(0) / Fn(2));
        ROS_INFO("Angle: %f", angle_wall * 180 / PI);
    }
    else
    {
        ROS_INFO("Please don't stand in front of the camera.");
    }

    cv::circle(image, cv::Point(image.cols / 2, image.rows / 2), 2, cv::Scalar(0, 0, 0), -1);
    cv::Rect rect(image.cols / 2 - width / 2, image.rows / 2 - height / 2, width, height); // top-left corner (x, y), then width and height
    cv::rectangle(image, rect, cv::Scalar(0, 0, 0), 1, cv::LINE_8, 0);
    cv::imshow("depth", image);
    cv::waitKey(5);
}

int main(int argc, char** argv)
{
    ros::init(argc, argv, "RGB");
    ros::NodeHandle nh_;

    cv::namedWindow("image");
    cv::namedWindow("depth");

    ros::Subscriber image_sub_ = nh_.subscribe("/camera/rgb/image_raw", 1, rgb_callback);
    ros::Subscriber depth_sub_ = nh_.subscribe("/camera/depth_registered/image_raw", 1, depth_callback);

    ros::spin();
    return 0;
}
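
To run the node (a sketch of a typical setup; <your_package> and <this_node> stand in for your actual package and executable names), start the Kinect V1 driver so that the two subscribed topics are published, then launch the node:

roslaunch openni_launch openni.launch depth_registration:=true
rosrun <your_package> <this_node>

Press the space bar with the "image" window focused to save the current RGB frame; the angle printed by ROS_INFO is the yaw of the fitted wall normal relative to the camera's optical axis.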

