PCL: 3D Object Recognition Based on Correspondence Grouping

Original tutorial on the official PCL website.
This tutorial explains how to perform 3D object recognition based on the pcl_recognition module. Specifically, it shows how to use a correspondence grouping algorithm to cluster the set of point-to-point correspondences obtained after the 3D descriptor matching stage into the model instances present in the current scene. For each cluster, representing a possible model instance in the scene, the correspondence grouping algorithm also outputs the transformation matrix identifying the 6DOF pose estimate of that model in the current scene.

Given the point cloud of a specific object as input, this method finds the matching instances of that object in a scene cloud; it can be used, for example, to grasp a specified object. The code proceeds as follows:

  1. Downsample the model cloud and the scene cloud to obtain sparse keypoints;

  2. Compute a descriptor for each model and scene keypoint;

  3. Find corresponding descriptor pairs with a KdTreeFLANN search;

  4. Cluster the correspondences into candidate model instances with a correspondence grouping algorithm;

  5. Return, for each recognized model instance, its transformation matrix (rotation plus translation) together with the clustered correspondences.

The full code:

#include <pcl/io/pcd_io.h>
#include <pcl/point_cloud.h>
#include <pcl/correspondence.h>
#include <pcl/features/normal_3d_omp.h>
#include <pcl/features/shot_omp.h>
#include <pcl/features/board.h>
#include <pcl/keypoints/uniform_sampling.h>
#include <pcl/recognition/cg/hough_3d.h>
#include <pcl/recognition/cg/geometric_consistency.h>
#include <pcl/visualization/pcl_visualizer.h>
#include <pcl/kdtree/kdtree_flann.h>
#include <pcl/kdtree/impl/kdtree_flann.hpp>
#include <pcl/common/transforms.h>
#include <pcl/console/parse.h>
 
typedef pcl::PointXYZRGBA PointType;
typedef pcl::Normal NormalType;
typedef pcl::ReferenceFrame RFType;
typedef pcl::SHOT352 DescriptorType;
 
std::string model_filename_;
std::string scene_filename_;
 
//Algorithm params
bool show_keypoints_ (false);
bool show_correspondences_ (false);
bool use_cloud_resolution_ (false);
bool use_hough_ (true);
float model_ss_ (0.01f);
float scene_ss_ (0.03f);
float rf_rad_ (0.015f);
float descr_rad_ (0.02f);
float cg_size_ (0.01f);
float cg_thresh_ (5.0f);
 
void
showHelp (char *filename)
{
  std::cout << std::endl;
  std::cout << "***************************************************************************" << std::endl;
  std::cout << "*                                                                         *" << std::endl;
  std::cout << "*             Correspondence Grouping Tutorial - Usage Guide              *" << std::endl;
  std::cout << "*                                                                         *" << std::endl;
  std::cout << "***************************************************************************" << std::endl << std::endl;
  std::cout << "Usage: " << filename << " model_filename.pcd scene_filename.pcd [Options]" << std::endl << std::endl;
  std::cout << "Options:" << std::endl;
  std::cout << "     -h:                     Show this help." << std::endl;
  std::cout << "     -k:                     Show used keypoints." << std::endl;
  std::cout << "     -c:                     Show used correspondences." << std::endl;
  std::cout << "     -r:                     Compute the model cloud resolution and multiply" << std::endl;
  std::cout << "                             each radius given by that value." << std::endl;
  std::cout << "     --algorithm (Hough|GC): Clustering algorithm used (default Hough)." << std::endl;
  std::cout << "     --model_ss val:         Model uniform sampling radius (default 0.01)" << std::endl;
  std::cout << "     --scene_ss val:         Scene uniform sampling radius (default 0.03)" << std::endl;
  std::cout << "     --rf_rad val:           Reference frame radius (default 0.015)" << std::endl;
  std::cout << "     --descr_rad val:        Descriptor radius (default 0.02)" << std::endl;
  std::cout << "     --cg_size val:          Cluster size (default 0.01)" << std::endl;
  std::cout << "     --cg_thresh val:        Clustering threshold (default 5)" << std::endl << std::endl;
}
 
void
parseCommandLine (int argc, char *argv[])
{
  //Show help
  if (pcl::console::find_switch (argc, argv, "-h"))
  {
    showHelp (argv[0]);
    exit (0);
  }
 
  //Model & scene filenames
  std::vector<int> filenames;
  filenames = pcl::console::parse_file_extension_argument (argc, argv, ".pcd");
  if (filenames.size () != 2)
  {
    std::cout << "Filenames missing.\n";
    showHelp (argv[0]);
    exit (-1);
  }
 
  model_filename_ = argv[filenames[0]];
  scene_filename_ = argv[filenames[1]];
 
  //Program behavior
  if (pcl::console::find_switch (argc, argv, "-k"))//可視化構造對應點時用到的關鍵點
  {
    show_keypoints_ = true;
  }
  if (pcl::console::find_switch (argc, argv, "-c"))//可視化支持實例假設的對應點對
  {
    show_correspondences_ = true;
  }
  if (pcl::console::find_switch (argc, argv, "-r"))//計算點雲的分辨率和多樣性
  {
    use_cloud_resolution_ = true;
  }
 
  std::string used_algorithm;
  if (pcl::console::parse_argument (argc, argv, "--algorithm", used_algorithm) != -1)
  {
    if (used_algorithm.compare ("Hough") == 0)
    {
      use_hough_ = true;
    }else if (used_algorithm.compare ("GC") == 0)
    {
      use_hough_ = false;
    }
    else
    {
      std::cout << "Wrong algorithm name.\n";
      showHelp (argv[0]);
      exit (-1);
    }
  }
 
  //General parameters
  pcl::console::parse_argument (argc, argv, "--model_ss", model_ss_);
  pcl::console::parse_argument (argc, argv, "--scene_ss", scene_ss_);
  pcl::console::parse_argument (argc, argv, "--rf_rad", rf_rad_);
  pcl::console::parse_argument (argc, argv, "--descr_rad", descr_rad_);
  pcl::console::parse_argument (argc, argv, "--cg_size", cg_size_);
  pcl::console::parse_argument (argc, argv, "--cg_thresh", cg_thresh_);
}
 
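//Compute the cloud resolution: the average distance between each point
//and its nearest neighbour (used to scale all radii when -r is given).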
double
computeCloudResolution (const pcl::PointCloud<PointType>::ConstPtr &cloud)
{
  double res = 0.0;
  int n_points = 0;
  int nres;
  std::vector<int> indices (2);
  std::vector<float> sqr_distances (2);
  pcl::search::KdTree<PointType> tree;
  tree.setInputCloud (cloud);
 
  for (size_t i = 0; i < cloud->size (); ++i)
  {
    if (! pcl_isfinite ((*cloud)[i].x))
    {
      continue;
    }
    //Considering the second neighbor since the first is the point itself.
    nres = tree.nearestKSearch (i, 2, indices, sqr_distances);
    if (nres == 2)
    {
      res += sqrt (sqr_distances[1]);
      ++n_points;
    }
  }
  if (n_points != 0)
  {
    res /= n_points;
  }
  return res;
}
 
int
main (int argc, char *argv[])
{
  parseCommandLine (argc, argv);
 
  pcl::PointCloud<PointType>::Ptr model (new pcl::PointCloud<PointType> ());           //model cloud
  pcl::PointCloud<PointType>::Ptr model_keypoints (new pcl::PointCloud<PointType> ()); //model keypoints
  pcl::PointCloud<PointType>::Ptr scene (new pcl::PointCloud<PointType> ());           //scene cloud
  pcl::PointCloud<PointType>::Ptr scene_keypoints (new pcl::PointCloud<PointType> ()); //scene keypoints
  pcl::PointCloud<NormalType>::Ptr model_normals (new pcl::PointCloud<NormalType> ()); //model normals
  pcl::PointCloud<NormalType>::Ptr scene_normals (new pcl::PointCloud<NormalType> ()); //scene normals
  pcl::PointCloud<DescriptorType>::Ptr model_descriptors (new pcl::PointCloud<DescriptorType> ()); //descriptors
  pcl::PointCloud<DescriptorType>::Ptr scene_descriptors (new pcl::PointCloud<DescriptorType> ());
 
  //
  //  Load clouds
  //
  if (pcl::io::loadPCDFile (model_filename_, *model) < 0)
  {
    std::cout << "Error loading model cloud." << std::endl;
    showHelp (argv[0]);
    return (-1);
  }
  if (pcl::io::loadPCDFile (scene_filename_, *scene) < 0)
  {
    std::cout << "Error loading scene cloud." << std::endl;
    showHelp (argv[0]);
    return (-1);
  }
 
  //
  //  Set up resolution invariance
  //
  if (use_cloud_resolution_)
  {
    float resolution = static_cast<float> (computeCloudResolution (model));
    if (resolution != 0.0f)
    {
      model_ss_   *= resolution;
      scene_ss_   *= resolution;
      rf_rad_     *= resolution;
      descr_rad_  *= resolution;
      cg_size_    *= resolution;
    }
 
    std::cout << "Model resolution:       " << resolution << std::endl;
    std::cout << "Model sampling size:    " << model_ss_ << std::endl;
    std::cout << "Scene sampling size:    " << scene_ss_ << std::endl;
    std::cout << "LRF support radius:     " << rf_rad_ << std::endl;
    std::cout << "SHOT descriptor radius: " << descr_rad_ << std::endl;
    std::cout << "Clustering bin size:    " << cg_size_ << std::endl << std::endl;
  }
 
  //
  //  Compute Normals
  //
  pcl::NormalEstimationOMP<PointType, NormalType> norm_est;
  norm_est.setNumberOfThreads(4);   //set the number of threads manually
  norm_est.setKSearch (10);         //use the 10 nearest neighbours for each normal
  norm_est.setInputCloud (model);   //input model cloud
  norm_est.compute (*model_normals);//compute the normals
 
  norm_est.setInputCloud (scene);
  norm_est.compute (*scene_normals);
 
  //
  // Downsample Clouds to Extract keypoints: uniformly sample the clouds and extract keypoints
  // The UniformSampling class resamples a cloud by building a 3D voxel grid over it and then
  // downsampling and filtering the data. Each output point approximates the points inside a
  // voxel by their centroid rather than by the voxel center; this is slower, but the resulting
  // points lie closer to the underlying sampled surface.
  //
 
  //pcl::PointCloud<int> sampled_indices;
  pcl::UniformSampling<PointType> uniform_sampling;
  uniform_sampling.setInputCloud (model);        //input cloud
  uniform_sampling.setRadiusSearch (model_ss_);  //sampling radius
  //uniform_sampling.compute (sampled_indices);
  //pcl::copyPointCloud (*model, sampled_indices.points, *model_keypoints);
  uniform_sampling.filter(*model_keypoints);   //filter: extract the keypoints
 
  std::cout << "Model total points: " << model->size () << "; Selected Keypoints: " << model_keypoints->size () << std::endl;
 
  uniform_sampling.setInputCloud (scene);
  uniform_sampling.setRadiusSearch (scene_ss_);
  //uniform_sampling.compute (sampled_indices);
  //pcl::copyPointCloud (*scene, sampled_indices.points, *scene_keypoints);
  uniform_sampling.filter(*scene_keypoints);
  std::cout << "Scene total points: " << scene->size () << "; Selected Keypoints: " << scene_keypoints->size () << std::endl;
 
  //
  //  Compute Descriptor for keypoints
  //
  pcl::SHOTEstimationOMP<PointType, NormalType, DescriptorType> descr_est;
  descr_est.setNumberOfThreads(4);
  descr_est.setRadiusSearch (descr_rad_);     //descriptor support radius
 
  descr_est.setInputCloud (model_keypoints);  //model keypoints
  descr_est.setInputNormals (model_normals);  //model normals
  descr_est.setSearchSurface (model);         //full model cloud
  descr_est.compute (*model_descriptors);     //compute the descriptors
 
  descr_est.setInputCloud (scene_keypoints);
  descr_est.setInputNormals (scene_normals);
  descr_est.setSearchSurface (scene);
  descr_est.compute (*scene_descriptors);
 
  //
  //  Find Model-Scene Correspondences with KdTree
  //
  pcl::CorrespondencesPtr model_scene_corrs (new pcl::Correspondences ());
 
  pcl::KdTreeFLANN<DescriptorType> match_search; //kd-tree search in descriptor space
  match_search.setInputCloud (model_descriptors);//model descriptors to search in
 
 
  //For each scene keypoint descriptor, find the best matching model keypoint descriptor and add the pair to the correspondence vector.
  for (size_t i = 0; i < scene_descriptors->size (); ++i)
  {
    std::vector<int> neigh_indices (1);    //index of the nearest neighbour
    std::vector<float> neigh_sqr_dists (1);//squared distance to the nearest neighbour
    if (!pcl_isfinite (scene_descriptors->at (i).descriptor[0])) //skip NaN descriptors
    {
      continue;
    }
    int found_neighs = match_search.nearestKSearch (scene_descriptors->at (i), 1, neigh_indices, neigh_sqr_dists);
    if(found_neighs == 1 && neigh_sqr_dists[0] < 0.25f) //add a match only if the squared descriptor distance is below 0.25 (SHOT descriptor distances are normalized between 0 and 1)
    {
      //neigh_indices[0] is the matched model descriptor index, i the scene descriptor index, neigh_sqr_dists[0] their squared distance
      pcl::Correspondence corr (neigh_indices[0], static_cast<int> (i), neigh_sqr_dists[0]);
      model_scene_corrs->push_back (corr);//store the correspondence
    }
  }
  std::cout << "Correspondences found: " << model_scene_corrs->size () << std::endl;
 
  //
  //  Actual Clustering: cluster the correspondences into model instances
  //
  std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> > rototranslations;
  std::vector<pcl::Correspondences> clustered_corrs;
 
  // Cluster correspondences with the Hough 3D voting scheme
  if (use_hough_)
  {
    //
    //  Compute (Keypoints) Reference Frames only for Hough
    //
	//The Hough-based method requires a local reference frame (LRF) for every keypoint
    pcl::PointCloud<RFType>::Ptr model_rf (new pcl::PointCloud<RFType> ());
    pcl::PointCloud<RFType>::Ptr scene_rf (new pcl::PointCloud<RFType> ());
	
    pcl::BOARDLocalReferenceFrameEstimation<PointType, NormalType, RFType> rf_est;
    rf_est.setFindHoles (true);
    rf_est.setRadiusSearch (rf_rad_);//neighbourhood radius used to estimate each local reference frame
 
    rf_est.setInputCloud (model_keypoints);
    rf_est.setInputNormals (model_normals);
    rf_est.setSearchSurface (model);
    rf_est.compute (*model_rf);
 
    rf_est.setInputCloud (scene_keypoints);
    rf_est.setInputNormals (scene_normals);
    rf_est.setSearchSurface (scene);
    rf_est.compute (*scene_rf);
 
    //  Clustering
    pcl::Hough3DGrouping<PointType, PointType, RFType, RFType> clusterer;
    clusterer.setHoughBinSize (cg_size_);     //sampling interval of the Hough space: 0.01
    clusterer.setHoughThreshold (cg_thresh_); //minimum number of votes in Hough space needed to declare an instance: 5
    clusterer.setUseInterpolation (true);     //interpolate votes in the Hough space
    clusterer.setUseDistanceWeight (false);   //do not weight votes by correspondence distance
 
    clusterer.setInputCloud (model_keypoints); //model keypoints
    clusterer.setInputRf (model_rf);           //local reference frames of the model
    clusterer.setSceneCloud (scene_keypoints);
    clusterer.setSceneRf (scene_rf);
    clusterer.setModelSceneCorrespondences (model_scene_corrs);//model-scene correspondence set
 
    //clusterer.cluster (clustered_corrs);
    clusterer.recognize (rototranslations, clustered_corrs); //outputs the transformation matrices and the clustered correspondences
  }
  else // Using GeometricConsistency: cluster by geometric consistency
  {
    pcl::GeometricConsistencyGrouping<PointType, PointType> gc_clusterer;
    gc_clusterer.setGCSize (cg_size_);        //consensus set resolution
    gc_clusterer.setGCThreshold (cg_thresh_); //minimum cluster size (votes)
 
    gc_clusterer.setInputCloud (model_keypoints);
    gc_clusterer.setSceneCloud (scene_keypoints);
    gc_clusterer.setModelSceneCorrespondences (model_scene_corrs);
 
    //gc_clusterer.cluster (clustered_corrs);
    gc_clusterer.recognize (rototranslations, clustered_corrs);
  }
 
  //
  //  Output results: report the model instances found in the scene
  //
  std::cout << "Model instances found: " << rototranslations.size () << std::endl;
  for (size_t i = 0; i < rototranslations.size (); ++i)
  {
    std::cout << "\n    Instance " << i + 1 << ":" << std::endl;
    std::cout << "        Correspondences belonging to this instance: " << clustered_corrs[i].size () << std::endl;
 
    //Print the rotation matrix and translation vector of this instance relative to the input model
    Eigen::Matrix3f rotation = rototranslations[i].block<3,3>(0, 0);
    Eigen::Vector3f translation = rototranslations[i].block<3,1>(0, 3);
 
    printf ("\n");
    printf ("            | %6.3f %6.3f %6.3f | \n", rotation (0,0), rotation (0,1), rotation (0,2));
    printf ("        R = | %6.3f %6.3f %6.3f | \n", rotation (1,0), rotation (1,1), rotation (1,2));
    printf ("            | %6.3f %6.3f %6.3f | \n", rotation (2,0), rotation (2,1), rotation (2,2));
    printf ("\n");
    printf ("        t = < %0.3f, %0.3f, %0.3f >\n", translation (0), translation (1), translation (2));
  }
 
  //
  //  Visualization
  //
  pcl::visualization::PCLVisualizer viewer ("Correspondence Grouping - 3D Object Recognition");
  viewer.addPointCloud (scene, "scene_cloud");//visualize the scene cloud
  viewer.setBackgroundColor(255,255,255);
  pcl::PointCloud<PointType>::Ptr off_scene_model (new pcl::PointCloud<PointType> ());
  pcl::PointCloud<PointType>::Ptr off_scene_model_keypoints (new pcl::PointCloud<PointType> ());
 
  if (show_correspondences_ || show_keypoints_)//visualize the model beside the scene
  {
    //We are translating the model so that it doesn't end in the middle of the scene representation
    pcl::transformPointCloud (*model, *off_scene_model, Eigen::Vector3f (-1,0,0), Eigen::Quaternionf (1, 0, 0, 0));
    pcl::transformPointCloud (*model_keypoints, *off_scene_model_keypoints, Eigen::Vector3f (-1,0,0), Eigen::Quaternionf (1, 0, 0, 0));
 
    pcl::visualization::PointCloudColorHandlerCustom<PointType> off_scene_model_color_handler (off_scene_model, 0, 255, 0);
    viewer.addPointCloud (off_scene_model, off_scene_model_color_handler, "off_scene_model");
  }
 
  if (show_keypoints_)//visualize the keypoints in blue
  {
    pcl::visualization::PointCloudColorHandlerCustom<PointType> scene_keypoints_color_handler (scene_keypoints, 0, 0, 255);
    viewer.addPointCloud (scene_keypoints, scene_keypoints_color_handler, "scene_keypoints");
    viewer.setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 5, "scene_keypoints");
 
    pcl::visualization::PointCloudColorHandlerCustom<PointType> off_scene_model_keypoints_color_handler (off_scene_model_keypoints, 0, 0, 255);
    viewer.addPointCloud (off_scene_model_keypoints, off_scene_model_keypoints_color_handler, "off_scene_model_keypoints");
    viewer.setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 5, "off_scene_model_keypoints");
  }
 
  for (size_t i = 0; i < rototranslations.size (); ++i)
  {
    pcl::PointCloud<PointType>::Ptr rotated_model (new pcl::PointCloud<PointType> ());
    pcl::transformPointCloud (*model, *rotated_model, rototranslations[i]);//transform the model with the recovered pose
 
    std::stringstream ss_cloud;
    ss_cloud << "instance" << i;
 
    pcl::visualization::PointCloudColorHandlerCustom<PointType> rotated_model_color_handler (rotated_model, 255, 0, 0);
    viewer.addPointCloud (rotated_model, rotated_model_color_handler, ss_cloud.str ());
 
    if (show_correspondences_)//draw the correspondence lines
    {
      for (size_t j = 0; j < clustered_corrs[i].size (); ++j)
      {
        std::stringstream ss_line;
        ss_line << "correspondence_line" << i << "_" << j;
        PointType& model_point = off_scene_model_keypoints->at (clustered_corrs[i][j].index_query);
        PointType& scene_point = scene_keypoints->at (clustered_corrs[i][j].index_match);
 
        //  We are drawing a line for each pair of clustered correspondences found between the model and the scene
        viewer.addLine<PointType, PointType> (model_point, scene_point, 0, 255, 0, ss_line.str ());
      }
    }
  }
 
  while (!viewer.wasStopped ())
  {
    viewer.spinOnce ();
  }
 
  return (0);
}
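To build the example, a minimal CMakeLists.txt along the usual PCL lines can be used (the version requirements below are assumptions; adjust to your installation):

cmake_minimum_required(VERSION 3.5)
project(correspondence_grouping)

find_package(PCL 1.8 REQUIRED)

include_directories(${PCL_INCLUDE_DIRS})
link_directories(${PCL_LIBRARY_DIRS})
add_definitions(${PCL_DEFINITIONS})

add_executable(correspondence_grouping correspondence_grouping.cpp)
target_link_libraries(correspondence_grouping ${PCL_LIBRARIES})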

If you use the PCD files bundled with the book, loading milk_pose_changed.pcd fails with "Failed to find match for field 'rgba'." because that file does not contain rgba information; the original files can be downloaded from the official repository: GitHub source files
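If only the geometry is needed, a minimal workaround sketch (replacing the scene-loading block in main, and requiring #include <pcl/common/io.h>) is to read the color-less file into a pcl::PointXYZ cloud and copy it into the XYZRGBA cloud the rest of the code expects; pcl::copyPointCloud leaves the missing color fields default-initialized:

pcl::PointCloud<pcl::PointXYZ>::Ptr scene_xyz (new pcl::PointCloud<pcl::PointXYZ> ());
if (pcl::io::loadPCDFile (scene_filename_, *scene_xyz) < 0)
{
  std::cout << "Error loading scene cloud." << std::endl;
  return (-1);
}
pcl::copyPointCloud (*scene_xyz, *scene); //copy XYZ fields; rgba stays at its default

Alternatively, changing the PointType typedef to pcl::PointXYZ avoids the issue entirely, at the cost of dropping color.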

Command-line options used below:

  -k:                show the keypoints used in the model and scene clouds
  -c:                draw the correspondence lines

./correspondence_grouping milk.pcd milk_cartoon_all_small_clorox.pcd -c

(result screenshot)

./correspondence_grouping milk.pcd milk_cartoon_all_small_clorox.pcd -c -k

(result screenshot)

Console output and rendered result: keypoints (blue), recognized model instances (red), correspondence lines (green), off-scene source model (green, per the color handler in this code).

Key point: why R comes out as the identity and t as zero here: because the source cloud and the scene cloud coincide exactly.
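As a quick sanity check for this case, a small sketch (not part of the original program) that compares each recovered pose against the identity with Eigen:

//Sketch: when model and scene coincide, each recovered pose should be
//numerically close to the 4x4 identity transform.
for (size_t i = 0; i < rototranslations.size (); ++i)
{
  if (rototranslations[i].isApprox (Eigen::Matrix4f::Identity (), 1e-3f))
  {
    std::cout << "Instance " << i + 1 << ": pose is (close to) the identity." << std::endl;
  }
}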

We then rotated and translated the source cloud before matching; the result is shown below.
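A minimal sketch of how such a test input can be produced with pcl::transformPointCloud (the rotation angle and offset below are arbitrary assumptions):

//Sketch: apply a known rigid transform to the model before matching,
//so that the recovered R and t are no longer trivial.
Eigen::Affine3f pose = Eigen::Affine3f::Identity ();
pose.translation () << 0.1f, 0.0f, 0.05f;                              //arbitrary offset
pose.rotate (Eigen::AngleAxisf (M_PI / 6, Eigen::Vector3f::UnitZ ())); //30 degrees about Z
pcl::PointCloud<PointType>::Ptr model_moved (new pcl::PointCloud<PointType> ());
pcl::transformPointCloud (*model, *model_moved, pose);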
(result screenshot)

During this run the following warning appeared:
[pcl::SHOTEstimation::computeFeature] The local reference frame is not valid! Aborting description of point with index 3045

This warning occurs because the normal-estimation radius is too small: some normals come out as NaN (no neighbours fall inside the sphere of the configured radius), which then causes problems for the SHOT descriptor.

The fix is simply to increase the corresponding search radius in the code.
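For example (the values below are assumptions that need tuning per dataset), the radii can also be raised from the command line instead of editing the source:

./correspondence_grouping model.pcd scene.pcd --descr_rad 0.05 --rf_rad 0.04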

I then tried matching the source cloud against a scene containing several copies of the same cloud; the results are shown below.

(result screenshots)
However, I have not yet managed to get a match with my own dataset and am still tuning the parameters; any pointers would be appreciated.
