OKVIS Reprojection Error

Error location

The reprojection error is built from a newly matched 3D landmark and the pixel coordinates of the corresponding 2D keypoint. The error term is added in estimator.hpp.

// Add an observation to a landmark.
template<class GEOMETRY_TYPE>
::ceres::ResidualBlockId Estimator::addObservation(uint64_t landmarkId,
                                                   uint64_t poseId,
                                                   size_t camIdx,
                                                   size_t keypointIdx) {
  OKVIS_ASSERT_TRUE_DBG(Exception, isLandmarkAdded(landmarkId),
                        "landmark not added");

  // avoid double observations
  okvis::KeypointIdentifier kid(poseId, camIdx, keypointIdx);
  if (landmarksMap_.at(landmarkId).observations.find(kid)
      != landmarksMap_.at(landmarkId).observations.end()) {
    return NULL;
  }

  // get the keypoint measurement
  okvis::MultiFramePtr multiFramePtr = multiFramePtrMap_.at(poseId);
  Eigen::Vector2d measurement;
  multiFramePtr->getKeypoint(camIdx, keypointIdx, measurement);
  Eigen::Matrix2d information = Eigen::Matrix2d::Identity();
  double size = 1.0;
  multiFramePtr->getKeypointSize(camIdx, keypointIdx, size);
  information *= 64.0 / (size * size);

  // create error term
  std::shared_ptr < ceres::ReprojectionError
      < GEOMETRY_TYPE
          >> reprojectionError(
              new ceres::ReprojectionError<GEOMETRY_TYPE>(
                  multiFramePtr->template geometryAs<GEOMETRY_TYPE>(camIdx),
                  camIdx, measurement, information));

  ::ceres::ResidualBlockId retVal = mapPtr_->addResidualBlock(
      reprojectionError,
      cauchyLossFunctionPtr_ ? cauchyLossFunctionPtr_.get() : NULL,
      mapPtr_->parameterBlockPtr(poseId),
      mapPtr_->parameterBlockPtr(landmarkId),
      mapPtr_->parameterBlockPtr(
          statesMap_.at(poseId).sensors.at(SensorStates::Camera).at(camIdx).at(
              CameraSensorStates::T_SCi).id));

  // remember
  landmarksMap_.at(landmarkId).observations.insert(
      std::pair<okvis::KeypointIdentifier, uint64_t>(
          kid, reinterpret_cast<uint64_t>(retVal)));

  return retVal;
}
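
The information matrix handed to the error term is derived from the keypoint size: the line information *= 64.0 / (size * size) corresponds to assuming an isotropic detection uncertainty whose standard deviation grows with the keypoint scale,

$$ W = \frac{64}{\text{size}^2}\, I_{2\times2} = \frac{1}{\sigma^2}\, I_{2\times2}, \qquad \sigma = \frac{\text{size}}{8}\ \text{px}. $$

The residual block is then added with the Cauchy loss (cauchyLossFunctionPtr_) as a robustifier and is parameterized by the keyframe pose, the homogeneous landmark, and the camera extrinsics T_SCi.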

Constructors

// Default constructor.
template<class GEOMETRY_T>
ReprojectionError<GEOMETRY_T>::ReprojectionError()
    : cameraGeometry_(new camera_geometry_t) {
}

// Construct with measurement and information matrix.
template<class GEOMETRY_T>
ReprojectionError<GEOMETRY_T>::ReprojectionError(
    std::shared_ptr<const camera_geometry_t> cameraGeometry, uint64_t cameraId,
    const measurement_t & measurement, const covariance_t & information) {
  setCameraId(cameraId);
  setMeasurement(measurement);
  setInformation(information);
  setCameraGeometry(cameraGeometry);
}

// Set the information.
template<class GEOMETRY_T>
void ReprojectionError<GEOMETRY_T>::setInformation(
    const covariance_t& information) {
  information_ = information;
  covariance_ = information.inverse();
  // perform the Cholesky decomposition in order to obtain the correct error weighting
  Eigen::LLT<Eigen::Matrix2d> lltOfInformation(information_);
  squareRootInformation_ = lltOfInformation.matrixL().transpose();
}
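
A minimal standalone sketch (assuming only Eigen, with example values chosen here for illustration) of what setInformation stores: the upper-triangular Cholesky factor U of the information matrix W, so that U^T U = W and weighting the raw error by U reproduces the Mahalanobis norm e^T W e.

#include <iostream>
#include <Eigen/Dense>

int main() {
  // example information matrix, as set in addObservation(): W = 64 / size^2 * I
  const double size = 4.0;
  const Eigen::Matrix2d W = Eigen::Matrix2d::Identity() * 64.0 / (size * size);

  // upper-triangular Cholesky factor, mirroring setInformation()
  Eigen::LLT<Eigen::Matrix2d> lltOfW(W);
  const Eigen::Matrix2d U = lltOfW.matrixL().transpose();  // U^T * U == W

  // weighting a raw reprojection error e with U reproduces e^T W e
  const Eigen::Vector2d e(0.5, -1.2);
  std::cout << "U^T U - W:\n" << U.transpose() * U - W << "\n";
  std::cout << "|U e|^2  : " << (U * e).squaredNorm() << "\n";
  std::cout << "e^T W e  : " << e.dot(W * e) << std::endl;
  return 0;
}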

Interface with Ceres

// This evaluates the error term and additionally computes the Jacobians.
template<class GEOMETRY_T>
bool ReprojectionError<GEOMETRY_T>::Evaluate(double const* const * parameters,
                                             double* residuals,
                                             double** jacobians) const {

  return EvaluateWithMinimalJacobians(parameters, residuals, jacobians, NULL);  // debug test only
}


// This evaluates the error term and additionally computes
// the Jacobians in the minimal internal representation.
template<class GEOMETRY_T>
bool ReprojectionError<GEOMETRY_T>::EvaluateWithMinimalJacobians(
    double const* const * parameters, double* residuals, double** jacobians,
    double** jacobiansMinimal) const {

  // We avoid the use of okvis::kinematics::Transformation here due to quaternion normalization and so forth.
  // This only matters in order to be able to check Jacobians with numeric differentiation chained,
  // first w.r.t. q and then d_alpha.

  // pose: world to sensor transformation
  Eigen::Map<const Eigen::Vector3d> t_WS_W(&parameters[0][0]);
  const Eigen::Quaterniond q_WS(parameters[0][6], parameters[0][3],
                                parameters[0][4], parameters[0][5]);

  // the point in world coordinates
  Eigen::Map<const Eigen::Vector4d> hp_W(&parameters[1][0]);
  //std::cout << hp_W.transpose() << std::endl;

  // the sensor to camera transformation
  Eigen::Map<const Eigen::Vector3d> t_SC_S(&parameters[2][0]);
  const Eigen::Quaterniond q_SC(parameters[2][6], parameters[2][3],
                                parameters[2][4], parameters[2][5]);

Parameters read in:

Sensor-to-world transformation matrix: $T_{WS}$
Camera-to-sensor transformation matrix: $T_{SC}$
Translation part of the sensor-to-world transformation: ${}_W t_{WS}$
Rotation part of the sensor-to-world transformation (quaternion): $q_{WS}$
Homogeneous landmark coordinates in the world frame: ${}_W h_p$
Translation part of the camera-to-sensor transformation: ${}_S t_{SC}$
Rotation part of the camera-to-sensor transformation (quaternion): $q_{SC}$

  // transform the point into the camera:
  Eigen::Matrix3d C_SC = q_SC.toRotationMatrix();
  Eigen::Matrix3d C_CS = C_SC.transpose();
  Eigen::Matrix4d T_CS = Eigen::Matrix4d::Identity();
  T_CS.topLeftCorner<3, 3>() = C_CS;
  T_CS.topRightCorner<3, 1>() = -C_CS * t_SC_S;
  Eigen::Matrix3d C_WS = q_WS.toRotationMatrix();
  Eigen::Matrix3d C_SW = C_WS.transpose();
  Eigen::Matrix4d T_SW = Eigen::Matrix4d::Identity();
  T_SW.topLeftCorner<3, 3>() = C_SW;
  T_SW.topRightCorner<3, 1>() = -C_SW * t_WS_W;
  Eigen::Vector4d hp_S = T_SW * hp_W;
  Eigen::Vector4d hp_C = T_CS * hp_S;

Computing the projection error:

$C_{SC} = R\{q_{SC}\}$
$C_{CS} = C_{SC}^T$
Sensor-to-camera transformation matrix: $T_{CS} = I_{4\times4}$, $T_{CS}(0{:}2,\,0{:}2) = C_{CS}$, $T_{CS}(0{:}2,\,3) = -C_{CS}\,{}_S t_{SC}$
$C_{WS} = R\{q_{WS}\}$, $C_{SW} = C_{WS}^T$
World-to-sensor transformation matrix: $T_{SW} = I_{4\times4}$, $T_{SW}(0{:}2,\,0{:}2) = C_{SW}$, $T_{SW}(0{:}2,\,3) = -C_{SW}\,{}_W t_{WS}$
Landmark transformed into the sensor frame: ${}_S h_p = T_{SW}\,{}_W h_p$
Landmark transformed into the camera frame: ${}_C h_p = T_{CS}\,{}_S h_p$
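
A small sketch (assuming only Eigen; the example pose is arbitrary) verifying that the T_SW assembled above from C_SW and -C_SW * t_WS_W is the inverse of the pose T_WS, i.e. the code avoids a general 4x4 matrix inversion.

#include <iostream>
#include <Eigen/Dense>

int main() {
  // an arbitrary example pose T_WS (sensor to world)
  const Eigen::Quaterniond q_WS = Eigen::Quaterniond::UnitRandom();
  const Eigen::Vector3d t_WS_W = Eigen::Vector3d::Random();

  const Eigen::Matrix3d C_WS = q_WS.toRotationMatrix();
  Eigen::Matrix4d T_WS = Eigen::Matrix4d::Identity();
  T_WS.topLeftCorner<3, 3>() = C_WS;
  T_WS.topRightCorner<3, 1>() = t_WS_W;

  // T_SW assembled exactly as in EvaluateWithMinimalJacobians()
  const Eigen::Matrix3d C_SW = C_WS.transpose();
  Eigen::Matrix4d T_SW = Eigen::Matrix4d::Identity();
  T_SW.topLeftCorner<3, 3>() = C_SW;
  T_SW.topRightCorner<3, 1>() = -C_SW * t_WS_W;

  // prints (numerically) the 4x4 identity: T_SW == T_WS^{-1}
  std::cout << T_SW * T_WS << std::endl;
  return 0;
}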

  // calculate the reprojection error
  measurement_t kp;
  Eigen::Matrix<double, 2, 4> Jh;
  Eigen::Matrix<double, 2, 4> Jh_weighted;
  if (jacobians != NULL) {
    cameraGeometry_->projectHomogeneous(hp_C, &kp, &Jh);
    Jh_weighted = squareRootInformation_ * Jh;
  } else {
    cameraGeometry_->projectHomogeneous(hp_C, &kp);
  }

  measurement_t error = measurement_ - kp;

  // weight:
  measurement_t weighted_error = squareRootInformation_ * error;

  // assign:
  residuals[0] = weighted_error[0];
  residuals[1] = weighted_error[1];

  // check validity:
  bool valid = true;
  if (fabs(hp_C[3]) > 1.0e-8) {
    Eigen::Vector3d p_C = hp_C.template head<3>() / hp_C[3];
    if (p_C[2] < 0.2) {  // 20 cm - not very generic... but reasonable
      //std::cout<<"INVALID POINT"<<std::endl;
      valid = false;
    }
  }

Projecting the landmark from the camera frame to pixel coordinates:

    cameraGeometry_->projectHomogeneous(hp_C, &kp, &Jh);

Projected pixel coordinates: kp
2x4 projection Jacobian: $J_h$

Weighted projection Jacobian (Jh_weighted): ${}_w J_h = \Sigma^{-\frac{1}{2}}\, J_h$, where $\Sigma^{-\frac{1}{2}}$ is squareRootInformation_.

Keypoint coordinates extracted from the image: measurement_
Weighted reprojection error: $r = \Sigma^{-\frac{1}{2}}\,(\text{measurement\_} - \text{kp})$
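
Putting these together (with $z$ denoting measurement_ and $\pi(\cdot)$ standing for projectHomogeneous, a notation introduced here for brevity), the weighted residual and the chain rule behind all three Jacobians below are

$$ r = \Sigma^{-\frac{1}{2}}\big(z - \pi(T_{CS}\,T_{SW}\,{}_W h_p)\big), \qquad \frac{\partial r}{\partial x} = -\,\Sigma^{-\frac{1}{2}}\,J_h\,\frac{\partial\,{}_C h_p}{\partial x}, \qquad J_h = \frac{\partial \pi}{\partial\,{}_C h_p}. $$

The leading minus sign is not written explicitly in the code; it is absorbed into the per-block matrices $J$ assembled below.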

  // calculate jacobians, if required
  // This is pretty close to Paul Furgale's thesis. eq. 3.100 on page 40
  if (jacobians != NULL) {
    if (jacobians[0] != NULL) {
      Eigen::Vector3d p = hp_W.head<3>() - t_WS_W * hp_W[3];
      Eigen::Matrix<double, 4, 6> J;
      J.setZero();
      J.topLeftCorner<3, 3>() = C_SW * hp_W[3];
      J.topRightCorner<3, 3>() = -C_SW * okvis::kinematics::crossMx(p);

      // compute the minimal version
      Eigen::Matrix<double, 2, 6, Eigen::RowMajor> J0_minimal;
      J0_minimal = Jh_weighted * T_CS * J;
      if (!valid)
        J0_minimal.setZero();

      // pseudo inverse of the local parametrization Jacobian:
      Eigen::Matrix<double, 6, 7, Eigen::RowMajor> J_lift;
      PoseLocalParameterization::liftJacobian(parameters[0], J_lift.data());

      // hallucinate Jacobian w.r.t. state
      Eigen::Map<Eigen::Matrix<double, 2, 7, Eigen::RowMajor> > J0(
          jacobians[0]);
      J0 = J0_minimal * J_lift;

      // if requested, provide minimal Jacobians
      if (jacobiansMinimal != NULL) {
        if (jacobiansMinimal[0] != NULL) {
          Eigen::Map<Eigen::Matrix<double, 2, 6, Eigen::RowMajor> > J0_minimal_mapped(
              jacobiansMinimal[0]);
          J0_minimal_mapped = J0_minimal;
        }
      }
    }

$p = {}_W h_p(0{:}2) - {}_W t_{WS}\,{}_W h_p(3)$
$J$ is $4 \times 6$, initialized to zero
$J(0{:}2,\,0{:}2) = C_{SW}\,{}_W h_p(3)$, $J(0{:}2,\,3{:}5) = -C_{SW}\,[p]_\times$
$J_0^{\min} = {}_w J_h\, T_{CS}\, J$
$J_{\text{lift}} = \text{liftJacobian}(T_{WS})$
$J_0 = J_0^{\min}\, J_{\text{lift}}$
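
A short derivation sketch of why $J$ carries no explicit minus sign. With a world-frame perturbation of the pose (translation ${}_W t_{WS} \leftarrow {}_W t_{WS} + \delta t$, rotation $C_{WS} \leftarrow \exp([\delta\alpha]_\times)\,C_{WS}$), the convention consistent with the Jacobian the code assembles, and with ${}_S h_p(0{:}2) = C_{SW}\big({}_W h_p(0{:}2) - {}_W t_{WS}\,{}_W h_p(3)\big)$, one gets

$$ \frac{\partial\,{}_S h_p(0{:}2)}{\partial \delta t} = -C_{SW}\,{}_W h_p(3), \qquad \frac{\partial\,{}_S h_p(0{:}2)}{\partial \delta\alpha} = C_{SW}\,[p]_\times. $$

Multiplying by the residual's $-\Sigma^{-\frac{1}{2}} J_h T_{CS}$ flips both signs, which is exactly the $J$ assembled in the code, so $J_0^{\min} = {}_w J_h\, T_{CS}\, J$.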

    if (jacobians[1] != NULL) {
      Eigen::Map<Eigen::Matrix<double, 2, 4, Eigen::RowMajor> > J1(
          jacobians[1]);  // map the raw pointer to an Eigen matrix for convenience
      Eigen::Matrix4d T_CW = (T_CS * T_SW);
      J1 = -Jh_weighted * T_CW;
      if (!valid)
        J1.setZero();

      // if requested, provide minimal Jacobians
      if (jacobiansMinimal != NULL) {
        if (jacobiansMinimal[1] != NULL) {
          Eigen::Map<Eigen::Matrix<double, 2, 3, Eigen::RowMajor> > J1_minimal_mapped(
              jacobiansMinimal[1]);
          Eigen::Matrix<double, 4, 3> S;
          S.setZero();
          S.topLeftCorner<3, 3>().setIdentity();
          J1_minimal_mapped = J1 * S;  // this is for Euclidean-style perturbation only.
        }
      }
    }

$T_{CW} = T_{CS}\, T_{SW}$
2x4 matrix: $J_1$
$J_1 = -\,{}_w J_h\, T_{CW}$
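
Since ${}_C h_p = T_{CW}\,{}_W h_p$ is linear in the homogeneous landmark, this Jacobian follows directly from the chain rule, and the minimal 2x3 version simply drops the fourth (homogeneous) column through the selection matrix $S$ built in the code, matching the Euclidean-style perturbation noted in the code comment:

$$ J_1 = \frac{\partial r}{\partial\,{}_W h_p} = -\,\Sigma^{-\frac{1}{2}}\,J_h\,T_{CW}, \qquad J_1^{\min} = J_1\,S, \qquad S = \begin{bmatrix} I_{3\times3} \\ 0_{1\times3} \end{bmatrix}. $$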

    if (jacobians[2] != NULL) {
      Eigen::Vector3d p = hp_S.head<3>() - t_SC_S * hp_S[3];
      Eigen::Matrix<double, 4, 6> J;
      J.setZero();
      J.topLeftCorner<3, 3>() = C_CS * hp_S[3];
      J.topRightCorner<3, 3>() = -C_CS * okvis::kinematics::crossMx(p);

      // compute the minimal version
      Eigen::Matrix<double, 2, 6, Eigen::RowMajor> J2_minimal;
      J2_minimal = Jh_weighted * J;
      if (!valid)
        J2_minimal.setZero();

      // pseudo inverse of the local parametrization Jacobian:
      Eigen::Matrix<double, 6, 7, Eigen::RowMajor> J_lift;
      PoseLocalParameterization::liftJacobian(parameters[2], J_lift.data());

      // hallucinate Jacobian w.r.t. state
      Eigen::Map<Eigen::Matrix<double, 2, 7, Eigen::RowMajor> > J2(
          jacobians[2]);
      J2 = J2_minimal * J_lift;

      // if requested, provide minimal Jacobians
      if (jacobiansMinimal != NULL) {
        if (jacobiansMinimal[2] != NULL) {
          Eigen::Map<Eigen::Matrix<double, 2, 6, Eigen::RowMajor> > J2_minimal_mapped(
              jacobiansMinimal[2]);
          J2_minimal_mapped = J2_minimal;
        }
      }
    }

$p = {}_S h_p(0{:}2) - {}_S t_{SC}\,{}_S h_p(3)$
$J$ is $4 \times 6$: $J(0{:}2,\,0{:}2) = C_{CS}\,{}_S h_p(3)$, $J(0{:}2,\,3{:}5) = -C_{CS}\,[p]_\times$
$J_2^{\min} = {}_w J_h\, J$
$J_{\text{lift}} = \text{liftJacobian}(T_{SC})$
2x7 matrix: $J_2 = J_2^{\min}\, J_{\text{lift}}$
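
The derivation mirrors the one for $J_0$, with the same perturbation convention applied to $T_{SC}$ and the point already expressed in the sensor frame, so no $T_{CS}$ factor appears:

$$ {}_C h_p(0{:}2) = C_{CS}\big({}_S h_p(0{:}2) - {}_S t_{SC}\,{}_S h_p(3)\big) \;\Rightarrow\; \frac{\partial\,{}_C h_p(0{:}2)}{\partial \delta t} = -C_{CS}\,{}_S h_p(3), \quad \frac{\partial\,{}_C h_p(0{:}2)}{\partial \delta\alpha} = C_{CS}\,[p]_\times, $$

and the residual's minus sign again flips these into the $J$ assembled in the code.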

