Affine Transform + RANSAC

  • Affine transform (any combination of the five primitive transformations: translation, reflection, rotation, scaling, and shear; see the sketch after this list)

  • RANSAC
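
As a quick illustration of the first bullet, here is a minimal sketch (C++ with Eigen, which the affine RANSAC code later in this post also uses; all numeric values are arbitrary examples) that builds each of the five primitives as a 3x3 homogeneous matrix and composes them into one affine transform:

#include <cmath>
#include <iostream>
#include <Eigen/Dense>

int main()
{
    using Mat3 = Eigen::Matrix3d;
    const double theta = 3.14159265358979 / 6.0; //30 degrees

    Mat3 T = Mat3::Identity();                   //translation by (10, 5)
    T(0,2) = 10.0; T(1,2) = 5.0;

    Mat3 R = Mat3::Identity();                   //rotation by theta
    R(0,0) = std::cos(theta); R(0,1) = -std::sin(theta);
    R(1,0) = std::sin(theta); R(1,1) =  std::cos(theta);

    Mat3 S = Mat3::Identity();                   //anisotropic scaling
    S(0,0) = 1.5; S(1,1) = 0.8;

    Mat3 H = Mat3::Identity();                   //shear along x
    H(0,1) = 0.2;

    Mat3 F = Mat3::Identity();                   //reflection about the x-axis
    F(1,1) = -1.0;

    //any 2-D affine transform factors into a product of these primitives
    Mat3 A = T * R * S * H * F;

    //apply to a point in homogeneous coordinates (column-vector convention)
    Eigen::Vector3d p(100.0, 50.0, 1.0);
    std::cout << (A * p).transpose() << std::endl;
    return 0;
}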

RANSAC is short for "RANdom SAmple Consensus". Its purpose is to estimate the parameters of a mathematical model, by iteration, from a set of observations that contains "outliers" (i.e. erroneous points). It is a non-deterministic algorithm: it yields a reasonable result only with a certain probability, which grows as more iterations are allowed.

1) Assumptions of RANSAC

  • The data consist of "inliers", whose distribution can be explained by some set of model parameters.
  • The data also contain "outliers", which do not fit that model.
  • Given a set of inliers (usually small relative to the whole data set), there exists a procedure that can estimate the parameters of a model that explains, or fits, those inliers.
  • Outliers arise from noise, erroneous measurements, incorrect assumptions about the data, and so on.

2) Basic idea

 RANSAC achieves its goal by repeatedly selecting a random subset of the data. The selected subset is hypothesized to consist of inliers, and that hypothesis is then tested as follows:

  1. A model is fitted to the hypothetical inliers, i.e. all of its free parameters are computed from this subset.
  2. All other data are then tested against the fitted model; any point that fits the estimated model well is also counted as a hypothetical inlier.
  3. The estimated model is considered reasonable if sufficiently many points have been classified as hypothetical inliers.
  4. The model is then re-estimated from all hypothetical inliers, since so far it has only been estimated from the initial subset.
  5. Finally, the model is scored by the error of the inliers with respect to it.
  6. This procedure is repeated a fixed number of times; each candidate model is either discarded because it has too few inliers, or kept because it fits better than the current best model.
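
Step 6 leaves open how many times to repeat. A standard way to choose the iteration count N (and the rule the line-fitting code below uses to adapt N on the fly) is: if w is the inlier ratio and each sample draws s points, a sample is outlier-free with probability w^s, so the probability that all N samples fail is (1 - w^s)^N. Requiring at least one outlier-free sample with probability p (say p = 0.99) gives

N = log(1 - p) / log(1 - w^s)

For line fitting, s = 2; the code below re-evaluates this formula after every hypothesis, using the current inlier count to estimate w.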

3) Fitting a line with RANSAC

#include <iostream>
#include <opencv2/opencv.hpp>
#include <stdlib.h>
#include <time.h>
#include <math.h>
using namespace std;
using namespace cv;

//generate a uniformly distributed random number in [0, 1]
double uniformRandom(void)
{
	return (double)rand() / (double)RAND_MAX;
}
//generate a standard normally distributed random number (Box-Muller; mean 0, standard deviation 1)
double gaussianRandom(void)
{
	static int next_gaussian = 0;
	static double saved_gaussian_value;
	double fac,rsq, v1, v2;
	if (next_gaussian == 0) {
		do {
			v1 = 2 * uniformRandom() - 1;
			v2 = 2 * uniformRandom() - 1;
			rsq = v1*v1 + v2*v2;
		} while (rsq >= 1.0 || rsq == 0.0);
		fac = sqrt(-2 * log(rsq) / rsq);
		saved_gaussian_value = v1*fac;
		next_gaussian = 1;
		return v2*fac;
	}
	else {
		next_gaussian = 0;
		return saved_gaussian_value;
	}

}
void get_inline_outline(vector<Point2d> &ptSet)
{
	//image size
	int width = 640;
	int height = 320;

	//line parameters for a*x + b*y + c = 0
	double a = 1, b = 2, c = -640;
	//sample 20 points on the line at random
	srand((unsigned int)time(NULL)); //seed the random number generator
	while (true)
	{
		double x = uniformRandom()*(width - 1);
		double y = -(a*x + c) / b;
		//add Gaussian noise with standard deviation 0.5
		x += gaussianRandom()*0.5;
		y += gaussianRandom()*0.5;
		if (x < 0 || x >= width || y < 0 || y >= height) //discard points falling outside the image
			continue;
		Point2d pt(x, y);
		ptSet.push_back(pt);
		if (ptSet.size() == 20)
			break;
	}
	//generate 10 outliers at random
	while (true)
	{
		double x = uniformRandom() * (width - 1);
		double y = uniformRandom() * (height - 1);

		if (fabs(a*x + b*y + c) < 10)  //keep only points whose residual |a*x+b*y+c| is at least 10, i.e. well off the line
			continue;
		Point2d pt(x, y);
		ptSet.push_back(pt);
		if (ptSet.size() == 30)
			break;
	}
}

//fit the line a*x + b*y + c = 0 to a point set
void calcLinePara(vector<Point2d> pts, double &a, double &b, double &c)
{
	Vec4f line;
	//fitLine returns (vx, vy, x0, y0): a unit direction vector and a point on the line
	fitLine(pts, line, DIST_L2, 0, 1e-2, 1e-2); //DIST_L2 was named CV_DIST_L2 before OpenCV 4
	//the normal is (vy, -vx), giving a = vy, b = -vx, c = vx*y0 - vy*x0;
	//since (vx, vy) has unit length, |a*x + b*y + c| is the point-to-line distance
	a = line[1];
	b = -line[0];
	c = line[0] * line[3] - line[1] * line[2];
}

//draw a fitting sample: randomly select 2 distinct indices from the point set
bool getSample(vector<int> set, vector<int> &sset)
{
	int i[2];
	if (set.size() > 2)
	{
		do
		{
			for (int n = 0; n < 2; n++)
				i[n] = int(uniformRandom() * (set.size() - 1));
		} while (i[0] == i[1]); //reject samples where both indices coincide
		for (int n = 0; n < 2; n++)
		{
			sset.push_back(i[n]);
		}
	}
	else
	{
		return false;
	}
	return true;
}

//RANSAC line fitting
void fitLineRANSAC(vector<Point2d> ptSet, double &a, double &b, double &c, vector<bool> &inlierFlag)
{
	double residual_error = 2.99; //inlier threshold: maximum point-to-line distance

	bool stop_loop = false;
	int maximum = 0;  //largest inlier count so far

	//final inlier flags and their residuals
	inlierFlag = vector<bool>(ptSet.size(), false);
	vector<double> resids_(ptSet.size(), 3);
	int sample_count = 0;
	int N = 500; //required number of iterations, updated adaptively below
	// RANSAC
	srand((unsigned int)time(NULL)); //seed the random number generator
	vector<int> ptsID;
	for (unsigned int i = 0; i < ptSet.size(); i++)
		ptsID.push_back(i);

	while (N > sample_count && !stop_loop)
	{
		vector<bool> inlierstemp;
		vector<double> residualstemp;
		vector<int> ptss;
		int inlier_count = 0;
		if (!getSample(ptsID, ptss))
		{
			stop_loop = true;
			continue;
		}

		vector<Point2d> pt_sam;
		pt_sam.push_back(ptSet[ptss[0]]);
		pt_sam.push_back(ptSet[ptss[1]]);

		//reject degenerate samples whose two points are too close together
		if (abs(pt_sam[0].x - pt_sam[1].x) < 5 && abs(pt_sam[0].y - pt_sam[1].y) < 5)
		{
			++sample_count;
			continue;
		}

		// estimate the line from the 2-point sample
		calcLinePara(pt_sam, a, b, c);
		//test every point against the model
		for (unsigned int i = 0; i < ptSet.size(); i++)
		{
			Point2d pt = ptSet[i];
			double resid_ = fabs(pt.x * a + pt.y * b + c);
			residualstemp.push_back(resid_);
			inlierstemp.push_back(false);
			if (resid_ < residual_error)
			{
				++inlier_count;
				inlierstemp[i] = true;
			}
		}
		// keep the best model found so far
		if (inlier_count >= maximum)
		{
			maximum = inlier_count;
			resids_ = residualstemp;
			inlierFlag = inlierstemp;
		}
		// update the required number of RANSAC iterations from the current inlier ratio
		if (inlier_count == 0)
		{
			N = 500;
		}
		else
		{
			double epsilon = 1.0 - double(inlier_count) / (double)ptSet.size(); //outlier ratio
			double p = 0.99; //desired probability of drawing at least one outlier-free sample
			double s = 2.0; //sample size: 2 points define a line
			N = int(log(1.0 - p) / log(1.0 - pow((1.0 - epsilon), s)));
		}
		++sample_count;
	}

	//refit the line using all inliers of the best model
	vector<Point2d> pset;
	for (unsigned int i = 0; i < ptSet.size(); i++)
	{
		if (inlierFlag[i])
			pset.push_back(ptSet[i]);
	}

	calcLinePara(pset, a, b, c);
}
int main()
{
	vector<Point2d> ptSet;
	get_inline_outline(ptSet);

	//white canvas on which all points in the set are drawn
	Mat img(321, 641, CV_8UC3, Scalar(255, 255, 255));

	double A, B, C;
	vector<bool> inliers;
	fitLineRANSAC(ptSet, A, B, C, inliers);
	for (unsigned int i = 0; i < ptSet.size(); i++) {
		if (inliers[i])
			circle(img, ptSet[i], 3, Scalar(0, 255, 0), 3, 16);
		else
			circle(img, ptSet[i], 3, Scalar(0, 0, 255), 3, 16);
	}

	//normalize the coefficients so that A = 1 (assumes the fitted line is not vertical)
	B = B / A;
	C = C / A;
	A = A / A;

	//draw the fitted line through its axis intercepts
	Point2d ptStart, ptEnd;
	ptStart.x = 0;
	ptStart.y = -(A*ptStart.x + C) / B;
	ptEnd.y = 0;
	ptEnd.x = -(B*ptEnd.y + C) / A;
	line(img, ptStart, ptEnd, Scalar(0, 0, 0), 1, 16);
	cout << "A:" << A << " " << "B:" << B << " " << "C:" << C << " " << endl;

	imshow("line fitting", img);
	waitKey();
	return 0;
}

 4) Strengths and weaknesses of RANSAC:

  1. Strength: it estimates model parameters robustly, recovering high-accuracy parameters even from data sets that contain a large fraction of outliers.
  2. Weaknesses: the estimation is randomized and capped by an iteration limit, so it may fail to reach the optimal result and can even return a wrong one. It also fits only a single instance of a given model type; if two or more model instances are present in the data, it cannot recover the others.

 5) Affine transform + RANSAC implementation
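
Before the code: the estimation step inside this RANSAC loop fits the affine matrix by least squares. Stacking the sampled correspondences as homogeneous rows [x, y, 1] of matrices P (first image) and Q (second image), AffineTransformer below seeks A with P*A ≈ Q (row-vector convention) and solves the normal equations

A = (P^T * P)^-1 * P^T * Q

Three non-collinear correspondences determine A exactly; more points give a least-squares fit. A candidate A is then scored by a symmetric transfer error: each match is mapped forward by A and backward by A^-1, and the two Euclidean residuals are summed and compared against the threshold.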

#include <iostream>
#include <cstdlib>
#include <cmath>
#include <ctime>
#include <set>
#include <vector>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/xfeatures2d.hpp>
#include <Eigen/Core>
#include <Eigen/Dense>
#include <mynteyed/camera.h>
#include <mynteyed/utils.h>

MYNTEYE_USE_NAMESPACE
cv::Mat depth;

using namespace std;
class AffineRansac
{
public:
    //N: maximum number of RANSAC iterations; Epsilon: inlier threshold on the symmetric transfer error (pixels)
    AffineRansac(int N,double Epsilon):max_iterations(N),threshold(Epsilon)
    {
        srand((unsigned int)time(NULL));
    }
    //Least-squares estimate of the 3x3 affine matrix A such that P*A ~= Q (row-vector convention)
    bool AffineTransformer(std::vector<cv::Point2d> Image_Coords_1, std::vector<cv::Point2d> Image_Coords_2,Eigen::Matrix<double,3,3> &A)
    {
        if(Image_Coords_1.size()<3||Image_Coords_2.size()<3)
        {
            std::cout<<"Incorrect input dimensions"<<std::endl;
            return false;
        }
        else if(Image_Coords_1.size()!=Image_Coords_2.size())
        {
            std::cout<<"Mismatch between number of points in the image pair"<<std::endl;
            return false;
        }
        //stack the correspondences as homogeneous rows [x, y, 1]
        Eigen::Matrix<double,Eigen::Dynamic,3> P;
        Eigen::Matrix<double,Eigen::Dynamic,3> Q;
        P.resize(Image_Coords_1.size(),3);
        Q.resize(Image_Coords_1.size(),3);
        for(int i=0;i<Image_Coords_1.size();i++)
        {
            P(i,0)=Image_Coords_1[i].x;
            P(i,1)=Image_Coords_1[i].y;
            P(i,2)=1;

            Q(i,0)=Image_Coords_2[i].x;
            Q(i,1)=Image_Coords_2[i].y;
            Q(i,2)=1;
        }

        //normal equations: A = (P^T * P)^-1 * P^T * Q
        Eigen::Matrix<double,3,3> P_inv;
        Eigen::Matrix<double,3,Eigen::Dynamic> P_;
        P_inv=P.transpose()*P;
        if(std::fabs(P_inv.determinant())<1e-12) //degenerate (e.g. collinear) sample
            return false;
        P_=(P_inv.inverse())*(P.transpose());
        A =P_*Q;
        return true;
    }
    //symmetric transfer error: reprojection distance in image 1 plus reprojection distance in image 2
    double SumEucDistance(cv::Point2d const &P1, cv::Point2d const &P1_Co,
                          cv::Point2d const &P2, cv::Point2d const &P2_Co)
    {
        double d=std::sqrt(pow((P1.x-P1_Co.x),2)+pow((P1.y-P1_Co.y),2))+std::sqrt(pow((P2.x-P2_Co.x),2)+pow((P2.y-P2_Co.y),2));
        return d;
    }
    //Classify every correspondence against a candidate A: a pair is an inlier
    //if its symmetric transfer error falls below the threshold.
    void find_inliers (std::vector<cv::Point2d> Coordinates_Image_1, std::vector<cv::Point2d> Coordinates_Image_2,Eigen::Matrix<double,3,3> const &A, std::vector<int> &result)
    {
        result.resize(0);
        Eigen::Matrix<double,1,3> X2; //predicted point in image 2
        Eigen::Matrix<double,1,3> X1; //predicted point in image 1

        Eigen::Matrix<double,1,3> x2; //observed point in image 2 (homogeneous row)
        Eigen::Matrix<double,1,3> x1; //observed point in image 1 (homogeneous row)
        cv::Point2d P1,P2,P1_Co,P2_Co;
        double ssd;

        for(int i=0;i<Coordinates_Image_1.size();i++)
        {
            x1(0,0)=Coordinates_Image_1[i].x;
            x1(0,1)=Coordinates_Image_1[i].y;
            x1(0,2)=1;

            x2(0,0)=Coordinates_Image_2[i].x;
            x2(0,1)=Coordinates_Image_2[i].y;
            x2(0,2)=1;

            //row-vector convention, matching AffineTransformer (P*A ~= Q):
            //predict x2 from x1, and x1 from x2
            X2=x1*A;
            X1=x2*A.inverse();

            P1.x=X1(0,0);
            P1.y=X1(0,1);
            P2.x=X2(0,0);
            P2.y=X2(0,1);

            P1_Co.x=x1(0,0);
            P1_Co.y=x1(0,1);
            P2_Co.x=x2(0,0);
            P2_Co.y=x2(0,1);

            ssd=SumEucDistance(P1,P1_Co,P2,P2_Co);
            if(ssd<threshold)
            {
                result.push_back(i);
            }
        }
    }

    void Affine_Ransac(std::vector<cv::Point2d> Coordinates_Image_1, std::vector<cv::Point2d> Coordinates_Image_2)
    {
        int In_count = 0;
        std::vector<int> inliers;
        inliers.reserve(Coordinates_Image_1.size());

        std::vector<cv::Point2d> Image_Coords_1;
        std::vector<cv::Point2d> Image_Coords_2;

        Image_Coords_1.reserve(Coordinates_Image_1.size()/2+1);
        Image_Coords_2.reserve(Coordinates_Image_1.size()/2+1);

        std::cout<<"max_iterations: "<<max_iterations<<std::endl;
        while (In_count < max_iterations)
        {
            //start each hypothesis from a fresh sample
            Image_Coords_1.clear();
            Image_Coords_2.clear();

            //randomly pick points from the matches
            //(note: this samples half of the matches per hypothesis rather than the minimal 3)
            std::set<int> subsample;
            generateRandomArray(subsample,Coordinates_Image_1.size()/2+1,Coordinates_Image_1.size());
            for(std::set<int>::iterator it=subsample.begin();it!=subsample.end();it++)
            {
                Image_Coords_1.push_back(Coordinates_Image_1[*it]);
                Image_Coords_2.push_back(Coordinates_Image_2[*it]);
            }

            //estimate an affine matrix from the sample
            Eigen::Matrix<double,3,3> A;
            bool flag=AffineTransformer(Image_Coords_1, Image_Coords_2,A);
            if(!flag)
            {
                In_count++; //count failed hypotheses too, so the loop always terminates
                continue;
            }
            find_inliers(Coordinates_Image_1,Coordinates_Image_2,A,inliers);
            //keep the model with the largest consensus set so far
            if (inliers.size() > result_inliers.size())
            {
                std::cout<<"iteration: "<<In_count<<std::endl;
                std::cout<<"inliers: "<<inliers.size()<<" of "<<Coordinates_Image_1.size()<<std::endl;
                AffineMatrix = A;
                std::swap(result_inliers, inliers);
                inliers.reserve(Coordinates_Image_1.size());
            }
            In_count++;
        }
    }


private:

    //generate a uniformly distributed random number in [0, 1]
    double uniformRandom(void)
    {
        return (double)std::rand()/(double)RAND_MAX;
    }
    //fill arr with n distinct random indices in [0, size-1]
    void generateRandomArray(std::set<int> &arr, unsigned int n, int size)
    {
        while(arr.size()<n)
            arr.insert(int(uniformRandom()*(size-1)));
    }

    int max_iterations;

    /**
     * Threshold on the symmetric transfer error (in pixels)
     * used to determine inliers.
     */
    double threshold;


    /**
     * The resulting AffineMatrix which led to the inliers.
     * This is NOT the re-computed matrix from the inliers.
     */
    Eigen::Matrix<double,3,3> AffineMatrix;

    /**
     * The indices of inliers in the correspondences which led to the
     * affine matrix.
     */
    std::vector<int> result_inliers;

};

//back-project a pixel into normalized camera coordinates using the intrinsic matrix K
cv::Point2f pixel2cam ( const cv::Point2d& p, const cv::Mat& K )
{
    return cv::Point2f
            (
                    ( p.x - K.at<double>(0,2) ) / K.at<double>(0,0),
                    ( p.y - K.at<double>(1,2) ) / K.at<double>(1,1)
            );
}

int main(int argc, char const *argv[])
{

    //keypoints of the left and right views
    std::vector<cv::KeyPoint> keypoints_l,keypoints_r;

    cv::Mat descriptors_l,descriptors_r;

    //keypoints of the previous left and right frames
    std::vector<cv::KeyPoint> pre_keypoints_l,pre_keypoints_r;

    //descriptors of the previous left and right frames
    cv::Mat pre_descriptors_l,pre_descriptors_r;

    //previous and current frames
    cv::Mat previous_frame_l,previous_frame_r,current_frame_l,current_frame_r;

    std::vector<cv::Point2d> Coordinates_Image_1, Coordinates_Image_2;

    //camera
    Camera cam;
    //device info
    DeviceInfo dev_info;
    //select a camera
    if (!util::select(cam, &dev_info)) {
        return 1;
    }
    util::print_stream_infos(cam, dev_info.index);
    std::cout << "Open device: " << dev_info.index << ", "
              << dev_info.name << std::endl << std::endl;
    //parameter setup
    OpenParams params(dev_info.index); //explicit constructor call
    params.color_mode=ColorMode ::COLOR_RECTIFIED;
    params.depth_mode = DepthMode::DEPTH_COLORFUL;
    params.stream_mode = StreamMode::STREAM_1280x480;
    params.ir_intensity = 10;
    params.framerate = 60;
    cam.Open(params);

    std::cout << std::endl;
    if (!cam.IsOpened()) {
        std::cerr << "Error: Open camera failed" << std::endl;
        return 1;
    }
    std::cout << "Open device success" << std::endl << std::endl;
    std::cout << "Press ESC/Q on Windows to terminate" << std::endl;

    cv::namedWindow("left");
    cv::namedWindow("right");
    cv::namedWindow("depth");

    cv::Ptr<cv::xfeatures2d::SIFT> f2d=cv::xfeatures2d::SIFT::create(2000);

    //grab the first frame pair
    auto right_color = cam.GetStreamData(ImageType::IMAGE_RIGHT_COLOR);
    auto left_color = cam.GetStreamData(ImageType::IMAGE_LEFT_COLOR);
    if (right_color.img && left_color.img)
    {
        //clone: the Mat only wraps the SDK's internal buffer, which may be reused on the next grab
        auto left_img = left_color.img->To(ImageFormat::COLOR_BGR);
        previous_frame_l = cv::Mat(left_img->height(), left_img->width(), CV_8UC3,
                                   left_img->data()).clone();

        auto right_img = right_color.img->To(ImageFormat::COLOR_BGR);
        previous_frame_r = cv::Mat(right_img->height(), right_img->width(), CV_8UC3,
                                   right_img->data()).clone();

        //detect keypoints and compute descriptors for the previous frame
        f2d->detectAndCompute(previous_frame_l,cv::Mat(),keypoints_l,descriptors_l);
        f2d->detectAndCompute(previous_frame_r,cv::Mat(),keypoints_r,descriptors_r);

        //store the previous left and right keypoints
        pre_keypoints_l.insert(pre_keypoints_l.end(),keypoints_l.begin(),keypoints_l.end());
        pre_keypoints_r.insert(pre_keypoints_r.end(),keypoints_r.begin(),keypoints_r.end());
        //store the previous left descriptors
        descriptors_l.copyTo(pre_descriptors_l);
    }
    while(true)
    {

        right_color = cam.GetStreamData(ImageType::IMAGE_RIGHT_COLOR);
        left_color = cam.GetStreamData(ImageType::IMAGE_LEFT_COLOR);
        if (right_color.img && left_color.img)
        {
            //grab the current frame pair
            auto left_img = left_color.img->To(ImageFormat::COLOR_BGR);
            current_frame_l=cv::Mat(left_img->height(), left_img->width(), CV_8UC3,
                                    left_img->data());
            auto right_img = right_color.img->To(ImageFormat::COLOR_BGR);
            current_frame_r=cv::Mat(right_img->height(), right_img->width(), CV_8UC3,
                                    right_img->data());
            //show the left and right views (window names match the namedWindow calls above)
            cv::imshow("right", current_frame_r);
            cv::imshow("left", current_frame_l);

            //detect keypoints and compute descriptors for the current frame
            f2d->detectAndCompute(current_frame_r,cv::Mat(),keypoints_r,descriptors_r);
            f2d->detectAndCompute(current_frame_l,cv::Mat(),keypoints_l,descriptors_l);

            cv::Ptr<cv::DescriptorMatcher> matcher=cv::DescriptorMatcher::create(cv::DescriptorMatcher::FLANNBASED);
            std::vector<std::vector<cv::DMatch>> knn_matches;
            matcher->knnMatch(pre_descriptors_l,descriptors_l,knn_matches,2);

            //Lowe's ratio test to keep only distinctive matches
            const float ratio_thresh=0.8f;
            std::vector<cv::DMatch> good_matches;
            for(int i=0;i<knn_matches.size();i++)
            {
                if(knn_matches[i][0].distance<ratio_thresh*knn_matches[i][1].distance)
                    good_matches.push_back(knn_matches[i][0]);
            }
            cv::Mat img_matches;
            cv::drawMatches(previous_frame_l,pre_keypoints_l,current_frame_l,keypoints_l,good_matches,img_matches);
            cv::imshow("match",img_matches);

            //collect the matched point pairs: queryIdx indexes the previous frame,
            //trainIdx indexes the current frame
            for(int i=0;i<good_matches.size();i++)
            {
                Coordinates_Image_1.push_back(pre_keypoints_l[good_matches[i].queryIdx].pt);
                Coordinates_Image_2.push_back(keypoints_l[good_matches[i].trainIdx].pt);
            }
            AffineRansac affine(100,1.5); //100 iterations, 1.5-pixel threshold

            affine.Affine_Ransac(Coordinates_Image_1,Coordinates_Image_2);
            Coordinates_Image_1.clear();
            Coordinates_Image_2.clear();

            //carry the current frame over as the new previous frame
            //(clone: current_frame_l only wraps the SDK buffer, which may be reused on the next grab)
            previous_frame_l = current_frame_l.clone();
            cv::swap(pre_descriptors_l,descriptors_l);
            std::swap(pre_keypoints_l,keypoints_l);


            char key = static_cast<char>(cv::waitKey(1));
            if (key == 27 || key == 'q' || key == 'Q') {  // ESC/Q
                break;
            }
        }
        auto image_depth = cam.GetStreamData(ImageType::IMAGE_DEPTH);
        if (image_depth.img) {
            auto depth_img = image_depth.img->To(ImageFormat::COLOR_BGR);
            depth=cv::Mat(depth_img->height(), depth_img->width(), CV_8UC3,
                          depth_img->data());
            cv::imshow("depth", depth);
        }
    }
    cam.Close();
    cv::destroyAllWindows();
    return 0;
}
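
One design note on the class above: as its own comment says, AffineMatrix is the hypothesis that produced the largest consensus set, not a matrix re-estimated from those inliers. Step 4 of the basic idea in section 2) calls for exactly that refit (as the line-fitting example does with calcLinePara on the final inlier set), so a natural extension would be to run AffineTransformer once more on result_inliers after the loop.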

