Monocular and Stereo Camera Calibration with OpenCV

Monocular and stereo camera calibration is one of the key steps in visual measurement and visual localization. For the underlying theory, the following articles are good references:

https://blog.csdn.net/Kano365/article/details/90721424

https://www.cnblogs.com/zyly/p/9366080.html#_label3

Both articles already explain the theory very well, so it is not repeated here. Below is the code I used during my own tests:

1. Monocular camera calibration

Header file of the monocular calibration class, monocular.h

#ifndef MONOCULAR_H
#define MONOCULAR_H

#include <iostream>
#include <string>
#include <opencv2/opencv.hpp>

using namespace std;
using namespace cv;

class monocular
{
public:
    /* Constructor */
    monocular(int boardWidth, int boardHeight, int squareSize);

    /* Run the camera calibration */
    void calibrate();

    /* Destructor */
    ~monocular();

private:
    /* Compute the physical coordinates of the corners on the calibration board */
    void calRealPoint(vector<vector<Point3f> >& obj, int boardwidth, int boardheight, int imgNumber, int squaresize);

    /* Set initial guesses for the camera parameters (optional) */
    void guessCameraParam();

    /* Save the calibration parameters */
    void saveCameraParam();

    /* Compute the re-projection error */
    void reproject_error();

    /* Undistort the original images */
    void distort_correct();

    /* Read the camera intrinsic parameters from file and print them */
    void ReadCameraParam();

private:
    Mat mIntrinsic;                               // camera intrinsic matrix
    Mat mDistortion_coeff;                        // camera distortion coefficients
    vector<Mat> mvRvecs;                          // rotation vectors, one per image
    vector<Mat> mvTvecs;                          // translation vectors, one per image
    vector<vector<Point2f> > mvCorners;           // detected corners of every image, one-to-one with mvObjRealPoint
    vector<vector<Point3f> > mvObjRealPoint;      // physical coordinates of the corners for every image
    vector<Point2f> mvCorner;                     // corners detected in a single image

    vector<String> mFiles;// paths of all calibration images
    vector<Mat> mImages;  // all calibration images

    int mBoardWidth;      // number of inner corners per row
    int mBoardHeight;     // number of inner corners per column
    int mBoardCorner;     // total number of inner corners
    int mSquareSize;      // size of one chessboard square, in mm
    Size mBoardSize;      // board size expressed in inner corners (width x height)

    double mdRMS_error;   // overall RMS re-projection error returned by calibrateCamera

    double mdtotal_err;   // accumulated re-projection error over all images
    double mdave_error;   // mean re-projection error per image
};
#endif

Implementation file of the monocular calibration class, monocular.cc

#include "monocular.h"

monocular::monocular(int boardWidth, int boardHeight, int squareSize): 
	mBoardWidth(boardWidth), mBoardHeight(boardHeight), mSquareSize(squareSize)
{
    mBoardCorner = boardWidth * boardHeight;
    mBoardSize = Size(boardWidth, boardHeight);
}

monocular::~monocular()
{
}

void monocular::calRealPoint(vector<vector<Point3f> >& obj, int boardwidth, int boardheight, int imgNumber, int squaresize)
{
	vector<Point3f> imgpoint;
	// enumerate the corners row by row, matching the order returned by findChessboardCorners
	for (int rowIndex = 0; rowIndex < boardheight; rowIndex++)
	{
		for (int colIndex = 0; colIndex < boardwidth; colIndex++)
		{
			imgpoint.push_back(Point3f(rowIndex * squaresize, colIndex * squaresize, 0));
		}
	}
	for (int imgIndex = 0; imgIndex < imgNumber; imgIndex++)
	{
		obj.push_back(imgpoint);
	}
}

/* Set initial guesses for the camera parameters (this step is optional) */
void monocular::guessCameraParam()
{
	/* allocate memory */
	mIntrinsic.create(3, 3, CV_64FC1);           // camera intrinsic matrix
	mDistortion_coeff.create(5, 1, CV_64FC1);    // distortion coefficients

	/*
	fx 0 cx
	0 fy cy
	0 0  1     intrinsic matrix
	*/
	mIntrinsic.at<double>(0, 0) = 256.8093262;   //fx         
	mIntrinsic.at<double>(0, 2) = 160.2826538;   //cx  
	mIntrinsic.at<double>(1, 1) = 254.7511139;   //fy  
	mIntrinsic.at<double>(1, 2) = 127.6264572;   //cy  

	mIntrinsic.at<double>(0, 1) = 0;
	mIntrinsic.at<double>(1, 0) = 0;
	mIntrinsic.at<double>(2, 0) = 0;
	mIntrinsic.at<double>(2, 1) = 0;
	mIntrinsic.at<double>(2, 2) = 1;

	/*
	k1 k2 p1 p2 k3    distortion coefficients
	*/
	mDistortion_coeff.at<double>(0, 0) = -0.193740;  //k1  
	mDistortion_coeff.at<double>(1, 0) = -0.378588;  //k2  
	mDistortion_coeff.at<double>(2, 0) = 0.028980;   //p1  
	mDistortion_coeff.at<double>(3, 0) = 0.008136;   //p2  
	mDistortion_coeff.at<double>(4, 0) = 0;          //k3
}

void monocular::saveCameraParam()
{
    FileStorage fs("./result/monocular.yaml", FileStorage::WRITE); 
	if(!fs.isOpened())
	{
		cout << "open file error!" << endl;
		return;
	}

    fs << "cameraMatrix" << mIntrinsic;
    fs << "distCoeffs" << mDistortion_coeff;

    fs << "the overall RMS re-projection error" << mdRMS_error;
    fs << "the Mean pixel re-projection error" << mdave_error;

    fs.release();  
}

void monocular::ReadCameraParam()
{
    FileStorage fs("./result/monocular.yaml", FileStorage::READ);   
	if(!fs.isOpened())
	{
		cout << "open file error!" << endl;
		return;
	}

    fs["cameraMatrix"] >> mIntrinsic;  
    fs["distCoeffs"] >> mDistortion_coeff;  

    cout << "cameraMatrix is: " << mIntrinsic << endl;  
    cout << "distCoeffs is:" << mDistortion_coeff << endl;

    fs.release();	
}

void monocular::reproject_error()
{
	mdtotal_err = 0.0;       
	mdave_error = 0.0;             
 
	vector<Point2f> image_points2; // holds the re-projected points
    image_points2.clear();

	for(size_t i = 0; i < mvRvecs.size(); i++)
	{
		vector<Point3f> tempPointSet = mvObjRealPoint[i];

		// re-project the 3D points with the estimated intrinsic and extrinsic parameters
		projectPoints(tempPointSet, mvRvecs[i], mvTvecs[i], mIntrinsic, mDistortion_coeff, image_points2);

		// compare the re-projected points with the detected corners
		vector<Point2f> tempImagePoint = mvCorners[i];

		Mat tempImagePointMat = Mat(1, tempImagePoint.size(), CV_32FC2); // corners from cornerSubPix
		Mat image_points2Mat = Mat(1, image_points2.size(), CV_32FC2);   // points from projectPoints

		// copy both point sets into Mat form so that norm() can compare them
		for (size_t j = 0; j < tempImagePoint.size(); j++)
		{
			image_points2Mat.at<Vec2f>(0, j) = Vec2f(image_points2[j].x, image_points2[j].y);
			tempImagePointMat.at<Vec2f>(0, j) = Vec2f(tempImagePoint[j].x, tempImagePoint[j].y);
		}

		// norm() with NORM_L2 gives the L2 distance between the two point sets of this image
		double err = norm(image_points2Mat, tempImagePointMat, NORM_L2);
		mdtotal_err += err;
		std::cout << "the " << i + 1 << " image re-projection error: " << err << " pixel" << endl;
	}

    mdave_error = mdtotal_err / mvRvecs.size();
	std::cout << "The mean re-projection error over all images: " << mdave_error << " pixel" << endl;
}

void monocular::distort_correct()
{
	Mat mapx = Mat(mImages[0].size(), CV_32FC1);
	Mat mapy = Mat(mImages[0].size(), CV_32FC1);
	Mat R = Mat::eye(3, 3, CV_32F);      

    // compute the undistortion remapping tables
    // 3rd parameter R: optional rectification rotation between the original and the rectified camera frame (identity here);
	// 4th parameter newCameraMatrix: the camera matrix of the undistorted image (the original intrinsics are reused here);
	// 5th parameter size: size of the undistorted output image;
	// 6th parameter m1type: data type of map1, either CV_32FC1 or CV_16SC2;
	// 7th parameter map1 and 8th parameter map2: the output x/y remapping tables;
	initUndistortRectifyMap(mIntrinsic, mDistortion_coeff, R, mIntrinsic, mImages[0].size(), CV_32FC1, mapx, mapy);
 
	for (int i = 0; i < mImages.size(); i++)
	{
		Mat imageSource = mImages[i];
		Mat newimage = mImages[i].clone();
		Mat newimage1 = mImages[i].clone();
 
		// Method 1: use initUndistortRectifyMap and remap together.
		// initUndistortRectifyMap computes the distortion maps, remap applies them to the image.
		remap(imageSource, newimage, mapx, mapy, INTER_LINEAR);

		// Method 2: use undistort directly
		undistort(imageSource, newimage1, mIntrinsic, mDistortion_coeff);

		// write out the corrected images
		string str = "./result/remap/" + to_string(i + 1) + ".jpg";
		string str1 = "./result/undistort/" + to_string(i + 1) + ".jpg";

		imwrite(str, newimage);
		imwrite(str1, newimage1);
	}
}

void monocular::calibrate()
{
    int Width = 0, Height = 0;

    string filespath = "./data/left/";
    glob(filespath, mFiles, false);

    for(size_t i = 0; i < mFiles.size(); i++) 
        cout << "calibrate image path is: " << mFiles[i] << endl;

    for (size_t i = 0; i < mFiles.size(); i++) 
        mImages.push_back(imread(mFiles[i])); 
	
	if(mImages.size() < 5)
	{
		cout << "the image of calibration is not enough!" << endl;
		return;
	}

    Width = mImages[0].cols;
    Height = mImages[0].rows;

    for(size_t i = 0; i < mImages.size(); i++)
    {
        cv::Mat SrcImage = mImages[i];
        if(SrcImage.channels() != 1)
            cvtColor(SrcImage, SrcImage, COLOR_BGR2GRAY);

        bool isFind = findChessboardCorners(SrcImage, mBoardSize, mvCorner, CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE);

        if (isFind == true)
		{
			// refine the corner locations to sub-pixel accuracy
			cornerSubPix(SrcImage, mvCorner, Size(5, 5), Size(-1, -1), TermCriteria(TermCriteria::EPS + TermCriteria::MAX_ITER, 20, 0.1));
			// draw the detected corners for visual inspection
			drawChessboardCorners(SrcImage, mBoardSize, mvCorner, isFind);
			mvCorners.push_back(mvCorner);

            imshow("chessboard_corner", SrcImage);
			waitKey(50);

			cout << "The image " << i << " is good" << endl;
		}
		else
			cout << "The image " << i << " is bad, try again" << endl;
    }

	/* Set initial guesses for the camera parameters; if flags = 0 is passed to calibrateCamera this can be skipped */
	guessCameraParam();
	cout << "guess successful" << endl;

	/* Compute the 3D board coordinates of the corners, one set per image in which corners were found */
	calRealPoint(mvObjRealPoint, mBoardWidth, mBoardHeight, (int)mvCorners.size(), mSquareSize);
	cout << "calculate real successful" << endl;

	/* Calibrate the camera */

	// 1st parameter objectPoints: the 3D points in the world frame, vector<vector<Point3f>>,
	// initialised from the size of a single chessboard square for every inner corner.
	// 2nd parameter imagePoints: the image coordinates of each inner corner, vector<vector<Point2f>>.
	// 3rd parameter imageSize: the image size in pixels.
	// 4th parameter cameraMatrix: the camera intrinsic matrix, e.g. Mat cameraMatrix = Mat(3, 3, CV_32FC1, Scalar::all(0));
	// 5th parameter distCoeffs: the distortion coefficients, e.g. Mat distCoeffs = Mat(1, 5, CV_32FC1, Scalar::all(0));

	// intrinsic matrix M = [fx γ u0; 0 fy v0; 0 0 1]
	// distortion coefficients: k1, k2, p1, p2, k3

	// 6th parameter rvecs: the rotation vectors R, vector<Mat> rvecs;
	// 7th parameter tvecs: the translation vectors T, likewise vector<Mat> tvecs;
	// 8th parameter flags: the algorithm options used during calibration; 9th parameter criteria: the termination criteria of the optimisation.
	// return value: the overall RMS re-projection error.

	// In short: this yields the intrinsic matrix K, the 5 distortion coefficients, and one translation vector T and rotation vector R per image.
	mdRMS_error = calibrateCamera(mvObjRealPoint, mvCorners, Size(Width, Height), mIntrinsic, mDistortion_coeff, mvRvecs, mvTvecs, 0);
	cout << "the overall RMS re-projection error is: " << mdRMS_error << endl;
	cout << "calibration successful" << endl;

    /* Compute the re-projection error */
    reproject_error();
    cout << "re-project_error compute successful" << endl;

	/* Save the calibration parameters to file */
	saveCameraParam();
	cout << "save camera param successful" << endl;

	/* Undistort the original images */
    distort_correct();
    cout << "distort_correct successful" << endl;

	/* Read the intrinsic parameters back from file and print them */
	ReadCameraParam();
	cout << "read camera param finished!" << endl;
}

Main function file for monocular calibration, main.cc

#include "monocular.h"

int main ( int argc, char** argv )
{
    // number of inner corners of the chessboard (width, height) and the square size in mm
    monocular Mono_cali(6, 9, 25);
    Mono_cali.calibrate();

    return 0;
}

Finally, the full code can be downloaded from: https://github.com/zwl2017/monocular_calibrate

The repository includes test images and instructions on how to run the code. Of course, it can also be tested on Windows with Visual Studio by simply creating a new project.

2. Stereo camera calibration

For convenience, the monocular calibration class above is reused directly to obtain the intrinsic parameters of each camera. The full code can be downloaded from: https://github.com/zwl2017/stereo_calibrate
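To illustrate the overall flow (this is only a minimal sketch, not the exact code in the repository), once the chessboard corners of both cameras and their intrinsics K1, D1, K2, D2 have been obtained with the monocular class above, the stereo step boils down to a single call to cv::stereoCalibrate. The same headers and namespaces as in monocular.h are assumed, and all variable names below are placeholders:

// Hypothetical sketch: estimate the rotation R and translation T between the left and
// right camera, keeping the intrinsics from the monocular calibration fixed.
// objectPoints, leftCorners, rightCorners and imageSize are assumed to be filled in
// exactly as in the monocular class above, but for image pairs.
Mat R, T, E, F;
double rms = stereoCalibrate(objectPoints, leftCorners, rightCorners,
                             K1, D1, K2, D2, imageSize,
                             R, T, E, F,
                             CALIB_FIX_INTRINSIC,
                             TermCriteria(TermCriteria::EPS + TermCriteria::MAX_ITER, 100, 1e-5));
cout << "stereo RMS re-projection error: " << rms << endl;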

The part of the whole process that is hardest to understand is the stereo rectification. OpenCV uses Bouguet's algorithm for rectification; for details see https://blog.csdn.net/u011574296/article/details/73826420
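On the OpenCV side, Bouguet rectification amounts to a call to cv::stereoRectify followed by one initUndistortRectifyMap / remap pair per camera. A minimal sketch, continuing with the placeholder names from the snippet above (leftImage / rightImage are assumed to be a loaded image pair):

// Hypothetical sketch: R1/R2 are the rectifying rotations, P1/P2 the new projection
// matrices and Q the disparity-to-depth mapping matrix computed by Bouguet's method.
Mat R1, R2, P1, P2, Q;
stereoRectify(K1, D1, K2, D2, imageSize, R, T,
              R1, R2, P1, P2, Q,
              CALIB_ZERO_DISPARITY, 0, imageSize);

// one remap table per camera, then warp each image of the pair
Mat mapLx, mapLy, mapRx, mapRy;
initUndistortRectifyMap(K1, D1, R1, P1, imageSize, CV_32FC1, mapLx, mapLy);
initUndistortRectifyMap(K2, D2, R2, P2, imageSize, CV_32FC1, mapRx, mapRy);

Mat rectLeft, rectRight;
remap(leftImage,  rectLeft,  mapLx, mapLy, INTER_LINEAR);
remap(rightImage, rectRight, mapRx, mapRy, INTER_LINEAR);
// after rectification, corresponding points lie on the same image row in both images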

How the rectification algorithm itself is derived is left for future study.
