相機標定python、cpp--關於標定板一奇一偶--深度圖與RGB對齊

相機標定:

https://docs.opencv.org/3.4/d9/d47/samples_2cpp_2tutorial_code_2features2D_2Homography_2homography_from_camera_displacement_8cpp-example.html

教程:

https://docs.opencv.org/3.4/d9/dab/tutorial_homography.html

https://docs.opencv.org/3.4/examples.html

https://docs.opencv.org/3.4/d3/d81/tutorial_contrib_root.html

 

http://www.technolabsz.com/2012/07/camera-calibration-using-opencv.html

https://github.com/warp1337/opencv_cam_calibration/blob/master/src/camera_calibration.cpp

https://github.com/warp1337/opencv_cam_calibration

https://docs.opencv.org/2.4/doc/tutorials/calib3d/camera_calibration/camera_calibration.html

 

相機校準和3D重建
相機校準
https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_calib3d/py_calibration/py_calibration.html
姿態估計
https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_calib3d/py_pose/py_pose.html
對極幾何
https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_calib3d/py_epipolar_geometry/py_epipolar_geometry.html
立體圖像的深度圖
https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_calib3d/py_depthmap/py_depthmap.html

 

彩色圖對齊深度圖:

深度圖像配準(Registration)原理

https://www.cnblogs.com/cv-pr/p/5769617.html

 

intel realsense SR300 深度圖像和彩色圖像對齊

https://blog.csdn.net/jay463261929/article/details/53582800

 

Kinect深度圖與RGB攝像頭的標定與配準

https://blog.csdn.net/aichipmunk/article/details/9264703

補充我寫的一些:

# coding:utf-8
import cv2
import numpy as np
import matplotlib.pyplot as plt
import json
import os

#爲深度圖像中的每一個像素附上對應的RGB顏色: 深度圖座標系轉換到RGB座標系

#爲RGB圖像中的每一個像素附上對應的深度: RGB座標系轉換到深度圖座標系

'''
-0.991604  -0.126243  -0.027989  214.997275
-0.122135  0.985481  -0.117944  133.474350
0.042472  -0.113535  -0.992626  407.073850
0.000000  0.000000  0.000000  1.000000
'''
R_dep = np.array([[-0.991604,  -0.126243,  -0.027989],
                  [-0.122135,  0.985481,  -0.117944],
                  [0.042472,  -0.113535,  -0.992626]])
t_dep = np.array([[214.997275,133.474350,407.073850]]).T

'''
-0.990949  -0.125081  -0.048724  189.587175
-0.118729  0.986054  -0.116626  194.357782
0.062633  -0.109786  -0.991980  383.686418
0.000000  0.000000  0.000000  1.000000
'''
R_rgb = np.array([[-0.990949,  -0.125081,  -0.048724],
                  [-0.118729,  0.986054,  -0.116626],
                  [0.062633,  -0.109786,  -0.991980]])
t_rgb = np.array([[189.587175,194.357782,383.686418]]).T


R_dep = np.matrix(R_dep)
t_dep = np.matrix(t_dep)
R_rgb = np.matrix(R_rgb)
t_rgb = np.matrix(t_rgb)

print("R_dep\n", R_dep, "\n")
print("t_dep\n", t_dep, "\n")
print("R_rgb\n", R_rgb, "\n")
print("t_rgb\n", t_rgb, "\n")

#爲深度圖像中的每一個像素附上對應的RGB顏色: 深度圖座標系轉換到RGB座標系
#所有旋轉矩陣都是正交陣,因此可用轉置運算代替求逆運算
#R_rgb2dep = R_rgb * R_dep.I
R_dep2rgb = R_rgb * R_dep.T
T_dep2rgb = t_rgb - (R_dep2rgb * t_dep)
print("R_rgb2dep\n", R_dep2rgb, "\n")
print("T_rgb2dep\n", T_dep2rgb, "\n")

#XYZ = K_rgb * R_dep2rgb * K_dep.T * Z_dep * Point_dep + K_rgb * T_dep2rgb
'''
void computeC2MC1(const Mat &R1, const Mat &tvec1, const Mat &R2, const Mat &tvec2,
                  Mat &R_1to2, Mat &tvec_1to2)
{
    //c2Mc1 = c2Mo * oMc1 = c2Mo * c1Mo.inv()
    R_1to2 = R2 * R1.t();
    tvec_1to2 = R2 * (-R1.t()*tvec1) + tvec2;
}
Mat R_1to2, t_1to2;
computeC2MC1(R1, tvec1, R2, tvec2, R_1to2, t_1to2);
'''

# R2 = R_rgb
# R1 = R_dep
# tvec2 = t_rgb
# tvec1= t_dep
#
# R_dep2rgb = R2 * R1.T;
# T_dep2rgb = R2 * (-R1.T*tvec1) + tvec2;
#
# print("R_rgb2dep\n", R_dep2rgb, "\n")
# print("T_rgb2dep\n", T_dep2rgb, "\n")





#結果
''' 
 [[ 0.99978415  0.00351069  0.02047798]
 [-0.00348663  0.99999324 -0.00122868]
 [-0.02048311  0.00115602  0.9997894 ]] 

T_rgb2dep
 [[-34.16833064]
 [ 62.1341124 ]
 [-19.05218977]] 
'''

import pcl
import pcl.pcl_visualization

def depth2cloud(depth, intrinsics):
    """Back-project a depth map into a PCL point cloud (pinhole model).

    Parameters
    ----------
    depth : 2-D array-like of per-pixel depth values; zero means "no
        measurement" and the pixel is skipped.
    intrinsics : 3x3 camera matrix [[fx, 0, cx], [0, fy, cy], [0, 0, s]].
        NOTE(review): z is divided by intrinsics[2][2], which is 1.0 in a
        standard camera matrix -- presumably that slot carries the depth
        scale factor here; confirm against the caller.

    Returns
    -------
    pcl.PointCloud of float32 (x, y, z) points.
    """
    rows = len(depth)
    cols = len(depth[0])
    pointcloud = []
    for m in range(rows):
        for n in range(cols):
            d = depth[m][n]
            if d == 0:
                continue  # invalid depth pixel -- nothing to back-project
            z = float(d) / intrinsics[2][2]
            x = (n - intrinsics[0][2]) * z / intrinsics[0][0]
            y = (m - intrinsics[1][2]) * z / intrinsics[1][1]
            pointcloud.append([x, y, z])
    cloud = pcl.PointCloud()
    cloud.from_array(np.array(pointcloud, dtype=np.float32))
    return cloud

'''
'''
        // Camera 1: pose of the calibration target in camera-1 coordinates.
        Mat rvec1, tvec1;
        solvePnP(objectPoints, corners1, leftcameraMatrix, leftdistCoeffs, rvec1, tvec1);


        // Camera 2: pose of the same target in camera-2 coordinates.
        Mat rvec2, tvec2;
        solvePnP(objectPoints, corners2, rightcameraMatrix, rightdistCoeffs, rvec2, tvec2);


        // Rodrigues' formula: rotation vector -> rotation matrix.
        Mat R1, R2;
        Rodrigues(rvec1, R1);
        Rodrigues(rvec2, R2);


        // Relative pose between the two cameras viewing the same scene:
        // camera-1 -> camera-2 (c2Mc1 = c2Mo * c1Mo.inv()).
        Mat R_1to2, tvec_1to2;
        R_1to2 = R2 * R1.t();
        tvec_1to2 = R2 * (-R1.t()*tvec1) + tvec2;

        std::cout<<"1to2_rgb2dep"<<std::endl;
        cout << fixed << setprecision(0) << R_1to2 << endl;
        // BUGFIX: printed `t_1to2`, which is never declared (compile error);
        // the translation was stored in `tvec_1to2`.
        cout << fixed << setprecision(0) << tvec_1to2 << endl;


        // The inverse mapping: camera-2 -> camera-1.
        Mat R_2to1, tvec_2to1;
        R_2to1 = R1 * R2.t();
        tvec_2to1 = R1 * (-R2.t()*tvec2) + tvec1;

        std::cout<<"2to1_dep2rgb"<<std::endl;
        cout << fixed << setprecision(0) << R_2to1 << endl;
        // BUGFIX: same undeclared-name issue (`t_2to1` -> `tvec_2to1`).
        cout << fixed << setprecision(0) << tvec_2to1 << endl;

 

Kinect深度圖與攝像頭RGB的標定與配準(轉載文章)有很多新東西

https://blog.csdn.net/aidem_brown/article/details/83713961

 

kinect 2.0 SDK學習筆記(四)--深度圖與彩色圖對齊

https://blog.csdn.net/jiaojialulu/article/details/53154045

 

kinect深度圖與彩圖匹配

https://blog.csdn.net/cocoaqin/article/details/77428100?fps=1&locationNum=3

 

深度攝像頭與彩色攝像頭的對齊

https://www.cnblogs.com/rogerjin/p/7845866.html

 

請教關於Kinect深度和彩色圖融合的問題?

https://www.zhihu.com/question/29631310

https://devblogs.microsoft.com/cppblog/kinect-for-windows-c-samples/

國外的標定:

http://rgbdemo.org/index.php/Documentation/Calibration

http://burrus.name/index.php/Research/KinectCalibration#tocLink5

 

Kinect彩色圖深度圖配準(分辨率不一樣時的處理方式):

http://blog.csdn.net/shihz_fy/article/details/43602393

 

ROS下的驅動與圖像序列保存及opencv顯示深度座標:

http://blog.csdn.net/sunbibei/article/details/51594824

 

SDK獲取出廠內參數代碼,MATLAB 標定Kinect v2等

http://blog.csdn.net/jiaojialulu/article/details/77430563

 

https://blog.csdn.net/h532600610/article/details/51800488

Corners中的角點座標順序排列規律不一定是以行從左上到右下。

使用座標計算映射關係時應提高警惕,對座標進行重新排列

 

標定板橫縱方向點數一奇一偶,檢測到的點順序就是固定的。板不標準,無法判斷起始座標點。

 

總結:該函數的功能就是判斷圖像內是否包含完整的棋盤圖,如果能夠檢測完全,就把他們的角點座標按順序(逐行,從左到右)記錄下來,並返回非0數,否則返回0。這裏對size參數要求非常嚴格,函數必須檢測到相同的size纔會返回非0,否則返回0,這裏一定要注意。角點檢測不完全時,可能畫不出圖像,或者畫出紅色角點;正確的圖像後面有參考。 
該函數檢測的角點的座標是不精確的,要想精確結果,需要使用cornerSubPix()函數,進行亞像素精度的調整。

 

recoverPose求Rt

https://python.hotexamples.com/examples/cv2/-/recoverPose/python-recoverpose-function-examples.html

https://github.com/CarlosHVMoraes/py3DRec

c++版本:https://blog.csdn.net/AIchipmunk/article/details/48157369

# coding:utf-8
import cv2
import numpy as np
import matplotlib.pyplot as plt
import json
import os

def mkdir(path):
    """Create directory *path* (with parents); no-op if it already exists.

    The original exists-check-then-makedirs sequence was racy and
    duplicated what ``os.makedirs(..., exist_ok=True)`` already does.
    """
    os.makedirs(path, exist_ok=True)

def searchCheckerboardDirFile(rootDir, garyimglist, rgbimglist, namelist, bboxflag):
    """Recursively collect checkerboard capture frames under *rootDir*.

    A frame qualifies when its path contains *bboxflag*, the file is a
    ``.png`` texture image, and the sibling depth map (the path with
    ``_IMG_Texture_8Bit.png`` replaced by ``_IMG_DepthMap.tif``) exists.
    For each hit the fixed texture/RGB file names and the parent
    directory name are appended to the three accumulator lists, which
    are mutated in place and also returned.
    """
    for entry in os.listdir(rootDir):
        fullPath = os.path.join(rootDir, entry)
        if os.path.isfile(fullPath):
            # Guard clauses: skip anything that is not a qualifying frame.
            if bboxflag not in fullPath:
                continue
            if not os.path.basename(fullPath).endswith('.png'):
                continue
            if not os.path.exists(fullPath.replace("_IMG_Texture_8Bit.png", "_IMG_DepthMap.tif")):
                continue
            fileName = fullPath.split('/')[-1]
            parentPath = fullPath.split(fileName)[0]
            parentName = parentPath.split('/')[-2]
            garyimglist.append(os.path.join(parentPath, "OtherSampleFrame_IMG_Texture_8Bit.png"))
            rgbimglist.append(os.path.join(parentPath, "OtherSampleFrame_IMG_Rgb.jpg"))
            namelist.append(parentName)
        elif os.path.isdir(fullPath):
            # Recurse; the shared accumulator lists carry the results up.
            searchCheckerboardDirFile(fullPath, garyimglist, rgbimglist, namelist, bboxflag)
        else:
            print('not file and dir '+os.path.basename(fullPath))
    return garyimglist, rgbimglist, namelist

# Input/output locations; the "-R2-" flag selects only R2-capture frames.
R1path = "./R2"
R1undistort = "./R2_undistort"
R1draw = "./R2_draw"
flagTemp = "-R2-"
mkdir(R1undistort)
mkdir(R1draw)

R1garyimglist = []
R1rgbimglist = []
R1namelist = []
# NOTE(review): the second return value is bound to `rgbimglist`, NOT
# `R1rgbimglist`; the loops further down read `rgbimglist`, so renaming
# either name in isolation would break them -- confirm before unifying.
R1garyimglist, rgbimglist, R1namelist = searchCheckerboardDirFile(R1path, R1garyimglist, R1rgbimglist, R1namelist, flagTemp)


# Checkerboard geometry: 9x9 inner corners, one grid unit per square.
patternSize = (9, 9)
patternH = 9
patternW = 9
# Object points of the corners in the board's own frame:
# (0,0,0), (1,0,0) ... (8,8,0).  The board is planar, so Z = 0.
objp = np.zeros((patternW * patternH, 3), np.float32)
xs, ys = np.meshgrid(np.arange(patternW), np.arange(patternH))
objp[:, 0] = xs.reshape(-1)
objp[:, 1] = ys.reshape(-1)
# Scale grid units to real-world millimetres (20 mm squares).
patternDistance = 20
objp = objp * patternDistance
# Per-frame correspondences accumulated by the detection loop below.
objpoints = []  # 3-D points in the world coordinate system
imgpoints = []  # 2-D points in the image plane


cornersSubPix1 = []
cornersSubPix2 = []
# Detect checkerboard corners in every captured frame and accumulate the
# 3-D/2-D correspondences used for calibration below.
for exR1 in range(len(R1garyimglist)):
    print(exR1, "/", len(R1garyimglist), "\n")
    grayTemp = cv2.imread(R1garyimglist[exR1], -1)
    GRAYH, GRAYW = grayTemp.shape[:2]

    rgb = cv2.imread(rgbimglist[exR1], -1)
    fliprgb = np.flip(rgb).copy()
    # NOTE(review): np.flip with no axis argument reverses EVERY axis --
    # rows, columns AND channels.  The original comment only describes the
    # BGR->RGB channel swap, but this also rotates the image 180 degrees;
    # presumably the RGB camera is mounted upside-down -- confirm.
    # After the flip the channel order is RGB, so convert back to BGR to
    # keep OpenCV's usual channel convention for the steps below.
    fliprgb = cv2.cvtColor(fliprgb, cv2.COLOR_RGB2BGR)

    LeftImgNewgray = cv2.cvtColor(fliprgb, cv2.COLOR_BGR2GRAY)
    RightImggray = cv2.cvtColor(grayTemp, cv2.COLOR_BGR2GRAY)

    # 9x9 inner corners.  Per the notes earlier in this article, an
    # odd-by-even pattern yields a fixed corner ordering; 9x9 is odd-odd,
    # so the starting corner can be ambiguous between frames.
    patternSize = (9, 9)
    cornersLeft = None
    cornersRight = None

    # test toggles an alternative flag set (with CALIB_CB_FILTER_QUADS).
    test = 0
    if test == 0:
        retLeft, cornersLeft = cv2.findChessboardCorners(LeftImgNewgray, patternSize, cornersLeft,
                                                         cv2.CALIB_CB_ADAPTIVE_THRESH)
        retRight, cornersRight = cv2.findChessboardCorners(RightImggray, patternSize, cornersRight,
                                                           cv2.CALIB_CB_ADAPTIVE_THRESH)
    else:
        retLeft, cornersLeft = cv2.findChessboardCorners(LeftImgNewgray, patternSize, cornersLeft,
                                                        cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FILTER_QUADS)
        retRight, cornersRight = cv2.findChessboardCorners(RightImggray, patternSize, cornersRight,
                                                        cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FILTER_QUADS)

    # Termination criteria for the sub-pixel corner refinement.
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    if (retLeft and retRight):
        SubPix = 1
        if SubPix:
            img1 = cv2.drawChessboardCorners(LeftImgNewgray.copy(), patternSize, cornersLeft, retLeft)
            #cv2.imwrite("./R1/"+R1namelist[exR1]+"LeftResult.png", img1)
            cornersSubPix1 = cv2.cornerSubPix(LeftImgNewgray, cornersLeft, (11, 11), (-1, -1), criteria)
            img1_ = cv2.drawChessboardCorners(LeftImgNewgray.copy(), patternSize, cornersSubPix1, retLeft)
            cv2.imwrite(R1draw + "/"+R1namelist[exR1]+"LeftSubPixResult.png", img1_)


            # img2 = cv2.drawChessboardCorners(RightImggray.copy(), patternSize, cornersRight, retLeft)
            # #cv2.imwrite("./R1/"+R1namelist[exR1]+"RightResult.png", img2)
            # cornersSubPix2 = cv2.cornerSubPix(RightImggray, cornersRight, (11, 11), (-1, -1), criteria)
            # img2_ = cv2.drawChessboardCorners(RightImggray.copy(), patternSize, cornersSubPix2, retRight)
            # #cv2.imwrite(R1draw + "/"+R1namelist[exR1]+"RightSubPixResult.png", img2_)

            # Accumulate the pairs for estimating the RGB camera's
            # intrinsics and distortion coefficients.
            objpoints.append(objp)
            imgpoints.append(cornersSubPix1)

        else:
            cornersSubPix1.extend(cornersLeft)
            cornersSubPix2.extend(cornersRight)
    else:
        # Detection failed in at least one of the two images; skip frame.
        print("棋盤格檢測有問題")
        print(R1namelist[exR1])
        continue

# Load the first RGB frame (flipped the same way as in the detection loop)
# just to obtain the image size expected by calibrateCamera.
rgbTemp = cv2.imread(rgbimglist[0], -1)
fliprgbTemp = np.flip(rgbTemp).copy()
fliprgbTemp = cv2.cvtColor(fliprgbTemp, cv2.COLOR_RGB2BGR)
gary = cv2.cvtColor(fliprgbTemp, cv2.COLOR_BGR2GRAY)


# Calibrate the RGB camera; gary.shape[::-1] is (width, height).
# Returns RMS error, camera matrix, distortion coefficients, and the
# per-view rotation/translation vectors.
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gary.shape[::-1], None, None)

print("mtx\n", mtx, "\n")
print("dist\n", dist, "\n")

# Undistort every RGB frame with the intrinsics estimated above.
# getOptimalNewCameraMatrix is loop-invariant (intrinsics and image size are
# fixed across frames), so compute it once instead of once per frame.
# NOTE(review): the size comes from LeftImgNewgray -- the last grayscale
# frame left over from the detection loop -- not from the image being
# undistorted; this only works if all RGB frames share that size. Confirm.
h,  w = LeftImgNewgray.shape[:2]
newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),0,(w,h)) # alpha=0: crop to all-valid pixels
for exR1 in range(len(rgbimglist)):
    print("去畸變:",exR1, "/", len(rgbimglist), "\n")

    rgb = cv2.imread(rgbimglist[exR1], -1)
    # Same flip + channel fix-up as in the detection loop.
    fliprgb = np.flip(rgb).copy()
    fliprgb = cv2.cvtColor(fliprgb, cv2.COLOR_RGB2BGR)

    dst = cv2.undistort(fliprgb, mtx, dist, None, newcameramtx)
    cv2.imwrite(R1undistort + '/'+R1namelist[exR1] + ".jpg", dst)

# Re-projection error: project the board points back through the estimated
# intrinsics/extrinsics and compare with the detected corners.
total_error = 0
for objp_i, imgp_i, rvec_i, tvec_i in zip(objpoints, imgpoints, rvecs, tvecs):
    projected, _ = cv2.projectPoints(objp_i, rvec_i, tvec_i, mtx, dist)
    total_error += cv2.norm(imgp_i, projected, cv2.NORM_L2) / len(projected)
print("total error: ", total_error/len(objpoints))


 

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章