Parking Slot Line Detection for Autonomous Driving, Part 2: Single Fisheye Camera Undistortion and Cropping (Python, C++, OpenCV)

My previous post, https://blog.csdn.net/xiao__run/article/details/89190112, covered stitching four fisheye camera images and detecting parking slot lines. That pipeline relies on calibrating, undistorting, and cropping the fisheye images, and in this post I walk through that work in detail. The workflow is as follows:
[Figure: workflow diagram]

1 Capturing the Images
First, capture chessboard images with OpenCV (roughly twenty of them). My chessboard has 11 x 8 inner corners with 60 mm squares; an example capture is shown after the sketch below.
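My capture script is not included here; below is a minimal sketch of what it can look like (the camera index, the snapshot/ output folder, and the key bindings are assumptions, not from my original setup): press s to save the current frame, q to quit.

import cv2
import os

os.makedirs('snapshot', exist_ok=True)      # output folder (assumed name)
cap = cv2.VideoCapture(0)                   # camera index is an assumption
count = 0
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    cv2.imshow('capture', frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('s'):                     # press 's' to save a calibration image
        cv2.imwrite('snapshot/%02d.jpg' % count, frame)
        count += 1
    elif key == ord('q'):                   # press 'q' to quit
        break
cap.release()
cv2.destroyAllWindows()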
[Figure: sample chessboard calibration image]

2 Calibrating the Images
Put the twenty-odd images into one folder and calibrate to obtain the intrinsic matrix and the distortion coefficients. The code below uses OpenCV 3 and Python:

import cv2
import numpy as np
import os
import glob
import matplotlib.pylab as plt

def get_K_and_D(checkerboard, imgsPath):

    CHECKERBOARD = checkerboard
    subpix_criteria = (cv2.TERM_CRITERIA_EPS+cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
    calibration_flags = cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC+cv2.fisheye.CALIB_CHECK_COND+cv2.fisheye.CALIB_FIX_SKEW
    objp = np.zeros((1, CHECKERBOARD[0]*CHECKERBOARD[1], 3), np.float32)
    objp[0, :, :2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)
    _img_shape = None
    objpoints = [] 
    imgpoints = [] 
    images = glob.glob(imgsPath + '/*.jpg')
    for fname in images:
        img = cv2.imread(fname)
        plt.figure('original image')
        plt.imshow(img[..., ::-1])
        if _img_shape is None:
            _img_shape = img.shape[:2]
        else:
            assert _img_shape == img.shape[:2], "All images must share the same size."
        
        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findChessboardCorners(gray, CHECKERBOARD,cv2.CALIB_CB_ADAPTIVE_THRESH+cv2.CALIB_CB_FAST_CHECK+cv2.CALIB_CB_NORMALIZE_IMAGE)
        # Only draw and record corners when the chessboard was actually found;
        # otherwise corners is None and plotting would fail
        if ret:
            for c in corners:
                plt.plot(c[0][0], c[0][1], 'r*')
            # plt.show()
            objpoints.append(objp)
            corners = cv2.cornerSubPix(gray, corners, (3, 3), (-1, -1), subpix_criteria)
            imgpoints.append(corners)

    N_OK = len(objpoints)
    K = np.zeros((3, 3))
    D = np.zeros((4, 1))
    rvecs = [np.zeros((1, 1, 3), dtype=np.float64) for i in range(N_OK)]
    tvecs = [np.zeros((1, 1, 3), dtype=np.float64) for i in range(N_OK)]
    rms, K, D, rvecs, tvecs = cv2.fisheye.calibrate(
        objpoints,
        imgpoints,
        gray.shape[::-1],
        K,
        D,
        rvecs,
        tvecs,
        calibration_flags,
        (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 1e-6)
    )
    DIM = _img_shape[::-1]
    print("Found " + str(N_OK) + " valid images for calibration")
    print("DIM=" + str(_img_shape[::-1]))
    print("K=np.array(" + str(K.tolist()) + ")")
    print("D=np.array(" + str(D.tolist()) + ")")
    
    return DIM, K, D

# Compute the intrinsic matrix and the distortion coefficients
'''
# checkerboard: number of inner corners of the chessboard
# imgsPath: path to the folder containing the fisheye images
'''
print(get_K_and_D((8,11), '/home/crj/Desktop/車位線檢測與跟蹤/fisheye/snapshot'))
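If you want to reuse K and D in the undistortion step below without copy-pasting the printed values, one option is to save them to disk; a small sketch (the file name fisheye_calib.npz is my own choice, not part of the original workflow):

# Save the calibration once ...
DIM, K, D = get_K_and_D((8, 11), '/home/crj/Desktop/車位線檢測與跟蹤/fisheye/snapshot')
np.savez('fisheye_calib.npz', DIM=DIM, K=K, D=D)

# ... and load it later in the undistortion script
calib = np.load('fisheye_calib.npz')
K, D = calib['K'], calib['D']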

3 Undistorting the Image
Image from the side camera before undistortion:
[Figure: side-camera fisheye image before undistortion]
The image can be undistorted with OpenCV 3's fisheye.undistortImage function. The code is as follows:

import cv2
import numpy as np
import matplotlib.pylab as plt

assert float(cv2.__version__.rsplit('.', 1)[0]) >= 3, 'OpenCV version 3 or newer required.'
'''
K = np.array([[689.21, 0., 1295.56],
              [0., 690.48, 942.17],
              [0., 0., 1.]])
'''
K = np.array([[271.40629437753046, 0.0, 329.829270787], [0.0, 271.00780962876536, 216.9546102473427], [0.0, 0.0, 1.0]])


# zero distortion coefficients work well for this image
# D = np.array([0., 0., 0., 0.])

D=np.array([-0.01515303912651211, 0.09148012072176424, -0.2080496385404225, 0.13211726263924817])

# use Knew to scale the output: shrinking fx and fy (here to 0.4x) zooms the
# undistorted image out, so more of the fisheye's field of view fits in the frame
Knew = K.copy()
Knew[(0,1), (0,1)] = 0.4 * Knew[(0,1), (0,1)]


img = cv2.imread('/home/lenovo/Desktop/a.jpg')
plt.figure('original image')
plt.imshow(img[...,::-1])
img_undistorted = cv2.fisheye.undistortImage(img, K, D=D, Knew=Knew)
cv2.imwrite('right_60_undistorted.jpg', img_undistorted)
plt.figure('undistorted image')
plt.imshow(img_undistorted[..., ::-1])
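fisheye.undistortImage rebuilds the undistortion maps on every call, which is fine for a single image but wasteful for video. Here is a sketch of the Python counterpart to the C++ remap approach later in this post (an addition for reference, not part of my original pipeline): compute the maps once with cv2.fisheye.initUndistortRectifyMap, then apply cv2.remap to each frame.

# Precompute the undistortion maps once (R = identity, output size = input size)
h, w = img.shape[:2]
map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), Knew, (w, h), cv2.CV_16SC2)

# Applying the maps per frame is then cheap; for this single image it gives the
# same result as fisheye.undistortImage above
img_undistorted_remap = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)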

4 Cropping the Image
Crop out the usable middle portion of the undistorted image. The code is as follows:

def cal_img(img):
    num_coor_row = []
    num_coor_col = []
    # count how many black pixels each row contains
    for i in range(img.shape[0]):
        num_row = 0
        for j in range(img.shape[1]):
            if sum(img[i][j]) == 0:
                # print([i, j])
                num_row = num_row + 1
        num_coor_row.append(num_row)
    # count how many black pixels each column contains
    for k in range(img.shape[1]):
        num_col = 0
        for l in range(img.shape[0]):
            if sum(img[l][k]) == 0:
                # print([l, k])
                num_col = num_col + 1
        num_coor_col.append(num_col)
    return num_coor_row, num_coor_col


def multi_index(alist, f):    # return the indices of every element in alist equal to f
    return [i for (i, v) in enumerate(alist) if v == f]


# remove the black border: delete the rows and columns of the undistorted image that are entirely black
num_coor_row, num_coor_col = cal_img(img_undistorted)
edge_index_row = multi_index(num_coor_row, img.shape[1])
edge_index_col = multi_index(num_coor_col, img.shape[0])
img_cropped = np.delete(img_undistorted, edge_index_row, axis=0)
img_cropped = np.delete(img_cropped, edge_index_col, axis=1)
plt.figure('cropped image')
plt.imshow(img_cropped[..., ::-1])


def crop_img(img_cropped):
    # extract the top and bottom regions of interest: a 100-pixel-wide strip
    # around the vertical centerline, split at the horizontal centerline
    top_roi = img_cropped[:int(img_cropped.shape[0] / 2),
                          int(img_cropped.shape[1] / 2) - 50:int(img_cropped.shape[1] / 2) + 50, :]
    bottom_roi = img_cropped[int(img_cropped.shape[0] / 2) + 1:,
                             int(img_cropped.shape[1] / 2) - 50:int(img_cropped.shape[1] / 2) + 50, :]
    # count black pixels per column
    _, top_num_coor_row = cal_img(top_roi)
    _, bottom_num_coor_row = cal_img(bottom_roi)
    # extract the left and right regions of interest: a 100-pixel-tall strip
    # around the horizontal centerline, split at the vertical centerline
    left_roi = img_cropped[int(img_cropped.shape[0] / 2) - 50:int(img_cropped.shape[0] / 2) + 50,
                           :int(img_cropped.shape[1] / 2), :]
    right_roi = img_cropped[int(img_cropped.shape[0] / 2) - 50:int(img_cropped.shape[0] / 2) + 50,
                            int(img_cropped.shape[1] / 2) + 1:, :]
    # count black pixels per row
    left_num_coor_col, _ = cal_img(left_roi)
    right_num_coor_col, _ = cal_img(right_roi)
    # plt.figure('roi')
    # plt.imshow(top_roi)
    '''
    print(top_num_coor_row)
    print(bottom_num_coor_row)
    print(left_num_coor_col)
    print(right_num_coor_col)
    '''
    # find the column/row indices with the most black pixels
    top_max_row = max(top_num_coor_row)
    top_max_row_index = multi_index(top_num_coor_row, top_max_row)
    bottom_max_row = max(bottom_num_coor_row)
    bottom_max_row_index = multi_index(bottom_num_coor_row, bottom_max_row)
    left_max_col = max(left_num_coor_col)
    left_max_col_index = multi_index(left_num_coor_col, left_max_col)
    right_max_col = max(right_num_coor_col)
    right_max_col_index = multi_index(right_num_coor_col, right_max_col)
    '''
    print(top_max_row_index)
    print(bottom_max_row_index)
    print(left_max_col_index)
    print(right_max_col_index)
    '''
    # take the median of those indices
    top_col_index = int(np.median(top_max_row_index))
    bottom_col_index = int(np.median(bottom_max_row_index))
    left_row_index = int(np.median(left_max_col_index))
    right_row_index = int(np.median(right_max_col_index))
    print(top_col_index, bottom_col_index, left_row_index, right_row_index)
    # take the columns at the median indices
    top_col = top_roi[:, top_col_index, :]
    bottom_col = bottom_roi[:, bottom_col_index, :]
    # the first non-zero pixel in this column is the top tangent point of the
    # rectangle inscribed in the undistorted image
    for top_c in range(top_roi.shape[0]):
        if sum(top_col[top_c]) != 0:
            top = top_c
            break
    # the first pixel followed by a zero pixel marks the bottom tangent point
    for bottom_c in range(bottom_roi.shape[0] - 1):
        if sum(bottom_col[bottom_c + 1]) == 0:
            bottom = bottom_c
            break
    # take the rows at the median indices
    left_row = left_roi[left_row_index, :, :]
    right_row = right_roi[right_row_index, :, :]
    # the first non-zero pixel in this row is the left tangent point
    for left_r in range(left_roi.shape[1]):
        if sum(left_row[left_r]) != 0:
            left = left_r
            break
    # the first pixel followed by a zero pixel marks the right tangent point
    for right_r in range(right_roi.shape[1] - 1):
        if sum(right_row[right_r + 1]) == 0:
            right = right_r
            break
    # convert the tangent points into indices of the full border-cropped image
    bottom = bottom + top_roi.shape[0]
    right = right + left_roi.shape[1]
    return top, bottom, left, right


top_f,bottom_f,left_f,right_f = crop_img(img_cropped)


print(top_f,bottom_f,left_f,right_f)
img_cropped_f = img_cropped[top_f:bottom_f, left_f:right_f, :]


plt.figure('final result')
plt.imshow(img_cropped_f[...,::-1])
plt.show()
cv2.imwrite('final_result.jpg', img_cropped_f)
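Note that the black region, and therefore the crop, depends only on K, D and Knew, not on the scene content, so the border indices and tangent points only need to be computed once. For a live camera you would then reuse them on every undistorted frame; a small sketch (the VideoCapture index is an assumption):

# Reuse the crop computed above on a new frame from the same camera
cap = cv2.VideoCapture(0)                   # camera index is an assumption
ok, frame = cap.read()
if ok:
    frame_und = cv2.fisheye.undistortImage(frame, K, D=D, Knew=Knew)
    frame_und = np.delete(frame_und, edge_index_row, axis=0)
    frame_und = np.delete(frame_und, edge_index_col, axis=1)
    frame_final = frame_und[top_f:bottom_f, left_f:right_f, :]
    cv2.imwrite('frame_final.jpg', frame_final)
cap.release()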

Finally, here is the cropped result on an image captured in a real scene:
[Figure: final cropped image]
5 Complete Project Code

#-*- coding:utf-8 -*-
import cv2
import numpy as np
import matplotlib.pylab as plt


assert float(cv2.__version__.rsplit('.', 1)[0]) >= 3, 'OpenCV version 3 or newer required.'
'''
K = np.array([[689.21, 0., 1295.56],
              [0., 690.48, 942.17],
              [0., 0., 1.]])
'''
K = np.array([[271.40629437753046, 0.0, 329.829270787], [0.0, 271.00780962876536, 216.9546102473427], [0.0, 0.0, 1.0]])


# zero distortion coefficients work well for this image
# D = np.array([0., 0., 0., 0.])

D=np.array([-0.01515303912651211, 0.09148012072176424, -0.2080496385404225, 0.13211726263924817])

# use Knew to scale the output: shrinking fx and fy (here to 0.4x) zooms the
# undistorted image out, so more of the fisheye's field of view fits in the frame
Knew = K.copy()
Knew[(0,1), (0,1)] = 0.4 * Knew[(0,1), (0,1)]


img = cv2.imread('/home/lenovo/Desktop/a.jpg')
plt.figure('original image')
plt.imshow(img[...,::-1])
img_undistorted = cv2.fisheye.undistortImage(img, K, D=D, Knew=Knew)
cv2.imwrite('right_60_undistorted.jpg', img_undistorted)
plt.figure('undistorted image')
plt.imshow(img_undistorted[..., ::-1])
# plt.show()
def cal_img(img):
    num_coor_row = []
    num_coor_col = []
    # count how many black pixels each row contains
    for i in range(img.shape[0]):
        num_row = 0
        for j in range(img.shape[1]):
            if sum(img[i][j]) == 0:
                # print([i, j])
                num_row = num_row + 1
        num_coor_row.append(num_row)
    # count how many black pixels each column contains
    for k in range(img.shape[1]):
        num_col = 0
        for l in range(img.shape[0]):
            if sum(img[l][k]) == 0:
                # print([l, k])
                num_col = num_col + 1
        num_coor_col.append(num_col)
    return num_coor_row, num_coor_col


def multi_index(alist, f):    # return the indices of every element in alist equal to f
    return [i for (i, v) in enumerate(alist) if v == f]


# remove the black border: delete the rows and columns of the undistorted image that are entirely black
num_coor_row, num_coor_col = cal_img(img_undistorted)
edge_index_row = multi_index(num_coor_row, img.shape[1])
edge_index_col = multi_index(num_coor_col, img.shape[0])
img_cropped = np.delete(img_undistorted, edge_index_row, axis=0)
img_cropped = np.delete(img_cropped, edge_index_col, axis=1)
plt.figure('cropped image')
plt.imshow(img_cropped[..., ::-1])


def crop_img(img_cropped):
    # extract the top and bottom regions of interest: a 100-pixel-wide strip
    # around the vertical centerline, split at the horizontal centerline
    top_roi = img_cropped[:int(img_cropped.shape[0] / 2),
                          int(img_cropped.shape[1] / 2) - 50:int(img_cropped.shape[1] / 2) + 50, :]
    bottom_roi = img_cropped[int(img_cropped.shape[0] / 2) + 1:,
                             int(img_cropped.shape[1] / 2) - 50:int(img_cropped.shape[1] / 2) + 50, :]
    # count black pixels per column
    _, top_num_coor_row = cal_img(top_roi)
    _, bottom_num_coor_row = cal_img(bottom_roi)
    # extract the left and right regions of interest: a 100-pixel-tall strip
    # around the horizontal centerline, split at the vertical centerline
    left_roi = img_cropped[int(img_cropped.shape[0] / 2) - 50:int(img_cropped.shape[0] / 2) + 50,
                           :int(img_cropped.shape[1] / 2), :]
    right_roi = img_cropped[int(img_cropped.shape[0] / 2) - 50:int(img_cropped.shape[0] / 2) + 50,
                            int(img_cropped.shape[1] / 2) + 1:, :]
    # count black pixels per row
    left_num_coor_col, _ = cal_img(left_roi)
    right_num_coor_col, _ = cal_img(right_roi)
    # plt.figure('roi')
    # plt.imshow(top_roi)
    '''
    print(top_num_coor_row)
    print(bottom_num_coor_row)
    print(left_num_coor_col)
    print(right_num_coor_col)
    '''
    # find the column/row indices with the most black pixels
    top_max_row = max(top_num_coor_row)
    top_max_row_index = multi_index(top_num_coor_row, top_max_row)
    bottom_max_row = max(bottom_num_coor_row)
    bottom_max_row_index = multi_index(bottom_num_coor_row, bottom_max_row)
    left_max_col = max(left_num_coor_col)
    left_max_col_index = multi_index(left_num_coor_col, left_max_col)
    right_max_col = max(right_num_coor_col)
    right_max_col_index = multi_index(right_num_coor_col, right_max_col)
    '''
    print(top_max_row_index)
    print(bottom_max_row_index)
    print(left_max_col_index)
    print(right_max_col_index)
    '''
    # take the median of those indices
    top_col_index = int(np.median(top_max_row_index))
    bottom_col_index = int(np.median(bottom_max_row_index))
    left_row_index = int(np.median(left_max_col_index))
    right_row_index = int(np.median(right_max_col_index))
    print(top_col_index, bottom_col_index, left_row_index, right_row_index)
    # take the columns at the median indices
    top_col = top_roi[:, top_col_index, :]
    bottom_col = bottom_roi[:, bottom_col_index, :]
    # the first non-zero pixel in this column is the top tangent point of the
    # rectangle inscribed in the undistorted image
    for top_c in range(top_roi.shape[0]):
        if sum(top_col[top_c]) != 0:
            top = top_c
            break
    # the first pixel followed by a zero pixel marks the bottom tangent point
    for bottom_c in range(bottom_roi.shape[0] - 1):
        if sum(bottom_col[bottom_c + 1]) == 0:
            bottom = bottom_c
            break
    # take the rows at the median indices
    left_row = left_roi[left_row_index, :, :]
    right_row = right_roi[right_row_index, :, :]
    # the first non-zero pixel in this row is the left tangent point
    for left_r in range(left_roi.shape[1]):
        if sum(left_row[left_r]) != 0:
            left = left_r
            break
    # the first pixel followed by a zero pixel marks the right tangent point
    for right_r in range(right_roi.shape[1] - 1):
        if sum(right_row[right_r + 1]) == 0:
            right = right_r
            break
    # convert the tangent points into indices of the full border-cropped image
    bottom = bottom + top_roi.shape[0]
    right = right + left_roi.shape[1]
    return top, bottom, left, right


top_f,bottom_f,left_f,right_f = crop_img(img_cropped)


print(top_f,bottom_f,left_f,right_f)
img_cropped_f = img_cropped[top_f:bottom_f, left_f:right_f, :]


plt.figure('final result')
plt.imshow(img_cropped_f[...,::-1])
plt.show()
cv2.imwrite('final_result.jpg', img_cropped_f)

Two forms of the C++ undistortion code
remap and cv::fisheye::undistortImage behave slightly differently. To make parameter tuning easier, I added trackbars for adjusting the parameters. The complete C++ code is given below.

//
// Created by lenovo on 19-5-16.
//
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
using namespace fisheye;
Mat  image;
Mat cameraMatrix1;
cv::Mat distCoeffs1(4, 1, cv::DataType<double>::type);
Mat UndistortImg;
Mat new_intrinsic_mat;    // Mat new_intrinsic_mat(3, 3, CV_64FC1, Scalar(0)) also works; mind the data type
int  fx_threshold_min=80;
int  fy_threshold_min=80;
string winname="UndistortImg";
// Callback invoked when a trackbar value changes
void onChangeTrackBar(int, void* );

int main()
{
    string bar_name_1="fx";
    string bar_name_2="fy";
    image=imread("./2.bmp");
    imwrite("image.jpg",image);
    Mat dst;
    cameraMatrix1=Mat::eye(3, 3, cv::DataType<double>::type);



    distCoeffs1.at<double>(0,0) = 0.061439051;
    distCoeffs1.at<double>(1,0) = 0.03187556;
    distCoeffs1.at<double>(2,0) = -0.00726151;
    distCoeffs1.at<double>(3,0) = -0.00111799;
    //distCoeffs1.at<double>(4,0) = -0.00678974;

    // Taken from Mastering OpenCV
    double fx = 328.61652824;
    double fy = 328.56512516;
    double cx = 629.80551148;
    double cy = 340.5442837;

    cameraMatrix1.at<double>(0, 0) = fx;
    cameraMatrix1.at<double>(1, 1) = fy;
    cameraMatrix1.at<double>(0, 2) = cx;
    cameraMatrix1.at<double>(1, 2) = cy;



    std::cout << "After calib cameraMatrix --- 1: " << cameraMatrix1 << std::endl;
    std::cout << "After calib distCoeffs: --- 1" << distCoeffs1 << std::endl;
    ////////////////////////////////////////////////////////////////////
    /////  Fisheye camera distortion correction
    ////////////////////////////////////////////////////////////////////
    Size image_size(1280,720);
    Mat mapx = Mat(image_size, CV_32FC1);
    Mat mapy = Mat(image_size, CV_32FC1);
    Mat R = Mat::eye(3, 3, CV_32F);
    Mat new_cameraMatrix=Mat::zeros(3,3,CV_32F);
    new_cameraMatrix=getOptimalNewCameraMatrix(cameraMatrix1, distCoeffs1, image_size, 1, image_size, 0);
    cout<<"new cameraMatrix: "<<new_cameraMatrix<<endl;
   // cv::fisheye::initUndistortRectifyMap()

    fisheye::initUndistortRectifyMap(cameraMatrix1,distCoeffs1, R,new_cameraMatrix,image_size,CV_16SC2,mapx,mapy);
    //cout<<mapx;
    double t1 = (double)getTickCount();
    remap(image, dst, mapx, mapy, INTER_LINEAR);
    ////////////////////////////////////////////////////////////////////
    /////  Fisheye camera distortion correction
    ////////////////////////////////////////////////////////////////////
    t1 = (double)getTickCount() - t1;
    std::cout << "compute time :" << t1*1000.0 / cv::getTickFrequency() << " ms \n";
    imshow("result",dst);
    imwrite("result.jpg",dst);

// Increasing (decreasing) fx and fy shrinks (enlarges) the field of view: more (less) of the image
// is cropped away, but details become sharper (blurrier). This is key: new_intrinsic_mat determines
// how much of the undistorted image is kept.



    //float max_lowThreshold=1.0;
// trackbars to adjust the field of view of the undistorted output
    namedWindow(winname,WINDOW_AUTOSIZE);
    createTrackbar(bar_name_1, winname, &fx_threshold_min, 100,onChangeTrackBar,0);
    createTrackbar(bar_name_2, winname, &fy_threshold_min, 100,onChangeTrackBar,0);
    //onChangeTrackBar(0,0);


    waitKey(0);
}


void onChangeTrackBar(int pos, void* )
{
    cameraMatrix1.copyTo(new_intrinsic_mat);
    new_intrinsic_mat.at<double>(0, 0) *= ((float)fx_threshold_min)/100.0;      // mind the data type, this is important
    new_intrinsic_mat.at<double>(1, 1) *= ((float)fy_threshold_min)/100.0;

    // Set the principal point to the center of the output image. fisheye::undistortImage
    // produces an output the same size as the input, so use the input image dimensions here
    // (UndistortImg is still empty the first time this callback runs).
    new_intrinsic_mat.at<double>(0, 2) = 0.5 * image.cols;
    new_intrinsic_mat.at<double>(1, 2) = 0.5 * image.rows;


    cv::fisheye::undistortImage(image,UndistortImg,cameraMatrix1,distCoeffs1,new_intrinsic_mat);
    cv::imshow("UndistortImg", UndistortImg);
}

Finally, let's look at the result:
[Figure: C++ undistortion result]
Combined with parking slot line detection and localization, this can serve as the vision component of an automatic parking project.
