傻瓜版,拿個雙目攝像頭,標定,得到數據,填進去,調調參數。
兩個部分,一個是相機的參數設置,一個是測距
運用 MATLAB 裏面的 Stereo Camera Calibrator APP 進行標定:先拍 30 多張標定板照片,然後在 APP 的重投影誤差柱狀圖上拉線,剔除誤差較大的照片,留 10–20 張進行計算,最後把得到的雙目攝像機參數填到 camera_configs.py 裏面
camera_configs.py
"""camera_configs.py — stereo calibration data and rectification maps.

Fill in the values measured with MATLAB's Stereo Camera Calibrator app.
Importing this module runs cv2.stereoRectify once and precomputes the
undistort/rectify maps that depth.py applies to every frame.
"""
import cv2  # BUG FIX: modern opencv-python exposes the module directly; `from cv2 import cv2` fails on recent versions
import numpy as np

# Left camera intrinsics [[fx, skew, cx], [0, fy, cy], [0, 0, 1]].
left_camera_matrix = np.array([[392.9351, 0.1468, 310.0016],
                               [0, 393.6869, 279.4163],
                               [0., 0., 1.]])
# Left distortion coefficients (k1, k2, p1, p2, k3).
left_distortion = np.array([[0.0396, -0.0643, 0.0038, 0.0013, 0.0370]])

# Right camera intrinsics.
right_camera_matrix = np.array([[393.0777, 0.4140, 344.1193],
                                [0, 394.0348, 242.2463],
                                [0, 0, 1.0000]])
# Right distortion coefficients (k1, k2, p1, p2, k3).
right_distortion = np.array([[0.0503, -0.0820, 0.0045, 0.0014, 0.0571]])

# Rotation of the right camera relative to the left.
# np.array instead of the deprecated np.matrix (same values; stereoRectify
# accepts any array-like).
R = np.array([
    [1.0000, 0.0014, 0.0033],
    [-0.0014, 1.0000, 0.0020],
    [-0.0033, -0.0020, 1.0000],
])
# Translation between the two cameras.
T = np.array([-18.1454, -0.3016, 0.4750])

size = (640, 480)  # image size fed to the rectification maps

# Stereo rectification: per-camera rotation (R1, R2) and projection (P1, P2)
# matrices, the disparity-to-depth matrix Q, and the valid-pixel ROIs that
# depth.py hands to StereoBM.
R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(left_camera_matrix, left_distortion,
                                                                  right_camera_matrix, right_distortion, size, R,
                                                                  T)
# Precompute the per-pixel remap tables applied to every captured frame.
left_map1, left_map2 = cv2.initUndistortRectifyMap(left_camera_matrix, left_distortion, R1, P1, size, cv2.CV_16SC2)
right_map1, right_map2 = cv2.initUndistortRectifyMap(right_camera_matrix, right_distortion, R2, P2, size, cv2.CV_16SC2)
depth.py
# depth.py — live disparity map; click the "depth" window to print the
# real-world distance of the clicked pixel.
# Once it works, recalibrate with your own cameras for trustworthy numbers.
import cv2  # BUG FIX: `from cv2 import cv2` fails on recent opencv-python versions
import numpy as np

import camera_configs  # stereo calibration data + precomputed rectification maps

# Camera device IDs differ between machines; swap 0/1 if left/right are reversed.
cam1 = cv2.VideoCapture(1)
cam2 = cv2.VideoCapture(0)
# For a single dual-lens device that outputs one wide frame, open one capture
# and split each frame instead (see the commented split in the main loop):
# cam1 = cv2.VideoCapture(1 + cv2.CAP_DSHOW)
# cam1.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)   # combined width of both views
# cam1.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

# Windows: "depth" shows the disparity map (click it to measure distance),
# "config" holds the StereoBM tuning trackbars.
# NOTE(review): the original also called cv2.moveWindow on "left"/"right"
# windows that are never created, and registered the "UniquenessRatio"
# trackbar twice; both leftovers removed here.
cv2.namedWindow("depth")
cv2.namedWindow("config", cv2.WINDOW_NORMAL)
cv2.createTrackbar("num", "config", 0, 60, lambda x: None)           # numDisparities = 16 * num
cv2.createTrackbar("blockSize", "config", 30, 255, lambda x: None)   # forced odd and >= 5 in the loop
cv2.createTrackbar("SpeckleWindowSize", "config", 1, 10, lambda x: None)
cv2.createTrackbar("SpeckleRange", "config", 1, 255, lambda x: None)
cv2.createTrackbar("UniquenessRatio", "config", 1, 255, lambda x: None)
cv2.createTrackbar("TextureThreshold", "config", 1, 255, lambda x: None)
cv2.createTrackbar("MinDisparity", "config", 0, 255, lambda x: None)
cv2.createTrackbar("PreFilterCap", "config", 1, 65, lambda x: None)  # must stay odd when tuning
cv2.createTrackbar("MaxDiff", "config", 1, 400, lambda x: None)
# Mouse handler: print the measured distance of the clicked pixel.
def callbackFunc(e, x, y, f, p):
    """On left click, print the reprojected 3-D point at pixel (x, y) and,
    when the depth value looks plausible (< 3000), its absolute distance.

    Reads the module-global ``threeD`` produced by reprojectImageTo3D in
    the main loop.
    """
    if e != cv2.EVENT_LBUTTONDOWN:
        return
    point = threeD[y][x]
    print(point)
    distance = abs(point[2])
    if distance < 3000:
        print("當前距離:" + str(distance))
    else:
        print("當前距離過大或請點擊色塊的位置")
# Register the click-to-measure handler on the depth window.
cv2.setMouseCallback("depth", callbackFunc, None)

# FPS bookkeeping.  Do not use the capture device's reported FPS property:
# it returns the camera's maximum frame rate, not the rate actually achieved.
frame_rate_calc = 1
freq = cv2.getTickFrequency()
font = cv2.FONT_HERSHEY_SIMPLEX
# NOTE(review): removed unused `imageCount = 1` (never referenced in this file).
while True:
    t1 = cv2.getTickCount()

    # BUG FIX: the original wrote cam2's status back into ret1
    # (`ret1, frame2 = cam2.read()`), so a failed left-camera read was masked.
    ret1, frame1 = cam1.read()
    ret2, frame2 = cam2.read()
    if not (ret1 and ret2):
        print("camera is not connected!")
        break

    # If your device outputs both views joined into one wide frame, split it:
    # frame1 = frame[0:480, 0:640]
    # frame2 = frame[0:480, 640:1280]

    ####### depth measurement: start #######
    # Rectify both views (removes lens distortion and row-aligns the pair)
    # using the maps precomputed in camera_configs.
    img1_rectified = cv2.remap(frame1, camera_configs.left_map1, camera_configs.left_map2, cv2.INTER_LINEAR,
                               cv2.BORDER_CONSTANT)
    img2_rectified = cv2.remap(frame2, camera_configs.right_map1, camera_configs.right_map2, cv2.INTER_LINEAR,
                               cv2.BORDER_CONSTANT)
    # Some OpenCV builds return remap() output flipped; uncomment to flip back:
    # img1_rectified = cv2.flip(img1_rectified, -1)
    # img2_rectified = cv2.flip(img2_rectified, -1)

    # StereoBM only accepts single-channel images, so convert to grayscale.
    # (OpenCV images are BGR, not RGB.)
    imgL = cv2.cvtColor(img1_rectified, cv2.COLOR_BGR2GRAY)
    imgR = cv2.cvtColor(img2_rectified, cv2.COLOR_BGR2GRAY)

    # Sanity view: draw horizontal epipolar lines across the rectified pair.
    # If calibration is correct, corresponding points lie on the same line.
    out = np.hstack((img1_rectified, img2_rectified))
    for i in range(0, out.shape[0], 30):
        cv2.line(out, (0, i), (out.shape[1], i), (0, 255, 0), 1)
    cv2.imshow("epipolar lines", out)

    # Read the current matcher parameters from the trackbars.  StereoBM is
    # very sensitive: tune num and blockSize first, then the rest.
    num = cv2.getTrackbarPos("num", "config")
    SpeckleWindowSize = cv2.getTrackbarPos("SpeckleWindowSize", "config")
    SpeckleRange = cv2.getTrackbarPos("SpeckleRange", "config")
    blockSize = cv2.getTrackbarPos("blockSize", "config")
    UniquenessRatio = cv2.getTrackbarPos("UniquenessRatio", "config")
    TextureThreshold = cv2.getTrackbarPos("TextureThreshold", "config")
    MinDisparity = cv2.getTrackbarPos("MinDisparity", "config")
    PreFilterCap = cv2.getTrackbarPos("PreFilterCap", "config")
    MaxDiff = cv2.getTrackbarPos("MaxDiff", "config")
    # StereoBM requires an odd blockSize >= 5.
    if blockSize % 2 == 0:
        blockSize += 1
    if blockSize < 5:
        blockSize = 5

    # Block-matching disparity.  SGBM would be more accurate but slower.
    stereo = cv2.StereoBM_create(
        numDisparities=16 * num,
        blockSize=blockSize,
    )
    stereo.setROI1(camera_configs.validPixROI1)
    stereo.setROI2(camera_configs.validPixROI2)
    stereo.setPreFilterCap(PreFilterCap)
    stereo.setMinDisparity(MinDisparity)
    stereo.setTextureThreshold(TextureThreshold)
    stereo.setUniquenessRatio(UniquenessRatio)
    stereo.setSpeckleWindowSize(SpeckleWindowSize)
    stereo.setSpeckleRange(SpeckleRange)
    stereo.setDisp12MaxDiff(MaxDiff)

    # Raw disparity (fixed-point, scaled by 16 — hence the /16 below).
    disparity = stereo.compute(imgL, imgR)
    # 8-bit visualisation of the disparity map.
    disp = cv2.normalize(disparity, disparity, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
    # Reproject to 3-D; threeD[y][x][2] is the depth read by the mouse callback.
    threeD = cv2.reprojectImageTo3D(disparity.astype(np.float32) / 16., camera_configs.Q)
    # Pseudo-colour rendering of the depth map (cosmetic only).
    fakeColorDepth = cv2.applyColorMap(disp, cv2.COLORMAP_JET)
    cv2.putText(frame1, "FPS: {0:.2f}".format(frame_rate_calc), (30, 50), font, 1, (255, 255, 0), 2, cv2.LINE_AA)

    interrupt = cv2.waitKey(10)
    if interrupt & 0xFF == 27:  # ESC quits
        break
    if interrupt & 0xFF == ord('s'):  # 's' saves snapshots of every stage
        cv2.imwrite('images/left' + '.jpg', frame1)
        cv2.imwrite('images/right' + '.jpg', frame2)
        cv2.imwrite('images/img1_rectified' + '.jpg', img1_rectified)  # check orientation here
        cv2.imwrite('images/img2_rectified' + '.jpg', img2_rectified)
        cv2.imwrite('images/depth' + '.jpg', disp)
        cv2.imwrite('images/fakeColor' + '.jpg', fakeColorDepth)
        # BUG FIX: path was misspelled 'mages/epipolar', saving to the wrong place.
        cv2.imwrite('images/epipolar' + '.jpg', out)
    ####### depth measurement: end #######

    # Display all intermediate images.
    # cv2.imshow("frame", frame)                  # raw joined frame (single-device setups)
    cv2.imshow("frame1", frame1)                  # raw left view
    cv2.imshow("frame2", frame2)                  # raw right view
    cv2.imshow("img1_rectified", img1_rectified)  # rectified left
    cv2.imshow("img2_rectified", img2_rectified)  # rectified right
    cv2.imshow("depth", disp)                     # disparity map + click-to-measure
    cv2.imshow("fakeColor", fakeColorDepth)       # pseudo-colour depth (cosmetic)

    # Optional smoothing of the disparity image (lowers FPS).
    img_medianBlur = cv2.medianBlur(disp, 25)
    img_medianBlur_fakeColorDepth = cv2.applyColorMap(img_medianBlur, cv2.COLORMAP_JET)
    img_GaussianBlur = cv2.GaussianBlur(disp, (7, 7), 0)
    img_Blur = cv2.blur(disp, (5, 5))
    cv2.imshow("img_GaussianBlur", img_GaussianBlur)
    cv2.imshow("img_medianBlur_fakeColorDepth", img_medianBlur_fakeColorDepth)
    cv2.imshow("img_Blur", img_Blur)
    cv2.imshow("img_medianBlur", img_medianBlur)

    t2 = cv2.getTickCount()
    time1 = (t2 - t1) / freq
    frame_rate_calc = 1 / time1

# BUG FIX: release both capture devices (the original leaked cam2).
cam1.release()
cam2.release()
cv2.destroyAllWindows()
如何判斷數據有沒有填對
看矯正圖,每根極線上對應的點是不是一樣的。
可能問題:1.攝像頭左右標反了、
2. 如有些opencv版本 remap()的圖是反的 這裏對角翻轉一下
# img1_rectified = cv2.flip(img1_rectified, -1)
# img2_rectified = cv2.flip(img2_rectified, -1)
3.攝像頭輸出的是一張圖還是兩張圖,這裏的左右兩個攝像頭的圖像是連在一起的,所以進行一下分割
# frame1 = frame[0:480, 0:640]
# frame2 = frame[0:480, 640:1280]
我的是兩張圖,所以這一段註釋了
參數自己看着調,先調前面兩個,前面兩個出不了距離後面調了也沒用
效果:
在depth窗口點擊進行測距
先這麼寫着,以後有時間再寫具體的