基於surf特徵點匹配的圖像拼接:
1.讀取要拼接的左右圖像;
2.提取左右圖像的surf特徵點;
3.利用knn對左右圖像的特徵點進行匹配;
4.利用匹配的特徵對,求出單應矩陣;
5.利用單應矩陣對右圖像進行透視變換;
6.對左右圖像進行拼接。
具體如下代碼:
import cv2
import numpy as np
def warp_corner(H, src):
    '''
    Apply the homography H to the four corners of image src.

    :param H: 3x3 homography matrix (numpy array)
    :param src: image to be perspective-warped; only its shape is read
    :return: list of four 2-element arrays with the warped pixel
             coordinates, in the order top-left, bottom-left,
             top-right, bottom-right
    '''
    h, w = src.shape[0], src.shape[1]
    # Homogeneous coordinates of the source corners, one per row.
    corners = np.array([
        [0, 0, 1],  # top-left
        [0, h, 1],  # bottom-left
        [w, 0, 1],  # top-right
        [w, h, 1],  # bottom-right
    ])
    warp_points = []
    for corner in corners:
        projected = H.dot(corner)
        # Normalize by the homogeneous coordinate to get pixel coordinates.
        warp_points.append(projected[0:2] / projected[2])
    return warp_points
def optim_mask(mask, warp_point):
    '''
    Feather the zero region of a blend mask with a horizontal linear ramp.

    :param mask: float blend mask, modified in place and returned
    :param warp_point: warped corners of the right image; only the x
                       coordinates of the first two (left-side) corners
                       are read
    :return: the feathered mask (alpha decreases linearly from the
             leftmost warped corner toward the right edge)
    '''
    leftmost_x = min(warp_point[0][0], warp_point[1][0])
    margin = mask.shape[1] - leftmost_x
    # Only pixels where the mask is exactly zero are re-weighted.
    rows, cols = np.nonzero(mask == 0)
    mask[rows, cols] = (margin - (cols - leftmost_x)) / margin
    return mask
def Seam_Left_Right(left, imagewarp, H, warp_point, with_optim_mask=False):
    '''
    Blend the left image into the left part of the warped right image.

    :param left: left source image (BGR, uint8)
    :param imagewarp: right image after perspective warping (BGR);
                      modified in place and also returned
    :param H: homography matrix (unused here; kept for interface
              compatibility with callers)
    :param warp_point: warped corners of the right image, used only when
                       with_optim_mask is True
    :param with_optim_mask: if True, feather the seam via optim_mask for
                            a smoother transition
    :return: the stitched panorama as uint8
    '''
    w = left.shape[1]
    overlap = imagewarp[:, 0:w]
    gray = cv2.cvtColor(overlap, cv2.COLOR_BGR2GRAY)
    # Mask is 1 where the warped image has no content (keep the left
    # pixel) and 0 where it does (keep the warped pixel).  This replaces
    # the original three-step construction, whose `mask[mask == 0] = 0`
    # line was a no-op.
    mask = np.float32(gray == 0)
    if with_optim_mask:
        mask = optim_mask(mask, warp_point)
    mask_rgb = np.stack([mask, mask, mask], axis=2)
    blended = left * mask_rgb + overlap * (1 - mask_rgb)
    imagewarp[:, 0:w] = blended
    return np.uint8(imagewarp)
def main(left_path='/home/simple/convid_withornot_mask/recogizition_withornot_mask/ttt/b_t.jpg',
         right_path='/home/simple/convid_withornot_mask/recogizition_withornot_mask/ttt/b.jpg'):
    '''
    Stitch two overlapping images into a panorama and display the result.

    :param left_path: path of the left image (defaults preserve the
                      original hard-coded path)
    :param right_path: path of the right image
    '''
    left = cv2.imread(left_path)
    left_gray = cv2.cvtColor(left, cv2.COLOR_BGR2GRAY)
    right = cv2.imread(right_path)
    right_gray = cv2.cvtColor(right, cv2.COLOR_BGR2GRAY)
    # Extract SURF keypoints and descriptors of both images.
    detector = cv2.xfeatures2d_SURF.create(hessianThreshold=400)
    left_kps, left_dess = detector.detectAndCompute(left_gray, None)
    right_kps, right_dess = detector.detectAndCompute(right_gray, None)
    # Match descriptors with FLANN-based 2-nearest-neighbour matching.
    matcher = cv2.FlannBasedMatcher_create()
    knn_matchers = matcher.knnMatch(left_dess, right_dess, 2)
    # Lowe's ratio test: keep only clearly unambiguous matches.
    good_keypoints = [m for m, n in knn_matchers if m.distance < 0.5 * n.distance]
    # BUG FIX: the match-visualization canvas was sized with shape[0]
    # (heights) for its width; the width must be the sum of the widths.
    outimg = np.zeros(shape=(max(left.shape[0], right.shape[0]),
                             left.shape[1] + right.shape[1], 3), dtype=np.uint8)
    cv2.drawMatches(left, left_kps, right, right_kps, good_keypoints, outimg)
    # cv2.imshow('hks', outimg)
    # cv2.waitKey(0)
    left_points = np.float32([left_kps[m.queryIdx].pt for m in good_keypoints])
    right_points = np.float32([right_kps[m.trainIdx].pt for m in good_keypoints])
    # Estimate the homography mapping the right image onto the left;
    # RANSAC rejects outlier pairs that survived the ratio test.
    H, _ = cv2.findHomography(right_points, left_points, cv2.RANSAC)
    # Warped positions of the right image's four corners.
    warp_point = warp_corner(H, right)
    # Perspective-warp the right image onto a canvas wide enough for both.
    imagewarp = cv2.warpPerspective(right, H,
                                    (left.shape[1] + right.shape[1], left.shape[0]))
    # Blend left and warped right into the final panorama.
    image_seam_optim = Seam_Left_Right(left, imagewarp, H, warp_point,
                                       with_optim_mask=True)
    cv2.namedWindow('image_seam_optim', cv2.WINDOW_NORMAL)
    cv2.imshow('image_seam_optim', image_seam_optim)
    cv2.waitKey(0)


if __name__ == '__main__':
    main()
其中,圖像拼接時對接縫處的過渡做了優化(線性羽化融合),具體實現參考上述代碼。
效果如下:
左圖
右圖
拼接的圖像
優化後的圖像拼接