Panorama Stitching of Two Images with OpenCV

This was a topic from a course assignment; I found the code online and added comments.

import numpy as np
import cv2


class Stitcher:

    def stitch(self, images, ratio=0.75, reprojThresh=4.0,
        showMatches=False):

        # detect keypoints and extract local invariant descriptors for each image
        (imageB, imageA) = images
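        # the first image in the list is used as the base (imageB here) and stays
        # on the left of the result; the second image (imageA) is warped onto it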
        (kpsA, featuresA) = self.detectAndDescribe(imageA)
        (kpsB, featuresB) = self.detectAndDescribe(imageB)

        print("關鍵點個數",len(kpsA),len(kpsB))

        # match the features between the two images
        M = self.matchKeypoints(kpsA, kpsB,
            featuresA, featuresB, ratio, reprojThresh)

        # if matching failed, there is no homography and nothing to stitch
        if M is None:
            return None

        # otherwise, stitch the images together
        (matches, H, status) = M
        # warp imageA onto imageB's plane using the homography matrix H
        result = cv2.warpPerspective(imageA, H,
            (imageA.shape[1] + imageB.shape[1], imageA.shape[0]))
        # e.g. with imageA.shape[1]=400, imageB.shape[1]=400, imageA.shape[0]=533
        # the canvas becomes result.shape[0]=533, result.shape[1]=800
        result[0:imageB.shape[0], 0:imageB.shape[1]] = imageB
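        # the base image simply overwrites the left part of the canvas,
        # so no blending is applied in the overlap region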


        # visualise the matches with connecting lines if requested
        if showMatches:
            vis = self.drawMatches(imageA, imageB, kpsA, kpsB, matches,
                status)
            return (result, vis)

        # otherwise return only the stitched result
        return result


    # receive an image, detect keypoints and extract local invariant features
    # uses Difference-of-Gaussian (DoG) keypoint detection and SIFT descriptors
    # detectAndCompute handles both keypoint detection and feature extraction
    # returns the keypoint coordinates and their descriptors
    def detectAndDescribe(self, image):
        # convert the image to grayscale
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # detect keypoints and compute SIFT descriptors
        # (in OpenCV >= 4.4 SIFT is in the main module as cv2.SIFT_create();
        # older builds need opencv-contrib-python for cv2.xfeatures2d)
        descriptor = cv2.xfeatures2d.SIFT_create()
        (kps, features) = descriptor.detectAndCompute(gray, None)
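        # kps is a list of cv2.KeyPoint objects; features is an N x 128 array of SIFT descriptors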

        # print(kps)  # the keypoint objects
        print(features.shape[0], features.shape[1])  # N descriptors, each a 128-dimensional vector

        # store the keypoint coordinates (kp.pt) in a NumPy array
        kps = np.float32([kp.pt for kp in kps])

        return (kps, features)


    # matchKeypoints takes the keypoints and feature vectors of both images,
    # along with David Lowe's ratio test threshold and the RANSAC reprojection threshold.
    def matchKeypoints(self, kpsA, kpsB, featuresA, featuresB,
        ratio, reprojThresh):

        matcher = cv2.DescriptorMatcher_create("BruteForce")
        rawMatches = matcher.knnMatch(featuresA, featuresB, 2)  # k-nearest-neighbour matching with k=2
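        # "BruteForce" compares descriptors with the L2 norm, which suits SIFT's float
        # descriptors; k=2 keeps the two nearest neighbours needed for the ratio test below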
        matches = []
        # for m in rawMatches:
        #     print(m[0].distance,m[1].distance)

        print("------------------------------")
        # loop over the raw matches
        for m in rawMatches:
            # Lowe's ratio test: keep only high-quality matches
            if len(m) == 2 and m[0].distance < m[1].distance * ratio:
                # store the index into the second image's features (trainIdx) and into the first image's (queryIdx)
                matches.append((m[0].trainIdx, m[0].queryIdx))

        # print(matches)
        # print(len(matches))

        # with more than 4 matches, build the two point sets as NumPy arrays and estimate the homography
        if len(matches) > 4:
            # construct the two sets of points
            ptsA = np.float32([kpsA[i] for (_, i) in matches])
            ptsB = np.float32([kpsB[i] for (i, _) in matches])

            # estimate the homography with RANSAC
            # H is the resulting 3x3 homography matrix
            # status flags which matches were kept as inliers
            (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,
                reprojThresh)
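            # H maps coordinates in imageA onto imageB's plane, which is why
            # stitch() can call warpPerspective(imageA, H, ...) directly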

            return (matches, H, status)


        return None


    # draw the matches between the two images as connecting lines
    def drawMatches(self, imageA, imageB, kpsA, kpsB, matches, status):
        # initialize the output visualization image
        (hA, wA) = imageA.shape[:2]
        (hB, wB) = imageB.shape[:2]
        # allocate a 3-channel canvas wide enough for both images side by side
        vis = np.zeros((max(hA, hB), wA + wB, 3), dtype="uint8")
        vis[0:hA, 0:wA] = imageA
        vis[0:hB, wA:] = imageB
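        # imageA occupies the left half of vis and imageB the right, so imageB's
        # keypoint x-coordinates are shifted by wA when the match lines are drawn;
        # only matches that RANSAC marked as inliers (status == 1) are drawn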


        for ((trainIdx, queryIdx), s) in zip(matches, status):
            if s == 1:
                ptA = (int(kpsA[queryIdx][0]), int(kpsA[queryIdx][1]))
                ptB = (int(kpsB[trainIdx][0]) + wA, int(kpsB[trainIdx][1]))
                cv2.line(vis, ptA, ptB, (0, 255, 0), 1)

        return vis


if __name__ == '__main__':
    # load the two input images
    imageA = cv2.imread('./hw2/building_02.jpg')
    imageB = cv2.imread('./hw2/building_03.jpg')

    # optionally resize the images to a common width (requires imutils)
    # imageA = imutils.resize(imageA, width=400)
    # imageB = imutils.resize(imageB, width=400)

    # showMatches=True also returns vis, a visualisation of the feature matches
    stitcher = Stitcher()
    (result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)


    # vis = imutils.resize(vis, width=800)
    # result = imutils.resize(result, width=800)

    cv2.imwrite('./vis1.jpg', vis)
    cv2.imwrite('./result.jpg', result)
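
For comparison, recent OpenCV builds also ship a high-level Stitcher class that hides the feature matching and homography estimation done by hand above, and additionally performs exposure compensation and seam blending in the overlap region. The following is only a minimal sketch, assuming OpenCV 4.x (where the factory function is cv2.Stitcher_create; OpenCV 3.x exposes cv2.createStitcher instead), the same input files, and an arbitrary output filename:

import cv2

imageA = cv2.imread('./hw2/building_02.jpg')
imageB = cv2.imread('./hw2/building_03.jpg')

# build a panorama stitcher; on OpenCV 3.x use cv2.createStitcher() instead
stitcher = cv2.Stitcher_create(cv2.Stitcher_PANORAMA)
(status, pano) = stitcher.stitch([imageA, imageB])

# a status of 0 (cv2.Stitcher_OK) means the panorama was built successfully
if status == 0:
    cv2.imwrite('./result_builtin.jpg', pano)
else:
    print("built-in stitching failed with status", status)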

References:

https://www.cnblogs.com/lqerio/p/11601951.html

https://blog.csdn.net/weixin_44072651/article/details/89262277

https://x-nicolo.github.io/2017/09/19/%E5%9F%BA%E4%BA%8EOpenCV%E5%85%A8%E6%99%AF%E6%8B%BC%E6%8E%A5%EF%BC%88Python%EF%BC%89/

https://blog.csdn.net/xull88619814/article/details/81587595

 
