opencv-python implementations of SIFT, ORB, ASIFT, and DenseSIFT

1. ORB

# coding:utf-8
import cv2
import numpy as np

from contextlib import contextmanager
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency()

@contextmanager
def Timer(msg):
    print(msg, '...',)
    start = clock()
    try:
        yield
    finally:
        print("%.2f ms" % ((clock()-start)*1000))

def filter_matches(kp1, kp2, matches, ratio = 0.75):
    mkp1, mkp2 = [], []
    for m in matches:
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            m = m[0]
            mkp1.append( kp1[m.queryIdx] )
            mkp2.append( kp2[m.trainIdx] )
    p1 = np.float32([kp.pt for kp in mkp1])
    p2 = np.float32([kp.pt for kp in mkp2])
    kp_pairs = zip(mkp1, mkp2)
    return p1, p2, list(kp_pairs)


with Timer('matching'):
    debug=1
    if debug==1:
        path1 = "./1_IMG_Texture_8Bit.png"
        path2 = "./3_IMG_Texture_8Bit.png"
    else:
        path1 = "./OtherSampleFrame_IMG_Texture_8Bit_48.png"
        path2 = "./OtherSampleFrame_IMG_Texture_8Bit_52.png"

    img1 = cv2.imread(path1, cv2.IMREAD_GRAYSCALE)
    img2 = cv2.imread(path2, cv2.IMREAD_GRAYSCALE)
    orb = cv2.ORB_create(500000)  # very large nfeatures cap, effectively unlimited
    keypoint1, desc1 = orb.detectAndCompute(img1, None)
    keypoint2, desc2 = orb.detectAndCompute(img2, None)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
    matches = bf.knnMatch(desc1, desc2, k=2)


    # whether to cull matches with the ratio test
    if 1:
        p1, p2, kp_pairs = filter_matches(keypoint1, keypoint2, matches)
    else:
        mkp1, mkp2 = [], []
        for m in matches:
            m = m[0]
            mkp1.append( keypoint1[m.queryIdx] )
            mkp2.append( keypoint2[m.trainIdx] )
        p1 = np.float32([kp.pt for kp in mkp1])
        p2 = np.float32([kp.pt for kp in mkp2])
        kp_pairs = zip(mkp1, mkp2)
        kp_pairs = list(kp_pairs)

    if len(p1) >= 4:
        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 20.0)
        print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
        kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]

        img2temp=img2.copy()
        img2temp = cv2.cvtColor(img2temp, cv2.COLOR_GRAY2BGR)
        H_inv = np.linalg.inv(H)  # inverse homography (computed but unused here)
        # Project the reference box corners into img2 with H (homogeneous coordinates).
        leftnew_array = np.ones((3, 1), dtype=np.float64)
        leftnew_array[0][0] = 843.53
        leftnew_array[1][0] = 230.07
        leftnew_array_1 = np.dot(H, leftnew_array)
        leftnew_array_2 = leftnew_array_1 / leftnew_array_1[2][0]  # divide by w

        rightnew_array = np.ones((3, 1), dtype=np.float64)
        rightnew_array[0][0]=966.61
        rightnew_array[1][0]=447.38
        rightnew_array_1 = np.dot(H, rightnew_array)
        rightnew_array_2 = rightnew_array_1/rightnew_array_1[2][0]

        cv2.rectangle(img2temp, (int(leftnew_array_2[0][0]+0.5), int(leftnew_array_2[1][0]+0.5)),
                      (int(rightnew_array_2[0][0]+0.5), int(rightnew_array_2[1][0]+0.5)), (0, 255, 0), 3)
        # ground-truth (GT) box
        cv2.rectangle(img2temp, (int(239.82 + 0.5), int(704.91 + 0.5)),
                      (int(389.82 + 0.5), int(985.34 + 0.5)), (0, 0, 255), 3)
        cv2.imwrite("./result_orb_rectangle.png", img2temp)

    else:
        H, status = None, None
        print('%d matches found, not enough for homography estimation' % len(p1))

    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    vis = np.zeros((h1 + h2, max(w1, w2)), np.uint8)
    vis[:h1, :w1] = img1
    vis[h1:h1 + h2, :w2] = img2
    vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)

    p1, p2 = [], []
    for kpp in kp_pairs:
        p1.append(np.int32(kpp[0].pt))
        p2.append(np.int32(np.array(kpp[1].pt) + [0, h1]))

    green = (0, 255, 0)
    thickness = 2
    for (x1, y1), (x2, y2) in zip(p1, p2):
        cv2.line(vis, (int(x1), int(y1)), (int(x2), int(y2)), green, thickness)
    cv2.imwrite("./result_orb.png", vis)

2. SIFT

# coding:utf-8
import cv2
import numpy as np

from contextlib import contextmanager
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency()

@contextmanager
def Timer(msg):
    print(msg, '...',)
    start = clock()
    try:
        yield
    finally:
        print("%.2f ms" % ((clock()-start)*1000))

def filter_matches(kp1, kp2, matches, ratio = 0.75):
    mkp1, mkp2 = [], []
    for m in matches:
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            m = m[0]
            mkp1.append( kp1[m.queryIdx] )
            mkp2.append( kp2[m.trainIdx] )
    p1 = np.float32([kp.pt for kp in mkp1])
    p2 = np.float32([kp.pt for kp in mkp2])
    kp_pairs = zip(mkp1, mkp2)
    return p1, p2, list(kp_pairs)

with Timer('matching'):
    debug=1
    if debug==1:
        path1 = "./1_IMG_Texture_8Bit.png"
        path2 = "./3_IMG_Texture_8Bit.png"
    else:
        path1 = "./OtherSampleFrame_IMG_Texture_8Bit_48.png"
        path2 = "./OtherSampleFrame_IMG_Texture_8Bit_52.png"

    img1 = cv2.imread(path1, cv2.IMREAD_GRAYSCALE)
    img2 = cv2.imread(path2, cv2.IMREAD_GRAYSCALE)

    sift = cv2.xfeatures2d.SIFT_create()  # cv2.SIFT_create() in OpenCV >= 4.4
    keypoint1, desc1 = sift.detectAndCompute(img1, None)
    keypoint2, desc2 = sift.detectAndCompute(img2, None)

    flann=1
    if flann==1:
        """
        FLANN是類似最近鄰的快速匹配庫
            它會根據數據本身選擇最合適的算法來處理數據
            比其他搜索算法快10倍
        """
        #原始是 FLANN_INDEX_KDTREE=0 trees=5
        FLANN_INDEX_KDTREE = 1
        indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=6)
        searchParams = dict(checks=50)
        flann = cv2.FlannBasedMatcher(indexParams, searchParams)
        matches = flann.knnMatch(desc1, desc2, k=2)
    else:
        bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=False)
        matches = bf.knnMatch(desc1, desc2, k=2)


    # whether to cull matches with the ratio test
    if 1:
        p1, p2, kp_pairs = filter_matches(keypoint1, keypoint2, matches)
    else:
        mkp1, mkp2 = [], []
        for m in matches:
            m = m[0]
            mkp1.append( keypoint1[m.queryIdx] )
            mkp2.append( keypoint2[m.trainIdx] )
        p1 = np.float32([kp.pt for kp in mkp1])
        p2 = np.float32([kp.pt for kp in mkp2])
        kp_pairs = zip(mkp1, mkp2)
        kp_pairs = list(kp_pairs)

    if len(p1) >= 4:
        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 20.0)
        print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
        kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]

        img2temp=img2.copy()
        img2temp = cv2.cvtColor(img2temp, cv2.COLOR_GRAY2BGR)
        H_inv = np.linalg.inv(H)  # inverse homography (computed but unused here)
        # Project the reference box corners into img2 with H (homogeneous coordinates).
        leftnew_array = np.ones((3, 1), dtype=np.float64)
        leftnew_array[0][0]=843.53
        leftnew_array[1][0]=230.07
        leftnew_array_1 = np.dot(H, leftnew_array)
        leftnew_array_2 = leftnew_array_1/leftnew_array_1[2][0]

        rightnew_array = np.ones((3, 1), dtype=np.float64)
        rightnew_array[0][0]=966.61
        rightnew_array[1][0]=447.38
        rightnew_array_1 = np.dot(H, rightnew_array)
        rightnew_array_2 = rightnew_array_1/rightnew_array_1[2][0]

        cv2.rectangle(img2temp, (int(leftnew_array_2[0][0]+0.5), int(leftnew_array_2[1][0]+0.5)),
                      (int(rightnew_array_2[0][0]+0.5), int(rightnew_array_2[1][0]+0.5)), (0, 255, 0), 3)
        # ground-truth (GT) box
        cv2.rectangle(img2temp, (int(239.82 + 0.5), int(704.91 + 0.5)),
                      (int(389.82 + 0.5), int(985.34 + 0.5)), (0, 0, 255), 3)
        cv2.imwrite("./result_sift_rectangle.png", img2temp)

    else:
        H, status = None, None
        print('%d matches found, not enough for homography estimation' % len(p1))

    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    vis = np.zeros((h1 + h2, max(w1, w2)), np.uint8)
    vis[:h1, :w1] = img1
    vis[h1:h1 + h2, :w2] = img2
    vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)

    p1, p2 = [], []
    for kpp in kp_pairs:
        p1.append(np.int32(kpp[0].pt))
        p2.append(np.int32(np.array(kpp[1].pt) + [0, h1]))

    green = (0, 255, 0)
    thickness = 2
    for (x1, y1), (x2, y2) in zip(p1, p2):
        cv2.line(vis, (int(x1), int(y1)), (int(x2), int(y2)), green, thickness)
    cv2.imwrite("./result_sift.png", vis)

3. ASIFT

# coding:utf-8
import cv2
import numpy as np

from contextlib import contextmanager
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency()

@contextmanager
def Timer(msg):
    print(msg, '...',)
    start = clock()
    try:
        yield
    finally:
        print("%.2f ms" % ((clock()-start)*1000))

FLANN_INDEX_KDTREE = 1  # bug: flann enums are missing
FLANN_INDEX_LSH    = 6
def init_feature(name):
    chunks = name.split('-')
    if chunks[0] == 'sift':
        detector = cv2.xfeatures2d.SIFT_create()
        norm = cv2.NORM_L2
    elif chunks[0] == 'surf':
        detector = cv2.xfeatures2d.SURF_create(800)
        norm = cv2.NORM_L2
    elif chunks[0] == 'orb':
        detector = cv2.ORB_create(400)
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'akaze':
        detector = cv2.AKAZE_create()
        norm = cv2.NORM_HAMMING
    elif chunks[0] == 'brisk':
        detector = cv2.BRISK_create()
        norm = cv2.NORM_HAMMING
    else:
        return None, None
    if 'flann' in chunks:
        if norm == cv2.NORM_L2:
            flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
        else:
            flann_params= dict(algorithm = FLANN_INDEX_LSH,
                               table_number = 6, # 12
                               key_size = 12,     # 20
                               multi_probe_level = 1) #2
        matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)
    else:
        matcher = cv2.BFMatcher(norm)
    return detector, matcher

def filter_matches(kp1, kp2, matches, ratio = 0.75):
    mkp1, mkp2 = [], []
    for m in matches:
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            m = m[0]
            mkp1.append( kp1[m.queryIdx] )
            mkp2.append( kp2[m.trainIdx] )
    p1 = np.float32([kp.pt for kp in mkp1])
    p2 = np.float32([kp.pt for kp in mkp2])
    kp_pairs = zip(mkp1, mkp2)
    return p1, p2, list(kp_pairs)

def explore_match(img1, img2, kp_pairs, status = None, H = None):
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    vis = np.zeros((h1 + h2, max(w1, w2)), np.uint8)
    vis[:h1, :w1] = img1
    vis[h1:h1 + h2, :w2] = img2
    vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)

    if status is None:
        status = np.ones(len(kp_pairs), np.bool_)
    p1, p2 = [], []  # python 2 / python 3 change of zip unpacking
    for kpp in kp_pairs:
        p1.append(np.int32(kpp[0].pt))
        p2.append(np.int32(np.array(kpp[1].pt) +  [0, h1]))

    green = (0, 255, 0)
    thickness = 2
    for (x1, y1), (x2, y2) in zip(p1, p2):
        cv2.line(vis, (int(x1), int(y1)), (int(x2), int(y2)), green, thickness)
    cv2.imwrite("./result_asift.png", vis)

def affine_skew(tilt, phi, img, mask=None):
    '''
    affine_skew(tilt, phi, img, mask=None) -> skew_img, skew_mask, Ai
    Ai - is an affine transform matrix from skew_img to img
    '''
    h, w = img.shape[:2]
    if mask is None:
        mask = np.zeros((h, w), np.uint8)
        mask[:] = 255
    A = np.float32([[1, 0, 0], [0, 1, 0]])
    if phi != 0.0:
        phi = np.deg2rad(phi)
        s, c = np.sin(phi), np.cos(phi)
        A = np.float32([[c,-s], [ s, c]])
        corners = [[0, 0], [w, 0], [w, h], [0, h]]
        tcorners = np.int32( np.dot(corners, A.T) )
        x, y, w, h = cv2.boundingRect(tcorners.reshape(1,-1,2))
        A = np.hstack([A, [[-x], [-y]]])
        img = cv2.warpAffine(img, A, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
    if tilt != 1.0:
        s = 0.8*np.sqrt(tilt*tilt-1)
        img = cv2.GaussianBlur(img, (0, 0), sigmaX=s, sigmaY=0.01)
        img = cv2.resize(img, (0, 0), fx=1.0/tilt, fy=1.0, interpolation=cv2.INTER_NEAREST)
        A[0] /= tilt
    if phi != 0.0 or tilt != 1.0:
        h, w = img.shape[:2]
        mask = cv2.warpAffine(mask, A, (w, h), flags=cv2.INTER_NEAREST)
    Ai = cv2.invertAffineTransform(A)
    return img, mask, Ai


def affine_detect(detector, img, mask=None):
    '''
    affine_detect(detector, img, mask=None, pool=None) -> keypoints, descrs
    Apply a set of affine transformations to the image, detect keypoints and
    reproject them into initial image coordinates.
    See http://www.ipol.im/pub/algo/my_affine_sift/ for the details.
    ThreadPool object may be passed to speedup the computation.
    '''
    hh, ww = img.shape[:2]
    params = [(1.0, 0.0)]
    for t in 2**(0.5*np.arange(1,6)):
        for phi in np.arange(0, 180, 72.0 / t):
            params.append((t, phi))

    keypoints_all, descrs_all = [], []

    for t, phi in params:
        timg, tmask, Ai = affine_skew(t, phi, img)
        #img_disp = cv2.bitwise_and(timg, timg, mask=tmask);

        keypoints, descrs = detector.detectAndCompute(timg, tmask)
        for kp in keypoints:
            x, y = kp.pt
            kp.pt = tuple( np.dot(Ai, (x, y, 1)) )
            # Clamp reprojected keypoints to the image bounds.
            kp.pt = (min(max(kp.pt[0], 0), ww - 1), min(max(kp.pt[1], 0), hh - 1))
        if descrs is None:
            descrs = []
        keypoints_all.extend(keypoints)
        descrs_all.extend(descrs)

    return keypoints_all, np.array(descrs_all)


with Timer('matching'):
    debug=1
    if debug==1:
        path1 = "./1_IMG_Texture_8Bit.png"
        path2 = "./3_IMG_Texture_8Bit.png"
    else:
        path1 = "./OtherSampleFrame_IMG_Texture_8Bit_48.png"
        path2 = "./OtherSampleFrame_IMG_Texture_8Bit_52.png"

    img1 = cv2.imread(path1, cv2.IMREAD_GRAYSCALE)
    img2 = cv2.imread(path2, cv2.IMREAD_GRAYSCALE)
    detector, matcher = init_feature('sift')

    kp1, desc1 = affine_detect(detector, img1)
    kp2, desc2 = affine_detect(detector, img2)
    print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2)))

    raw_matches = matcher.knnMatch(desc1, trainDescriptors=desc2, k=2)
    p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)
    if len(p1) >= 4:
        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 50.0)
        print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
        # do not draw outliers (there will be a lot of them)
        kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]

        img2temp=img2.copy()
        img2temp = cv2.cvtColor(img2temp, cv2.COLOR_GRAY2BGR)
        # H_inv = np.linalg.inv(H)  # inverse homography (unused)
        # Project the reference box corners into img2 with H (homogeneous coordinates).
        leftnew_array = np.ones((3, 1), dtype=np.float64)
        leftnew_array[0][0]=843.53
        leftnew_array[1][0]=230.07
        leftnew_array_1 = np.dot(H, leftnew_array)
        leftnew_array_2 = leftnew_array_1/leftnew_array_1[2][0]

        rightnew_array = np.ones((3, 1), dtype=np.float64)
        rightnew_array[0][0]=966.61
        rightnew_array[1][0]=447.38
        rightnew_array_1 = np.dot(H, rightnew_array)
        rightnew_array_2 = rightnew_array_1/rightnew_array_1[2][0]

        cv2.rectangle(img2temp, (int(leftnew_array_2[0][0]+0.5), int(leftnew_array_2[1][0]+0.5)),
                      (int(rightnew_array_2[0][0]+0.5), int(rightnew_array_2[1][0]+0.5)), (0, 255, 0), 3)
        # ground-truth (GT) box (drawing disabled here)
        #cv2.rectangle(img2temp, (int(239.82 + 0.5), int(704.91 + 0.5)),
        #              (int(389.82 + 0.5), int(985.34 + 0.5)), (0, 0, 255), 3)
        cv2.imwrite("./result_asift_rectangle.png", img2temp)
        '''
        Reference C++ snippet: counting homography inliers manually by
        reprojection error.

        Mat H = findHomography(queryCoord, objectCoord, CV_RANSAC, 10, mask);
        Mat H_inv = H.inv();
        int inliers_cnt = 0, outliers_cnt = 0;

        for (unsigned i = 0; i < queryCoord.size(); i++) {
            Mat col = Mat::ones(3, 1, CV_64F);
            col.at<double>(0) = queryCoord[i].x;
            col.at<double>(1) = queryCoord[i].y;

            col = H * col;
            col /= col.at<double>(2); // convert [x*, y*, w] to [x*/w, y*/w, 1]
            double dist = sqrt(pow(col.at<double>(0) - objectCoord[i].x, 2) +
                pow(col.at<double>(1) - objectCoord[i].y, 2)); // reprojection error (Euclidean distance)

            if (dist < inlier_threshold) { // error below the threshold -> inlier
                queryInliers.push_back(queryCoord[i]);
                sceneInliers.push_back(objectCoord[i]);
                inliers_cnt++;
            }
        }

        Reference (history) image box corners:
        [843.5384615384614, 230.07692307692304],
        [966.6153846153845, 447.38461538461536]

        Current image box corners (ground truth):
        [239.82608695652175, 704.9130434782609],
        [389.82608695652175, 985.3478260869565]
        '''
    else:
        H, status = None, None
        print('%d matches found, not enough for homography estimation' % len(p1))
    explore_match(img1, img2, kp_pairs, None, H)
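The affine_detect docstring notes that a ThreadPool may be passed to speed up the computation: the loop over tilt/rotation parameters is embarrassingly parallel. A minimal parallel variant as a sketch, reusing affine_skew and the same parameter grid from the listing (affine_detect_parallel is a hypothetical name, not part of the original code):

from multiprocessing.pool import ThreadPool

def affine_detect_parallel(detector, img, pool=None):
    # Same tilt/rotation grid as affine_detect above.
    params = [(1.0, 0.0)]
    for t in 2 ** (0.5 * np.arange(1, 6)):
        for phi in np.arange(0, 180, 72.0 / t):
            params.append((t, phi))
    hh, ww = img.shape[:2]

    def f(p):
        t, phi = p
        timg, tmask, Ai = affine_skew(t, phi, img)  # reuses affine_skew from the listing
        keypoints, descrs = detector.detectAndCompute(timg, tmask)
        for kp in keypoints:
            x, y = np.dot(Ai, (kp.pt[0], kp.pt[1], 1))
            # Clamp reprojected points to the image bounds, as in the serial version.
            kp.pt = (min(max(x, 0), ww - 1), min(max(y, 0), hh - 1))
        return keypoints, descrs if descrs is not None else []

    pool = pool or ThreadPool(processes=cv2.getNumberOfCPUs())
    keypoints_all, descrs_all = [], []
    for keypoints, descrs in pool.imap(f, params):
        keypoints_all.extend(keypoints)
        descrs_all.extend(descrs)
    return keypoints_all, np.array(descrs_all)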

4. DenseSIFT

# coding:utf-8
import cv2
import numpy as np

from contextlib import contextmanager
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency()

@contextmanager
def Timer(msg):
    print(msg, '...',)
    start = clock()
    try:
        yield
    finally:
        print("%.2f ms" % ((clock()-start)*1000))

def filter_matches(kp1, kp2, matches, ratio = 0.75):
    mkp1, mkp2 = [], []
    for m in matches:
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            m = m[0]
            mkp1.append( kp1[m.queryIdx] )
            mkp2.append( kp2[m.trainIdx] )
    p1 = np.float32([kp.pt for kp in mkp1])
    p2 = np.float32([kp.pt for kp in mkp2])
    kp_pairs = zip(mkp1, mkp2)
    return p1, p2, list(kp_pairs)

with Timer('matching'):
    debug=1
    if debug==1:
        path1 = "./1_IMG_Texture_8Bit.png"
        path2 = "./3_IMG_Texture_8Bit.png"
    else:
        path1 = "./OtherSampleFrame_IMG_Texture_8Bit_48.png"
        path2 = "./OtherSampleFrame_IMG_Texture_8Bit_52.png"

    img1 = cv2.imread(path1, cv2.IMREAD_GRAYSCALE)
    img2 = cv2.imread(path2, cv2.IMREAD_GRAYSCALE)

    sift = cv2.xfeatures2d.SIFT_create()  # cv2.SIFT_create() in OpenCV >= 4.4
    # Dense SIFT: skip detection and describe a regular grid of keypoints instead.
    # keypoint1, desc1 = sift.detectAndCompute(img1, None)
    # keypoint2, desc2 = sift.detectAndCompute(img2, None)

    rows1, cols1 = img1.shape[:2]
    rows2, cols2 = img2.shape[:2]
    initXyStep = 6  # grid step in pixels; also used as keypoint size and border margin
    keypoint1 = []
    for lrow in range(initXyStep, rows1 - initXyStep, initXyStep):
        for lcol in range(initXyStep, cols1 - initXyStep, initXyStep):
            keypoint1.append(cv2.KeyPoint(lcol, lrow, initXyStep, _class_id=0))

    keypoint2 = []
    for lrow in range(initXyStep, rows2 - initXyStep, initXyStep):
        for lcol in range(initXyStep, cols2 - initXyStep, initXyStep):
            keypoint2.append(cv2.KeyPoint(lcol, lrow, initXyStep, _class_id=0))
    keypoint1, desc1 = sift.compute(img1, keypoint1)
    keypoint2, desc2 = sift.compute(img2, keypoint2)
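    # (Sketch) The two grid loops above can also be built with np.mgrid;
    # hypothetical vectorized equivalent for img1, same step and margin:
    #   ys, xs = np.mgrid[initXyStep:rows1 - initXyStep:initXyStep,
    #                     initXyStep:cols1 - initXyStep:initXyStep]
    #   keypoint1 = [cv2.KeyPoint(float(x), float(y), initXyStep)
    #                for y, x in zip(ys.ravel(), xs.ravel())]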

    resultimg = img1.copy()
    resultimg = cv2.drawKeypoints(img1, keypoint1, resultimg, flags=cv2.DRAW_MATCHES_FLAGS_DEFAULT)
    cv2.imwrite('denseSift_keypoints.png', resultimg)


    flann=1
    if flann==1:
        """
        FLANN是類似最近鄰的快速匹配庫
            它會根據數據本身選擇最合適的算法來處理數據
            比其他搜索算法快10倍
        """
        #原始是 FLANN_INDEX_KDTREE=0 trees=5
        FLANN_INDEX_KDTREE = 1
        indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=6)
        searchParams = dict(checks=50)
        flann = cv2.FlannBasedMatcher(indexParams, searchParams)
        matches = flann.knnMatch(desc1, desc2, k=2)
    else:
        bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=False)
        matches = bf.knnMatch(desc1, desc2, k=2)


    # whether to cull matches with the ratio test
    if 1:
        p1, p2, kp_pairs = filter_matches(keypoint1, keypoint2, matches)
    else:
        mkp1, mkp2 = [], []
        for m in matches:
            m = m[0]
            mkp1.append( keypoint1[m.queryIdx] )
            mkp2.append( keypoint2[m.trainIdx] )
        p1 = np.float32([kp.pt for kp in mkp1])
        p2 = np.float32([kp.pt for kp in mkp2])
        kp_pairs = zip(mkp1, mkp2)
        kp_pairs = list(kp_pairs)

    if len(p1) >= 4:
        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 20.0)
        print('%d / %d  inliers/matched' % (np.sum(status), len(status)))
        kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag]
    else:
        H, status = None, None
        print('%d matches found, not enough for homography estimation' % len(p1))

    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    vis = np.zeros((h1 + h2, max(w1, w2)), np.uint8)
    vis[:h1, :w1] = img1
    vis[h1:h1 + h2, :w2] = img2
    vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)

    p1, p2 = [], []
    for kpp in kp_pairs:
        p1.append(np.int32(kpp[0].pt))
        p2.append(np.int32(np.array(kpp[1].pt) + [0, h1]))

    green = (0, 255, 0)
    thickness = 2
    for (x1, y1), (x2, y2) in zip(p1, p2):
        cv2.line(vis, (int(x1), int(y1)), (int(x2), int(y2)), green, thickness)
    cv2.imwrite("./result_denseSift.png", vis)

 
