圖像拼接首要步驟就是對齊。對齊就要找到兩幅圖像相對的位置關係。爲了描述位置之間的變換關係,研究者引入了諸如平移,仿射,單應等變換模型。每個模型無所謂好壞,各有特定的適用範圍。
在齊次座標系下,圖像位置之間的關係,或者說同名點座標之間關係,都可以用一個3×3的矩陣來表達。從平移到單應,這個變換矩陣的自由度逐步上升,靈活度增加,適用的場合變廣,但也導致求解出來的變換矩陣不太準確和穩定,意思是容易拼飛。所以,能夠用平移變換模型解決的問題,不見得使用單應變換矩陣更好。模型越緊,解越精確。
本篇博客使用單應變換模型,完成兩幅圖像的拼接。
單應矩陣的求解,按照“特徵檢測+特徵描述+特徵匹配+直接線性變換”的方法。
拼接對齊圖像使用OpenCV裏的warpPerspective()
函數。
代碼實現:
#Homography類
將H矩陣(單應矩陣)的求解封裝進Homography
類中。
//Homography.h 類聲明文件
#pragma once
# include "opencv2/core/core.hpp"
# include "opencv2/features2d/features2d.hpp"
# include "opencv2/highgui/highgui.hpp"
# include "opencv2/imgproc/imgproc.hpp"
#include"opencv2/nonfree/nonfree.hpp"
#include"opencv2/calib3d/calib3d.hpp"
#include<iostream>
using namespace cv;
using namespace std;
// Encapsulates homography (H matrix) estimation between two images via
// "feature detection + description + matching + RANSAC-based DLT".
// Intermediate results are computed lazily and cached by the getters.
class Homography
{
private:
Mat img1;
Mat img2;
// Pluggable OpenCV algorithm objects (defaults are set in the constructor).
Ptr<FeatureDetector> detector;
Ptr<DescriptorExtractor> extractor;
Ptr<DescriptorMatcher> matcher;
// Cached per-image keypoints and descriptors.
vector<KeyPoint> keyPoints1;
vector<KeyPoint> keyPoints2;
Mat descriptors1;
Mat descriptors2;
vector<DMatch> firstMatches; // raw matcher output
vector<DMatch> matches;      // RANSAC-inlier subset of firstMatches
// Match coordinates, index-aligned with firstMatches (img1 -> img2).
vector<Point2f> selfPoints1;
vector<Point2f> selfPoints2;
vector<uchar> inliers; // per-match inlier flags from cv::findHomography
Mat homography;        // cached 3x3 H mapping img1 points onto img2
public:
Homography();
// Construct with the two images to be registered.
Homography(Mat img1, Mat img2) ;
// Replace the default algorithms by OpenCV factory name (e.g. "FAST", "SIFT").
void setFeatureDetector(string detectorName);
void setDescriptorExtractor(string descriptorName);
void setDescriptorMatcher(string matcherName);
// Lazy accessors: each one triggers the pipeline stages it depends on.
vector<KeyPoint> getKeyPoints1();
vector<KeyPoint> getKeyPoints2();
Mat getDescriptors1();
Mat getDescriptors2();
vector<DMatch> getMatches();
// Shows an image with match lines via imshow("drawMatches", ...).
void drawMatches();
Mat getHomography();
~Homography();
private:
void detectKeyPoints();
void computeDescriptors();
void match();
void matchesToSelfPoints();
void findHomography();
void matchesFilter();
};
//Homography.cpp 類實現文件
#include "Homography.h"
// Default pipeline: a brute-force matcher, plus SIFT (800 strongest
// features) doing double duty as detector and descriptor extractor.
Homography::Homography()
{
    matcher = DescriptorMatcher::create("BruteForce");
    detector = new SIFT(800);
    extractor = detector;
}
// Constructs the pipeline with the two input images.
// The original used `new(this) Homography();` (placement new on `this`)
// to reuse the default constructor — that re-runs construction on an
// object that is already being constructed, which is an anti-pattern
// with undefined-behavior pitfalls. Initialize the members directly
// instead (same defaults as the default constructor).
Homography::Homography(Mat img1, Mat img2)
{
    detector = new SIFT(800);
    extractor = detector;
    matcher = DescriptorMatcher::create("BruteForce");
    this->img1 = img1;
    this->img2 = img2;
}
// Swap in a detector created from OpenCV's factory by name (e.g. "FAST").
void Homography::setFeatureDetector(string detectorName)
{
    Ptr<FeatureDetector> created = FeatureDetector::create(detectorName);
    detector = created;
}
// Swap in a descriptor extractor created from OpenCV's factory by name.
void Homography::setDescriptorExtractor(string descriptorName)
{
    Ptr<DescriptorExtractor> created = DescriptorExtractor::create(descriptorName);
    extractor = created;
}
// Swap in a matcher created from OpenCV's factory by name (e.g. "FlannBased").
void Homography::setDescriptorMatcher(string matcherName)
{
    Ptr<DescriptorMatcher> created = DescriptorMatcher::create(matcherName);
    matcher = created;
}
// Returns img1's keypoints, running detection lazily on first access.
vector<KeyPoint> Homography::getKeyPoints1()
{
    if (keyPoints1.empty())
        detectKeyPoints();
    return keyPoints1;
}
// Returns img2's keypoints, running detection lazily on first access.
vector<KeyPoint> Homography::getKeyPoints2()
{
    if (keyPoints2.empty())
        detectKeyPoints();
    return keyPoints2;
}
// Returns img1's descriptors, computing them lazily on first access.
Mat Homography::getDescriptors1()
{
    if (!descriptors1.data)
        computeDescriptors();
    return descriptors1;
}
// Returns img2's descriptors, computing them lazily on first access.
Mat Homography::getDescriptors2()
{
    if (!descriptors2.data)
        computeDescriptors();
    return descriptors2;
}
// Returns the RANSAC-filtered matches, running the filter lazily.
vector<DMatch> Homography::getMatches()
{
    if (matches.empty())
        matchesFilter();
    return matches;
}
// Returns the 3x3 H matrix, estimating it lazily on first access.
Mat Homography::getHomography()
{
    if (!homography.data)
        findHomography();
    return homography;
}
// Renders the inlier matches side by side and shows the result in a
// window named "drawMatches" (caller is expected to pump waitKey()).
void Homography::drawMatches()
{
    // Make sure the inlier-filtered match set exists before drawing.
    if (matches.empty())
        matchesFilter();
    Mat matchImage;
    cv::drawMatches(img1, keyPoints1, img2, keyPoints2, matches, matchImage, 255, 255);
    imshow("drawMatches", matchImage);
}
void Homography::detectKeyPoints()
{
detector->detect(img1, keyPoints1, Mat());
detector->detect(img2, keyPoints2, Mat());
}
// Computes descriptors for both images; detects keypoints first if needed.
void Homography::computeDescriptors()
{
    if (keyPoints1.empty() || keyPoints2.empty())
        detectKeyPoints();
    extractor->compute(img1, keyPoints1, descriptors1);
    extractor->compute(img2, keyPoints2, descriptors2);
}
// Matches descriptors1 against descriptors2 into firstMatches,
// computing descriptors first if needed.
void Homography::match()
{
    if (!descriptors1.data || !descriptors2.data)
        computeDescriptors();
    matcher->match(descriptors1, descriptors2, firstMatches, Mat());
}
void Homography::matchesToSelfPoints()
{
for (vector<DMatch>::const_iterator it = firstMatches.begin(); it != firstMatches.end(); ++it)
{
selfPoints1.push_back(keyPoints1.at(it->queryIdx).pt);
selfPoints2.push_back(keyPoints2.at(it->trainIdx).pt);
}
}
void Homography::findHomography()
{
if (firstMatches.size()==0)
{
match();
}
if (selfPoints1.size()==0||selfPoints2.size()==0)
{
matchesToSelfPoints();
}
inliers=vector<uchar>(selfPoints1.size(),0);
homography = cv::findHomography(selfPoints1, selfPoints2, inliers, CV_FM_RANSAC, 1.0);
}
void Homography::matchesFilter()
{
if (0 == firstMatches.size())
{
findHomography();
}
vector<DMatch>::const_iterator itM = firstMatches.begin();
vector<uchar>::const_iterator itIn = inliers.begin();
for (; itIn != inliers.end(); ++itIn, ++itM)
{
if (*itIn)
{
matches.push_back(*itM);
}
}
}
// Nothing to release explicitly: the Ptr<> members are reference counted.
Homography::~Homography()
{
}
#Homography類使用說明
創建類對象,需要指定兩幅輸入圖像:
Homography h12(img1,img2);
可以設定特徵檢測器、描述器,匹配器的類型。默認情況下,使用SIFT檢測和描述特徵,使用BruteForce算法匹配特徵。獲取更多可用的類型,可以參見OpenCV通用的程序接口。使用示例:
h12.setFeatureDetector("FAST");
h12.setDescriptorExtractor("SIFT");
h12.setDescriptorMatcher("BruteForce");
可以檢查各種中間量,比如檢測出的角點,描述子,匹配,以及畫出匹配。使用例子如下:
//獲取兩幅圖像的特徵點
vector<KeyPoint> keyPoints1=h12.getKeyPoints1();
vector<KeyPoint> keyPoints2=h12.getKeyPoints2();
//獲取描述子
Mat descriptors1=h12.getDescriptors1();
Mat descriptors2=h12.getDescriptors2();
//獲取匹配
vector<DMatch> matches=h12.getMatches();
//畫出帶有匹配連接線的圖像
h12.drawMatches();
可以直接獲取計算出的單應矩陣:
Mat h=h12.getHomography();
#使用warpPerspective()拼接
#include"Homography.h"
int main()
{
    // Load both frames as grayscale for the feature pipeline.
    string imgPath1 = "trees_000.jpg";
    string imgPath2 = "trees_001.jpg";
    Mat img1 = imread(imgPath1, CV_LOAD_IMAGE_GRAYSCALE);
    Mat img2 = imread(imgPath2, CV_LOAD_IMAGE_GRAYSCALE);

    // h12 maps img1 points onto img2; invert it so we can warp img2
    // back into img1's coordinate frame.
    Homography homo12(img1, img2);
    Mat h12 = homo12.getHomography();
    Mat h21;
    invert(h12, h21, DECOMP_LU);

    // Warp the color version of img2 onto a double-width canvas, then
    // paste img1 over the left half to produce the stitched result.
    Mat img1_color = imread(imgPath1, CV_LOAD_IMAGE_COLOR);
    Mat img2_color = imread(imgPath2, CV_LOAD_IMAGE_COLOR);
    Mat canvas;
    warpPerspective(img2_color, canvas, h21, Size(img1.cols*2, img1.rows));
    img1_color.copyTo(canvas(Range::all(), Range(0, img1.cols)));
    imshow("canvas", canvas);
    waitKey(0);
    return 0;
}
trees_000.jpg
trees_001.jpg
canvas.jpg