SIFT Development Notes 1

First, a few useful examples.

1.

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>

using namespace cv;
using namespace std;

int main()
{
    initModule_nonfree();//initialize the nonfree module; required before creating SIFT or SURF through the factory functions
    Ptr<FeatureDetector> detector = FeatureDetector::create( "SIFT" );//create the SIFT keypoint detector
    Ptr<DescriptorExtractor> descriptor_extractor = DescriptorExtractor::create( "SIFT" );//create the descriptor extractor
    Ptr<DescriptorMatcher> descriptor_matcher = DescriptorMatcher::create( "BruteForce" );//create the descriptor matcher
    if( detector.empty() || descriptor_extractor.empty() )
    {
        cout<<"fail to create detector!"; return -1;
    }

    //read the input images
    Mat img1 = imread("phone2.jpg");
    Mat img2 = imread("phone3.jpg");
    if( img1.empty() || img2.empty() ){ cout<<"fail to load images!"; return -1; }

    //keypoint detection
    double t = getTickCount();//current tick count
    vector<KeyPoint> keypoints1,keypoints2;
    detector->detect( img1, keypoints1 );//detect SIFT keypoints in img1 and store them in keypoints1
    detector->detect( img2, keypoints2 );
    cout<<"number of keypoints in image 1: "<<keypoints1.size()<<endl;
    cout<<"number of keypoints in image 2: "<<keypoints2.size()<<endl;

    //print the keypoints
    cout<<"keypoints of image 1:"<<endl;
    for(int i=0;i<keypoints1.size();i++)
    {
        cout<<"position: "<<keypoints1[i].pt;
        cout<<", neighbourhood diameter: "<<keypoints1[i].size;
        cout<<", orientation: "<<keypoints1[i].angle;
        cout<<", octave: "<<keypoints1[i].octave;
        cout<<", id: "<<keypoints1[i].class_id<<endl;
    }

    //compute the descriptor matrix (the feature-vector matrix) from the keypoints
    Mat descriptors1,descriptors2;
    descriptor_extractor->compute( img1, keypoints1, descriptors1 );
    descriptor_extractor->compute( img2, keypoints2, descriptors2 );
    t = ((double)getTickCount() - t)/getTickFrequency();
    cout<<"SIFT算法用時:"<<t<<"秒"<<endl;

    cout<<"圖像1特徵描述矩陣大小:"<<descriptors1.size()
        <<",特徵向量個數:"<<descriptors1.rows<<",維數:"<<descriptors1.cols<<endl;
    cout<<"圖像2特徵描述矩陣大小:"<<descriptors2.size()
        <<",特徵向量個數:"<<descriptors2.rows<<",維數:"<<descriptors2.cols<<endl;

    //print the first descriptor (SIFT descriptors are stored as 32-bit floats, CV_32F)
    for(int i=0;i<128;i++)
        cout<<descriptors1.at<float>(0,i)<<",";
    cout<<endl;

    //draw the keypoints
    Mat img_keypoints1,img_keypoints2;
    drawKeypoints(img1,keypoints1,img_keypoints1,Scalar::all(-1),0);
    drawKeypoints(img2,keypoints2,img_keypoints2,Scalar::all(-1),0);
    //imshow("Src1",img_keypoints1);
    //imshow("Src2",img_keypoints2);

    //descriptor matching
    vector<DMatch> matches;//matching results
    descriptor_matcher->match( descriptors1, descriptors2, matches );//match the descriptor matrices of the two images
    cout<<"number of matches: "<<matches.size()<<endl;

    //find the maximum and minimum distance among all matches
    //the distance is the Euclidean distance between two feature vectors; the smaller it is, the closer the two keypoints are
    double max_dist = 0;
    double min_dist = 1e10;//start from a very large value so the true minimum is found
    for(int i=0; i<matches.size(); i++)
    {
        double dist = matches[i].distance;
        if(dist < min_dist) min_dist = dist;
        if(dist > max_dist) max_dist = dist;
    }
    cout<<"最大距離:"<<max_dist<<endl;
    cout<<"最小距離:"<<min_dist<<endl;

    //keep only the better matches
    vector<DMatch> goodMatches;
    for(int i=0; i<matches.size(); i++)
    {
        if(matches[i].distance < 0.5 * max_dist)
        {
            goodMatches.push_back(matches[i]);
        }
    }
    cout<<"goodMatch個數:"<<goodMatches.size()<<endl;

    //print the filtered matches
    for(int i=0; i<goodMatches.size(); i++)
    {
        cout<<"queryIdx: "<<goodMatches[i].queryIdx;
        cout<<", trainIdx: "<<goodMatches[i].trainIdx;
        cout<<", imgIdx: "<<goodMatches[i].imgIdx;
        cout<<", Euclidean distance: "<<goodMatches[i].distance<<endl;
    }

    //draw the matching result
    Mat img_matches;
    //matched keypoint pairs are connected in random colours (pass CV_RGB(255,0,0) for red lines); unmatched keypoints are drawn in green
    drawMatches(img1,keypoints1,img2,keypoints2,goodMatches,img_matches,
                Scalar::all(-1)/*CV_RGB(255,0,0)*/,CV_RGB(0,255,0),Mat(),2);

    imshow("MatchSIFT",img_matches);
    waitKey(0);
    return 0;
}
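Filtering by 0.5 * max_dist, as above, is a fairly crude test. A common follow-up step, not part of the original example, is to verify the surviving matches geometrically with a RANSAC homography. The sketch below assumes the keypoints1, keypoints2 and goodMatches variables from the listing above and additionally needs "opencv2/calib3d/calib3d.hpp"; placement and names are my own illustration:

    // Hypothetical follow-up: geometric verification of goodMatches with RANSAC.
    // Requires: #include "opencv2/calib3d/calib3d.hpp"
    if( goodMatches.size() >= 4 )   // findHomography needs at least 4 point pairs
    {
        vector<Point2f> pts1, pts2;
        for(int i=0; i<goodMatches.size(); i++)
        {
            pts1.push_back( keypoints1[goodMatches[i].queryIdx].pt );
            pts2.push_back( keypoints2[goodMatches[i].trainIdx].pt );
        }
        vector<uchar> inlierMask;   // 1 = inlier, 0 = outlier
        Mat H = findHomography( pts1, pts2, CV_RANSAC, 3.0, inlierMask );
        cout<<"RANSAC inliers: "<<countNonZero(inlierMask)<<"/"<<goodMatches.size()<<endl;
    }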

2.

LocalFeature.h

//  Local image feature extraction and matching
//  Author:  www.icvpr.com
//  Blog  :  http://blog.csdn.net/icvpr
#ifndef _FEATURE_H_ 
#define _FEATURE_H_

#include <iostream>
#include <vector>
#include <string>

#include <opencv2/opencv.hpp>

using namespace cv;
using namespace std;

class Feature
{
public:
	Feature();
	~Feature();

	Feature(const string& detectType, const string& extractType, const string& matchType);

public:
	
	void detectKeypoints(const Mat& image, vector<KeyPoint>& keypoints);   // detect keypoints
	void extractDescriptors(const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptor);   // extract descriptors
	void bestMatch(const Mat& queryDescriptor, Mat& trainDescriptor, vector<DMatch>& matches);  // nearest-neighbour matching
	void knnMatch(const Mat& queryDescriptor, Mat& trainDescriptor, vector<vector<DMatch>>& matches, int k);   // k-nearest-neighbour matching

	void saveKeypoints(const Mat& image, const vector<KeyPoint>& keypoints, const string& saveFileName = "");  // save the keypoints drawn on the image
	void saveMatches(const Mat& queryImage,
			 const vector<KeyPoint>& queryKeypoints,
			 const Mat& trainImage,
			 const vector<KeyPoint>& trainKeypoints,
			 const vector<DMatch>& matches,
			 const string& saveFileName = "");   // save the match visualisation to an image file

private:
	Ptr<FeatureDetector> m_detector;
	Ptr<DescriptorExtractor> m_extractor;
	Ptr<DescriptorMatcher> m_matcher;

	string m_detectType;
	string m_extractType;
	string m_matchType;

};


#endif

LocalFeature.cpp

//  Local image feature extraction and matching
//  Author:  www.icvpr.com
//  Blog  :  http://blog.csdn.net/icvpr

#include "LocalFeature.h"

Feature::Feature()
{
	m_detectType = "SIFT";
	m_extractType = "SIFT";
	m_matchType = "FruteForce";
	initModule_nonfree(); 
}

Feature::~Feature()
{

}


Feature::Feature(const string& detectType, const string& extractType, const string& matchType)
{
	assert(!detectType.empty());
	assert(!extractType.empty());
	assert(!matchType.empty());

	m_detectType = detectType;
	m_extractType = extractType;
	m_matchType = matchType;
	initModule_nonfree(); 
}


void Feature::detectKeypoints(const Mat& image, std::vector<KeyPoint>& keypoints) 
{
	assert(image.type() == CV_8UC1);
	assert(!m_detectType.empty());

	keypoints.clear();
	m_detector = FeatureDetector::create(m_detectType);
	m_detector->detect(image, keypoints);

}



void Feature::extractDescriptors(const Mat& image, std::vector<KeyPoint>& keypoints, Mat& descriptor)
{
	assert(image.type() == CV_8UC1);
	assert(!m_extractType.empty());

	m_extractor = DescriptorExtractor::create(m_extractType);
	m_extractor->compute(image, keypoints, descriptor);

}


void Feature::bestMatch(const Mat& queryDescriptor, Mat& trainDescriptor, std::vector<DMatch>& matches) 
{
	assert(!queryDescriptor.empty());
	assert(!trainDescriptor.empty());
	assert(!m_matchType.empty());

	matches.clear();

	m_matcher = DescriptorMatcher::create(m_matchType);
	m_matcher->add(std::vector<Mat>(1, trainDescriptor));
	m_matcher->train();
	m_matcher->match(queryDescriptor, matches);

}


void Feature::knnMatch(const Mat& queryDescriptor, Mat& trainDescriptor, std::vector<std::vector<DMatch>>& matches, int k)
{
	assert(k > 0);
	assert(!queryDescriptor.empty());
	assert(!trainDescriptor.empty());
	assert(!m_matchType.empty());

	matches.clear();

	m_matcher = DescriptorMatcher::create(m_matchType);
	m_matcher->add(std::vector<Mat>(1, trainDescriptor));
	m_matcher->train();
	m_matcher->knnMatch(queryDescriptor, matches, k);

}



void Feature::saveKeypoints(const Mat& image, const vector<KeyPoint>& keypoints, const string& saveFileName)
{
	assert(!saveFileName.empty());

	Mat outImage;
	cv::drawKeypoints(image, keypoints, outImage, Scalar(255,255,0), DrawMatchesFlags::DRAW_RICH_KEYPOINTS );

	//
	string saveKeypointsImgName = saveFileName + "_" + m_detectType + ".jpg";
	imwrite(saveKeypointsImgName, outImage);

}



void Feature::saveMatches(const Mat& queryImage,
							const vector<KeyPoint>& queryKeypoints,
							const Mat& trainImage,
							const vector<KeyPoint>& trainKeypoints,
							const vector<DMatch>& matches,
							const string& saveFileName)
{
	assert(!saveFileName.empty());

	Mat outImage;
	cv::drawMatches(queryImage, queryKeypoints, trainImage, trainKeypoints, matches, outImage, 
				Scalar(255, 0, 0), Scalar(0, 255, 255), vector<char>(),  DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

	//
	string saveMatchImgName = saveFileName + "_" + m_detectType + "_" + m_extractType + "_" + m_matchType + ".jpg";
	imwrite(saveMatchImgName, outImage);
}

Test code, main.cpp


//  Local image feature extraction and matching
//  Author:  www.icvpr.com  
//  Blog  : http://blog.csdn.net/icvpr    
  
#include <iostream>
#include <string>
#include <opencv2/opencv.hpp>  
  
using namespace cv;  
using namespace std;  
  
#include "LocalFeature.h"  
  
int main(int argc, char** argv)  
{  
	if (argc != 6)  
	{  
		cout << "wrong usage!" << endl;  
		cout << "usage: .exe FAST SIFT BruteForce queryImage trainImage" << endl;  
		return -1;  
	}  
  
	string detectorType = argv[1];  
	string extractorType = argv[2];  
	string matchType = argv[3];  
	string queryImagePath = argv[4];  
	string trainImagePath = argv[5];  
	  
	Mat queryImage = imread(queryImagePath, CV_LOAD_IMAGE_GRAYSCALE);  
	if (queryImage.empty())  
	{  
		cout<<"read failed"<< endl;  
		return -1;  
	}  
	  
	Mat trainImage = imread(trainImagePath, CV_LOAD_IMAGE_GRAYSCALE);  
	if (trainImage.empty())  
	{  
		cout<<"read failed"<< endl;  
		return -1;  
	}  
	  
	Feature feature(detectorType, extractorType, matchType);  
	  
	vector<KeyPoint> queryKeypoints, trainKeypoints;
	feature.detectKeypoints(queryImage, queryKeypoints);  
	feature.detectKeypoints(trainImage, trainKeypoints);  
	  
	Mat queryDescriptor, trainDescriptor;  
	  
	feature.extractDescriptors(queryImage, queryKeypoints, queryDescriptor);  
	feature.extractDescriptors(trainImage, trainKeypoints, trainDescriptor);  
	  
	vector<DMatch> matches;
	feature.bestMatch(queryDescriptor, trainDescriptor, matches);  
	  
	vector<vector<DMatch>> knnmatches;
	feature.knnMatch(queryDescriptor, trainDescriptor, knnmatches, 2);  
	  
	Mat outImage;  
	feature.saveMatches(queryImage, queryKeypoints, trainImage, trainKeypoints, matches, "../");  
	  
	return 0;  
}  
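In this test program the k-NN matches are computed with k = 2 but not used any further. A common use for them is Lowe's ratio test, which keeps a match only when its best distance is clearly smaller than the second-best distance. The snippet below is a minimal sketch meant to go after the feature.knnMatch(...) call; ratioMatches and ratioThreshold are names introduced here purely for illustration:

	// Hypothetical ratio-test filtering of the k-NN matches (Lowe's ratio test).
	vector<DMatch> ratioMatches;
	const float ratioThreshold = 0.8f;   // commonly used value; tune per application
	for (size_t i = 0; i < knnmatches.size(); i++)
	{
		if (knnmatches[i].size() >= 2 &&
			knnmatches[i][0].distance < ratioThreshold * knnmatches[i][1].distance)
		{
			ratioMatches.push_back(knnmatches[i][0]);
		}
	}
	cout << "matches kept after the ratio test: " << ratioMatches.size() << endl;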


