C++(ALL) STL ATL Opencv 視覺圖像實踐進展記錄

目錄

 檢索文件夾中的圖片名——可用於批量讀取圖片

C++ 創建文件夾並檢測指定文件的個數

 C++ 檢測指定文件的個數

C++ 多線程

C++ 常見錯誤解決

"Error: "const char"類型的實參與"LPCWSTR"類型的形參不兼容",解決方法:

operator()

找數組最大最小值

圖像增強——RGB圖分離並均衡化 

圖像增強——RGB圖分離並對數log變換

圖像增強——濾波器

圖像增強——高反差

圖像增強——邊緣增強

形態學——膨脹腐蝕

圖像分割

圖像分割——OTSU閾值

圖像分割——Kmeans

圖像濾波

多種濾波器

輪廓

findContours

drawContours

 boundingRect

 minAreaRect

 輪廓子程序

圖像特徵提取思路

ATL::CImage讀取圖片

 ATL::CImage轉Mat

C++ 圖像處理——濾波

C++ Opencv HSV

C++ Opencv 雙邊濾波

C++ Opencv 圖像特徵( Opencv3)

C++ Opencv 圖像特徵brisk s

C++ Opencv hog+svm

 C++ Opencv 顏色、紋理、形狀+svm

主函數 

子程序

子函數 

C++ Opencv HU、MOM、GLCM

C++ Opencv hog+SVM(opencv3)

C++ Opencv  特徵AKAZE(opencv3.3.0)

C++ Opencv 矩形分割

 C++ vector容器打印

C++ FlyCapture相機

 C++ Modbus通信

C++ Opencv xml調用

 C++ Opencv xvid編碼錄像

 C++ Opencv glcm

C++ Opencv 同態濾波

C++ Opencv hsv的h直方圖

C++ Opencv HSV H、S、V直方圖

C++ ATL::CImage和Mat的轉換

C++ Opencv 圖像目標分割

C++ Opencv 特徵Feature.h{顏色、形狀、紋理}

C++ vector操作

C++ io頭文件進行指定文件夾的文件名獲取+文件個數返回


 檢索文件夾中的圖片名——可用於批量讀取圖片

/*
#include <iostream>
using namespace std;
*/
// Build numbered image file names (test0.bmp ... test5.bmp) for batch loading.
std::string img_dir = "C:\\Users\\Administrator\\Desktop\\樣品\\目標分割結果\\";	
for (int i = 0; i < 6; i++){
	string pos;
	stringstream ss;
	ss << i;	// int -> string via stringstream
	ss >> pos;
	string img_name = img_dir + "test" + pos + ".bmp";	// full path of image i
	cout << img_name << endl;		
}
system("pause");

C++ 創建文件夾並檢測指定文件的個數

// Create the output folder (no-op if it already exists)
string dirName = "save";
bool flag = CreateDirectory(dirName.c_str(), NULL);

// Count the .bmp files already present in ./save
cv::String pattern = "./save/*.bmp";//
size_t cout;// NOTE(review): this local shadows std::cout — renaming recommended; confirm intended
cout = read_images_in_folder(pattern);

// Save the next frame under a sequential name: save<N>.bmp
char filesave[100];
cout++;// advance the counter so each saved file gets the next free index
sprintf_s(filesave, "./save/save%d.bmp", cout);// NOTE(review): %d receives a size_t — prefer %zu or an int cast; TODO confirm
imwrite(filesave, my_camera.src);// write next to the project working directory		

//讀取當前文件夾圖片的個數子程序
// Count the files in the directory matching `pattern` (e.g. "./save/*.bmp").
size_t read_images_in_folder(cv::String pattern)// returns the number of files matching the glob pattern
{
	vector<cv::String> fn;
	glob(pattern, fn, false);// OpenCV's glob() enumerates matching files (non-recursive)
	size_t count = fn.size(); // number of matching files in the folder	
	return count;
}

 C++ 檢測指定文件的個數

//讀取當前文件夾圖片的個數子程序
/*
cv::String pattern = "./save/*.bmp";
int cout = read_images_in_folder(pattern);
*/
// Count the files matching `pattern` (e.g. "./save/*.bmp").
// Uses cv::glob to enumerate the matching paths and returns how many there are.
size_t read_images_in_folder(cv::String pattern)
{
	vector<cv::String> matches;
	glob(pattern, matches, false);	// non-recursive enumeration
	return matches.size();
}

C++ 多線程

#include <iostream>
#include <thread>// std::thread
#include <mutex>// a bare mutex serializes access to shared data, but is unsafe by itself: if a thread exits abnormally before unlocking, every blocked thread hangs forever — hence lock_guard below
using namespace std;


// Shared countdown decremented by both worker threads.
int cnt = 20;
// Protects cnt.
mutex m;
// Worker 1: decrement cnt under the lock until it reaches 0.
void t1()
{
	while (cnt > 0)
	{
		lock_guard<mutex> lockGuard(m);// lock_guard is scope-based RAII: it acquires the mutex on construction (like m.lock()) and releases it automatically in its destructor, so an abnormal exit cannot leave the mutex held
		if (cnt > 0)// re-check under the lock: cnt may have reached 0 between the while test and acquiring the mutex
		{
			--cnt;
			cout << cnt << endl;
		}

	}
}
// Worker 2: same as t1 but tags its output with "t2".
void t2()
{
	while (cnt > 0)// NOTE(review): this read of cnt is outside the lock — used only as a loop hint, but formally a data race; confirm acceptable
	{
		lock_guard<mutex> lockGuard(m);
		if (cnt > 0)
		{
			--cnt;
			cout << "t2";
			cout << cnt << endl;
		}

	}
}
// Launch both workers and wait for them to finish.
int main()
{

	thread th1(t1);
	thread th2(t2);

	th1.join();// join blocks until the thread has finished
	th2.join();

	system("pause");
	return 0;
}

C++ 常見錯誤解決

"Error: "const char"類型的實參與"LPCWSTR"類型的形參不兼容",解決方法:

項目菜單——項目屬性(最後一個)——配置屬性——常規——項目默認值——字符集,將使用Unicode字符集改爲未設置即可。

operator()

IntelliSense:  在沒有適當 operator() 的情況下調用類類型的對象或將函數轉換到指向函數的類型   

找數組最大最小值

// 計算最大最小值
/*
float p[10] = { 1, 20, 2.3, 3.3, 5, -0.22, 5, 40, 5, 4 };
int length = 10;
float max = 0;
float min = 0;	
feature_class.normalize_nolinear(p,length,max,min);
cout<<"max:"<<max<<endl;
cout<<"min:"<<min<<endl;
*/
// Find the maximum and minimum values in a float array.
//   p:      pointer to the array (ignored when length <= 0)
//   length: number of elements in p
//   max:    output — largest value found
//   min:    output — smallest value found
// For length <= 0 (or null p) the outputs are left untouched; the original
// code would have read p[0] out of bounds in that case.
void normalize_nolinear(float* p, int length, float& max, float& min)
{		
	if (p == nullptr || length <= 0)
	{
		return;	// nothing to scan; avoid out-of-bounds read of p[0]
	}
	max = p[0];
	min = p[0];
	for (int i = 1; i < length; i++)	// element 0 already consumed above
	{
		if (max < p[i])
		{
			max = p[i];
		}
		if (min > p[i])
		{
			min = p[i];
		}
	}	
}

圖像增強

圖像增強——RGB圖分離並均衡化 

// Per-channel histogram equalization of a BGR image.
Mat src = imread(argv[i]);
Mat imageRGB[3];
split(src, imageRGB);	// separate the B, G, R planes
for (int i = 0; i < 3; i++)
{
	equalizeHist(imageRGB[i], imageRGB[i]);	// equalize each plane independently
}
merge(imageRGB, 3, src);	// recombine back into src

圖像增強——RGB圖分離並對數log變換

// Log transform: per-pixel log(1 + v) compresses the dynamic range so dark
// regions gain contrast; computed in float, then stretched back to 8-bit.
Mat image = imread(argv[i]);
Mat imageLog(image.size(), CV_32FC3);
for (int i = 0; i < image.rows; i++)
{
    for (int j = 0; j < image.cols; j++)
    {
        imageLog.at<Vec3f>(i, j)[0] = log(1 + image.at<Vec3b>(i, j)[0]);
        imageLog.at<Vec3f>(i, j)[1] = log(1 + image.at<Vec3b>(i, j)[1]);
        imageLog.at<Vec3f>(i, j)[2] = log(1 + image.at<Vec3b>(i, j)[2]);
    }
}
//Stretch the float result back into the 0~255 range
normalize(imageLog, imageLog, 0, 255, CV_MINMAX);
//Convert to an 8-bit image for display
convertScaleAbs(imageLog, imageLog);

圖像增強——濾波器

// Sharpening filter: convolve with a fixed 3x3 Laplacian-style kernel
//    0 -1  0
//   -1  5 -1
//    0 -1  0
void sharpenImage(const cv::Mat &image, cv::Mat &result)
{
	// Build the kernel in one expression instead of element-by-element
	// assignment into a zero-initialized Mat.
	const cv::Mat kernel = (cv::Mat_<float>(3, 3) <<
		 0.0f, -1.0f,  0.0f,
		-1.0f,  5.0f, -1.0f,
		 0.0f, -1.0f,  0.0f);

	result.create(image.size(), image.type());

	// Convolve, keeping the source depth for the destination.
	cv::filter2D(image, result, image.depth(), kernel);
}

圖像增強——高反差

// High-frequency boost ("high-pass retention"): estimate the low
// frequencies with a Gaussian blur, then add the residue back scaled.
//   result = img + gain * (img - blur(img))
Mat HighPass(Mat img)
{
	// Low-frequency estimate of the image.
	Mat blurred;
	GaussianBlur(img, blurred, Size(7, 7), 1.6, 1.6);

	const int gain = 3;
	Mat boosted = img + gain*(img - blurred); // add scaled high-frequency residue
	return boosted;
}

圖像增強——邊緣增強

// Edge enhancement: per-channel "Gaussian blur -> Laplacian -> add back".
// Accepts 3-channel (processed per plane) or 1-channel images.
void edgeEnhance(cv::Mat& srcImg, cv::Mat& dstImg)
{
	// Drop any previous contents of the output.
	if (!dstImg.empty())
	{
		dstImg.release();
	}

	std::vector<cv::Mat> rgb;

	if (srcImg.channels() == 3)        // rgb image
	{
		cv::split(srcImg, rgb);
	}
	else if (srcImg.channels() == 1)   // gray image
	{
		rgb.push_back(srcImg);
	}

	// Enhance each channel plane independently.
	for (size_t i = 0; i < rgb.size(); i++)
	{
		cv::Mat sharpMat8U;
		cv::Mat sharpMat;
		cv::Mat blurMat;

		// Gaussian smoothing to suppress noise before differentiation
		cv::GaussianBlur(rgb[i], blurMat, cv::Size(3, 3), 0, 0);

		// Laplacian in signed 16-bit to hold negative edge responses
		cv::Laplacian(blurMat, sharpMat, CV_16S);

		// NOTE(review): converting CV_16S -> CV_8U saturates negative values
		// to 0, so only positive Laplacian responses are added back — confirm
		// this asymmetry is intended.
		sharpMat.convertTo(sharpMat8U, CV_8U);
		cv::add(rgb[i], sharpMat8U, rgb[i]);
	}


	cv::merge(rgb, dstImg);
}

形態學——膨脹腐蝕

erode(hu_dst, hu_dst, getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1)));
dilate(hu_dst, hu_dst, getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1)));

圖像分割

圖像分割——OTSU閾值

// Automatically compute the Otsu binarization threshold of an 8-bit
// single-channel image: the gray level maximizing the between-class
// variance of the background/foreground split.
double getThreshVal_Otsu(const cv::Mat& _src)
{
	cv::Size size = _src.size();
	// If the pixel data is one contiguous buffer, treat it as a single
	// long row so the histogram loop below runs over one scanline.
	if (_src.isContinuous())
	{
		size.width *= size.height;
		size.height = 1;
	}
	const int N = 256;
	int i, j, h[N] = { 0 };
	// Build the 256-bin histogram; the inner loop is unrolled 4x.
	for (i = 0; i < size.height; i++)
	{
		const uchar* src = _src.data + _src.step*i;
		for (j = 0; j <= size.width - 4; j += 4)
		{
			int v0 = src[j], v1 = src[j + 1];
			h[v0]++; h[v1]++;
			v0 = src[j + 2]; v1 = src[j + 3];
			h[v0]++; h[v1]++;
		}
		// tail: the remaining 0-3 pixels of the row
		for (; j < size.width; j++)
			h[src[j]]++;
	}

	// Global mean intensity mu, with bins normalized by the pixel count.
	double mu = 0, scale = 1. / (size.width*size.height);
	for (i = 0; i < N; i++)
		mu += i*h[i];

	mu *= scale;
	// q1/mu1: probability mass and mean of the "below threshold" class.
	double mu1 = 0, q1 = 0;
	double max_sigma = 0, max_val = 0;

	// Scan every candidate threshold i, updating the between-class
	// variance sigma = q1*q2*(mu1-mu2)^2 incrementally.
	for (i = 0; i < N; i++)
	{
		double p_i, q2, mu2, sigma;

		p_i = h[i] * scale;
		mu1 *= q1;	// un-normalize the running class-1 mean before adding bin i
		q1 += p_i;
		q2 = 1. - q1;

		// Skip degenerate splits where one class is (numerically) empty.
		if (std::min(q1, q2) < FLT_EPSILON || std::max(q1, q2) > 1. - FLT_EPSILON)
			continue;

		mu1 = (mu1 + i*p_i) / q1;	// class-1 mean including bin i
		mu2 = (mu - q1*mu1) / q2;	// class-2 mean derived from the global mean
		sigma = q1*q2*(mu1 - mu2)*(mu1 - mu2);
		if (sigma > max_sigma)
		{
			max_sigma = sigma;
			max_val = i;	// best threshold so far
		}
	}

	return max_val;
}

圖像分割——Kmeans

// Segment `src` into n clusters by running K-Means on raw BGR pixel values
// and return a color-coded label image.
// NOTE(review): only labels 0-3 are assigned colors below, so for n > 4 the
// extra clusters remain black — confirm the intended cluster count.
Mat Image_Kmeans(Mat src, int n)
{
	int width = src.cols;
	int height = src.rows;
	int dims = src.channels();

	// Sample matrix: one row per pixel, one float column per channel.
	int sampleCount = width*height;
	int clusterCount = n;// number of clusters
	Mat points(sampleCount, dims, CV_32F, Scalar(10));
	Mat labels;
	Mat centers(clusterCount, 1, points.type());

	// Flatten the image: pixel (row, col) maps to sample row row*width+col.
	int index = 0;
	for (int row = 0; row < height; row++) {
		for (int col = 0; col < width; col++) {
			index = row*width + col;
			Vec3b rgb = src.at<Vec3b>(row, col);
			points.at<float>(index, 0) = static_cast<int>(rgb[0]);
			points.at<float>(index, 1) = static_cast<int>(rgb[1]);
			points.at<float>(index, 2) = static_cast<int>(rgb[2]);
		}
	}

	// Run K-Means: stop after 10 iterations or epsilon 1.0; 3 attempts,
	// k-means++ center seeding.
	TermCriteria criteria = TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 10, 1.0);
	kmeans(points, clusterCount, labels, criteria, 3, KMEANS_PP_CENTERS, centers);

	// Paint each pixel with a fixed color per cluster label.
	Mat result = Mat::zeros(src.size(), CV_8UC3);
	for (int row = 0; row < height; row++) {
		for (int col = 0; col < width; col++) {
			index = row*width + col;
			int label = labels.at<int>(index, 0);
			if (label == 1) {	// blue
				result.at<Vec3b>(row, col)[0] = 255;
				result.at<Vec3b>(row, col)[1] = 0;
				result.at<Vec3b>(row, col)[2] = 0;
			}
			else if (label == 2){	// green
				result.at<Vec3b>(row, col)[0] = 0;
				result.at<Vec3b>(row, col)[1] = 255;
				result.at<Vec3b>(row, col)[2] = 0;
			}
			else if (label == 3) {	// red
				result.at<Vec3b>(row, col)[0] = 0;
				result.at<Vec3b>(row, col)[1] = 0;
				result.at<Vec3b>(row, col)[2] = 255;
			}
			else if (label == 0) {	// yellow
				result.at<Vec3b>(row, col)[0] = 0;
				result.at<Vec3b>(row, col)[1] = 255;
				result.at<Vec3b>(row, col)[2] = 255;
			}
		}
	}
	return result;
}

圖像濾波

多種濾波器

https://blog.csdn.net/zoucharming/article/details/70197863

在圖像處理中,儘可能消除圖片中的噪聲,消除噪聲就需要用到濾波,在本次opencv學習中,學習了三個濾波方式。

(1)平均濾波,就是將一個區域內的像素值求和取平均值,然後用這個平均值替換區域中心的像素值。

blur(源Mat對象,目標Mat對象,Size對象,Point對象)//Size對象用來確定區域大小,Point對象如果x,y都是-1則表示更新區域中心的像素。

(2)高斯濾波,也是將一個區域的像素值求取平均值替換區域中心的像素值,但是是加權平均,權重按照二維正態分佈。

GaussianBlur(源Mat對象,目標Mat對象,Size對象,x方向正態分佈參數,y方向正態分佈參數)

(3)中值濾波,之前的兩個濾波都有個問題,如果區域中有極端值,很可能影響濾波效果,中值濾波採用區域中的中值來替換,有利於克服椒鹽噪聲。

medianBlur(源Mat對象,目標Mat對象,int size)//這裏的size表示正方形區域的邊長

(4)雙邊濾波,之前的濾波還有個問題,他們都會把輪廓給模糊了,有一些區域之間相差較大的像素,這往往能看出輪廓,所以如果我們給個限制範圍,如果兩點間的像素值差距大於這個範圍就不濾波了,保留圖像輪廓

bilateralFilter(源Mat對象,目標Mat對象,int 區域半徑,int 限制範圍,int space)//space是當區域半徑給的是0時,用來計算區域範圍的,一般情況下沒用,隨便給個數就行。

#include<opencv2\opencv.hpp>
#include<iostream>
#include<math.h>
#include<algorithm>
 
using namespace std;
using namespace cv;
 
// Demo: load "1.jpg" and display the result of the four basic smoothing
// filters (mean/box, Gaussian, median, bilateral) next to the original.
// FIX: removed the unused local `dst1` that was declared but never used.
int main()
{
	Mat src;
	src = imread("1.jpg", 1);	// 1 = force 3-channel BGR
	if (src.empty())
	{
		printf("cannot load!!\n");
		return -1;
	}
	namedWindow("原圖");
	imshow("原圖", src);

	Mat dst;
	// Mean (box) filter: plain average over the 3x3 neighbourhood.
	blur(src, dst, Size(3, 3), Point(-1, -1));
	namedWindow("均值濾波");
	imshow("均值濾波", dst);

	// Gaussian filter: weighted average, weights follow a 2-D normal distribution.
	GaussianBlur(src, dst, Size(5, 5), 5, 5);
	namedWindow("高斯濾波");
	imshow("高斯濾波", dst);

	// Median filter: robust against salt-and-pepper noise.
	medianBlur(src, dst, 5);
	namedWindow("中值濾波");
	imshow("中值濾波", dst);

	// Bilateral filter: edge-preserving smoothing.
	bilateralFilter(src, dst, 5, 100, 3);
	namedWindow("雙邊濾波");
	imshow("雙邊濾波", dst);

	waitKey(0);

	return 0;
}

輪廓

findContours

Mat grayImage;// input: the preprocessed binary image
vector<vector<Point>> contours;	// one point list per detected contour
vector<Vec4i> hierarchy;	// parent/child/sibling links for each contour
Mat showImage = Mat::zeros(grayImage.size(), CV_32SC1);	// 32-bit canvas (marker image)
Mat showedge = Mat::zeros(grayImage.size(), CV_8UC1);
findContours(grayImage, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(-1, -1));

drawContours

drawContours(要繪製的圖、所有輪廓容器、輪廓編號、顏色填充、線寬);

for (size_t i = 0; i < contours.size(); i++)
{
	// static_cast<int>(i+1) gives each region a distinct non-zero marker
	// value (1, 2, 3, ...) so a later watershed can tell the regions apart
	drawContours(showImage, contours, static_cast<int>(i), Scalar::all(static_cast<int>(i + 1)), 2);// index of the contour to draw
}

 boundingRect

// Axis-aligned bounding box via boundingRect
Rect rect = boundingRect(Mat(contours[i]));	// i indexes a contour from findContours
rectangle(edge_dst_, rect, Scalar(theRNG().uniform(0, 255), theRNG().uniform(0, 255), theRNG().uniform(0, 255)), 3);	// draw in a random color

 minAreaRect

//Minimum-area (rotated) bounding rectangle
RotatedRect box = minAreaRect(Mat(contours[i]));  //rotated rect of contour i
Point2f rect[4];
box.points(rect);  //copy the rectangle's four corner points into rect
float angle = box.angle;
for (int j = 0; j<4; j++)
{
	line(edge_dst_, rect[j], rect[(j + 1) % 4], Scalar(1), 2, 8);  //draw each edge of the rotated rect
}

 輪廓子程序

#include <opencv2/opencv.hpp>
#include <opencv2\core\core.hpp>
#include <opencv2\highgui\highgui.hpp>
#include <iostream>


using namespace cv;
using namespace std;

void sear_contours(Mat src, Mat dst)
{
	//轉換爲灰度圖並平滑濾波
	cvtColor(src, src, COLOR_BGR2GRAY);

	// 找—— findContours
	vector<vector<Point>> contours;
	vector<Vec4i> hierarchy;
	findContours(src, contours, hierarchy, 2, 2);

	// 畫—— drawContours
	dst = Mat::zeros(src.size(), CV_8UC3);
	for (int i = 0; i < hierarchy.size(); i++)
	{		
		drawContours(dst, contours, i, Scalar(255, 255, 255), -1, 1, hierarchy);
		drawContours(dst, contours, i, Scalar(0, 0, 0), 10, 1, hierarchy);//比實際範圍變小,防止邊界影響檢測
	}

	// 矩—— 最小外接邊框 minAreaRect
	for (size_t i = 0; i < hierarchy.size(); i++)
	{
		
		RotatedRect box = minAreaRect(Mat(contours[i]));  //計算每個輪廓最小外接矩形
		Point2f rect[4];
		box.points(rect);  //把最小外接矩形四個端點複製給rect數組
		float angle = box.angle;
		for (int j = 0; j<4; j++)
		{
			line(dst, rect[j], rect[(j + 1) % 4], Scalar(rand() % 255, rand() % 255, rand() % 255), 2, 8);  //繪製最小外接矩形每條邊
		}
	}

	// 矩—— 矩形邊框  boundingRect
	for (size_t i = 0; i < hierarchy.size(); i++)
	{		
		Rect rect = boundingRect(Mat(contours[i]));
		rectangle(dst, rect, Scalar(theRNG().uniform(0, 255), theRNG().uniform(0, 255), theRNG().uniform(0, 255)), 3);
	}	
}
// 圖像增強——邊緣增強
		// 高斯平滑
		cv::GaussianBlur(dst, dst, cv::Size(3, 3), 0, 0);
	

		// 圖像增強——伽馬變換	
		Mat imageGamma(dst.size(), CV_32FC3);
		for (int i = 0; i < dst.rows; i++)
		{
			for (int j = 0; j < dst.cols; j++)
			{
				imageGamma.at<Vec3f>(i, j)[0] = (dst.at<Vec3b>(i, j)[0])*(dst.at<Vec3b>(i, j)[0])*(dst.at<Vec3b>(i, j)[0]);
				imageGamma.at<Vec3f>(i, j)[1] = (dst.at<Vec3b>(i, j)[1])*(dst.at<Vec3b>(i, j)[1])*(dst.at<Vec3b>(i, j)[1]);
				imageGamma.at<Vec3f>(i, j)[2] = (dst.at<Vec3b>(i, j)[2])*(dst.at<Vec3b>(i, j)[2])*(dst.at<Vec3b>(i, j)[2]);
			}
		}
		//歸一化到0~255  
		normalize(imageGamma, imageGamma, 0, 255, CV_MINMAX);
		//轉換成8bit圖像顯示  
		convertScaleAbs(imageGamma, imageGamma);
		//imshow("原圖", image);
		//imshow("伽馬變換圖像增強效果", imageGamma);
		dst = imageGamma;
// 圖像增強——邊緣增強
		// 高斯平滑
		//cv::GaussianBlur(dst, dst, cv::Size(3, 3), 0, 0);
		Mat temp;
		GaussianBlur(dst, temp, Size(3, 3), 1.6, 1.6);

		int r = 3;
		Mat diff = dst + r*(dst - temp); //高反差保留算法

		// 圖像增強——伽馬變換	
		Mat imageGamma(dst.size(), CV_32FC3);
		for (int i = 0; i < dst.rows; i++)
		{
			for (int j = 0; j < dst.cols; j++)
			{
				imageGamma.at<Vec3f>(i, j)[0] = (dst.at<Vec3b>(i, j)[0])*(dst.at<Vec3b>(i, j)[0])*(dst.at<Vec3b>(i, j)[0]);
				imageGamma.at<Vec3f>(i, j)[1] = (dst.at<Vec3b>(i, j)[1])*(dst.at<Vec3b>(i, j)[1])*(dst.at<Vec3b>(i, j)[1]);
				imageGamma.at<Vec3f>(i, j)[2] = (dst.at<Vec3b>(i, j)[2])*(dst.at<Vec3b>(i, j)[2])*(dst.at<Vec3b>(i, j)[2]);
			}
		}
		//歸一化到0~255  
		normalize(imageGamma, imageGamma, 0, 255, CV_MINMAX);
		//轉換成8bit圖像顯示  
		convertScaleAbs(imageGamma, imageGamma);
		//imshow("原圖", image);
		//imshow("伽馬變換圖像增強效果", imageGamma);
		dst = imageGamma;

圖像特徵提取思路

顏色:白平衡——同態濾波——亮度歸一

紋理:差分高通濾波器——直方圖均衡化——紋理加強

矩:仿射原理

ATL::CImage讀取圖片

#include <atlimage.h> //ATL
#include <iostream>
using namespace std;

// 圖片讀取\保存
// Load an image with ATL::CImage, print its basic properties, and save a
// copy — demonstrates the CImage read/write API.
void test()
{
	ATL::CImage Image;
	Image.Load(_T("條紋1.bmp"));
	if (Image.IsNull())
	{
		cout << "沒加載成功" << endl;
	}
	else
	{
		cout << "讀取成功" << endl;
		Image.Save(_T("image1.bmp"));
		// GetBPP() is bits per pixel, so /8 yields the byte/channel count.
		cout << "通道(int):" << Image.GetBPP() / 8 << endl;
		cout << "寬(int):" << Image.GetWidth() << endl;
		cout << "高(int)" << Image.GetHeight() << endl;
		// Pitch = bytes per scanline; can be negative for bottom-up bitmaps.
		cout << "每行字節數(int):" << Image.GetPitch() << endl;
    }
}

 ATL::CImage轉Mat

// Convert an ATL::CImage to a cv::Mat (8-bit, 1 or 3 channels).
// NOTE(review): if the CImage bit depth is neither 8 nor 24 bpp, `src` is
// never created and the copy loop writes through an invalid row pointer —
// verify inputs before calling.
void CImage2Mat(CImage& Image, Mat& src)
{
	// CImage -> Mat
	if (Image.IsNull())
	{
		cout << "沒加載成功" << endl;
		//MessageBox(_T("沒有加載成功"));
	}
	if (1 == Image.GetBPP() / 8)
	{
		src.create(Image.GetHeight(), Image.GetWidth(), CV_8UC1);
	}
	else if (3 == Image.GetBPP() / 8)
	{
		src.create(Image.GetHeight(), Image.GetWidth(), CV_8UC3);
	}

	// Copy the pixel data row by row.
	uchar* pucRow;								// row pointer into the Mat data
	uchar* pucImage = (uchar*)Image.GetBits();	// pointer into the CImage pixel buffer
	int nStep = Image.GetPitch();// bytes per row; note this can be negative (bottom-up DIB), in which case GetBits() points at the visually top row so nRow*nStep still walks downward
	for (int nRow = 0; nRow < Image.GetHeight(); nRow++)
	{
		pucRow = (src.ptr<uchar>(nRow));
		for (int nCol = 0; nCol < Image.GetWidth(); nCol++)
		{
			if (1 == Image.GetBPP() / 8)
			{
				pucRow[nCol] = *(pucImage + nRow * nStep + nCol);
			}
			else if (3 == Image.GetBPP() / 8)
			{
				// 3 interleaved channels per pixel
				for (int nCha = 0; nCha < 3; nCha++)
				{
					pucRow[nCol * 3 + nCha] = *(pucImage + nRow * nStep + nCol * 3 + nCha);
				}
			}
		}
	}
}

C++ 圖像處理——濾波

#include <opencv2/opencv.hpp>
#include <opencv2\core\core.hpp>
#include <opencv2\highgui\highgui.hpp>
#include <opencv2\imgproc\imgproc.hpp>
#include <iostream>

using namespace cv;
using namespace std;

/*

邊緣突出
	用高通——拉普拉斯
	銳化處理

去噪點
	用低通——均值、中值


偏色問題解決
	用白平衡進行處理

【空域與頻域之間是有一定關係。均值濾波器是一種低通濾波;拉普拉斯算子濾波(邊緣檢測)是一種高通濾波】
*/

// Collection of image filtering / color utilities built on OpenCV.
// NOTE(review): all Mat output parameters are passed by value; callers only
// receive results because cv::Mat headers share pixel data when copyTo hits a
// preallocated buffer of matching size/type — consider Mat& for outputs.
// TODO confirm intended usage.
class Blur_Class
{
public:	
	/*
	Bilateral filter — smoothing that preserves texture/edges
	3-channel in, 3-channel out
	Shuangbian_Bialteral_Filter(src, src);
	*/
	void Shuangbian_Bialteral_Filter(Mat src, Mat dst, int R = 10, double sigmaC = 20, double sigmaS = 20);

	/*
	High-boost filter — sharpening-like, also amplifies noise
	3-channel in, 3-channel out
	HighPass(src, dst);
	*/
	void HighPass(Mat src, Mat dst, int r = 3, int k = 3);

	/*
	LOG transform — strongly evens out overall image brightness
	3-channel in, 3-channel out
	log_Filter(src, src);
	*/
	void log_Filter(Mat src, Mat dst);

	/*
	Histogram equalization — evens brightness somewhat, strengthens edges
	Accepts 1- or 3-channel images
	junheng_equalizeHist(src, src);
	*/
	void junheng_equalizeHist(Mat src, Mat dst);

	/*
	Median filter — removes scattered dense noise points
	Accepts 1- or 3-channel images
	zhongzhi_Median_Filter(src, src);
	*/
	void zhongzhi_Median_Filter(Mat src, Mat dst, int k = 10);

	/*
	Dilate / erode — dilation by default
	Accepts 1- or 3-channel images
	PF_Filter(src, dst);
	*/
	void PF_Filter(Mat src, Mat dst, bool flag = true, int k = 1);

	/*
	User-defined convolution kernel (default = sharpen):
	0	-1	0
	-1	5	-1
	0	-1	0
	filter(dst2, dst2)
	*/
	void filter(Mat src, Mat dst, Mat kerne = (Mat_<char>(3, 3) << 0, -1, 0, -1, 5, -1, 0, -1, 0));


public:
	/* Split into single-channel planes r/g/b and single-color images R/G/B */
	void RGB_Split(Mat src, Mat  r = Mat(), Mat g = Mat(), Mat b = Mat(), Mat R = Mat(), Mat G = Mat(), Mat B = Mat());

	/* White balance (gray-world) */
	void baipingheng(Mat src, Mat dst);

public:
	/*
	RGB -> Lab conversion (fixed-point)
	*/
	void RGB2LAB(Mat& rgb, Mat& Lab);

	/*
	Color-cast check — returns the cast factor K
	*/
	float colorCheck(const Mat& imgLab);


};








/*——————————————————————————————————————————————————————————————*/

/*
Bilateral filter — texture/edge-preserving smoothing.
3-channel in, 3-channel out.
Shuangbian_Bialteral_Filter(src, src);
*/
void Blur_Class::Shuangbian_Bialteral_Filter(Mat src, Mat dst, int R, double sigmaC, double sigmaS)
{
	// bilateralFilter cannot run in place, so filter into a scratch image
	// first, then copy the pixels into the caller's buffer.
	Mat filtered;
	bilateralFilter(src, filtered, R, sigmaC, sigmaS);
	filtered.copyTo(dst);
}

/*
High-boost filter — sharpening-like; amplifies detail (and noise).
result = src + r * (src - gaussian_blur(src))
3-channel in, 3-channel out.
HighPass(src, dst);
*/
void Blur_Class::HighPass(Mat src, Mat dst, int r, int k)
{
	// Kernel side length must be odd: 2k+1.
	const int side = 2 * k + 1;
	Mat lowpass;
	GaussianBlur(src, lowpass, Size(side, side), 1.6, 1.6);
	// Add the scaled high-frequency residue back onto the source.
	Mat boosted = src + r*(src - lowpass);
	boosted.copyTo(dst);
}

/*
LOG transform — strongly evens out overall image brightness.
3-channel in, 3-channel out.
log_Filter(src, src);
*/
void Blur_Class::log_Filter(Mat src, Mat dst)
{
	// Per-pixel log(1 + v) on each channel; float result keeps precision.
	Mat imageLog(src.size(), CV_32FC3);
	for (int i = 0; i < src.rows; i++)
	{
		for (int j = 0; j < src.cols; j++)
		{
			imageLog.at<Vec3f>(i, j)[0] = log(1 + src.at<Vec3b>(i, j)[0]);
			imageLog.at<Vec3f>(i, j)[1] = log(1 + src.at<Vec3b>(i, j)[1]);
			imageLog.at<Vec3f>(i, j)[2] = log(1 + src.at<Vec3b>(i, j)[2]);
		}
	}
	//Stretch the float result back into the 0~255 range
	normalize(imageLog, imageLog, 0, 255, CV_MINMAX);
	//Convert to an 8-bit image for display
	convertScaleAbs(imageLog, imageLog);
	imageLog.copyTo(dst);
}

/*
Histogram equalization — evens brightness somewhat, strengthens edges.
Accepts 1- or 3-channel images.
junheng_equalizeHist(src, src);
*/
void Blur_Class::junheng_equalizeHist(Mat src, Mat dst)
{
	const int ch = src.channels();
	if (ch == 1)
	{
		// Grayscale: equalize directly.
		dst.create(src.size(), src.type());
		equalizeHist(src, dst);
	}
	else if (ch == 3)
	{
		// Color: equalize each BGR plane independently, then recombine.
		dst.create(src.size(), src.type());
		Mat planes[3];
		split(src, planes);
		for (int c = 0; c < 3; c++)
		{
			equalizeHist(planes[c], planes[c]);
		}
		merge(planes, 3, dst);
	}
}

/*
Median filter — removes scattered dense noise (salt & pepper).
Accepts 1- or 3-channel images.
zhongzhi_Median_Filter(src, src);
*/
void Blur_Class::zhongzhi_Median_Filter(Mat src, Mat dst, int k)
{
	dst.create(src.size(), src.type());
	// medianBlur requires an odd aperture; derive it from the half-size k.
	medianBlur(src, dst, 2 * k + 1);
}

/*
Dilate / erode — dilation by default.
Accepts 1- or 3-channel images.
PF_Filter(src, dst);
flag: true = dilate, false = erode
k:    half-size of the structuring element; the kernel is (2k+1)x(2k+1)
BUG FIX: the structuring element was built from Size(k, k) instead of
Size(n, n) with n = 2*k+1, so the default k = 1 produced a 1x1 element,
which is a no-op.
*/
void Blur_Class::PF_Filter(Mat src, Mat dst, bool flag, int k)
{
	int n = 2 * k + 1;
	Size Ksize = Size(n, n);	// was Size(k, k): a 1x1 kernel does nothing
	Mat element = getStructuringElement(MORPH_RECT, Ksize);
	if (flag == true)
	{
		// dilation
		dilate(src, dst, element);
	}
	else
	{
		// erosion
		erode(src, dst, element);
	}
}

/*
Apply a user-supplied convolution kernel (default = 3x3 sharpen):
0	-1	0
-1	5	-1
0	-1	0
filter(dst2, dst2)
BUG FIX: filter2D's ddepth parameter expects a depth constant (CV_8U, -1,
...), not a full type like CV_8UC3. Passing CV_8UC3 is invalid; use the
source depth instead — this also matches sharpenImage elsewhere in this
file and makes 1- and 3-channel inputs both work.
*/
void Blur_Class::filter(Mat src, Mat dst, Mat kerne)
{
	dst.create(src.size(), src.type());
	filter2D(src, dst, src.depth(), kerne);

}

/*
RGB
Split into single-channel planes r, g, b
and single-color 3-channel images R, G, B
*/
void Blur_Class::RGB_Split(Mat src, Mat r, Mat g, Mat b, Mat R, Mat G, Mat B)
{
	Mat imageRGB[3];
	split(src, imageRGB);
	// NOTE(review): these create() calls allocate buffers that the
	// assignments below immediately discard (they rebind the local headers
	// to the split planes); since all parameters are by-value Mats, confirm
	// how callers are expected to receive the planes.
	r.create(src.size(), src.type());
	g.create(src.size(), src.type());
	b.create(src.size(), src.type());
	b = imageRGB[0];	// OpenCV channel order is B, G, R
	g = imageRGB[1];
	r = imageRGB[2];
		
	Mat black = Mat::zeros(Size(src.cols, src.rows), CV_8UC1);

	// R: red plane in the red slot, zeros in blue and green
	vector<Mat> channels_r;
	channels_r.push_back(black);
	channels_r.push_back(black);
	channels_r.push_back(r);		
	merge(channels_r, R);

	// G: green plane only
	vector<Mat> channels_g;
	channels_g.push_back(black);
	channels_g.push_back(g);
	channels_g.push_back(black);
	merge(channels_g, G);

	// B: blue plane only
	vector<Mat> channels_b;
	channels_b.push_back(b);
	channels_b.push_back(black);
	channels_b.push_back(black);
	merge(channels_b, B);
}

/*
White balance (gray-world assumption)
*/
void Blur_Class::baipingheng(Mat src, Mat dst)
{
	vector<Mat> imageRGB;
	split(src, imageRGB);

	// Mean of each channel of the original image (BGR order)
	double R, G, B;
	B = mean(imageRGB[0])[0];
	G = mean(imageRGB[1])[0];
	R = mean(imageRGB[2])[0];

	// Per-channel gains that pull every channel toward the common mean.
	// NOTE(review): divides by the channel means — a channel with mean 0
	// (e.g. fully black plane) would divide by zero; confirm inputs.
	double KR, KG, KB;
	KB = (R + G + B) / (3 * B);
	KG = (R + G + B) / (3 * G);
	KR = (R + G + B) / (3 * R);

	// Scale each channel by its gain
	imageRGB[0] = imageRGB[0] * KB;
	imageRGB[1] = imageRGB[1] * KG;
	imageRGB[2] = imageRGB[2] * KR;

	// Recombine the three channels and display the result
	dst.create(src.size(), src.type());
	merge(imageRGB, dst);
	namedWindow("白平衡調整後", 0);
	imshow("白平衡調整後", dst);
	waitKey();	// NOTE(review): blocks until a key press — GUI side effect inside a utility function

}



/* Convert RGB to Lab using integer fixed-point arithmetic (shift = 22).
   NOTE(review): intermediate XYZ values are stored in 8-bit Vec3b, so each
   stage truncates to 0..255 — precision is limited by design; confirm this
   is acceptable for the downstream colorCheck(). */
void Blur_Class::RGB2LAB(Mat& rgb, Mat& Lab)
{
	Mat XYZ(rgb.size(), rgb.type());
	Mat_<Vec3b>::iterator begainRGB = rgb.begin<Vec3b>();
	Mat_<Vec3b>::iterator endRGB = rgb.end<Vec3b>();
	Mat_<Vec3b>::iterator begainXYZ = XYZ.begin<Vec3b>();
	int shift = 22;	// fixed-point scale: coefficients are premultiplied by 2^shift
	// Stage 1: per-pixel matrix multiply RGB -> XYZ; +524288 rounds to nearest
	for (; begainRGB != endRGB; begainRGB++, begainXYZ++)
	{
		(*begainXYZ)[0] = ((*begainRGB)[0] * 199049 + (*begainRGB)[1] * 394494 + (*begainRGB)[2] * 455033 + 524288) >> (shift - 2);
		(*begainXYZ)[1] = ((*begainRGB)[0] * 75675 + (*begainRGB)[1] * 749900 + (*begainRGB)[2] * 223002 + 524288) >> (shift - 2);
		(*begainXYZ)[2] = ((*begainRGB)[0] * 915161 + (*begainRGB)[1] * 114795 + (*begainRGB)[2] * 18621 + 524288) >> (shift - 2);
	}

	// Stage 2: lookup table for the cube-root transfer function, with a
	// linear segment for small inputs.
	int LabTab[1024];
	for (int i = 0; i < 1024; i++)
	{
		if (i>9)
			LabTab[i] = (int)(pow((float)i / 1020, 1.0F / 3) * (1 << shift) + 0.5);
		else
			LabTab[i] = (int)((29 * 29.0 * i / (6 * 6 * 3 * 1020) + 4.0 / 29) * (1 << shift) + 0.5);
	}
	const int ScaleLC = (int)(16 * 2.55 * (1 << shift) + 0.5);
	const int ScaleLT = (int)(116 * 2.55 + 0.5);
	const int HalfShiftValue = 524288;
	begainXYZ = XYZ.begin<Vec3b>();
	Mat_<Vec3b>::iterator endXYZ = XYZ.end<Vec3b>();
	Lab.create(rgb.size(), rgb.type());
	Mat_<Vec3b>::iterator begainLab = Lab.begin<Vec3b>();
	// Stage 3: XYZ -> Lab; a and b are offset by 128 to fit unsigned bytes
	for (; begainXYZ != endXYZ; begainXYZ++, begainLab++)
	{
		int X = LabTab[(*begainXYZ)[0]];
		int Y = LabTab[(*begainXYZ)[1]];
		int Z = LabTab[(*begainXYZ)[2]];
		int L = ((ScaleLT * Y - ScaleLC + HalfShiftValue) >> shift);
		int A = ((500 * (X - Y) + HalfShiftValue) >> shift) + 128;
		int B = ((200 * (Y - Z) + HalfShiftValue) >> shift) + 128;
		(*begainLab)[0] = L;
		(*begainLab)[1] = A;
		(*begainLab)[2] = B;
	}
}
/* Color-cast detection on a Lab image: returns the cast factor K = D/M,
   where D is the distance of the mean chroma from neutral and M the mean
   spread of the chroma around that average. */
float Blur_Class::colorCheck(const Mat& imgLab)
{
	Mat_<Vec3b>::const_iterator begainIt = imgLab.begin<Vec3b>();
	Mat_<Vec3b>::const_iterator endIt = imgLab.end<Vec3b>();
	float aSum = 0;
	float bSum = 0;
	// Sum the a and b chroma channels over the whole image.
	for (; begainIt != endIt; begainIt++)
	{
		aSum += (*begainIt)[1];
		bSum += (*begainIt)[2];
	}
	int MN = imgLab.cols*imgLab.rows;
	double Da = aSum / MN - 128; // must be normalized into the [-128, 127] range    
	double Db = bSum / MN - 128;

	// Average chroma: distance of the image's mean (a, b) from neutral gray
	double D = sqrt(Da*Da + Db*Db);

	begainIt = imgLab.begin<Vec3b>();
	double Ma = 0;
	double Mb = 0;
	// Mean absolute deviation of each chroma channel around its mean.
	for (; begainIt != endIt; begainIt++)
	{
		Ma += abs((*begainIt)[1] - 128 - Da);
		Mb += abs((*begainIt)[2] - 128 - Db);
	}
	Ma = Ma / MN;
	Mb = Mb / MN;
	// Chroma center distance (spread)
	double M = sqrt(Ma*Ma + Mb*Mb);
	// Cast factor
	float K = (float)(D / M);
	cout << "K=" << K << endl;
	return K;
}


C++ Opencv HSV

Opencv的HSV範圍:

C++ Opencv 雙邊濾波

轉:

雙邊濾波是一種非線性濾波器,它可以達到保持邊緣、降噪平滑的效果。和其他濾波原理一樣,雙邊濾波也是採用加權平均的方法,用周邊像素亮度值的加權平均代表某個像素的強度,所用的加權平均基於高斯分佈。最重要的是,雙邊濾波的權重不僅考慮了像素的歐氏距離(如普通的高斯低通濾波,只考慮了位置對中心像素的影響),還考慮了像素範圍域中的輻射差異(例如卷積核中像素與中心像素之間相似程度、顏色強度,深度距離等),在計算中心像素的時候同時考慮這兩個權重。

高斯濾波:

雙邊濾波:

 

g(i, j)代表輸出點;
S(i, j)的是指以(i,j)爲中心的(2N+1)(2N+1)的大小的範圍;
f(k, l)代表(多個)輸入點;
w(i, j, k, l)代表經過兩個高斯函數計算出的值(這裏還不是權值)

上述公式我們進行轉化,假設公式中w(i,j,k,l)爲m,則有

 設 m1+m2+m3 … +mn = M,則有
這裏寫圖片描述

w(i, j, k, l):ws爲空間臨近高斯函數,wr爲像素值相似度高斯函數


這裏寫圖片描述
這裏寫圖片描述

這裏寫圖片描述

雙邊濾波的核函數是空間域核(空間域(spatial domain S))與像素範圍域核(像素範圍域(range domain R))的綜合結果:在圖像的平坦區域,像素值變化很小,對應的像素範圍域權重接近於1,此時空間域權重起主要作用,相當於進行高斯模糊;在圖像的邊緣區域,像素值變化很大,像素範圍域權重變大,從而保持了邊緣的信息。

算法實現:

void bilateralFilter(src, dst, d, sigmaColor, sigmaSpace, BORDER_DEFAULT)
/*
. InputArray src: 輸入圖像,可以是Mat類型,圖像必須是8位或浮點型單通道、三通道的圖像。 
. OutputArray dst: 輸出圖像,和原圖像有相同的尺寸和類型。 
. int d: (直徑範圍)表示在過濾過程中每個像素鄰域的直徑範圍。如果這個值是非正數,則函數會從第五個參數sigmaSpace計算該值。 
. double sigmaColor:(sigma顏色) 顏色空間過濾器的sigma值,這個參數的值越大,表明該像素鄰域內有越寬廣的顏色會被混合到一起,產生較大的半相等顏色區域。 
. double sigmaSpace:(sigma空間) 座標空間中濾波器的sigma值,如果該值較大,則意味着顏色相近的較遠的像素將相互影響,從而使更大的區域中足夠相似的顏色獲取相同的顏色。當d>0時,d指定了鄰域大小且與sigmaSpace無關,否則d正比於sigmaSpace. 
. int borderType=BORDER_DEFAULT: 用於推斷圖像外部像素的某種邊界模式,有默認值BORDER_DEFAULT.
*/
bilateralFilter(src, dst, 10, 10, 10);

C++ Opencv 圖像特徵( Opencv3)

// Detect BRISK keypoints on one image and display them.
// BUG FIX: the function drew the keypoints into res1 but then displayed
// c_src1 (the untouched clone), so the rendered keypoints were never shown.
void FeatureAndCompare(cv::Mat srcImage1)
	{
		CV_Assert(srcImage1.data != NULL);
		Mat c_src1 = srcImage1.clone();

		// Convert to grayscale for the detector.
		cv::Mat grayMat1;
		cv::cvtColor(srcImage1, grayMat1, CV_RGB2GRAY);

		//// Contrast enhancement
		equalizeHist(grayMat1, grayMat1);
		//// Sharpening (project helper)
		sharpenImage1(grayMat1, grayMat1);

		// BRISK detection + description in one pass.
		cv::Ptr<cv::BRISK> ptrBrisk = cv::BRISK::create();
		vector<KeyPoint> kp1;
		Mat des1;//descriptor  
		ptrBrisk->detectAndCompute(grayMat1, Mat(), kp1, des1);

		Mat res1;
		int drawmode = DrawMatchesFlags::DRAW_RICH_KEYPOINTS;
		drawKeypoints(c_src1, kp1, res1, Scalar::all(-1), drawmode);// render keypoints into res1
		//
		//std::cout << "size of description of Img1: " << kp1.size() << endl;
		//	
		namedWindow("drawKeypoints", 0);
		imshow("drawKeypoints", res1);	// was c_src1 — showed the input, not the result
		cvWaitKey(0);
	}
//brisk: detect BRISK features on two images, match them with brute force,
//keep the good matches (distance < max(1.8*minDist, 0.02)) and display
//every intermediate result.
	void brisk_feature(Mat src1, Mat src2)
	{
		cv::cvtColor(src1, src1, CV_RGB2GRAY);
		cv::cvtColor(src2, src2, CV_RGB2GRAY);
		// Contrast enhancement
		equalizeHist(src1, src1);
		equalizeHist(src2, src2);
		// Sharpening (disabled)
		//sharpenImage1(src1, src1);
		//sharpenImage1(src2, src2);

		// Detect keypoints and compute descriptors on both images.
		Ptr<BRISK> brisk = BRISK::create();
		vector<KeyPoint>keypoints1, keypoints2;
		Mat descriptors1, descriptors2;
		brisk->detectAndCompute(src1, Mat(), keypoints1, descriptors1);
		brisk->detectAndCompute(src2, Mat(), keypoints2, descriptors2);
		Mat dst1,dst2;
		drawKeypoints(src1, keypoints1, dst1);
		drawKeypoints(src2, keypoints2, dst2);
		namedWindow("output1", 0);
		imshow("output1", dst1);
		waitKey();
		namedWindow("output2", 0);
		imshow("output2", dst2);
		waitKey();

		// Brute-force matching (BRISK descriptors are binary).
		BFMatcher matcher;
		vector<DMatch>matches;
		matcher.match(descriptors1, descriptors2, matches);
		Mat match_img;
		drawMatches(src1, keypoints1, src2, keypoints2, matches, match_img);
		namedWindow("match_img", 0);
		imshow("match_img", match_img);

		// Smallest descriptor distance among all matches.
		double minDist = 1000;
		for (int i = 0; i < descriptors1.rows; i++)
		{
			double dist = matches[i].distance;
			if (dist < minDist)
			{
				minDist = dist;
			}
		}
		printf("min distance is:%f\n", minDist);

		// Keep only matches close to the best distance.
		vector<DMatch>goodMatches;
		for (int i = 0; i < descriptors1.rows; i++)
		{
			double dist = matches[i].distance;
			if (dist < max(1.8*minDist, 0.02))
			{
				goodMatches.push_back(matches[i]);
			}
		}
		Mat good_match_img;
		drawMatches(src1, keypoints1, src2, keypoints2, goodMatches, good_match_img, Scalar::all(-1), Scalar::all(-1), vector<char>(), 2);
		namedWindow("goodMatch", 0);
		imshow("goodMatch", good_match_img);
		waitKey(0);
}

C++ Opencv 圖像特徵brisk s

#include<opencv2\opencv.hpp>
#include<opencv2\xfeatures2d.hpp>
using namespace cv;
using namespace xfeatures2d;
using namespace std;
 
// BRISK feature matching demo: match "1.png" against "2.png", filter the
// matches by distance, estimate a homography from the good matches and draw
// the projected outline of image 1 inside image 2.
// NOTE(review): `arc` looks like a typo of argc (unused here) — confirm.
int main(int arc, char** argv) { 
	Mat src1 = imread("1.png",IMREAD_GRAYSCALE);
	Mat src2 = imread("2.png",IMREAD_GRAYSCALE);
	namedWindow("input", CV_WINDOW_AUTOSIZE);
	imshow("input", src1);
 
	// Detect keypoints and compute BRISK descriptors on both images.
	Ptr<BRISK> brisk = BRISK::create();
	vector<KeyPoint>keypoints1, keypoints2;
	Mat descriptors1, descriptors2;
	brisk->detectAndCompute(src1, Mat(), keypoints1, descriptors1);
	brisk->detectAndCompute(src2, Mat(), keypoints2, descriptors2);
	/*Mat dst1;
	drawKeypoints(src1, keypoints1, dst1);
	imshow("output1", dst1);*/
 
	// Brute-force matching of the descriptors.
	BFMatcher matcher;
	vector<DMatch>matches;
	matcher.match(descriptors1, descriptors2, matches);
	Mat match_img;
	drawMatches(src1, keypoints1, src2, keypoints2, matches, match_img);
	imshow("match_img", match_img);
	
	// Smallest descriptor distance among all matches.
	double minDist = 1000;
	for (int i = 0; i < descriptors1.rows; i++)
	{
		double dist = matches[i].distance;
		if (dist < minDist)
		{
			minDist = dist;
		}
	}
	printf("min distance is:%f\n", minDist);
 
	// Keep only matches close to the best distance.
	vector<DMatch>goodMatches;
	for (int i = 0; i < descriptors1.rows; i++)
	{
		double dist = matches[i].distance;
		if (dist < max( 1.8*minDist, 0.02))
		{
			goodMatches.push_back(matches[i]);
		}
	}
	Mat good_match_img;
	drawMatches(src1, keypoints1, src2, keypoints2, goodMatches, good_match_img, Scalar::all(-1), Scalar::all(-1), vector<char>(), 2);
	imshow("goodMatch", good_match_img);
 
	// Collect the matched point pairs and estimate a homography (RANSAC).
	vector<Point2f>src1GoodPoints, src2GoodPoints;
	for (int i = 0; i < goodMatches.size(); i++)
	{
		src1GoodPoints.push_back(keypoints1[goodMatches[i].queryIdx].pt);
		src2GoodPoints.push_back(keypoints2[goodMatches[i].trainIdx].pt);
	}
	Mat P = findHomography(src1GoodPoints, src2GoodPoints, RANSAC);
	// Project the corners of image 1 into image 2's coordinates and draw the
	// quadrilateral (offset by src1.cols because images sit side by side).
	vector<Point2f> src1corner(4);
	vector<Point2f> src2corner(4);
	src1corner[0] = Point(0, 0);
	src1corner[1] = Point(src1.cols, 0);
	src1corner[2] = Point(src1.cols, src1.rows);
	src1corner[3] = Point(0, src1.rows);
	perspectiveTransform(src1corner, src2corner, P);
	line(good_match_img, src2corner[0] + Point2f(src1.cols, 0), src2corner[1] + Point2f(src1.cols, 0), Scalar(0, 0, 255), 2);
	line(good_match_img, src2corner[1] + Point2f(src1.cols, 0), src2corner[2] + Point2f(src1.cols, 0), Scalar(0, 0, 255), 2);
	line(good_match_img, src2corner[2] + Point2f(src1.cols, 0), src2corner[3] + Point2f(src1.cols, 0), Scalar(0, 0, 255), 2);
	line(good_match_img, src2corner[3] + Point2f(src1.cols, 0), src2corner[0] + Point2f(src1.cols, 0), Scalar(0, 0, 255), 2);
	imshow("result", good_match_img);
	waitKey(0);
	return 0;
}

 

C++ Opencv hog+svm

思路: 

// HOG descriptor vector
std::vector<float> descriptors;
// window 48x48, block 16x16, block stride 8x8, cell 8x8, 9 orientation bins
cv::HOGDescriptor hog(cv::Size(48, 48), cv::Size(16, 16), cv::Size(8, 8), cv::Size(8, 8), 9);
hog.compute(src, descriptors, cv::Size(8, 8));
int DescriptorDim = descriptors.size();
// SVM samples + labels
// NOTE(review): num, i, j and label below are uninitialized placeholders in
// this sketch — fill them in per dataset before using this code.
int num;
Mat sampleFeatureMat = cv::Mat::zeros(num, DescriptorDim, CV_32FC1);
int i, j;// i = sample index, j = feature index
sampleFeatureMat.at<float>(i, j) = descriptors[j];
Mat sampleLabelMat;
int label;
sampleLabelMat.at<float>(i, 0) = label;

 C++ Opencv 顏色、紋理、形狀+svm

主函數 

#include <opencv2/opencv.hpp>
#include <opencv2\core\core.hpp>
#include <opencv2\highgui\highgui.hpp>
#include <opencv2\imgproc\imgproc.hpp>
#include <iostream>

#include "Water_Cut.h"
#include "Feature.h"


using namespace cv;
using namespace std;
using namespace cv::ml;

Feature feature_class;

int main()
{	
// 訓練——訓練時候關閉測試
#if 0
	// 遍歷圖片——爲了循環提取特徵用
	string train_img_dir = "C:\\Users\\Administrator\\Desktop\\樣品\\訓練";
	string train_img_namehead = "test";
	string train_img_type = "bmp";
	//size_t train_img_num = 4;
	string img_name = train_img_dir + "\\" + "*." + train_img_type;	//cout << img_name << endl;
	int train_img_num = feature_class.read_images_in_folder(img_name);
	cout << "訓練圖個數:" << train_img_num << endl;

	// 訓練用的輸入和標籤
	Mat trainmat;
	trainmat = cv::Mat::zeros(train_img_num, 32, CV_32FC1);
	Mat labelmat;
	labelmat = cv::Mat::zeros(train_img_num, 1, CV_32SC1);

	// 遍歷圖並提取特徵
	vector<Mat> train_img = feature_class.data_search(train_img_dir, train_img_namehead, train_img_type, train_img_num);
	for (size_t i = 0; i < train_img_num; i++)
	{
		resize(train_img[i], train_img[i], Size(train_img[i].cols / 2, train_img[i].rows / 2));
		namedWindow("vetmat", 0);
		imshow("vetmat", train_img[i]);//train_img[i].clone();
		waitKey(0);

		// 圖像分割
		Mat src = train_img[i].clone();
		Mat dst = Mat::zeros(train_img[i].size(), CV_8UC3);
		Mat edge = Mat::zeros(train_img[i].size(), CV_8UC3);
		Water_Cut(src, dst, edge);

		// 圖像特徵_HU
		Mat hu_dst = dst.clone();
		double Hu[7] = { 0 };
		feature_class.feature_hu(hu_dst, Hu);

		// 圖像特徵_COLOR
		Mat color_dst = dst.clone();
		float Mom[9] = { 0 };
		feature_class.feature_color(color_dst, Mom);

		// 圖像特徵_GLCM
		Mat glcm_dst = dst.clone();
		cv::cvtColor(glcm_dst, glcm_dst, CV_RGB2GRAY);
		float glcm_data[16] = { 0 };
		feature_class.feature_glcm(glcm_dst, glcm_data);

		float train_data[32] = { 0 };

		for (size_t j = 0; j < 7; j++)
		{
			train_data[j] = (float)Hu[j];

		}
		for (size_t j = 0; j < 9; j++)
		{
			train_data[7 + j] = (float)Mom[j];

		}
		for (size_t j = 0; j < 16; j++)
		{
			train_data[16 + j] = (float)glcm_data[j];
		}
		


		vector<float> traindata; //	特徵值——一類(一張圖)的特徵
		for (size_t k = 0; k < 32; k++)
		{
			traindata.push_back(train_data[k]);
		}		
		std::cout << "traindata size:";
		std::cout << traindata.size() << endl;

		
		for (size_t j = 0; j < traindata.size(); j++)
		{
			trainmat.at<float>(i, j) = traindata[j];
		}
		labelmat.at<int>(i, 0) = i + 1;	//每張一類
	}
		
	// 訓練的初始化
	Ptr<SVM> svm = SVM::create();
	svm->setType(SVM::C_SVC);
	svm->setKernel(SVM::LINEAR);
	svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6));
	std::cout << "開始訓練" << endl;
	svm->train(trainmat, ROW_SAMPLE, labelmat);

	std::cout << "開始結束" << endl;
	svm->save("svm.xml");
#endif

// 測試——測試時候關閉訓練
#if 1 

	// 遍歷測試文件
	// 遍歷圖片——爲了循環提取特徵用
	//string test_img_dir = "C:\\Users\\Administrator\\Desktop\\樣品\\測試\\方格1號";
	//string test_img_dir = "C:\\Users\\Administrator\\Desktop\\樣品\\測試\\花紋2號";
	//string test_img_dir = "C:\\Users\\Administrator\\Desktop\\樣品\\測試\\空紋理3號";
	//string test_img_dir = "C:\\Users\\Administrator\\Desktop\\樣品\\測試\\條紋4號";
	string test_img_dir = "C:\\Users\\Administrator\\Desktop\\樣品\\測試\\";

	string test_img_namehead = "test";
	string test_img_type = "bmp";
	string img_name = test_img_dir + "\\" + "*." + test_img_type;	//cout << img_name << endl;
	int test_img_num = feature_class.read_images_in_folder(img_name);
	std::cout << "測試圖個數:" << test_img_num << endl;

	// 訓練用的輸入和標籤
	Mat testmat;
	testmat = cv::Mat::zeros(test_img_num, 32, CV_32F);

	// 遍歷圖並提取特徵
	vector<Mat> test_img = feature_class.data_search(test_img_dir, test_img_namehead, test_img_type, test_img_num);
	for (size_t i = 0; i < test_img_num; i++)
	{
		resize(test_img[i], test_img[i], Size(test_img[i].cols / 2, test_img[i].rows / 2));
		cv::namedWindow("vetmat", 0);
		cv::imshow("vetmat", test_img[i]);//test_img[i].clone();		

		// 圖像分割
		Mat src = test_img[i].clone();
		Mat dst = Mat::zeros(test_img[i].size(), CV_8UC3);
		Mat edge = Mat::zeros(test_img[i].size(), CV_8UC3);
		Water_Cut(src, dst, edge);

		// 圖像特徵_HU
		Mat hu_dst = dst.clone();
		double Hu[7] = { 0 };
		feature_class.feature_hu(hu_dst, Hu);

		// 圖像特徵_COLOR
		Mat color_dst = dst.clone();
		float Mom[9] = { 0 };
		feature_class.feature_color(color_dst, Mom);

		// 圖像特徵_GLCM
		Mat glcm_dst = dst.clone();
		cv::cvtColor(glcm_dst, glcm_dst, CV_RGB2GRAY);
		float glcm_data[16] = { 0 };
		feature_class.feature_glcm(glcm_dst, glcm_data);

		cv::waitKey();
		float test_data[32] = { 0 };

		for (size_t j = 0; j < 7; j++)
		{
			test_data[j] = (float)Hu[j];

		}
		for (size_t j = 0; j < 9; j++)
		{
			test_data[7 + j] = (float)Mom[j];

		}
		for (size_t j = 0; j < 16; j++)
		{
			test_data[16 + j] = (float)glcm_data[j];
		}

		vector<float> testdata; //	特徵值——一類(一張圖)的特徵
		for (size_t k = 0; k < 32; k++)
		{
			testdata.push_back(test_data[k]);
		}
		std::cout << "testdata size:";
		std::cout << testdata.size() << endl;


		for (size_t j = 0; j < testdata.size(); j++)
		{
			testmat.at<float>(i, j) = testdata[j];
		}
		
	}
	Ptr<SVM> svmtest = Algorithm::load<SVM>("svm.xml"); // SVM::load()是一個靜態函數,不能單獨用
	Mat result;
	float temp = svmtest->predict(testmat, result);
	std::cout << "分類結果" << endl;
	std::cout << result << endl;
	for (size_t i = 0; i < test_img_num; i++)
	{
		int a = result.at<Point2f>(i, 0).x;
		std::cout << "最終分類爲:" << "第" << a << "號瓷磚" << endl;
	}
#endif
	
	system("pause");
	return 0;
}

子程序

#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>
#include "time.h"

using namespace cv;
using namespace std;
using namespace cv::ml;

class Feature
{
public:

	/*
	第一步:建立類
	#include <opencv2/opencv.hpp>
	#include <iostream>
	#include <vector>
	#include "time.h"

	using namespace cv;
	using namespace std;
	
	第二步:包含類
	Feature feature_class;

	第三步:
	集合顏色+形狀+紋理
	// 圖像特徵_HU
	Mat hu_dst = dst.clone();
	double Hu[7] = { 0 };
	feature_class.feature_hu(hu_dst, Hu);

	// 圖像特徵_COLOR
	Mat color_dst = dst.clone();
	float Mom[9] = { 0 };
	feature_class.feature_color(color_dst, Mom);

	// 圖像特徵_GLCM
	Mat glcm_dst = dst.clone();
	cv::cvtColor(glcm_dst, glcm_dst, CV_RGB2GRAY);
	float glcm_data[16] = { 0 };
	feature_class.feature_glcm(glcm_dst, glcm_data);

	第四步:
	// 特徵集合7+9+16
	float test_data[32] = { 0 };
	for (size_t j = 0; j < 7; j++)
	{
	test_data[j] = (float)Hu[j];

	}
	for (size_t j = 0; j < 9; j++)
	{
	test_data[7 + j] = (float)Mom[j];

	}
	for (size_t j = 0; j < 16; j++)
	{
	test_data[16 + j] = (float)glcm_data[j];
	}

	*/
	/* 【顏色】 */
	// 顏色 計算三階矩
	double calc3orderMom(Mat &channel)  //計算三階矩
	{
		uchar *p;
		double mom = 0;
		double m = mean(channel)[0];    //計算單通道圖像的均值
		int nRows = channel.rows;
		int nCols = channel.cols;
		if (channel.isContinuous())     //連續存儲有助於提升圖像掃描速度
		{
			nCols *= nRows;
			nRows = 1;
		}
		for (int i = 0; i < nRows; i++) //計算立方和
		{
			p = channel.ptr<uchar>(i);
			for (int j = 0; j < nCols; j++)
				mom += pow((p[j] - m), 3);
		}
		float temp;
		temp = cvCbrt((float)(mom / (nRows*nCols)));    //求均值的立方根
		mom = (double)temp;
		return mom;
	}
	// 顏色 計算9個顏色矩:3個通道的1、2、3階矩
	double *colorMom(Mat &img)
	{
		double *Mom = new double[9];    //存放9個顏色矩
		if (img.channels() != 3)
			std::cout << "Error,input image must be a color image" << endl;
		Mat b(img.rows, img.cols, CV_8U);
		Mat r(img.rows, img.cols, CV_8U);
		Mat g(img.rows, img.cols, CV_8U);
		Mat channels[] = { b, g, r };
		split(img, channels);
		//cv::imshow("r", channels[0]);
		//cv::imshow("g", channels[1]);
		//cv::imshow("b", channels[2]);
		//waitKey(0);
		Mat tmp_m, tmp_sd;
		//計算b通道的顏色矩
		meanStdDev(b, tmp_m, tmp_sd);
		Mom[0] = tmp_m.at<double>(0, 0);
		Mom[3] = tmp_sd.at<double>(0, 0);
		Mom[6] = calc3orderMom(b);
		//  cout << Mom[0] << " " << Mom[1] << " " << Mom[2] << " " << endl;
		//計算g通道的顏色矩
		meanStdDev(g, tmp_m, tmp_sd);
		Mom[1] = tmp_m.at<double>(0, 0);
		Mom[4] = tmp_sd.at<double>(0, 0);
		Mom[7] = calc3orderMom(g);
		//  cout << Mom[3] << " " << Mom[4] << " " << Mom[5] << " " << endl;
		//計算r通道的顏色矩
		meanStdDev(r, tmp_m, tmp_sd);
		Mom[2] = tmp_m.at<double>(0, 0);
		Mom[5] = tmp_sd.at<double>(0, 0);
		Mom[8] = calc3orderMom(r);
		//  cout << Mom[6] << " " << Mom[7] << " " << Mom[8] << " " << endl;
		return Mom;//返回顏色矩數組
	}
	// 顏色 
	bool feature_color(Mat src, float Mom[9])
	{
		if (src.channels() == 3)
		{
			// 圖像特徵_COLOR
			Mat color_dst = src.clone();
			cv::cvtColor(color_dst, color_dst, CV_RGB2HSV);
			double *MOM;
			MOM = colorMom(color_dst);
			for (int i = 0; i < 9; i++)
			{
				std::cout << (float)MOM[i] << endl;
				Mom[i] = (float)MOM[i];
			}
			return  true;
		}
		else
		{
			std::cout << "channels!=3";
			return false;
		}
	}


	/* 【形狀】 */
	bool feature_hu(Mat src, double Hu[7])
	{
		if (src.channels() == 3)
		{
			// 圖像特徵_HU
			Mat hu_dst = src.clone();
			cv::cvtColor(hu_dst, hu_dst, CV_RGB2GRAY);
			Canny(hu_dst, hu_dst, 0, 120);
			//double Hu[7];       //存儲得到的Hu矩陣
			Moments mo = moments(hu_dst);//矩變量
			HuMoments(mo, Hu);
			for (int i = 0; i < 7; i++)
			{
				std::cout << (float)Hu[i] << endl;
			}
			return true;
		}
		else if ((src.channels() == 1))
		{
			Mat hu_dst = src.clone();
			Canny(hu_dst, hu_dst, 0, 120);
			//double Hu[7];       //存儲得到的Hu矩陣
			Moments mo = moments(hu_dst);//矩變量
			HuMoments(mo, Hu);
			for (int i = 0; i < 7; i++)
			{
				std::cout << (float)Hu[i] << endl;
			}
			return true;
		}
		else
		{
			return false;
		}
	}


	// 紋理
	const int gray_level = 16;//紋理區域塊的大小,通常將圖像劃分成若干個紋理塊計算
	vector<double> glamvalue;//全局變量

	//【】第一步:j計算共生矩陣
	void getglcm_0(Mat& input, Mat& dst)//0度灰度共生矩陣
	{
		Mat src = input;
		CV_Assert(1 == src.channels());
		src.convertTo(src, CV_32S);
		int height = src.rows;
		int width = src.cols;
		int max_gray_level = 0;
		for (int j = 0; j < height; j++)//尋找像素灰度最大值
		{
			int* srcdata = src.ptr<int>(j);
			for (int i = 0; i < width; i++)
			{
				if (srcdata[i] > max_gray_level)
				{
					max_gray_level = srcdata[i];
				}
			}
		}
		max_gray_level++;//像素灰度最大值加1即爲該矩陣所擁有的灰度級數
		if (max_gray_level > 16)//若灰度級數大於16,則將圖像的灰度級縮小至16級,減小灰度共生矩陣的大小。
		{
			for (int i = 0; i < height; i++)
			{
				int*srcdata = src.ptr<int>(i);
				for (int j = 0; j < width; j++)
				{
					srcdata[j] = (int)srcdata[j] / gray_level;
				}
			}

			dst.create(gray_level, gray_level, CV_32SC1);
			dst = Scalar::all(0);
			for (int i = 0; i < height; i++)
			{
				int*srcdata = src.ptr<int>(i);
				for (int j = 0; j < width - 1; j++)
				{
					int rows = srcdata[j];
					int cols = srcdata[j + 1];
					dst.ptr<int>(rows)[cols]++;
				}
			}
		}
		else//若灰度級數小於16,則生成相應的灰度共生矩陣
		{
			dst.create(max_gray_level, max_gray_level, CV_32SC1);
			dst = Scalar::all(0);
			for (int i = 0; i < height; i++)
			{
				int*srcdata = src.ptr<int>(i);
				for (int j = 0; j < width - 1; j++)
				{
					int rows = srcdata[j];
					int cols = srcdata[j + 1];
					dst.ptr<int>(rows)[cols]++;
				}
			}
		}
	}

	void getglcm_45(Mat& input, Mat& dst)//45度灰度共生矩陣
	{
		Mat src = input;
		CV_Assert(1 == src.channels());
		src.convertTo(src, CV_32S);
		int height = src.rows;
		int width = src.cols;
		int max_gray_level = 0;
		for (int j = 0; j < height; j++)
		{
			int* srcdata = src.ptr<int>(j);
			for (int i = 0; i < width; i++)
			{
				if (srcdata[i] > max_gray_level)
				{
					max_gray_level = srcdata[i];
				}
			}
		}
		max_gray_level++;
		if (max_gray_level > 16)
		{
			for (int i = 0; i < height; i++)//將圖像的灰度級縮小至16級,減小灰度共生矩陣的大小。
			{
				int*srcdata = src.ptr<int>(i);
				for (int j = 0; j < width; j++)
				{
					srcdata[j] = (int)srcdata[j] / gray_level;
				}
			}

			dst.create(gray_level, gray_level, CV_32SC1);
			dst = Scalar::all(0);
			for (int i = 0; i < height - 1; i++)
			{
				int*srcdata = src.ptr<int>(i);
				int*srcdata1 = src.ptr<int>(i + 1);
				for (int j = 0; j < width - 1; j++)
				{
					int rows = srcdata[j];
					int cols = srcdata1[j + 1];
					dst.ptr<int>(rows)[cols]++;
				}
			}
		}
		else
		{
			dst.create(max_gray_level, max_gray_level, CV_32SC1);
			dst = Scalar::all(0);
			for (int i = 0; i < height - 1; i++)
			{
				int*srcdata = src.ptr<int>(i);
				int*srcdata1 = src.ptr<int>(i + 1);
				for (int j = 0; j < width - 1; j++)
				{
					int rows = srcdata[j];
					int cols = srcdata1[j + 1];
					dst.ptr<int>(rows)[cols]++;
				}
			}
		}
	}

	void getglcm_90(Mat& input, Mat& dst)//90度灰度共生矩陣
	{
		Mat src = input;
		CV_Assert(1 == src.channels());
		src.convertTo(src, CV_32S);
		int height = src.rows;
		int width = src.cols;
		int max_gray_level = 0;
		for (int j = 0; j < height; j++)
		{
			int* srcdata = src.ptr<int>(j);
			for (int i = 0; i < width; i++)
			{
				if (srcdata[i] > max_gray_level)
				{
					max_gray_level = srcdata[i];
				}
			}
		}
		max_gray_level++;
		if (max_gray_level > 16)
		{
			for (int i = 0; i < height; i++)//將圖像的灰度級縮小至16級,減小灰度共生矩陣的大小。
			{
				int*srcdata = src.ptr<int>(i);
				for (int j = 0; j < width; j++)
				{
					srcdata[j] = (int)srcdata[j] / gray_level;
				}
			}

			dst.create(gray_level, gray_level, CV_32SC1);
			dst = Scalar::all(0);
			for (int i = 0; i < height - 1; i++)
			{
				int*srcdata = src.ptr<int>(i);
				int*srcdata1 = src.ptr<int>(i + 1);
				for (int j = 0; j < width; j++)
				{
					int rows = srcdata[j];
					int cols = srcdata1[j];
					dst.ptr<int>(rows)[cols]++;
				}
			}
		}
		else
		{
			dst.create(max_gray_level, max_gray_level, CV_32SC1);
			dst = Scalar::all(0);
			for (int i = 0; i < height - 1; i++)
			{
				int*srcdata = src.ptr<int>(i);
				int*srcdata1 = src.ptr<int>(i + 1);
				for (int j = 0; j < width; j++)
				{
					int rows = srcdata[j];
					int cols = srcdata1[j];
					dst.ptr<int>(rows)[cols]++;
				}
			}
		}
	}

	void getglcm_135(Mat& input, Mat& dst)//135度灰度共生矩陣
	{
		Mat src = input;
		CV_Assert(1 == src.channels());
		src.convertTo(src, CV_32S);
		int height = src.rows;
		int width = src.cols;
		int max_gray_level = 0;
		for (int j = 0; j < height; j++)
		{
			int* srcdata = src.ptr<int>(j);
			for (int i = 0; i < width; i++)
			{
				if (srcdata[i] > max_gray_level)
				{
					max_gray_level = srcdata[i];
				}
			}
		}
		max_gray_level++;
		if (max_gray_level > 16)
		{
			for (int i = 0; i < height; i++)//將圖像的灰度級縮小至16級,減小灰度共生矩陣的大小。
			{
				int*srcdata = src.ptr<int>(i);
				for (int j = 0; j < width; j++)
				{
					srcdata[j] = (int)srcdata[j] / gray_level;
				}
			}

			dst.create(gray_level, gray_level, CV_32SC1);
			dst = Scalar::all(0);
			for (int i = 0; i < height - 1; i++)
			{
				int*srcdata = src.ptr<int>(i);
				int*srcdata1 = src.ptr<int>(i + 1);
				for (int j = 1; j < width; j++)
				{
					int rows = srcdata[j];
					int cols = srcdata1[j - 1];
					dst.ptr<int>(rows)[cols]++;
				}
			}
		}
		else
		{
			dst.create(max_gray_level, max_gray_level, CV_32SC1);
			dst = Scalar::all(0);
			for (int i = 0; i < height - 1; i++)
			{
				int*srcdata = src.ptr<int>(i);
				int*srcdata1 = src.ptr<int>(i + 1);
				for (int j = 1; j < width; j++)
				{
					int rows = srcdata[j];
					int cols = srcdata1[j - 1];
					dst.ptr<int>(rows)[cols]++;
				}
			}
		}
	}

	// 【】第二步:計算紋理特徵	// 特徵值計算—— double& Asm, double& Con, double& Ent, double& Idm
	void feature_computer(Mat&src, float& Asm, float& Con, float& Ent, float& Idm)//計算特徵值
	{
		int height = src.rows;
		int width = src.cols;
		int total = 0;
		for (int i = 0; i < height; i++)
		{
			int*srcdata = src.ptr<int>(i);
			for (int j = 0; j < width; j++)
			{
				total += srcdata[j];//求圖像所有像素的灰度值的和
			}
		}

		Mat copy;
		copy.create(height, width, CV_64FC1);
		for (int i = 0; i < height; i++)
		{
			int*srcdata = src.ptr<int>(i);
			float*copydata = copy.ptr<float>(i);
			for (int j = 0; j < width; j++)
			{
				copydata[j] = (float)srcdata[j] / (float)total;//圖像每一個像素的的值除以像素總和
			}
		}


		for (int i = 0; i < height; i++)
		{
			float*srcdata = copy.ptr<float>(i);
			for (int j = 0; j < width; j++)
			{
				Asm += srcdata[j] * srcdata[j];								//能量
				if (srcdata[j]>0)
				{
					Ent -= srcdata[j] * log(srcdata[j]);					//熵   
				}
				Con += (float)(i - j)*(float)(i - j)*srcdata[j];			//對比度
				Idm += srcdata[j] / (1 + (float)(i - j)*(float)(i - j));	//逆差矩
			}
		}
	}

	// 【】融合第一、二步
	/*
	Mat src_gray;
	float data[16] = {0};
	*/
	void feature_glcm(Mat src_gray, float data[16])
	{
		Mat dst_0, dst_90, dst_45, dst_135;

		getglcm_0(src_gray, dst_0);
		float  asm_0 = 0, con_0 = 0, ent_0 = 0, idm_0 = 0;
		feature_computer(dst_0, asm_0, con_0, ent_0, idm_0);


		getglcm_45(src_gray, dst_45);
		float  asm_45 = 0, con_45 = 0, ent_45 = 0, idm_45 = 0;
		feature_computer(dst_45, asm_45, con_45, ent_45, idm_45);


		getglcm_90(src_gray, dst_90);
		float  asm_90 = 0, con_90 = 0, ent_90 = 0, idm_90 = 0;
		feature_computer(dst_90, asm_90, con_90, ent_90, idm_90);


		getglcm_135(src_gray, dst_135);
		float  asm_135 = 0, con_135 = 0, ent_135 = 0, idm_135 = 0;
		feature_computer(dst_135, asm_135, con_135, ent_135, idm_135);

		float AMS[4] = { asm_0, asm_45, asm_90, asm_135 };
		float COM[4] = { con_0, con_45, con_90, con_135 };
		float ENT[4] = { ent_0, ent_45, ent_90, ent_135 };
		float IDM[4] = { idm_0, idm_45, idm_90, idm_135 };

		float glcm_data[16] = {
			asm_0, asm_45, asm_90, asm_135,
			con_0, con_45, con_90, con_135,
			ent_0, ent_45, ent_90, ent_135,
			idm_0, idm_45, idm_90, idm_135
		};

		/*std::cout << "特徵數據:" << endl;*/
		for (size_t i = 0; i < 16; i++)
		{
			data[i] = glcm_data[i];
			//std::cout << data[i] << " ";
		}
	}




	// 讀取當前文件夾圖片的個數子程序
	/*
	cv::String pattern = "./save/*.bmp";
	int cout = read_images_in_folder(pattern);
	*/
	size_t read_images_in_folder(cv::String pattern)//讀取當前指定目錄的圖片的個數
	{
		vector<cv::String> fn;
		glob(pattern, fn, false);//OpenCV自帶一個函數glob()可以遍歷文件
		size_t count = fn.size(); //number of png files in images folder	
		return count;
	}


	// 【】文件檢索
	/*
	string train_img_dir = "C:\\Users\\Administrator\\Desktop\\樣品\\訓練";
	string train_img_namehead = "test";
	string train_img_type = "bmp";
	size_t train_img_num = 4;
	vector<Mat> train_img = data_search(train_img_dir, train_img_namehead, train_img_type, train_img_num);
	for (size_t i = 0; i < train_img_num; i++)
	{
	namedWindow("vetmat", 0);
	imshow("vetmat", train_img[i]);
	waitKey(0);
	}
	*/
	vector<Mat> data_search(string &img_dir, string &img_namehead, string &img_type, size_t n)
	{
		float train_data[4][16] = { 0 };
		vector<Mat> src;
		for (int i = 0; i < n; i++)
		{
			string pos;
			stringstream ss;
			ss << i;
			ss >> pos;
			string img_name = img_dir + "\\" + img_namehead + pos + "." + img_type;	//cout << img_name << endl;
			Mat outsrc = imread(img_name);
			src.push_back(outsrc);
		}
		return src;
	}

private:

};


子函數 

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

/* Open issue: the two seed points are hard-coded */
// Watershed segmentation with two fixed seeds: one at the top-left corner
// (background) and one at the image center (object). Keeps only the region
// that contains the center pixel.
//   src  - input BGR image (copied, not modified)
//   dst  - source pixels of the center region, black elsewhere
//   edge - white mask of the same center region
// Side effects: opens several HighGUI windows and writes "條紋1.bmp" /
// "條紋1_.bmp" into the working directory.
void Water_Cut(InputArray& src, OutputArray& dst, OutputArray& edge)
{
	Mat srcImage;
	src.copyTo(srcImage);
	//cv::resize(srcImage, srcImage, Size(srcImage.cols / 2, srcImage.rows / 2));
	cv::namedWindow("resImage", 0);
	cv::imshow("resImage", srcImage);
	//waitKey();
	// [two marker seeds]
	// first seed on the marker mask (top-left corner = background)
	Mat maskImage;
	maskImage = Mat(srcImage.size(), CV_8UC1);  // marker mask, later passed to findContours
	maskImage = Scalar::all(0);
	Point point1(0, 0), point2(100, 10);
	//line(maskImage, point1, point2, Scalar::all(255), 5, 8, 0);
	circle(maskImage, point1, 10, Scalar::all(255), 100);
	

	// second seed (image center = object of interest)
	Point point3(srcImage.cols / 2, srcImage.rows / 2), point4(srcImage.cols / 2+200, srcImage.rows / 2);
	//line(maskImage, point3, point4, Scalar::all(255), 5, 8, 0);
	circle(maskImage, point3, 10, Scalar::all(255), 100);

	/*namedWindow("resImage", 0);
	imshow("resImage", maskImage);
	waitKey();*/

	// [contours of the two seed blobs]
	vector<vector<Point>> contours;
	vector<Vec4i> hierarchy;
	findContours(maskImage, contours, hierarchy, RETR_CCOMP, CHAIN_APPROX_SIMPLE);

	// [watershed]
	// second argument: marker image, must be CV_32S
	Mat maskWaterShed;  // argument for watershed()
	maskWaterShed = Mat(maskImage.size(), CV_32S);// blank marker image
	maskWaterShed = Scalar::all(0);

	/* draw each seed contour on maskWaterShed as label 1..N */
	for (int index = 0; index < contours.size(); index++)
		drawContours(maskWaterShed, contours, index, Scalar::all(index + 1), -1, 8, hierarchy, INT_MAX);

	/* imshow of maskWaterShed would look black: it only holds small labels (1, 2, ...) */
	// first argument: 8-bit 3-channel source image
	watershed(srcImage, maskWaterShed);  // afterwards read labels via maskWaterShed.at<int>(row, col)

	// [random colors, one per label — generated but not used below]
	vector<Vec3b> colorTab;
	for (int i = 0; i < contours.size(); i++)
	{
		int b = theRNG().uniform(0, 255);
		int g = theRNG().uniform(0, 255);
		int r = theRNG().uniform(0, 255);

		colorTab.push_back(Vec3b((uchar)b, (uchar)g, (uchar)r));
	}
	Mat dst_ = Mat::zeros(maskWaterShed.size(), CV_8UC3);
	Mat dst_edge = Mat::zeros(maskWaterShed.size(), CV_8UC3);
	// label of the region containing the image center
	int index = maskWaterShed.at<int>(maskWaterShed.rows / 2, maskWaterShed.cols / 2);
	int index_temp = 0;
	for (int i = 0; i < maskWaterShed.rows; i++)
	{
		for (int j = 0; j < maskWaterShed.cols; j++)
		{
			index_temp = maskWaterShed.at<int>(i, j);
			//cout << index_temp << endl;
			if (index_temp == index)// keep only pixels labelled like the center
			{
				dst_edge.at<Vec3b>(i, j) = Vec3b((uchar)255, (uchar)255, (uchar)255); //colorTab[index - 1];
				dst_.at<Vec3b>(i, j) = srcImage.at<Vec3b>(i, j);
			}
		}
	}
	cv::namedWindow("分割結果", 0);
	cv::imshow("分割結果", dst_);
	imwrite("條紋1.bmp", dst_);

	/*Mat dst_add;
	addWeighted(dst_edge, 0.3, srcImage, 0.7, 0, dst_add);
	namedWindow("加權結果", 0);
	imshow("加權結果", dst_add);*/

	cv::namedWindow("連通域", 0);
	cv::imshow("連通域", dst_edge);
	imwrite("條紋1_.bmp", dst_edge);

	dst_.copyTo(dst);
	dst_edge.copyTo(edge);
}

C++ Opencv HU、MOM、GLCM

/*
	第一步:建立類
	#include <opencv2/opencv.hpp>
	#include <iostream>
	#include <vector>
	#include "time.h"

	using namespace cv;
	using namespace std;
	
	第二步:包含類
	Feature feature_class;

	第三步:
	集合顏色+形狀+紋理
	// 圖像特徵_HU
	Mat hu_dst = dst.clone();
	double Hu[7] = { 0 };
	feature_class.feature_hu(hu_dst, Hu);

	// 圖像特徵_COLOR
	Mat color_dst = dst.clone();
	float Mom[9] = { 0 };
	feature_class.feature_color(color_dst, Mom);

	// 圖像特徵_GLCM
	Mat glcm_dst = dst.clone();
	cv::cvtColor(glcm_dst, glcm_dst, CV_RGB2GRAY);
	float glcm_data[16] = { 0 };
	feature_class.feature_glcm(glcm_dst, glcm_data);

	第四步:
	// 特徵集合7+9+16
	float test_data[32] = { 0 };
	for (size_t j = 0; j < 7; j++)
	{
	test_data[j] = (float)Hu[j];

	}
	for (size_t j = 0; j < 9; j++)
	{
	test_data[7 + j] = (float)Mom[j];

	}
	for (size_t j = 0; j < 16; j++)
	{
	test_data[16 + j] = (float)glcm_data[j];
	}

	*/
	/* [Color] */
	// Third-order color moment of one 8-bit channel: the cube root of the
	// mean cubed deviation from the channel mean (a skewness-like statistic).
	double calc3orderMom(Mat &channel)
	{
		const double mu = mean(channel)[0];	// mean of the single channel
		int nRows = channel.rows;
		int nCols = channel.cols;
		if (channel.isContinuous())	// contiguous data: scan as one long row
		{
			nCols *= nRows;
			nRows = 1;
		}
		double cubeSum = 0;	// sum of cubed deviations
		for (int r = 0; r < nRows; ++r)
		{
			const uchar *px = channel.ptr<uchar>(r);
			for (int c = 0; c < nCols; ++c)
				cubeSum += pow((px[c] - mu), 3);
		}
		// cube root of the mean, computed in float exactly as before
		return (double)cvCbrt((float)(cubeSum / (nRows*nCols)));
	}
	// Nine color moments: mean, standard deviation and third-order moment of
	// each of the three channels, laid out as
	// [mean0, mean1, mean2, sd0, sd1, sd2, third0, third1, third2].
	// Returns a heap array of 9 doubles; the caller must delete[] it.
	double *colorMom(Mat &img)
	{
		double *Mom = new double[9];
		if (img.channels() != 3)
			std::cout << "Error,input image must be a color image" << endl;
		Mat b(img.rows, img.cols, CV_8U);
		Mat g(img.rows, img.cols, CV_8U);
		Mat r(img.rows, img.cols, CV_8U);
		Mat planes[] = { b, g, r };
		split(img, planes);	// one plane per channel
		Mat tmp_m, tmp_sd;
		for (int ch = 0; ch < 3; ++ch)
		{
			meanStdDev(planes[ch], tmp_m, tmp_sd);
			Mom[ch] = tmp_m.at<double>(0, 0);	// 1st order: mean
			Mom[ch + 3] = tmp_sd.at<double>(0, 0);	// 2nd order: std dev
			Mom[ch + 6] = calc3orderMom(planes[ch]);	// 3rd order moment
		}
		return Mom;	// caller owns the array
	}
	// Color-moment feature vector: converts to HSV, fills Mom[9] and prints
	// each value. Returns false (with a message) for non-3-channel input.
	bool feature_color(Mat src, float Mom[9])
	{
		if (src.channels() == 3)
		{
			Mat color_dst = src.clone();
			cv::cvtColor(color_dst, color_dst, CV_RGB2HSV);
			double *MOM;
			MOM = colorMom(color_dst);
			for (int i = 0; i < 9; i++)
			{
				std::cout << (float)MOM[i] << endl;
				Mom[i] = (float)MOM[i];
			}
			delete[] MOM;	// FIX: colorMom allocates with new[]; this was leaked
			return  true;
		}
		else
		{
			std::cout << "channels!=3";
			return false;
		}
	}


	/* 【形狀】 */
	bool feature_hu(Mat src, double Hu[7])
	{
		if (src.channels() == 3)
		{
			// 圖像特徵_HU
			Mat hu_dst = src.clone();
			cv::cvtColor(hu_dst, hu_dst, CV_RGB2GRAY);
			Canny(hu_dst, hu_dst, 0, 120);
			//double Hu[7];       //存儲得到的Hu矩陣
			Moments mo = moments(hu_dst);//矩變量
			HuMoments(mo, Hu);
			for (int i = 0; i < 7; i++)
			{
				std::cout << (float)Hu[i] << endl;
			}
			return true;
		}
		else if ((src.channels() == 1))
		{
			Mat hu_dst = src.clone();
			Canny(hu_dst, hu_dst, 0, 120);
			//double Hu[7];       //存儲得到的Hu矩陣
			Moments mo = moments(hu_dst);//矩變量
			HuMoments(mo, Hu);
			for (int i = 0; i < 7; i++)
			{
				std::cout << (float)Hu[i] << endl;
			}
			return true;
		}
		else
		{
			return false;
		}
	}


	// Texture (GLCM) helpers
	const int gray_level = 16;// number of gray levels the co-occurrence matrix is quantized to
	vector<double> glamvalue;// global scratch vector (unused in this excerpt)

	// [Step 1] build the co-occurrence matrices
	void getglcm_0(Mat& input, Mat& dst)// 0-degree (horizontal) GLCM
	{
		CV_Assert(1 == input.channels());
		Mat gray;
		input.convertTo(gray, CV_32S);
		const int nrows = gray.rows;
		const int ncols = gray.cols;

		// number of gray levels present = max pixel value + 1
		int levels = 0;
		for (int r = 0; r < nrows; ++r)
		{
			const int* row = gray.ptr<int>(r);
			for (int c = 0; c < ncols; ++c)
				if (row[c] > levels)
					levels = row[c];
		}
		++levels;

		if (levels > 16)	// quantize down to 16 levels to keep the matrix small
		{
			for (int r = 0; r < nrows; ++r)
			{
				int* row = gray.ptr<int>(r);
				for (int c = 0; c < ncols; ++c)
					row[c] /= gray_level;
			}
			levels = gray_level;
		}

		dst.create(levels, levels, CV_32SC1);
		dst = Scalar::all(0);
		// count pairs (p(r,c), p(r,c+1)) — the horizontal neighbour
		for (int r = 0; r < nrows; ++r)
		{
			const int* row = gray.ptr<int>(r);
			for (int c = 0; c + 1 < ncols; ++c)
				dst.ptr<int>(row[c])[row[c + 1]]++;
		}
	}

	void getglcm_45(Mat& input, Mat& dst)// 45-degree (diagonal) GLCM
	{
		CV_Assert(1 == input.channels());
		Mat gray;
		input.convertTo(gray, CV_32S);
		const int nrows = gray.rows;
		const int ncols = gray.cols;

		// number of gray levels present = max pixel value + 1
		int levels = 0;
		for (int r = 0; r < nrows; ++r)
		{
			const int* row = gray.ptr<int>(r);
			for (int c = 0; c < ncols; ++c)
				if (row[c] > levels)
					levels = row[c];
		}
		++levels;

		if (levels > 16)	// quantize down to 16 levels to keep the matrix small
		{
			for (int r = 0; r < nrows; ++r)
			{
				int* row = gray.ptr<int>(r);
				for (int c = 0; c < ncols; ++c)
					row[c] /= gray_level;
			}
			levels = gray_level;
		}

		dst.create(levels, levels, CV_32SC1);
		dst = Scalar::all(0);
		// count pairs (p(r,c), p(r+1,c+1)) — the 45-degree neighbour
		for (int r = 0; r + 1 < nrows; ++r)
		{
			const int* row = gray.ptr<int>(r);
			const int* below = gray.ptr<int>(r + 1);
			for (int c = 0; c + 1 < ncols; ++c)
				dst.ptr<int>(row[c])[below[c + 1]]++;
		}
	}

	void getglcm_90(Mat& input, Mat& dst)// 90-degree (vertical) GLCM
	{
		CV_Assert(1 == input.channels());
		Mat gray;
		input.convertTo(gray, CV_32S);
		const int nrows = gray.rows;
		const int ncols = gray.cols;

		// number of gray levels present = max pixel value + 1
		int levels = 0;
		for (int r = 0; r < nrows; ++r)
		{
			const int* row = gray.ptr<int>(r);
			for (int c = 0; c < ncols; ++c)
				if (row[c] > levels)
					levels = row[c];
		}
		++levels;

		if (levels > 16)	// quantize down to 16 levels to keep the matrix small
		{
			for (int r = 0; r < nrows; ++r)
			{
				int* row = gray.ptr<int>(r);
				for (int c = 0; c < ncols; ++c)
					row[c] /= gray_level;
			}
			levels = gray_level;
		}

		dst.create(levels, levels, CV_32SC1);
		dst = Scalar::all(0);
		// count pairs (p(r,c), p(r+1,c)) — the vertical neighbour
		for (int r = 0; r + 1 < nrows; ++r)
		{
			const int* row = gray.ptr<int>(r);
			const int* below = gray.ptr<int>(r + 1);
			for (int c = 0; c < ncols; ++c)
				dst.ptr<int>(row[c])[below[c]]++;
		}
	}

	void getglcm_135(Mat& input, Mat& dst)// 135-degree (anti-diagonal) GLCM
	{
		CV_Assert(1 == input.channels());
		Mat gray;
		input.convertTo(gray, CV_32S);
		const int nrows = gray.rows;
		const int ncols = gray.cols;

		// number of gray levels present = max pixel value + 1
		int levels = 0;
		for (int r = 0; r < nrows; ++r)
		{
			const int* row = gray.ptr<int>(r);
			for (int c = 0; c < ncols; ++c)
				if (row[c] > levels)
					levels = row[c];
		}
		++levels;

		if (levels > 16)	// quantize down to 16 levels to keep the matrix small
		{
			for (int r = 0; r < nrows; ++r)
			{
				int* row = gray.ptr<int>(r);
				for (int c = 0; c < ncols; ++c)
					row[c] /= gray_level;
			}
			levels = gray_level;
		}

		dst.create(levels, levels, CV_32SC1);
		dst = Scalar::all(0);
		// count pairs (p(r,c), p(r+1,c-1)) — the 135-degree neighbour
		for (int r = 0; r + 1 < nrows; ++r)
		{
			const int* row = gray.ptr<int>(r);
			const int* below = gray.ptr<int>(r + 1);
			for (int c = 1; c < ncols; ++c)
				dst.ptr<int>(row[c])[below[c - 1]]++;
		}
	}

	// [Step 2] texture statistics from one co-occurrence matrix (CV_32SC1).
	// Accumulates into the references (callers pass zero-initialized floats):
	// Asm = energy, Con = contrast, Ent = entropy, Idm = inverse difference moment.
	void feature_computer(Mat&src, float& Asm, float& Con, float& Ent, float& Idm)
	{
		int height = src.rows;
		int width = src.cols;
		int total = 0;
		for (int i = 0; i < height; i++)
		{
			int*srcdata = src.ptr<int>(i);
			for (int j = 0; j < width; j++)
			{
				total += srcdata[j];	// sum of all co-occurrence counts
			}
		}

		Mat copy;
		// FIX: was CV_64FC1 but read/written through ptr<float>; the element
		// sizes disagreed, so the scan walked the wrong memory. CV_32FC1
		// matches the float accesses below.
		copy.create(height, width, CV_32FC1);
		for (int i = 0; i < height; i++)
		{
			int*srcdata = src.ptr<int>(i);
			float*copydata = copy.ptr<float>(i);
			for (int j = 0; j < width; j++)
			{
				copydata[j] = (float)srcdata[j] / (float)total;	// normalize to probabilities
			}
		}


		for (int i = 0; i < height; i++)
		{
			float*srcdata = copy.ptr<float>(i);
			for (int j = 0; j < width; j++)
			{
				Asm += srcdata[j] * srcdata[j];								// energy
				if (srcdata[j]>0)
				{
					Ent -= srcdata[j] * log(srcdata[j]);					// entropy
				}
				Con += (float)(i - j)*(float)(i - j)*srcdata[j];			// contrast
				Idm += srcdata[j] / (1 + (float)(i - j)*(float)(i - j));	// inverse difference moment
			}
		}
	}

	// [Steps 1+2 combined] 16 GLCM features ordered
	// [ASM x4 directions, CON x4, ENT x4, IDM x4] at 0/45/90/135 degrees.
	/*
	Mat src_gray;
	float data[16] = {0};
	*/
	void feature_glcm(Mat src_gray, float data[16])
	{
		Mat glcm[4];
		getglcm_0(src_gray, glcm[0]);
		getglcm_45(src_gray, glcm[1]);
		getglcm_90(src_gray, glcm[2]);
		getglcm_135(src_gray, glcm[3]);

		float Asm[4] = { 0 }, Con[4] = { 0 }, Ent[4] = { 0 }, Idm[4] = { 0 };
		for (int d = 0; d < 4; ++d)
			feature_computer(glcm[d], Asm[d], Con[d], Ent[d], Idm[d]);

		// pack as 4x ASM, then 4x CON, 4x ENT, 4x IDM
		for (int d = 0; d < 4; ++d)
		{
			data[d] = Asm[d];
			data[4 + d] = Con[d];
			data[8 + d] = Ent[d];
			data[12 + d] = Idm[d];
		}
	}




	// Number of files matching a glob pattern.
	/*
	cv::String pattern = "./save/*.bmp";
	int cout = read_images_in_folder(pattern);
	*/
	size_t read_images_in_folder(cv::String pattern)
	{
		vector<cv::String> files;
		glob(pattern, files, false);	// OpenCV glob() enumerates matching files
		return files.size();
	}

C++ Opencv hog+SVM(opencv3)

 

// opencv3
#include <stdio.h>
#include <iostream>  
#include <fstream>  
#include <opencv2/opencv.hpp>
#include <string>
 
using namespace cv::ml;
 
#define PosSamNO   30    //正樣本個數                                                    
#define NegSamNO   30    //負樣本個數                                     
#define TestSamNO  5     //測試個數                                                    
 
void train_svm_hog()
{
 
	//HOG檢測器,用來計算HOG描述子的
	//檢測窗口(48,48),塊尺寸(16,16),塊步長(8,8),cell尺寸(8,8),直方圖bin個數9 
	cv::HOGDescriptor hog(cv::Size(48, 48), cv::Size(16, 16), cv::Size(8, 8), cv::Size(8, 8), 9);
	int DescriptorDim;//HOG描述子的維數,由圖片大小、檢測窗口大小、塊大小、細胞單元中直方圖bin個數決定  
 
	//設置SVM參數	
	cv::Ptr<cv::ml::SVM> svm = cv::ml::SVM::create();
	svm->setType(cv::ml::SVM::Types::C_SVC);
	svm->setKernel(cv::ml::SVM::KernelTypes::LINEAR);
	svm->setTermCriteria(cv::TermCriteria(cv::TermCriteria::MAX_ITER, 100, 1e-6));
	std::string ImgName;
 
	//正樣本圖片的文件列表
	std::ifstream finPos("positive_samples.txt");
	//負樣本圖片的文件列表
	std::ifstream finNeg("negative_samples.txt");
 
	//所有訓練樣本的特徵向量組成的矩陣,行數等於所有樣本的個數,列數等於HOG描述子維數 
	cv::Mat sampleFeatureMat;
	//訓練樣本的類別向量,行數等於所有樣本的個數,列數等於1;1表示有目標,-1表示無目標 
	cv::Mat sampleLabelMat;
 
	//依次讀取正樣本圖片,生成HOG描述子  
	for (int num = 0; num < PosSamNO && getline(finPos, ImgName); num++)
	{
		std::cout << "Processing:" << ImgName << std::endl;
		cv::Mat image = cv::imread(ImgName);
		cv::resize(image, image, cv::Size(48, 48));
 
		//HOG描述子向量
		std::vector<float> descriptors;
		//計算HOG描述子,檢測窗口移動步長(8,8)
		hog.compute(image, descriptors, cv::Size(8, 8));
		//處理第一個樣本時初始化特徵向量矩陣和類別矩陣,因爲只有知道了特徵向量的維數才能初始化特徵向量矩陣 
		if (0 == num)
		{
			//HOG描述子的維數
			DescriptorDim = descriptors.size();
			//初始化所有訓練樣本的特徵向量組成的矩陣,行數等於所有樣本的個數,列數等於HOG描述子維數sampleFeatureMat 
			sampleFeatureMat = cv::Mat::zeros(PosSamNO + NegSamNO, DescriptorDim, CV_32FC1);
			//初始化訓練樣本的類別向量,行數等於所有樣本的個數,列數等於1
			sampleLabelMat = cv::Mat::zeros(PosSamNO + NegSamNO, 1, CV_32SC1);
		}
		//將計算好的HOG描述子複製到樣本特徵矩陣sampleFeatureMat  
		for (int i = 0; i < DescriptorDim; i++)
		{
			//第num個樣本的特徵向量中的第i個元素 
			sampleFeatureMat.at<float>(num, i) = descriptors[i];
		}		//正樣本類別爲1,判別爲無噴濺	 
		sampleLabelMat.at<float>(num, 0) = 1;
	}
 
 
	//依次讀取負樣本圖片,生成HOG描述子  
	for (int num = 0; num < NegSamNO && getline(finNeg, ImgName); num++)
	{
		std::cout << "Processing:" << ImgName << std::endl;
		cv::Mat src = cv::imread(ImgName);
		cv::resize(src, src, cv::Size(48, 48));
 
		//HOG描述子向量		
		std::vector<float> descriptors;
		//計算HOG描述子,檢測窗口移動步長(8,8) 
		hog.compute(src, descriptors, cv::Size(8, 8));
		//處理第一個樣本時初始化特徵向量矩陣和類別矩陣,因爲只有知道了特徵向量的維數才能初始化特徵向量矩陣 
		//std::cout << "descriptor dimention:" << descriptors.size() << std::endl;
 
		//將計算好的HOG描述子複製到樣本特徵矩陣sampleFeatureMat
		for (int i = 0; i < DescriptorDim; i++)
		{
			//第PosSamNO+num個樣本的特徵向量中的第i個元素
			sampleFeatureMat.at<float>(num + PosSamNO, i) = descriptors[i];
		}
		//負樣本類別爲-1,判別爲噴濺
		sampleLabelMat.at<float>(num + PosSamNO, 0) = -1;
	}
 
 
	//訓練SVM分類器  
	std::cout << "開始訓練SVM分類器" << std::endl;
	cv::Ptr<cv::ml::TrainData> td = cv::ml::TrainData::create(sampleFeatureMat, cv::ml::SampleTypes::ROW_SAMPLE, sampleLabelMat);
 
	svm->train(td);
	std::cout << "SVM分類器訓練完成" << std::endl;
 
	//將訓練好的SVM模型保存爲xml文件
	svm->save("SVM_HOG.xml");
	return;
}
 
// Classify test images listed in "test_samples.txt" with the SVM trained by
// train_svm_hog(). Shows each image and prints its predicted label.
void svm_hog_classification()
{
	// HOG descriptor with the same geometry used for training:
	// window (48,48), block (16,16), block stride (8,8), cell (8,8), 9 bins.
	cv::HOGDescriptor hog(cv::Size(48, 48), cv::Size(16, 16), cv::Size(8, 8), cv::Size(8, 8), 9);

	// Load the trained SVM ONCE, before the loop. Two fixes here:
	//  1) the model was previously reloaded on every iteration;
	//  2) the file name contained a stray trailing space ("SVM_HOG.xml "),
	//     which does not match the file written by train_svm_hog().
	cv::Ptr<cv::ml::SVM> svm = cv::ml::SVM::load<cv::ml::SVM>("SVM_HOG.xml");
	if (svm->empty())
	{
		std::cout << "load svm detector failed!!!" << std::endl;
		return;
	}

	// Test-image file list.
	std::ifstream finTest("test_samples.txt");
	std::string ImgName;
	for (int num = 0; num < TestSamNO && getline(finTest, ImgName); num++)
	{
		std::cout << "開始識別..." << std::endl;
		std::cout << "Processing:" << ImgName << std::endl;
		cv::Mat test = cv::imread(ImgName);
		if (test.empty())	// robustness: skip unreadable images instead of crashing in resize()
		{
			std::cout << "could not read test image: " << ImgName << std::endl;
			continue;
		}
		cv::resize(test, test, cv::Size(48, 48));
		std::vector<float> descriptors;
		hog.compute(test, descriptors);
		// Single-row feature matrix for prediction.
		cv::Mat testDescriptor = cv::Mat::zeros(1, (int)descriptors.size(), CV_32FC1);
		for (size_t i = 0; i < descriptors.size(); i++)
		{
			testDescriptor.at<float>(0, i) = descriptors[i];
		}
		float label = svm->predict(testDescriptor);
		imshow("test image", test);
		std::cout << "這張圖屬於:" << label << std::endl;
		cv::waitKey(0);
	}
	return;
}
 
int main(int argc, char** argv)
{
	// Train the HOG+SVM model first (writes SVM_HOG.xml), then classify the
	// test list using the saved model.
	train_svm_hog();
	svm_hog_classification();
	return 0;
}

C++ Opencv  特徵AKAZE(opencv3.3.0)

特徵檢測

第一步:檢測器

Ptr<AKAZE> detector = AKAZE::create();

第二步:檢測器子類—檢測

detector->detect(img, keypoints, Mat());

計算檢測時間(通用):

double t1 = getTickCount();

/*加入你要計算時間的代碼段*/

double t2 = getTickCount();
double tkaze =  (t2 - t1) / getTickFrequency();
printf("Time consume(s) : %f\n", tkaze);

第三步:畫出特徵點圖

drawKeypoints(img, keypoints, keypointImg, Scalar::all(-1), DrawMatchesFlags::DEFAULT);

總體程序 

// Demo_Feature.cpp : 定義控制檯應用程序的入口點。
//

#include "stdafx.h"
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

// AKAZE key-point detection demo: load an image, detect AKAZE key points,
// report the detection time and display the result.
int _tmain(int argc, _TCHAR* argv[])
{
	Mat img1 = imread("C:\\Users\\Administrator\\Desktop\\樣品\\瓷磚\\方格.bmp", IMREAD_GRAYSCALE);
	// BUG FIX: the original also loaded an unused img2 and tested
	// (img1.empty() && img2.empty()), which only aborts when BOTH loads fail;
	// a single failed load then crashed downstream. Check img1 on its own.
	if (img1.empty()) {
		printf("could not load image...\n");
		return -1;
	}
	imshow("input image", img1);

	// AKAZE detection, timed with getTickCount().
	Ptr<AKAZE> detector = AKAZE::create();
	vector<KeyPoint> keypoints;
	double t1 = getTickCount();
	detector->detect(img1, keypoints, Mat());
	double t2 = getTickCount();
	double tkaze = 1000 * (t2 - t1) / getTickFrequency();
	printf("KAZE Time consume(ms) : %f", tkaze);

	// Draw the detected key points.
	Mat keypointImg;
	drawKeypoints(img1, keypoints, keypointImg, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
	imshow("kaze key points", keypointImg);

	waitKey(0);
	return 0;
}

特徵匹配

第一步:檢測器

Ptr<AKAZE> detector = AKAZE::create();

第二步: 檢測器子類—檢測和計算

detector->detectAndCompute(img1, Mat(), keypoints_obj, descriptor_obj);
detector->detectAndCompute(img2, Mat(), keypoints_scene, descriptor_scene);

第三步: 計算結果匹配

// 構建匹配器
FlannBasedMatcher matcher(new flann::LshIndexParams(20, 10, 2));
// 進行匹配
matcher.match(descriptor_obj, descriptor_scene, matches);

 第四步:匹配結果畫出

drawMatches(img1, keypoints_obj, img2, keypoints_scene, matches, akazeMatchesImg);

第五步:最佳匹配選取

vector<DMatch> goodMatches;
double minDist = 100000, maxDist = 0;
for (int i = 0; i < descriptor_obj.rows; i++) {
	double dist = matches[i].distance;
	if (dist < minDist) {
		minDist = dist;
	}
	if (dist > maxDist) {
		maxDist = dist;
	}
}
printf("min distance : %f", minDist);

for (int i = 0; i < descriptor_obj.rows; i++) {
	double dist = matches[i].distance;
	if (dist < max(1.5*minDist, 0.02)) {
		goodMatches.push_back(matches[i]);
	}
}

第六步:最佳匹配結果畫出

drawMatches(img1, keypoints_obj, img2, keypoints_scene, goodMatches, akazeMatchesImg, Scalar::all(-1),Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

 總體程序

// Demo_Feature.cpp : 定義控制檯應用程序的入口點。
//

#include "stdafx.h"
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

// AKAZE feature-matching demo: detect + describe on two images, match with a
// FLANN LSH matcher, then filter down to the good matches and display both
// the raw and filtered match results.
int _tmain(int argc, _TCHAR* argv[])
{
	Mat img1 = imread("C:\\Users\\Administrator\\Desktop\\樣品\\瓷磚\\方格.bmp", IMREAD_GRAYSCALE);
	Mat img2 = imread("C:\\Users\\Administrator\\Desktop\\樣品\\瓷磚\\方格.bmp", IMREAD_GRAYSCALE);
	// BUG FIX: was (img1.empty() && img2.empty()) — that only aborts when BOTH
	// images are missing; either one missing must abort.
	if (img1.empty() || img2.empty()) {
		printf("could not load image...\n");
		return -1;
	}

	imshow("box image", img1);
	imshow("scene image", img2);

	// Extract AKAZE features (key points + binary descriptors), timed.
	Ptr<AKAZE> detector = AKAZE::create();
	vector<KeyPoint> keypoints_obj;
	vector<KeyPoint> keypoints_scene;
	Mat descriptor_obj, descriptor_scene;
	double t1 = getTickCount();
	detector->detectAndCompute(img1, Mat(), keypoints_obj, descriptor_obj);
	detector->detectAndCompute(img2, Mat(), keypoints_scene, descriptor_scene);
	double t2 = getTickCount();
	double tkaze = 1000 * (t2 - t1) / getTickFrequency();
	printf("AKAZE Time consume(ms) : %f\n", tkaze);

	// Match with FLANN using LSH (suitable for binary descriptors).
	FlannBasedMatcher matcher(new flann::LshIndexParams(20, 10, 2));
	//FlannBasedMatcher matcher;
	vector<DMatch> matches;
	matcher.match(descriptor_obj, descriptor_scene, matches);

	// Draw all matches.
	Mat akazeMatchesImg;
	drawMatches(img1, keypoints_obj, img2, keypoints_scene, matches, akazeMatchesImg);
	imshow("akaze match result", akazeMatchesImg);

	// Find the min/max match distance, then keep matches below
	// max(1.5 * minDist, 0.02) as "good".
	vector<DMatch> goodMatches;
	double minDist = 100000, maxDist = 0;
	for (int i = 0; i < descriptor_obj.rows; i++) {
		double dist = matches[i].distance;
		if (dist < minDist) {
			minDist = dist;
		}
		if (dist > maxDist) {
			maxDist = dist;
		}
	}
	printf("min distance : %f", minDist);

	for (int i = 0; i < descriptor_obj.rows; i++) {
		double dist = matches[i].distance;
		if (dist < max(1.5*minDist, 0.02)) {
			goodMatches.push_back(matches[i]);
		}
	}

	drawMatches(img1, keypoints_obj, img2, keypoints_scene, goodMatches, akazeMatchesImg, Scalar::all(-1),
		Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
	imshow("good match result", akazeMatchesImg);

	waitKey(0);
	return 0;
}

C++ Opencv 矩形分割

 圖像預處理分割目標

// ROI提取
// Extract the rectangular target region from `frame`.
// Pipeline: gray -> Gaussian blur -> dilate -> Canny -> findContours; for each
// contour whose min-area rect is at least 100x100, de-rotate the image around
// that rect's angle and crop the contour's bounding box from the rotated image.
// Returns the LAST cropped region found.
// NOTE(review): if no contour passes the size filter, the returned Mat is
// empty (default-constructed) — callers should check .empty().
Mat Image_ROI(Mat frame)
{
	Mat gray, bw, img, bkup;
	/* Target extraction -- preprocessing -- Mat ROI */
	/* Preprocessing is critical -- it locates the target directly */
	// Preprocessing is critical -- it locates the target directly
	// Convert to gray scale
	cv::cvtColor(frame, gray, CV_BGR2GRAY);
	img = gray.clone();

	// Gaussian blur	
	cv::GaussianBlur(img, img, Size(5, 5), 0, 0);  //Gaussian filtering

	// Dilation
	Mat element = getStructuringElement(MORPH_RECT, Size(3, 3)); //MORPH_RECT = rectangular kernel; elliptical or cross-shaped kernels are also possible
	cv::dilate(img, img, element);  //in practice a moderate dilation proved important

	// Edge detection
	cv::Canny(img, img, 30, 120, 3);   //edge extraction
	//namedWindow("get contour", 1);
	//	cv::imshow("get contour", img);


	// Find the rectangular contour among the target contours
	/**/
	bkup = gray.clone();
	Mat dstImg = frame.clone();
	//	cv::imshow("原圖", dstImg);//original image
	/**/
	vector<vector<Point>> contours;
	vector<Vec4i> hierarcy;
	cv::findContours(img, contours, hierarcy, CV_RETR_TREE, CV_CHAIN_APPROX_NONE);
	vector<Rect> boundRect(contours.size());
	vector<RotatedRect> box(contours.size());
	Point2f rect[4];
	Mat image_object;
	for (int i = 0; i < contours.size(); i++)
	{
		box[i] = minAreaRect(Mat(contours[i]));

		if (box[i].size.width < 100 || box[i].size.height < 100)//size filter: skip small contours
			continue;
		//rectangle(dstImg, Point(boundRect[i].x, boundRect[i].y), Point(boundRect[i].x + boundRect[i].width, boundRect[i].y + boundRect[i].height), Scalar(0, 255, 0), 2, 8);
		//circle(dstImg, Point(box[i].center.x, box[i].center.y), 5, Scalar(255, 255, 0), -1, 8);
		box[i].points(rect);
		/*for (int j = 0; j<4; j++)
		{
		line(dstImg, rect[j], rect[(j + 1) % 4], Scalar(255, 0, 255), 2, 8);
		}*/
		float angle;
		std::cout << "angle=" << box[i].angle << endl;
		angle = box[i].angle;
		char width[20], height[20];
		
		sprintf_s(width, "width=%0.2f", box[i].size.width);
		sprintf_s(height, "height=%0.2f", box[i].size.height);
		//putText(dstImg, width, Point(195, 260), CV_FONT_HERSHEY_COMPLEX_SMALL, 0.85, Scalar(255, 255, 0));
		//putText(dstImg, height, Point(190, 285), CV_FONT_HERSHEY_COMPLEX_SMALL, 0.85, Scalar(255, 255, 0));


		// De-rotate via an affine transform (a perspective transform would be an alternative)
		if (0 < abs(angle) && abs(angle) <= 45)
			angle = angle;//negative angle -> clockwise rotation
		else if (45 < abs(angle) && abs(angle) < 90)
			angle = 90 - abs(angle);//positive angle -> counter-clockwise rotation
		Point2f center = box[i].center;  //rotation center
		double angle0 = angle;
		double scale = 1;
		Mat roateM = getRotationMatrix2D(center, angle0, scale);  //rotation matrix: negative = clockwise, positive = counter-clockwise
		Mat roate_img;
		warpAffine(dstImg, roate_img, roateM, dstImg.size()); //affine-transformed frame
		//	imshow("roateM", roate_img);

		/**/
		boundRect[i] = boundingRect(Mat(contours[i]));
		// Crop the target region from the de-rotated image
		int x0 = 0, y0 = 0, w0 = 0, h0 = 0;
		x0 = boundRect[i].x;
		y0 = boundRect[i].y;
		w0 = boundRect[i].width;
		h0 = boundRect[i].height;
		Mat ROI = roate_img(Rect(x0, y0, w0, h0));//crop the corresponding region
		//imshow("ROI", ROI);
		image_object = ROI;
		imwrite("測試ROI的準確性.jpg", ROI);
			
	}
	return image_object;
}

 C++ vector容器打印

容器vector打印:

vector<KeyPoint> keypoints_obj;
/*
分KeyPoint類和vector容器操作
*/
std::cout << "keypoints_obj尺寸:" << keypoints_obj.size() << endl;
for (size_t i = 0; i < keypoints_obj.size(); i++)
{
    std::cout << "keypoints_obj關鍵點的方向:" << keypoints_obj[i].angle << endl;
    std::cout << "keypoints_obj關鍵點的座標:" << keypoints_obj[i].pt.x << keypoints_obj[i].pt.y << endl;
}	

C++ FlyCapture相機

需要材料

FlyCapture2https://pan.baidu.com/s/14NSxArB5l-bZs229C9RHVA

FlycaptureTimehttps://pan.baidu.com/s/1YP9dobp2VnBHESQscjN3DA

 C++ Modbus通信

需要材料:

libmodbus調試助手

modbus發送數據子函數
第一步:
	#include modbus.h
第二步:
	char *ip = "192.168.83.75";
	int port = 502; 
	int SERVER_ID = 2;	//從端設備slave
	int LOOP = 1;	//循環次數
	int ADDRESS_START = 4096;	//測試寄存器起始地址
	int ADDRESS_END = 4102;	//測試寄存器結束地址
	int nb = ADDRESS_END - ADDRESS_START;
	 /*申請存儲空間*/
	uint16_t *send_data;
	send_data = (uint16_t *)malloc(nb * sizeof(uint16_t));
	memset(send_data, 0, nb* sizeof(uint16_t));
	for (int i = 0; i < nb; i++)
	{
		send_data[i] = (uint16_t)(0);//發送的數據輸入
	}
	int flag = modbus_tcpip_send_data(send_data, ip, port, SERVER_ID, LOOP, ADDRESS_START, ADDRESS_END);//發送失敗返回-1

第三步:
// Write `send_data` to the holding registers [ADDRESS_START, ADDRESS_END) of
// a Modbus-TCP slave, read them back and verify, LOOP times.
// Returns 0 on success, -1 on any failure (connect, write, read-back or
// verification mismatch).
int modbus_tcpip_send_data(uint16_t *send_data, char *ip, int port, int SERVER_ID, int LOOP, int ADDRESS_START, int ADDRESS_END)
{
	modbus_t *ctx;
	int rc;
	int nb_fail;
	int nb_loop;
	int nb;
	uint16_t *tab_rq_registers;
	uint16_t *tab_rp_registers;

	/* TCP connection ("Master" side talking to slave SERVER_ID). */
	ctx = modbus_new_tcp(ip, port);
	modbus_set_debug(ctx, TRUE);//debug: echoes ip/port traffic
	modbus_set_slave(ctx, SERVER_ID);

	/* Connection failure */
	if (modbus_connect(ctx) == -1)
	{
		modbus_free(ctx);
		return -1;
	}

	nb = ADDRESS_END - ADDRESS_START;

	/* Allocate request/response buffers */
	tab_rq_registers = (uint16_t *)malloc(nb * sizeof(uint16_t));
	memset(tab_rq_registers, 0, nb* sizeof(uint16_t));

	tab_rp_registers = (uint16_t *)malloc(nb * sizeof(uint16_t));
	memset(tab_rp_registers, 0, nb* sizeof(uint16_t));

	/* Values to write. (The original filled the same buffer once per address
	   in a redundant outer loop; a single pass is equivalent.) */
	for (int i = 0; i < nb; i++)
	{
		tab_rq_registers[i] = (uint16_t)(send_data[i]);
	}

	/* Write, read back and compare, LOOP times. */
	nb_loop = nb_fail = 0;
	while (nb_loop++ < LOOP)
	{
		/* Batch register write; rc is the number of registers written. */
		rc = modbus_write_registers(ctx, ADDRESS_START, nb, tab_rq_registers);
		if (rc != nb)
		{
			nb_fail++;//write failed
		}
		else
		{
			/* On a successful write, read the registers back. */
			rc = modbus_read_registers(ctx, ADDRESS_START, nb, tab_rp_registers);
			if (rc != nb)
			{
				nb_fail++;//read-back failed
			}
			else
			{
				/* Verify the read-back values against what was sent. */
				for (int i = 0; i < nb; i++)
				{
					if (tab_rq_registers[i] != tab_rp_registers[i])
					{
						nb_fail++;//register value mismatch
					}
				}
			}
		}
	}

	/* BUG FIX: the original returned from INSIDE the loop before freeing the
	   buffers and closing/freeing the connection, leaking both on every call
	   (and fell off the end without a return when LOOP == 0). Clean up first,
	   then return. */
	int result = nb_fail ? -1 : 0;

	/* Release memory */
	free(tab_rq_registers);
	free(tab_rp_registers);

	/* Close the connection */
	modbus_close(ctx);
	modbus_free(ctx);

	return result;
}

C++ Opencv xml調用

 需要材料:

xml

xml2

// getheader.cpp : 定義控制檯應用程序的入口點。
//

#include "stdafx.h"

#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"

#include <iostream>
#include <stdio.h>

using namespace std;
using namespace cv;


/** Function Headers */
void detectAndDisplay( Mat frame );

/** Global variables */
//-- Note, either copy these two files from opencv/data/haarscascades to your current folder, or change these locations
String face_cascade_name = "haarcascade_frontalface_alt.xml";
String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
string window_name = "Capture - Face detection";
RNG rng(12345);

const int FRAME_WIDTH = 1280;
const int FRAME_HEIGHT = 240;
/**
* @function main
*/
int main( void )
{
	// NOTE(review): `capture` is declared but never used in this build — the
	// camera loop below is commented out and a still image is processed.
	CvCapture* capture;
	//VideoCapture capture;
	Mat frame;

	//-- 1. Load the cascades (face + eyes); abort if either XML is missing.
	if (!face_cascade.load(face_cascade_name)){ printf("--(!)Error loading\n"); system("pause"); return -1; };
	if (!eyes_cascade.load(eyes_cascade_name)){ printf("--(!)Error loading\n"); system("pause"); return -1; };

	frame = imread("1.jpg");//background image to run detection on
	detectAndDisplay(frame);

	//VideoCapture cap(0); //open the default camera
	//if(!cap.isOpened())  //check whether it opened successfully
	//	return -1;

	//Mat edges;
	////namedWindow("edges",1);
	//for(;;)
	//{
	//	Mat frame;
	//	cap >> frame; // grab a new frame from the camera
	//	detectAndDisplay( frame );
	//	//imshow("edges", frame);
	//	if(waitKey(30) >= 0) break;
	//}
	////the camera is released in VideoCapture's destructor
	waitKey(0);			
	return 0;
}

// Overlay the 4-channel (BGRA) image `srcAlpha` onto the 3-channel (BGR)
// image `dest` with its top-left corner at (x, y). Pixels with alpha == 0 are
// skipped as fully transparent; all others are copied opaquely (no blending).
// NOTE(review): assumes srcAlpha is CV_8UC4 and dest is CV_8UC3 — the raw
// index arithmetic below depends on exactly those layouts; confirm at callers.
void mapToMat(const cv::Mat &srcAlpha, cv::Mat &dest, int x, int y)
{
	int nc = 3;	// channel count of the destination image
	int alpha = 0;

	for (int j = 0; j < srcAlpha.rows; j++)
	{
		for (int i = 0; i < srcAlpha.cols*3; i += 3)
		{
			// Alpha byte of the source pixel (4th byte of each BGRA quad).
			alpha = srcAlpha.ptr<uchar>(j)[i / 3*4 + 3];
			//alpha = 255-alpha;
			if(alpha != 0) //alpha test for the 4-channel image
			{
				for (int k = 0; k < 3; k++)
				{
					// if (src1.ptr<uchar>(j)[i / nc*nc + k] != 0)
					// Bounds check: stay inside both the destination and the source.
					if( (j+y < dest.rows) && (j+y>=0) &&
						((i+x*3) / 3*3 + k < dest.cols*3) && ((i+x*3) / 3*3 + k >= 0) &&
						(i/nc*4 + k < srcAlpha.cols*4) && (i/nc*4 + k >=0) )
					{
						// Copy one BGR byte from the BGRA source quad into dest.
						dest.ptr<uchar>(j+y)[(i+x*nc) / nc*nc + k] = srcAlpha.ptr<uchar>(j)[(i) / nc*4 + k];
					}
				}
			}
		}
	}
}

/**
* @function detectAndDisplay
* @brief Detect faces (and eyes inside each face) in `frame` with the global
* Haar cascades, overlay the hat image "2.png" above each detected face via
* mapToMat(), and show the result in the global window.
*/
void detectAndDisplay( Mat frame )
{
	std::vector<Rect> faces;
	Mat frame_gray;
	Mat hatAlpha;

	hatAlpha = imread("2.png",-1);//hat image, loaded with its alpha channel (-1 = unchanged)

	cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
	equalizeHist( frame_gray, frame_gray );
	//-- Detect faces
	face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );

	for( size_t i = 0; i < faces.size(); i++ )
	{

		Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
		// ellipse( frame, center, Size( faces[i].width/2, faces[i].height/2), 0, 0, 360, Scalar( 255, 0, 255 ), 2, 8, 0 );//draw the face
		// line(frame,Point(faces[i].x,faces[i].y),center,Scalar(255,0,0),5);//face center line

		Mat faceROI = frame_gray( faces[i] );
		std::vector<Rect> eyes;

		//-- In each face, detect eyes
		eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(30, 30) );

		for( size_t j = 0; j < eyes.size(); j++ )
		{
			Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
			int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
			//circle( frame, eye_center, radius, Scalar( 255, 0, 0 ), 3, 8, 0 ); //draw the eyes
		}

		// if(eyes.size())
		{
			// Scale the hat to the face size, then paste it above the face.
			resize(hatAlpha,hatAlpha,Size(faces[i].width, faces[i].height),0,0,INTER_LANCZOS4);
			// mapToMat(hatAlpha,frame,center.x+2.5*faces[i].width,center.y-1.3*faces[i].height);
			mapToMat(hatAlpha,frame,faces[i].x,faces[i].y-0.8*faces[i].height);
		}
	}
	//-- Show what you got
	imshow( window_name, frame );
	//imwrite("merry christmas.jpg",frame);
}

 C++ Opencv xvid編碼錄像

Opencv+XVID編碼錄像 

參考

附件

/*
早期用Opencv打開相機進行錄像的代碼,可行的,太久沒碰,貼上來。
*/
//(攝像頭錄像)//配合xvid編碼器進行錄像
// Record camera 0 to "camera.avi" at 25 fps using the legacy OpenCV C API;
// the codec is chosen interactively (fourcc = -1). Stops when a frame fails,
// a write fails, or any key is pressed.
void opencv_capture_save_x32_cp()
{
	CvCapture* capture = 0;
	CvVideoWriter* video = NULL;
	IplImage* frame = 0;
	int n;
	capture = cvCreateCameraCapture(0);
	if (!capture)
	{
		cout << "Can not open the camera." << endl;
		printf("Can not open the camera.");    //camera open failed
	}
	else
	{
		frame = cvQueryFrame(capture); //grab one frame first to learn the frame size
		// BUG FIX: guard against a NULL first frame — the original dereferenced
		// frame->width unconditionally.
		if (frame)
		{
			const char*videoPath = "camera.avi";
			//int fourcc = CV_FOURCC('X', 'V', 'I', 'D');			//option 1: occasionally fails — the encoder is not opened automatically
			int fourcc = -1;//-1 pops up the codec picker, avoiding an encoder that will not open	//option 2: manual selection — has never failed
			video = cvCreateVideoWriter(videoPath, fourcc, 25,
				cvSize(frame->width, frame->height)); //create the CvVideoWriter
			// BUG FIX: only record when the writer was actually created — the
			// original continued into cvWriteFrame(NULL, ...) on failure.
			if (video)
			{
				cout << "VideoWriter has created." << endl;
				cvNamedWindow("Camera Video", 0); //create a display window
				bool stop = false;
				while (!stop)
				{
					frame = cvQueryFrame(capture); //grab one frame from CvCapture
					if (!frame) break;
					cvShowImage("Camera Video", frame); //show the current frame
					n = cvWriteFrame(video, frame); //returns 1 when the frame was written successfully
					if (!n) break;
					//stop = true;//stop flag
					if (cvWaitKey(10) >= 0) break; //any key press exits
				}
			}
		}
	}
	cvReleaseVideoWriter(&video);
	cvReleaseCapture(&capture);
	cvDestroyWindow("Camera Video");
}
// 類型轉換:IplImage*—>Mat

IplImage* iplimg = cvLoadImage("heels.jpg");

Mat matImg;

matImg = Mat(iplimg);


// 類型轉換:Mat—>IplImage*

IplImage *frame;

Mat Img;

frame = &IplImage(Img); //取Mat值到指針
// 或者 *frame=IplImage(Img);

工業相機錄像

#include"stdafx.h"
#include<iostream>
#include <opencv2\core\core.hpp>
#include <opencv2\imgproc\imgproc.hpp>
#include <opencv2\opencv.hpp>
using namespace cv;
using namespace std;

//請用戶提前配置好工程頭文件目錄,需要包含GalaxyIncludes.h
#include"GalaxyIncludes.h"

Mat src;

// Daheng Galaxy industrial-camera recording demo: open the first enumerated
// camera, start acquisition, convert each grabbed Bayer frame to a BGR Mat,
// and append the frames to "camera.avi" until ESC is pressed.
int _tmain(int argc, _TCHAR* argv[])
{
	// Initialize the Galaxy SDK
	IGXFactory::GetInstance().Init();
	try
	{
		do
		{
			// Enumerate devices
			gxdeviceinfo_vector vectorDeviceInfo;
			IGXFactory::GetInstance().UpdateDeviceList(1000, vectorDeviceInfo);
			if (0 == vectorDeviceInfo.size())
			{
				cout << "無可用設備!" << endl;
				break;
			}
			
			// Open the first device and the first stream under it
			CGXDevicePointer ObjDevicePtr = IGXFactory::GetInstance().OpenDeviceBySN(
				vectorDeviceInfo[0].GetSN(),
				GX_ACCESS_EXCLUSIVE);						
			
			CGXStreamPointer ObjStreamPtr = ObjDevicePtr->OpenStream(0);

			// Get the remote device's feature controller
			CGXFeatureControlPointer ObjFeatureControlPtr = ObjDevicePtr->GetRemoteFeatureControl();

			// Send the start-acquisition command
			ObjStreamPtr->StartGrab();
			ObjFeatureControlPtr->GetCommandFeature("AcquisitionStart")->Execute();			
			
			// Main loop		
			// Grab single frames	
			CvCapture* capture = 0;	// NOTE(review): declared but never used here
			CvVideoWriter* video = NULL;
			IplImage* frame = 0;
			const char*videoPath = "camera.avi";
			int n;

			bool flag = true;	// true only until the writer/window are created on the first frame
			int a = 1;
			while (a)
			{
				CImageDataPointer objImageDataPtr;				

				objImageDataPtr = ObjStreamPtr->GetImage(500);//500 ms timeout; adjust as needed
				if (objImageDataPtr->GetStatus() == GX_FRAME_STATUS_SUCCESS)
				{
					//getchar();
					// Grab succeeded and the frame is complete; processing can start...
					cout << "收到一幀圖像!" << endl;
					cout << "ImageInfo: " << objImageDataPtr->GetStatus() << endl;
					cout << "ImageInfo: " << objImageDataPtr->GetWidth() << endl;
					cout << "ImageInfo: " << objImageDataPtr->GetHeight() << endl;
					cout << "ImageInfo: " << objImageDataPtr->GetPayloadSize() << endl;
					cout << objImageDataPtr->GetPixelFormat() << endl;
					cout << GX_PIXEL_FORMAT_BAYER_GR8 << endl;

					// Assuming the raw data is a BayerRG8 image, use GX_BIT_0_7
					void* pRGB24Buffer = NULL;				
					pRGB24Buffer = objImageDataPtr->ConvertToRGB24(GX_BIT_0_7, GX_RAW2RGB_NEIGHBOUR, true);				

					// Wrap the converted buffer in a Mat of the frame's size.
					Mat test;
					test.create(objImageDataPtr->GetHeight(), objImageDataPtr->GetWidth(), CV_8UC3);
					memcpy(test.data, pRGB24Buffer, objImageDataPtr->GetPayloadSize() * 3);

					if (flag == true)//first frame only
					{
						flag = false;
						// NOTE(review): taking the address of a temporary IplImage —
						// the pointer is only safe within this statement's scope.
						frame = &IplImage(test);

						//int fourcc = CV_FOURCC('X', 'V', 'I', 'D');			//option 1: occasionally fails — the encoder is not opened automatically
						int fourcc = -1;//-1 pops up the codec picker, avoiding an encoder that will not open	//option 2: manual selection — has never failed
						video = cvCreateVideoWriter(videoPath, fourcc, 4,
							cvSize(frame->width, frame->height)); //create the CvVideoWriter and allocate its storage
	
						if (video) //a non-null CvVideoWriter means creation succeeded
						{
							cout << "VideoWriter has created." << endl;
						}
						cvNamedWindow("Camera Video", 0); //create a display window
					}
				
					frame = &IplImage(test);
					if (!frame) break;
					cvShowImage("Camera Video", frame); //show the current frame
					n = cvWriteFrame(video, frame); //returns 1 when the frame was written successfully	
					if (!n) break;				

					//system("pause");
					if (waitKey(20)==27)	// ESC stops the recording loop
					{
						a = 0;
						break;
					}
				}
			}

			cvReleaseVideoWriter(&video);
			cvDestroyWindow("Camera Video");
			

			// Send the stop-acquisition command
			ObjFeatureControlPtr->GetCommandFeature("AcquisitionStop")->Execute();
			ObjStreamPtr->StopGrab();

			// Release resources
			ObjStreamPtr->Close();
			ObjDevicePtr->Close();
		} while (false);
	}
	catch (CGalaxyException&e)
	{
		cout << "錯誤碼: " << e.GetErrorCode() << endl;
		cout << "錯誤描述信息: " << e.what() << endl;
	}
	catch (std::exception&e)
	{
		cout << "錯誤描述信息: " << e.what() << endl;
	}

	// Uninitialize the SDK
	IGXFactory::GetInstance().Uninit();

	return 0;
}

 C++ Opencv glcm

 

#include<iostream>
#include<opencv2/highgui.hpp>
#include<opencv2/core.hpp>
#include<opencv2/opencv.hpp>
#include <vector>
#include <fstream>
#include <io.h>

using namespace std;
using namespace cv;
class MyGLCM
{
public:
	MyGLCM();
	~MyGLCM();
	const int gray_level = 16;//紋理區域塊的大小,通常將圖像劃分成若干個紋理塊計算
	vector<double> glamvalue;//全局變量

	//【】第一步:j計算共生矩陣
	// 0度灰度共生矩陣
	void getglcm_0(Mat& input, Mat& dst)//0度灰度共生矩陣
	{
		Mat src = input;
		CV_Assert(1 == src.channels());
		src.convertTo(src, CV_32S);
		int height = src.rows;
		int width = src.cols;
		int max_gray_level = 0;
		for (int j = 0; j < height; j++)//尋找像素灰度最大值
		{
			int* srcdata = src.ptr<int>(j);
			for (int i = 0; i < width; i++)
			{
				if (srcdata[i] > max_gray_level)
				{
					max_gray_level = srcdata[i];
				}
			}
		}
		max_gray_level++;//像素灰度最大值加1即爲該矩陣所擁有的灰度級數
		if (max_gray_level > 16)//若灰度級數大於16,則將圖像的灰度級縮小至16級,減小灰度共生矩陣的大小。
		{
			for (int i = 0; i < height; i++)
			{
				int*srcdata = src.ptr<int>(i);
				for (int j = 0; j < width; j++)
				{
					srcdata[j] = (int)srcdata[j] / gray_level;
				}
			}

			dst.create(gray_level, gray_level, CV_32SC1);
			dst = Scalar::all(0);
			for (int i = 0; i < height; i++)
			{
				int*srcdata = src.ptr<int>(i);
				for (int j = 0; j < width - 1; j++)
				{
					int rows = srcdata[j];
					int cols = srcdata[j + 1];
					dst.ptr<int>(rows)[cols]++;
				}
			}
		}
		else//若灰度級數小於16,則生成相應的灰度共生矩陣
		{
			dst.create(max_gray_level, max_gray_level, CV_32SC1);
			dst = Scalar::all(0);
			for (int i = 0; i < height; i++)
			{
				int*srcdata = src.ptr<int>(i);
				for (int j = 0; j < width - 1; j++)
				{
					int rows = srcdata[j];
					int cols = srcdata[j + 1];
					dst.ptr<int>(rows)[cols]++;
				}
			}
		}
	}

	// 45度灰度共生矩陣
	void getglcm_45(Mat& input, Mat& dst)//45度灰度共生矩陣
	{
		Mat src = input;
		CV_Assert(1 == src.channels());
		src.convertTo(src, CV_32S);
		int height = src.rows;
		int width = src.cols;
		int max_gray_level = 0;
		for (int j = 0; j < height; j++)
		{
			int* srcdata = src.ptr<int>(j);
			for (int i = 0; i < width; i++)
			{
				if (srcdata[i] > max_gray_level)
				{
					max_gray_level = srcdata[i];
				}
			}
		}
		max_gray_level++;
		if (max_gray_level > 16)
		{
			for (int i = 0; i < height; i++)//將圖像的灰度級縮小至16級,減小灰度共生矩陣的大小。
			{
				int*srcdata = src.ptr<int>(i);
				for (int j = 0; j < width; j++)
				{
					srcdata[j] = (int)srcdata[j] / gray_level;
				}
			}

			dst.create(gray_level, gray_level, CV_32SC1);
			dst = Scalar::all(0);
			for (int i = 0; i < height - 1; i++)
			{
				int*srcdata = src.ptr<int>(i);
				int*srcdata1 = src.ptr<int>(i + 1);
				for (int j = 0; j < width - 1; j++)
				{
					int rows = srcdata[j];
					int cols = srcdata1[j + 1];
					dst.ptr<int>(rows)[cols]++;
				}
			}
		}
		else
		{
			dst.create(max_gray_level, max_gray_level, CV_32SC1);
			dst = Scalar::all(0);
			for (int i = 0; i < height - 1; i++)
			{
				int*srcdata = src.ptr<int>(i);
				int*srcdata1 = src.ptr<int>(i + 1);
				for (int j = 0; j < width - 1; j++)
				{
					int rows = srcdata[j];
					int cols = srcdata1[j + 1];
					dst.ptr<int>(rows)[cols]++;
				}
			}
		}
	}

	// 90度灰度共生矩陣
	void getglcm_90(Mat& input, Mat& dst)//90度灰度共生矩陣
	{
		Mat src = input;
		CV_Assert(1 == src.channels());
		src.convertTo(src, CV_32S);
		int height = src.rows;
		int width = src.cols;
		int max_gray_level = 0;
		for (int j = 0; j < height; j++)
		{
			int* srcdata = src.ptr<int>(j);
			for (int i = 0; i < width; i++)
			{
				if (srcdata[i] > max_gray_level)
				{
					max_gray_level = srcdata[i];
				}
			}
		}
		max_gray_level++;
		if (max_gray_level > 16)
		{
			for (int i = 0; i < height; i++)//將圖像的灰度級縮小至16級,減小灰度共生矩陣的大小。
			{
				int*srcdata = src.ptr<int>(i);
				for (int j = 0; j < width; j++)
				{
					srcdata[j] = (int)srcdata[j] / gray_level;
				}
			}

			dst.create(gray_level, gray_level, CV_32SC1);
			dst = Scalar::all(0);
			for (int i = 0; i < height - 1; i++)
			{
				int*srcdata = src.ptr<int>(i);
				int*srcdata1 = src.ptr<int>(i + 1);
				for (int j = 0; j < width; j++)
				{
					int rows = srcdata[j];
					int cols = srcdata1[j];
					dst.ptr<int>(rows)[cols]++;
				}
			}
		}
		else
		{
			dst.create(max_gray_level, max_gray_level, CV_32SC1);
			dst = Scalar::all(0);
			for (int i = 0; i < height - 1; i++)
			{
				int*srcdata = src.ptr<int>(i);
				int*srcdata1 = src.ptr<int>(i + 1);
				for (int j = 0; j < width; j++)
				{
					int rows = srcdata[j];
					int cols = srcdata1[j];
					dst.ptr<int>(rows)[cols]++;
				}
			}
		}
	}

	// 135度灰度共生矩陣
	void getglcm_135(Mat& input, Mat& dst)//135度灰度共生矩陣
	{
		Mat src = input;
		CV_Assert(1 == src.channels());
		src.convertTo(src, CV_32S);
		int height = src.rows;
		int width = src.cols;
		int max_gray_level = 0;
		for (int j = 0; j < height; j++)
		{
			int* srcdata = src.ptr<int>(j);
			for (int i = 0; i < width; i++)
			{
				if (srcdata[i] > max_gray_level)
				{
					max_gray_level = srcdata[i];
				}
			}
		}
		max_gray_level++;
		if (max_gray_level > 16)
		{
			for (int i = 0; i < height; i++)//將圖像的灰度級縮小至16級,減小灰度共生矩陣的大小。
			{
				int*srcdata = src.ptr<int>(i);
				for (int j = 0; j < width; j++)
				{
					srcdata[j] = (int)srcdata[j] / gray_level;
				}
			}

			dst.create(gray_level, gray_level, CV_32SC1);
			dst = Scalar::all(0);
			for (int i = 0; i < height - 1; i++)
			{
				int*srcdata = src.ptr<int>(i);
				int*srcdata1 = src.ptr<int>(i + 1);
				for (int j = 1; j < width; j++)
				{
					int rows = srcdata[j];
					int cols = srcdata1[j - 1];
					dst.ptr<int>(rows)[cols]++;
				}
			}
		}
		else
		{
			dst.create(max_gray_level, max_gray_level, CV_32SC1);
			dst = Scalar::all(0);
			for (int i = 0; i < height - 1; i++)
			{
				int*srcdata = src.ptr<int>(i);
				int*srcdata1 = src.ptr<int>(i + 1);
				for (int j = 1; j < width; j++)
				{
					int rows = srcdata[j];
					int cols = srcdata1[j - 1];
					dst.ptr<int>(rows)[cols]++;
				}
			}
		}
	}

	// 【】第二步:計算紋理特徵
	// 特徵值計算—— double& Asm, double& Con, double& Ent, double& Idm
	void feature_computer(Mat&src, float& Asm, float& Con, float& Ent, float& Idm)//計算特徵值
	{
		int height = src.rows;
		int width = src.cols;
		int total = 0;
		for (int i = 0; i < height; i++)
		{
			int*srcdata = src.ptr<int>(i);
			for (int j = 0; j < width; j++)
			{
				total += srcdata[j];//求圖像所有像素的灰度值的和
			}
		}

		Mat copy;
		copy.create(height, width, CV_64FC1);
		for (int i = 0; i < height; i++)
		{
			int*srcdata = src.ptr<int>(i);
			float*copydata = copy.ptr<float>(i);
			for (int j = 0; j < width; j++)
			{
				copydata[j] = (float)srcdata[j] / (float)total;//圖像每一個像素的的值除以像素總和
			}
		}


		for (int i = 0; i < height; i++)
		{
			float*srcdata = copy.ptr<float>(i);
			for (int j = 0; j < width; j++)
			{
				Asm += srcdata[j] * srcdata[j];								//能量
				if (srcdata[j]>0)
				{
					Ent -= srcdata[j] * log(srcdata[j]);					//熵   
				}
				Con += (float)(i - j)*(float)(i - j)*srcdata[j];			//對比度
				Idm += srcdata[j] / (1 + (float)(i - j)*(float)(i - j));	//逆差矩
			}
		}
	}

private:

};

MyGLCM::MyGLCM()
{
}

MyGLCM::~MyGLCM()
{
}

C++ Opencv 同態濾波

 

/*
#include <opencv2\core\core.hpp>
#include <opencv2\highgui\highgui.hpp>
#include <opencv2\imgproc\imgproc.hpp>
#include <opencv2\ml\ml.hpp>
#include <iostream>

using namespace cv;
using namespace std;	
*/
imshow("原圖", src);
// Pad to DFT-friendly dimensions for faster transforms.
int M = getOptimalDFTSize(src.rows);
int N = getOptimalDFTSize(src.cols);

resize(src, src, Size(N, M));
src.convertTo(src, CV_64FC1);// CV_64FC1: one 64-bit floating-point element per pixel, single channel
dst.convertTo(dst, CV_64FC1);
	
// 0. Normalize pixel values to [0, 1]
src = src / 255.0;
	
// 1. Log transform: log(1 + pixel)
/*
walks src with row pointers and writes the result back into src in place
(srcdata and logdata alias the same rows)
*/
for (int i = 0; i < src.rows; i++)
{
	double* srcdata = src.ptr<double>(i);
	double* logdata = src.ptr<double>(i);
	for (int j = 0; j < src.cols; j++)
	{
		logdata[j] = log(srcdata[j] + 1);
	}
}
imshow("對數計算結果", src);

// Discrete cosine transform (used here in place of the Fourier transform)
// 2. dct: Discrete Cosine Transform
/*
result is written to mat_dct
*/
Mat mat_dct = Mat::zeros(src.rows, src.cols, CV_64FC1);
dct(src, mat_dct);
imshow("離散餘弦變換", mat_dct);

// 3. linear filter
/*
frequency-domain filter: gain rises from gammaL near DC towards gammaH
at higher frequencies (homomorphic high-frequency emphasis)
*/
Mat H_u_v;
double gammaH = 1.5;//1.5 
double gammaL = 0.5;//0.5
double C = 1;
double d0 = 10;//(srcImg.rows / 2)*(srcImg.rows / 2) + (srcImg.cols / 2)*(srcImg.cols / 2);
double d2 = 0;
H_u_v = Mat::zeros(src.rows, src.cols, CV_64FC1);

double totalWeight = 0.0;
for (int i = 0; i < src.rows; i++)
{
	double * dataH_u_v = H_u_v.ptr<double>(i);
	for (int j = 0; j < src.cols; j++)
	{
		// squared distance from the (0,0) frequency corner
		d2 = pow((i), 2.0) + pow((j), 2.0);
		dataH_u_v[j] = (gammaH - gammaL)*(1 - exp(-C*d2 / d0)) + gammaL;
		totalWeight += dataH_u_v[j];
	}
}
// explicitly overrides the DC gain
H_u_v.ptr<double>(0)[0] = 1.1;

imshow("頻域濾波器", H_u_v);
imshow("頻域濾波前", mat_dct);

// element-wise multiply = filtering in the DCT domain
mat_dct = mat_dct.mul(H_u_v);
imshow("頻域濾波", mat_dct);
		

// 4. idct
/*
inverse discrete cosine transform back to the spatial domain
*/
idct(mat_dct, dst);
imshow("離散餘弦反變換",dst);

	
// 5. Exponential transform (inverse of the log in step 1)
for (int i = 0; i < src.rows; i++)
{
	double* srcdata = dst.ptr<double>(i);
	double* dstdata = dst.ptr<double>(i);
	for (int j = 0; j < src.cols; j++)
	{
		dstdata[j] = exp(srcdata[j]);
	}
}	
imshow("指數計算結果", dst);
	

// 6. Rescale back into a displayable 8-bit image
normalize(dst, dst, 0, 255, CV_MINMAX);
convertScaleAbs(dst, dst);
dst.convertTo(dst, CV_8UC1);
for (int i = 0; i < dst.rows; i++)
{
	// NOTE(review): single-index at<uchar>(i) on a 2-D Mat addresses element
	// (i,0) only, and << streams a uchar as a character, not a number —
	// confirm this debug print is intentional.
	cout << dst.at<uchar>(i) << endl;
}
imshow("數據恢復", dst);	
}

C++ Opencv hsv的h直方圖

#include<opencv2/opencv.hpp>
#include<iostream>
#include<vector>


using namespace cv;
using namespace std;


/*
如何調用
Mat src, histimg = Mat::zeros(540, 540, CV_8UC3);
getHistImg(src, histimg);
*/
void getHistImg(const Mat src, Mat &histimg) {
Mat hue, hist;


int hsize = 16;//直方圖bin的個數
float hranges[] = { 0, 180 };
const float* phranges = hranges;


int ch[] = { 0, 0 };
hue.create(src.size(), src.depth());
mixChannels(&src, 1, &hue, 1, ch, 1);//得到H分量


calcHist(&hue, 1, 0, Mat(), hist, 1, &hsize, &phranges);


normalize(hist, hist, 0, 255, NORM_MINMAX);


histimg = Scalar::all(0);
int binW = histimg.cols / hsize;
Mat buf(1, hsize, CV_8UC3);
for (int i = 0; i < hsize; i++)
buf.at<Vec3b>(i) = Vec3b(saturate_cast<uchar>(i*180. / hsize), 255, 255);
cvtColor(buf, buf, COLOR_HSV2BGR);


for (int i = 0; i < hsize; i++)
{
int val = saturate_cast<int>(hist.at<float>(i)*histimg.rows / 255);
rectangle(histimg, Point(i*binW, histimg.rows),
Point((i + 1)*binW, histimg.rows - val),
Scalar(buf.at<Vec3b>(i)), -1, 8);
}
}
// Demo driver: loads an image, converts it to HSV and shows its hue histogram.
// FIX: the original loaded the very same file twice (once into an unused local
// `srcImage`); the redundant load has been removed.
int main()
{
	// Destination canvas for the histogram plot.
	Mat src, histimg = Mat::zeros(540, 540, CV_8UC3);

	// Load the image once.
	src = imread("C:\\Users\\Administrator\\Desktop\\b_亮度不同\\B\\01\\菇頭B_3.bmp");
	if (!src.data)
	{
		cout << "load image failed" << endl;
		return -1;
	}

	// Convert to HSV — getHistImg expects hue in channel 0.
	cvtColor(src, src, CV_BGR2HSV);

	// Build and render the hue histogram.
	getHistImg(src, histimg);

	imshow("histImage", histimg);
	imshow("srcImage", src);

	waitKey(0);
	system("pause");

	return 0;
}

 

C++ Opencv HSV H、S、V直方圖

#include<opencv2/opencv.hpp>
#include<iostream>
#include<vector>

using namespace cv;
using namespace std;

int main()
{
//Mat srcImage = imread("C:\\Users\\Administrator\\Desktop\\b_亮度不同\\B\\01\\菇頭B_3.bmp");
//Mat hsvImage;


//Mat srcImage = imread("C:\\Users\\Administrator\\Desktop\\b_亮度不同\\B\\01\\菇頭B_3.bmp");


//打開測試圖  
IplImage * image = cvLoadImage("C:\\Users\\Administrator\\Desktop\\b_亮度不同\\B\\01\\菇頭B_3.bmp", 1);           //將本地測試圖導入到程序堆中  
if (image == NULL){                                     //判斷是否打開成功  
printf("錯誤:無法打開該圖像,圖像文件路徑不正確!");
return -1;
}
//RGB顏色空間到HSV顏色空間
//創建一張空白圖像用於存儲轉換成HSV顏色空間後的圖像  
IplImage *image1 = cvCreateImage(cvSize(image->width, image->height), image->depth, image->nChannels);  //注意圖像必須和輸入圖像的size,顏色位深度,通道一致  
cvZero(image1); //清空image_data數據  


//顏色空間轉換  
cvCvtColor(image, image1, CV_BGR2HSV);//CV_BGR2HSV 


//創建存儲HSV通道圖像  
IplImage *image_h = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1);//注意,必須是單通道圖像  
IplImage *image_s = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1);//注意,必須是單通道圖像  
IplImage *image_v = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1);//注意,必須是單通道圖像  


//分離通道  
cvSplit(image, image_h, image_s, image_v, NULL); //注意Opencv中hsv沒有順序問題


//創建H通道的直方圖  
int arr_size_h = 255;                 //定義一個變量用於表示直方圖行寬  
float hranges_arr_h[] = { 0, 180 };       //圖像方塊範圍數組  
float *phranges_arr_h = hranges_arr_h;      //cvCreateHist參數是一個二級指針,所以要用指針指向數組然後傳參  
CvHistogram *hist_h = cvCreateHist(1, &arr_size_h, CV_HIST_ARRAY, &phranges_arr_h, 1);    //創建一個一維的直方圖,行寬爲255,多維密集數組,方塊範圍爲0-180,bin均化 


//創建S通道的直方圖  
int arr_size_s = 255;                 //定義一個變量用於表示直方圖行寬  
float hranges_arr_s[] = { 0, 255 };       //圖像方塊範圍數組  
float *phranges_arr_s = hranges_arr_s;      //cvCreateHist參數是一個二級指針,所以要用指針指向數組然後傳參  
CvHistogram *hist_s = cvCreateHist(1, &arr_size_s, CV_HIST_ARRAY, &phranges_arr_s, 1);    //創建一個一維的直方圖,行寬爲255,多維密集數組,方塊範圍爲0-255,bin均化 


//創建V通道的直方圖  
int arr_size_v = 255;                 //定義一個變量用於表示直方圖行寬  
float hranges_arr_v[] = { 0, 255 };       //圖像方塊範圍數組  
float *phranges_arr_v = hranges_arr_v;      //cvCreateHist參數是一個二級指針,所以要用指針指向數組然後傳參  
CvHistogram *hist_v = cvCreateHist(1, &arr_size_v, CV_HIST_ARRAY, &phranges_arr_v, 1);    //創建一個一維的直方圖,行寬爲255,多維密集數組,方塊範圍爲0-255,bin均化 


//計算H通道的直方圖大小
cvCalcHist(&image_h, hist_h, 0, 0);


//計算S通道的直方圖大小
cvCalcHist(&image_s, hist_s, 0, 0);


//計算V通道的直方圖大小
cvCalcHist(&image_v, hist_v, 0, 0);


//H通道的直方圖縮小
float max_val_h;  //用於存儲獲取到的最大值
cvGetMinMaxHistValue(hist_h, 0, &max_val_h, 0, 0); //獲取直方圖最大值  
cvConvertScale(hist_h->bins, hist_h->bins, max_val_h ? 180 / max_val_h : 0., 0);  //按比例縮小直方圖  


//S通道的直方圖縮小
float max_val_s;  //用於存儲獲取到的最大值
cvGetMinMaxHistValue(hist_s, 0, &max_val_s, 0, 0); //獲取直方圖最大值  
cvConvertScale(hist_s->bins, hist_s->bins, max_val_s ? 255 / max_val_s : 0., 0);  //按比例縮小直方圖  


//V通道的直方圖縮小
float max_val_v;  //用於存儲獲取到的最大值
cvGetMinMaxHistValue(hist_v, 0, &max_val_v, 0, 0); //獲取直方圖最大值  
cvConvertScale(hist_v->bins, hist_v->bins, max_val_v ? 255 / max_val_v : 0., 0);  //按比例縮小直方圖  


//創建一個空白圖像用於繪製直方圖  
IplImage *histimg = cvCreateImage(cvSize(320, 200), 8, 3);
cvZero(histimg);    //清空histimag-imagedata數據  


//開始繪製H通道的直方圖  
int bin_h;
bin_h = histimg->width / arr_size_h; //得到開始繪製點位置  


for (int i = 0; i < arr_size_h; i++)
{
double val = (cvGetReal1D(hist_h->bins, i)*histimg->height / 360);//獲取矩陣元素值,並轉換爲對應高度  
CvScalar color = CV_RGB(255, 0, 0);
cvRectangle(histimg, cvPoint(i*bin_h, histimg->height), cvPoint((i + 1)*bin_h, (int)(histimg->height - val)), color, 1, 8, 0);
}


//創建一個空白圖像用於繪製直方圖  
IplImage *sistimg = cvCreateImage(cvSize(320, 200), 8, 3);
cvZero(sistimg);    //清空histimag-imagedata數據  


//開始繪製S通道的直方圖  
int bin_s;
bin_s = sistimg->width / arr_size_s; //得到開始繪製點位置  


for (int i = 0; i < arr_size_s; i++)
{
double val = (cvGetReal1D(hist_s->bins, i)*sistimg->height / 255);//獲取矩陣元素值,並轉換爲對應高度  
CvScalar color = CV_RGB(0, 255, 0);
cvRectangle(sistimg, cvPoint(i*bin_s, sistimg->height), cvPoint((i + 1)*bin_s, (int)(sistimg->height - val)), color, 1, 8, 0);
}


//創建一個空白圖像用於繪製直方圖  
IplImage *vistimg = cvCreateImage(cvSize(320, 200), 8, 3);
cvZero(vistimg);    //清空histimag-imagedata數據 


//開始繪製V通道的直方圖  
int bin_v;
bin_v = vistimg->width / arr_size_v; //得到開始繪製點位置  


for (int i = 0; i < arr_size_v; i++)
{
double val = (cvGetReal1D(hist_v->bins, i)*vistimg->height / 255);//獲取矩陣元素值,並轉換爲對應高度  
CvScalar color = CV_RGB(0, 0, 255);
cvRectangle(vistimg, cvPoint(i*bin_v, vistimg->height), cvPoint((i + 1)*bin_v, (int)(vistimg->height - val)), color, 1, 8, 0);
}


//顯示圖像
cvNamedWindow("image_hsv", 0);
cvNamedWindow("H", 0);
cvNamedWindow("S", 0);
cvNamedWindow("V", 0);
cvShowImage("image_hsv", image1);
cvShowImage("H", histimg);
cvShowImage("S", sistimg);
cvShowImage("V", vistimg);
cvWaitKey(0);//message


return 0;

}

C++ ATL::CImage和Mat的轉換

#include <iostream>
#include <opencv2\core\core.hpp>
#include <opencv2\imgproc\imgproc.hpp>
#include <opencv2\opencv.hpp>
#include "afxwin.h"
using namespace cv;
using namespace std;
class Mat_CImage
{
public:
	void MatToCImage(Mat &mat, ATL::CImage &cImage)
	{
		//create new CImage
		int width = mat.cols;
		int height = mat.rows;
		int channels = mat.channels();

		cImage.Destroy(); //clear
		cImage.Create(width, height, 8 * channels); //默認圖像像素單通道佔用1個字節

		//copy values
		uchar* ps;
		uchar* pimg = (uchar*)cImage.GetBits(); //A pointer to the bitmap buffer
		int step = cImage.GetPitch();

		for (int i = 0; i < height; ++i)
		{
			ps = (mat.ptr<uchar>(i));
			for (int j = 0; j < width; ++j)
			{
				if (channels == 1) //gray
				{
					*(pimg + i*step + j) = ps[j];
				}
				else if (channels == 3) //color
				{
					for (int k = 0; k < 3; ++k)
					{
						*(pimg + i*step + j * 3 + k) = ps[j * 3 + k];
					}
				}
			}
		}
	}
	void CImageToMat(ATL::CImage& cimage, Mat& mat)
	{
		if (true == cimage.IsNull())
		{
			return;
		}
		int nChannels = cimage.GetBPP() / 8;
		if ((1 != nChannels) && (3 != nChannels))
		{
			return;
		}
		int nWidth = cimage.GetWidth();
		int nHeight = cimage.GetHeight();
		//重建mat
		if (1 == nChannels)
		{
			mat.create(nHeight, nWidth, CV_8UC1);
		}
		else if (3 == nChannels)
		{
			mat.create(nHeight, nWidth, CV_8UC3);
		}
		//拷貝數據
		uchar* pucRow;//指向數據區的行指針
		uchar* pucImage = (uchar*)cimage.GetBits();//指向數據區的指針
		int nStep = cimage.GetPitch();//每行的字節數,注意這個返回值有正有負
		for (int nRow = 0; nRow < nHeight; nRow++)
		{
			pucRow = (mat.ptr<uchar>(nRow));
			for (int nCol = 0; nCol < nWidth; nCol++)
			{
				if (1 == nChannels)
				{
					pucRow[nCol] = *(pucImage + nRow * nStep + nCol);
				}
				else if (3 == nChannels)
				{
					for (int nCha = 0; nCha < 3; nCha++)
					{
						pucRow[nCol * 3 + nCha] = *(pucImage + nRow * nStep + nCol * 3 + nCha);
					}
				}
			}
		}
	}
};

C++ Opencv 圖像目標分割

#include <opencv2/opencv.hpp>
#include <opencv2\core.hpp>
#include <iostream>

using namespace cv;
using namespace std;

/*
如何使用
輸入:src
輸出: Mat dst, edge;
Water_Cut(Mat& src, OutputArray& dst, OutputArray& edge)
*/
// Watershed-based foreground segmentation with two hard-coded seed markers:
// a stroke in the top-left corner (background) and a dot at the image center
// (foreground). Outputs:
//   dst  - the source pixels belonging to the center region (black elsewhere)
//   edge - a white mask of that same region
// Usage:
//   Mat dst, edge;
//   Water_Cut(src, dst, edge);
void Water_Cut(Mat& src, OutputArray& dst, OutputArray& edge)
{
	Mat srcImage = src.clone();
	//src.copyTo(srcImage);
	//cv::resize(srcImage, srcImage, Size(srcImage.cols / 2, srcImage.rows / 2));
	//namedWindow("resImage", 0);
	//imshow("resImage", srcImage);
	//waitKey();
	// [two seed markers]
	// first marker: short diagonal stroke at the top-left corner
	Mat maskImage;
	maskImage = Mat(srcImage.size(), CV_8UC1);  // marker mask, later passed to findContours
	maskImage = Scalar::all(0);
	Point point1(0, 0), point2(10, 10);
	line(maskImage, point1, point2, Scalar::all(255), 5, 8, 0);

	// second marker: a single thick dot at the image center
	Point point3(srcImage.cols / 2, srcImage.rows / 2), point4(srcImage.cols / 2, srcImage.rows / 2);
	line(maskImage, point3, point4, Scalar::all(255), 5, 8, 0);

	// [contours of the markers]
	vector<vector<Point>> contours;
	vector<Vec4i> hierarchy;
	findContours(maskImage, contours, hierarchy, RETR_CCOMP, CHAIN_APPROX_SIMPLE);

	// [watershed]
	// second argument of watershed(): CV_32S label image
	Mat maskWaterShed;  // argument for watershed()
	maskWaterShed = Mat(maskImage.size(), CV_32S);// blank label mask maskWaterShed
	maskWaterShed = Scalar::all(0);

	/* draw each marker contour with its own label (1, 2, ...) */
	for (int index = 0; index < contours.size(); index++)
		drawContours(maskWaterShed, contours, index, Scalar::all(index + 1), -1, 8, hierarchy, INT_MAX);

	/* imshow of maskWaterShed would look all-black: it only holds small label
	   values (1, 2, 3, ...); the relabeling loop below makes the result visible */
	// first argument: srcImage (CV_8UC3)
	watershed(srcImage, maskWaterShed);  // afterwards read labels via maskWaterShed.at<int>(row, col)

	// [random color per region] (only used by the commented-out display code)
	vector<Vec3b> colorTab;
	for (int i = 0; i < contours.size(); i++)
	{
		int b = theRNG().uniform(0, 255);
		int g = theRNG().uniform(0, 255);
		int r = theRNG().uniform(0, 255);

		colorTab.push_back(Vec3b((uchar)b, (uchar)g, (uchar)r));
	}
	// keep only the watershed region whose label matches the center pixel
	Mat dst_ = Mat::zeros(maskWaterShed.size(), CV_8UC3);
	Mat dst_edge = Mat::zeros(maskWaterShed.size(), CV_8UC3);
	int index = maskWaterShed.at<int>(maskWaterShed.rows / 2, maskWaterShed.cols / 2);
	int index_temp = 0;
	for (int i = 0; i < maskWaterShed.rows; i++)
	{
		for (int j = 0; j < maskWaterShed.cols; j++)
		{
			index_temp = maskWaterShed.at<int>(i, j);
			//cout << index_temp << endl;
			if (index_temp == index)// pixel belongs to the center-seeded region
			{
				dst_edge.at<Vec3b>(i, j) = Vec3b((uchar)255, (uchar)255, (uchar)255); //colorTab[index - 1];
				dst_.at<Vec3b>(i, j) = srcImage.at<Vec3b>(i, j);
			}
		}
	}
	//namedWindow("分割結果", 0);
	//imshow("分割結果", dst_);

	/*Mat dst_add;
	addWeighted(dst_edge, 0.3, srcImage, 0.7, 0, dst_add);
	namedWindow("加權結果", 0);
	imshow("加權結果", dst_add);*/

	//namedWindow("邊緣區域", 0);
	//imshow("邊緣區域", dst_edge);
	//imwrite("方格.bmp", dst_edge);
	dst_.copyTo(dst);
	dst_edge.copyTo(edge);
}

C++ Opencv 特徵Feature.h{顏色、形狀、紋理}

#include <opencv2/opencv.hpp>
#include <opencv2\core.hpp>
#include <iostream>
#include <vector>
#include "time.h"

using namespace cv;
using namespace std;

class Feature
{
public:

	/*
	第一步:建立類
	#include <opencv2/opencv.hpp>
	#include <iostream>
	#include <vector>
	#include "time.h"
	using namespace cv;
	using namespace std;

	第二步:包含類
	Feature feature_class;
	第三步:
	集合顏色+形狀+紋理
	// 圖像特徵_HU
	Mat hu_dst = dst.clone();
	double Hu[7] = { 0 };
	feature_class.feature_hu(hu_dst, Hu);
	// 圖像特徵_COLOR
	Mat color_dst = dst.clone();
	float Mom[9] = { 0 };
	feature_class.feature_color(color_dst, Mom);
	// 圖像特徵_GLCM
	Mat glcm_dst = dst.clone();
	cv::cvtColor(glcm_dst, glcm_dst, CV_RGB2GRAY);
	float glcm_data[16] = { 0 };
	feature_class.feature_glcm(glcm_dst, glcm_data);
	第四步:
	// 特徵集合7+9+16
	float test_data[32] = { 0 };
	for (size_t j = 0; j < 7; j++)
	{
	test_data[j] = (float)Hu[j];
	}
	for (size_t j = 0; j < 9; j++)
	{
	test_data[7 + j] = (float)Mom[j];
	}
	for (size_t j = 0; j < 16; j++)
	{
	test_data[16 + j] = (float)glcm_data[j];
	}
	*/
	/* —————————————————————————【顏色】———————————————————— */
	// 顏色 計算三階矩
	double calc3orderMom(Mat &channel)  //計算三階矩
	{
		uchar *p;
		double mom = 0;
		double m = mean(channel)[0];    //計算單通道圖像的均值
		int nRows = channel.rows;
		int nCols = channel.cols;
		if (channel.isContinuous())     //連續存儲有助於提升圖像掃描速度
		{
			nCols *= nRows;
			nRows = 1;
		}
		for (int i = 0; i < nRows; i++) //計算立方和
		{
			p = channel.ptr<uchar>(i);
			for (int j = 0; j < nCols; j++)
				mom += pow((p[j] - m), 3);
		}
		float temp;
		temp = cvCbrt((float)(mom / (nRows*nCols)));    //求均值的立方根
		mom = (double)temp;
		return mom;
	}
	// 顏色 計算9個顏色矩:3個通道的1、2、3階矩
	double *colorMom(Mat &img)
	{
		double *Mom = new double[9];    //存放9個顏色矩
		if (img.channels() != 3)
			std::cout << "Error,input image must be a color image" << endl;
		Mat b(img.rows, img.cols, CV_8U);
		Mat r(img.rows, img.cols, CV_8U);
		Mat g(img.rows, img.cols, CV_8U);
		Mat channels[] = { b, g, r };
		split(img, channels);
		//cv::imshow("r", channels[0]);
		//cv::imshow("g", channels[1]);
		//cv::imshow("b", channels[2]);
		//waitKey(0);
		Mat tmp_m, tmp_sd;
		//計算b通道的顏色矩
		meanStdDev(b, tmp_m, tmp_sd);
		Mom[0] = tmp_m.at<double>(0, 0);
		Mom[3] = tmp_sd.at<double>(0, 0);
		Mom[6] = calc3orderMom(b);
		//  cout << Mom[0] << " " << Mom[1] << " " << Mom[2] << " " << endl;
		//計算g通道的顏色矩
		meanStdDev(g, tmp_m, tmp_sd);
		Mom[1] = tmp_m.at<double>(0, 0);
		Mom[4] = tmp_sd.at<double>(0, 0);
		Mom[7] = calc3orderMom(g);
		//  cout << Mom[3] << " " << Mom[4] << " " << Mom[5] << " " << endl;
		//計算r通道的顏色矩
		meanStdDev(r, tmp_m, tmp_sd);
		Mom[2] = tmp_m.at<double>(0, 0);
		Mom[5] = tmp_sd.at<double>(0, 0);
		Mom[8] = calc3orderMom(r);
		//  cout << Mom[6] << " " << Mom[7] << " " << Mom[8] << " " << endl;
		return Mom;//返回顏色矩數組
	}
	// 顏色 
	bool feature_color(Mat src, float Mom[9])
	{
		if (src.channels() == 3)
		{
			// 圖像特徵_COLOR
			Mat color_dst = src.clone();
			cv::cvtColor(color_dst, color_dst, CV_RGB2HSV);
			double *MOM;
			MOM = colorMom(color_dst);
			for (int i = 0; i < 9; i++)
			{
				std::cout << (float)MOM[i] << endl;
				Mom[i] = (float)MOM[i];
			}
			return  true;
		}
		else
		{
			std::cout << "channels!=3";
			return false;
		}
	}


	/* —————————————————————————【形狀】———————————————————— */
	bool feature_hu(Mat src, double Hu[7])
	{
		if (src.channels() == 3)
		{
			// 圖像特徵_HU
			Mat hu_dst = src.clone();
			Mat kernel(3, 3, CV_32F, Scalar(-1));			
			cv::cvtColor(hu_dst, hu_dst, CV_RGB2GRAY);
			Canny(hu_dst, hu_dst, 0, 120);
			namedWindow("hu_dst", 0);
			imshow("hu_dst", hu_dst);
			waitKey(10);
			//double Hu[7];       //存儲得到的Hu矩陣
			Moments mo = moments(hu_dst);//矩變量
			HuMoments(mo, Hu);
			for (int i = 0; i < 7; i++)
			{
				std::cout << (float)Hu[i] << endl;
			}
			return true;
		}
		else if ((src.channels() == 1))
		{
			Mat hu_dst = src.clone();
			Canny(hu_dst, hu_dst, 0, 120);
			//double Hu[7];       //存儲得到的Hu矩陣
			Moments mo = moments(hu_dst);//矩變量
			HuMoments(mo, Hu);
			for (int i = 0; i < 7; i++)
			{
				std::cout << (float)Hu[i] << endl;
			}
			return true;
		}
		else
		{
			return false;
		}
	}


	/* —————————————————————————【紋理】————————————————————*/
	const int gray_level = 16;//紋理區域塊的大小,通常將圖像劃分成若干個紋理塊計算
	vector<double> glamvalue;//全局變量

	//【】第一步:j計算共生矩陣
	void getglcm_0(Mat& input, Mat& dst)//0度灰度共生矩陣
	{
		Mat src = input;
		CV_Assert(1 == src.channels());
		src.convertTo(src, CV_32S);
		int height = src.rows;
		int width = src.cols;
		int max_gray_level = 0;
		for (int j = 0; j < height; j++)//尋找像素灰度最大值
		{
			int* srcdata = src.ptr<int>(j);
			for (int i = 0; i < width; i++)
			{
				if (srcdata[i] > max_gray_level)
				{
					max_gray_level = srcdata[i];
				}
			}
		}
		max_gray_level++;//像素灰度最大值加1即爲該矩陣所擁有的灰度級數
		if (max_gray_level > 16)//若灰度級數大於16,則將圖像的灰度級縮小至16級,減小灰度共生矩陣的大小。
		{
			for (int i = 0; i < height; i++)
			{
				int*srcdata = src.ptr<int>(i);
				for (int j = 0; j < width; j++)
				{
					srcdata[j] = (int)srcdata[j] / gray_level;
				}
			}

			dst.create(gray_level, gray_level, CV_32SC1);
			dst = Scalar::all(0);
			for (int i = 0; i < height; i++)
			{
				int*srcdata = src.ptr<int>(i);
				for (int j = 0; j < width - 1; j++)
				{
					int rows = srcdata[j];
					int cols = srcdata[j + 1];
					dst.ptr<int>(rows)[cols]++;
				}
			}
		}
		else//若灰度級數小於16,則生成相應的灰度共生矩陣
		{
			dst.create(max_gray_level, max_gray_level, CV_32SC1);
			dst = Scalar::all(0);
			for (int i = 0; i < height; i++)
			{
				int*srcdata = src.ptr<int>(i);
				for (int j = 0; j < width - 1; j++)
				{
					int rows = srcdata[j];
					int cols = srcdata[j + 1];
					dst.ptr<int>(rows)[cols]++;
				}
			}
		}
	}

	void getglcm_45(Mat& input, Mat& dst)//45度灰度共生矩陣
	{
		Mat src = input;
		CV_Assert(1 == src.channels());
		src.convertTo(src, CV_32S);
		int height = src.rows;
		int width = src.cols;
		int max_gray_level = 0;
		for (int j = 0; j < height; j++)
		{
			int* srcdata = src.ptr<int>(j);
			for (int i = 0; i < width; i++)
			{
				if (srcdata[i] > max_gray_level)
				{
					max_gray_level = srcdata[i];
				}
			}
		}
		max_gray_level++;
		if (max_gray_level > 16)
		{
			for (int i = 0; i < height; i++)//將圖像的灰度級縮小至16級,減小灰度共生矩陣的大小。
			{
				int*srcdata = src.ptr<int>(i);
				for (int j = 0; j < width; j++)
				{
					srcdata[j] = (int)srcdata[j] / gray_level;
				}
			}

			dst.create(gray_level, gray_level, CV_32SC1);
			dst = Scalar::all(0);
			for (int i = 0; i < height - 1; i++)
			{
				int*srcdata = src.ptr<int>(i);
				int*srcdata1 = src.ptr<int>(i + 1);
				for (int j = 0; j < width - 1; j++)
				{
					int rows = srcdata[j];
					int cols = srcdata1[j + 1];
					dst.ptr<int>(rows)[cols]++;
				}
			}
		}
		else
		{
			dst.create(max_gray_level, max_gray_level, CV_32SC1);
			dst = Scalar::all(0);
			for (int i = 0; i < height - 1; i++)
			{
				int*srcdata = src.ptr<int>(i);
				int*srcdata1 = src.ptr<int>(i + 1);
				for (int j = 0; j < width - 1; j++)
				{
					int rows = srcdata[j];
					int cols = srcdata1[j + 1];
					dst.ptr<int>(rows)[cols]++;
				}
			}
		}
	}

	void getglcm_90(Mat& input, Mat& dst)//90度灰度共生矩陣
	{
		Mat src = input;
		CV_Assert(1 == src.channels());
		src.convertTo(src, CV_32S);
		int height = src.rows;
		int width = src.cols;
		int max_gray_level = 0;
		for (int j = 0; j < height; j++)
		{
			int* srcdata = src.ptr<int>(j);
			for (int i = 0; i < width; i++)
			{
				if (srcdata[i] > max_gray_level)
				{
					max_gray_level = srcdata[i];
				}
			}
		}
		max_gray_level++;
		if (max_gray_level > 16)
		{
			for (int i = 0; i < height; i++)//將圖像的灰度級縮小至16級,減小灰度共生矩陣的大小。
			{
				int*srcdata = src.ptr<int>(i);
				for (int j = 0; j < width; j++)
				{
					srcdata[j] = (int)srcdata[j] / gray_level;
				}
			}

			dst.create(gray_level, gray_level, CV_32SC1);
			dst = Scalar::all(0);
			for (int i = 0; i < height - 1; i++)
			{
				int*srcdata = src.ptr<int>(i);
				int*srcdata1 = src.ptr<int>(i + 1);
				for (int j = 0; j < width; j++)
				{
					int rows = srcdata[j];
					int cols = srcdata1[j];
					dst.ptr<int>(rows)[cols]++;
				}
			}
		}
		else
		{
			dst.create(max_gray_level, max_gray_level, CV_32SC1);
			dst = Scalar::all(0);
			for (int i = 0; i < height - 1; i++)
			{
				int*srcdata = src.ptr<int>(i);
				int*srcdata1 = src.ptr<int>(i + 1);
				for (int j = 0; j < width; j++)
				{
					int rows = srcdata[j];
					int cols = srcdata1[j];
					dst.ptr<int>(rows)[cols]++;
				}
			}
		}
	}

	void getglcm_135(Mat& input, Mat& dst)//135度灰度共生矩陣
	{
		Mat src = input;
		CV_Assert(1 == src.channels());
		src.convertTo(src, CV_32S);
		int height = src.rows;
		int width = src.cols;
		int max_gray_level = 0;
		for (int j = 0; j < height; j++)
		{
			int* srcdata = src.ptr<int>(j);
			for (int i = 0; i < width; i++)
			{
				if (srcdata[i] > max_gray_level)
				{
					max_gray_level = srcdata[i];
				}
			}
		}
		max_gray_level++;
		if (max_gray_level > 16)
		{
			for (int i = 0; i < height; i++)//將圖像的灰度級縮小至16級,減小灰度共生矩陣的大小。
			{
				int*srcdata = src.ptr<int>(i);
				for (int j = 0; j < width; j++)
				{
					srcdata[j] = (int)srcdata[j] / gray_level;
				}
			}

			dst.create(gray_level, gray_level, CV_32SC1);
			dst = Scalar::all(0);
			for (int i = 0; i < height - 1; i++)
			{
				int*srcdata = src.ptr<int>(i);
				int*srcdata1 = src.ptr<int>(i + 1);
				for (int j = 1; j < width; j++)
				{
					int rows = srcdata[j];
					int cols = srcdata1[j - 1];
					dst.ptr<int>(rows)[cols]++;
				}
			}
		}
		else
		{
			dst.create(max_gray_level, max_gray_level, CV_32SC1);
			dst = Scalar::all(0);
			for (int i = 0; i < height - 1; i++)
			{
				int*srcdata = src.ptr<int>(i);
				int*srcdata1 = src.ptr<int>(i + 1);
				for (int j = 1; j < width; j++)
				{
					int rows = srcdata[j];
					int cols = srcdata1[j - 1];
					dst.ptr<int>(rows)[cols]++;
				}
			}
		}
	}

	// 【】第二步:計算紋理特徵	// 特徵值計算—— double& Asm, double& Con, double& Ent, double& Idm
	void feature_computer(Mat&src, float& Asm, float& Con, float& Ent, float& Idm)//計算特徵值
	{
		int height = src.rows;
		int width = src.cols;
		int total = 0;
		for (int i = 0; i < height; i++)
		{
			int*srcdata = src.ptr<int>(i);
			for (int j = 0; j < width; j++)
			{
				total += srcdata[j];//求圖像所有像素的灰度值的和
			}
		}

		Mat copy;
		copy.create(height, width, CV_64FC1);
		for (int i = 0; i < height; i++)
		{
			int*srcdata = src.ptr<int>(i);
			float*copydata = copy.ptr<float>(i);
			for (int j = 0; j < width; j++)
			{
				copydata[j] = (float)srcdata[j] / (float)total;//圖像每一個像素的的值除以像素總和
			}
		}


		for (int i = 0; i < height; i++)
		{
			float*srcdata = copy.ptr<float>(i);
			for (int j = 0; j < width; j++)
			{
				Asm += srcdata[j] * srcdata[j];								//能量
				if (srcdata[j]>0)
				{
					Ent -= srcdata[j] * log(srcdata[j]);					//熵   
				}
				Con += (float)(i - j)*(float)(i - j)*srcdata[j];			//對比度
				Idm += srcdata[j] / (1 + (float)(i - j)*(float)(i - j));	//逆差矩
			}
		}
	}

	// 【】融合第一、二步
	/*
	Mat src_gray;
	float data[16] = {0};
	*/
	void feature_glcm(Mat src_gray, float data[16])
	{
		Mat dst_0, dst_90, dst_45, dst_135;

		getglcm_0(src_gray, dst_0);
		float  asm_0 = 0, con_0 = 0, ent_0 = 0, idm_0 = 0;
		feature_computer(dst_0, asm_0, con_0, ent_0, idm_0);


		getglcm_45(src_gray, dst_45);
		float  asm_45 = 0, con_45 = 0, ent_45 = 0, idm_45 = 0;
		feature_computer(dst_45, asm_45, con_45, ent_45, idm_45);


		getglcm_90(src_gray, dst_90);
		float  asm_90 = 0, con_90 = 0, ent_90 = 0, idm_90 = 0;
		feature_computer(dst_90, asm_90, con_90, ent_90, idm_90);


		getglcm_135(src_gray, dst_135);
		float  asm_135 = 0, con_135 = 0, ent_135 = 0, idm_135 = 0;
		feature_computer(dst_135, asm_135, con_135, ent_135, idm_135);

		float AMS[4] = { asm_0, asm_45, asm_90, asm_135 };
		float COM[4] = { con_0, con_45, con_90, con_135 };
		float ENT[4] = { ent_0, ent_45, ent_90, ent_135 };
		float IDM[4] = { idm_0, idm_45, idm_90, idm_135 };

		float glcm_data[16] = {
			asm_0, asm_45, asm_90, asm_135,
			con_0, con_45, con_90, con_135,
			ent_0, ent_45, ent_90, ent_135,
			idm_0, idm_45, idm_90, idm_135
		};

		/*std::cout << "特徵數據:" << endl;*/
		for (size_t i = 0; i < 16; i++)
		{
			data[i] = glcm_data[i];
			//std::cout << data[i] << " ";
		}
	}

};

C++ vector操作

二維數組賦值給vector

vector複製給Mat

// Training data: 4 samples x 4 features
float train_data[4][4] = {
	{ 0.814973, 15.1147, 0.431797, 0.933121 },
	{ 0.95266, 3.70758, 0.139839, 0.983595 },
	{ 0.973312, 1.95185, 0.0861551, 0.991363 },
	{ 0.922384, 6.42396, 0.211111, 0.971575 }
};
// one class label per sample
int labels[4] = { 
	1,
	2,
	3,
	4 
};	
	
// Copy the 2-D C array into a vector<vector<float>>, row by row
vector<vector<float>> vec;
for (size_t i = 0; i < 4; i++)
{
	vector<float> vec_rows; 
	for (size_t j = 0; j < 4; j++)
	{
		vec_rows.push_back(train_data[i][j]);
	}		
	vec.push_back(vec_rows);				
}		
// Print the nested vector (outer iterator = rows, inner = columns)
for (auto it = vec.begin(); it != vec.end(); ++it)	// rows
{		
	cout << "wai" << endl;
	for (auto it2 = it->begin(); it2 != it->end(); ++it2) // columns
	{
		cout << "nei" << endl;
		cout << *it2 << endl;
	}
}
	
// Copy the 2-D vector into a CV_32FC1 Mat through a flat element iterator
// (relies on row-major iteration order matching the nested loops)
Mat result = Mat::zeros(vec.size(), 4, CV_32FC1);
MatIterator_<float> it = result.begin<float>();
for (int i = 0; i < vec.size(); i++)
{
	for (int j = 0; j < vec[0].size(); j++)
	{
		*it = vec[i][j];
		it++;
	}
}

C++ io頭文件進行指定文件夾的文件名獲取+文件個數返回

// 功能:獲取文件名(files[i].c_str())和文件個數(files.size())
// #include<io.h>
// using namespace std;
/*
char* filePath = ".\\resource";
vector<string> files;

////獲取該路徑下的所有文件  
getFiles(filePath, files);

char str[30];
int size = files.size();//文件個數
for (int i = 0; i < size; i++)
{
	cout << files[i].c_str() << endl;//文件名
}
*/
// Recursively collect every file path under `path` into `files`,
// using the MSVC <io.h> _findfirst/_findnext API.
// files receives full paths ("path\\name"); directories themselves are not added.
void getFiles(string path, vector<string>& files)
{
	// Search handle. FIX: _findfirst returns intptr_t; the original stored it
	// in `long`, which truncates the handle on 64-bit Windows (long is 32-bit
	// there). intptr_t is provided via the CRT headers that <io.h> pulls in.
	intptr_t hFile = 0;
	// per-entry metadata filled by _findfirst/_findnext
	struct _finddata_t fileinfo;
	string p;
	if ((hFile = _findfirst(p.assign(path).append("\\*").c_str(), &fileinfo)) != -1)
	{
		do
		{
			// Directories are recursed into; regular files are collected.
			if ((fileinfo.attrib &  _A_SUBDIR))
			{
				// skip the "." and ".." pseudo-entries to avoid infinite recursion
				if (strcmp(fileinfo.name, ".") != 0 && strcmp(fileinfo.name, "..") != 0)
					getFiles(p.assign(path).append("\\").append(fileinfo.name), files);
			}
			else
			{
				files.push_back(p.assign(path).append("\\").append(fileinfo.name));
			}
		} while (_findnext(hFile, &fileinfo) == 0);
		_findclose(hFile);
	}
}

C++ Opencv 樣本多樣性

不同的任務背景下, 我們可以通過圖像的幾何變換, 使用以下一種或多種組合數據增強變換來增加輸入數據的量. 這裏具體的方法都來自數字圖像處理的內容, 相關的知識點介紹, 網上都有, 就不一一介紹了.

旋轉 | 反射變換(Rotation/reflection): 隨機旋轉圖像一定角度; 改變圖像內容的朝向;

翻轉變換(flip): 沿着水平或者垂直方向翻轉圖像;

縮放變換(zoom): 按照一定的比例放大或者縮小圖像;

平移變換(shift): 在圖像平面上對圖像以一定方式進行平移; 
可以採用隨機或人爲定義的方式指定平移範圍和平移步長, 沿水平或豎直方向進行平移. 改變圖像內容的位置;

尺度變換(scale): 對圖像按照指定的尺度因子, 進行放大或縮小; 或者參照SIFT特徵提取思想, 利用指定的尺度因子對圖像濾波構造尺度空間. 改變圖像內容的大小或模糊程度;

對比度變換(contrast): 在圖像的HSV顏色空間,改變飽和度S和V亮度分量,保持色調H不變. 對每個像素的S和V分量進行指數運算(指數因子在0.25到4之間), 增加光照變化;

噪聲擾動(noise): 對圖像的每個像素RGB進行隨機擾動, 常用的噪聲模式是椒鹽噪聲和高斯噪聲;

顏色變換(color): 在訓練集像素值的RGB顏色空間進行PCA, 得到RGB空間的3個主方向向量p1, p2, p3與對應的3個特徵值λ1, λ2, λ3. 對每幅圖像的每個像素加上擾動量 [p1, p2, p3][α1λ1, α2λ2, α3λ3]^T, 其中αi為從均值0、標準差0.1的高斯分佈中採樣的隨機數, 以此模擬光照與顏色強度的變化(即AlexNet論文中的"fancy PCA"顏色增強).

 

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章