參考https://blog.csdn.net/qq_34914551/article/details/78916084
Opencv尋找連通域的幾何中心
其中連通域的輪廓選取用到了OTSU算法
#include "stdafx.h"
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
//otsu算法實現函數
// Otsu's method: compute the optimal global binarization threshold for a
// single-channel 8-bit grayscale image. Returns the gray level (0-255) that
// maximizes the between-class variance. `image` is not modified.
int Otsu(Mat &image)
{
	int width = image.cols;
	int height = image.rows;
	int pixelCount[256] = { 0 };      // histogram: pixel count per gray level
	float pixelPro[256] = { 0.0f };   // normalized histogram (probabilities)
	int pixelSum = width * height;
	int threshold = 0;
	uchar* data = (uchar*)image.data;

	// Build the gray-level histogram. Index rows via image.step because
	// Mat rows may be padded beyond `width` bytes.
	for (int i = 0; i < height; i++)
	{
		for (int j = 0; j < width; j++)
		{
			pixelCount[data[i * image.step + j]]++;
		}
	}

	// Normalize counts to probabilities.
	for (int i = 0; i < 256; i++)
	{
		pixelPro[i] = (float)pixelCount[i] / (float)pixelSum;
	}

	// Global mean gray level.
	float u = 0;
	for (int i = 0; i < 256; i++)
	{
		u += i * pixelPro[i];
	}

	// Classic Otsu sweep: for every candidate threshold i, class 0 is the
	// levels [0, i] (background) and class 1 is (i, 255] (foreground).
	// Running sums make this O(256) instead of the naive O(256*256).
	float w0 = 0, u0tmp = 0;   // class-0 weight and weighted gray-level sum
	float deltaMax = 0;
	for (int i = 0; i < 256; i++)
	{
		w0 += pixelPro[i];
		u0tmp += i * pixelPro[i];
		float w1 = 1.0f - w0;
		// Skip degenerate splits where one class is empty; the original
		// code divided by zero here, producing NaN intermediates.
		if (w0 <= 0 || w1 <= 0)
			continue;
		float u0 = u0tmp / w0;         // class-0 mean gray level
		float u1 = (u - u0tmp) / w1;   // class-1 mean gray level
		// Between-class variance; algebraically equal to w0*w1*(u0-u1)^2.
		float deltaTmp = w0 * (u0 - u) * (u0 - u) + w1 * (u1 - u) * (u1 - u);
		// Keep the threshold with the largest between-class variance.
		if (deltaTmp > deltaMax)
		{
			deltaMax = deltaTmp;
			threshold = i;
		}
	}
	// Return the best threshold.
	return threshold;
}
int main()
{
Mat matSrc = imread("1.png", 0);
GaussianBlur(matSrc, matSrc, Size(5, 5), 0);
vector<vector<Point> > contours;//contours的類型,雙重的vector
vector<Vec4i> hierarchy;//Vec4i是指每一個vector元素中有四個int型數據。
//閾值
threshold(matSrc, matSrc, 100, 255, THRESH_BINARY);
imshow("threshold", matSrc);
//尋找輪廓,這裏注意,findContours的輸入參數要求是二值圖像,二值圖像的來源大致有兩種,第一種用threshold,第二種用canny
findContours(matSrc.clone(), contours, hierarchy,CV_RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, Point(0, 0));
/// 計算矩
vector<Moments> mu(contours.size());
for (int i = 0; i < contours.size(); i++)
{
mu[i] = moments(contours[i], false);
}
/// 計算中心矩:
vector<Point2f> mc(contours.size());
for (int i = 0; i < contours.size(); i++)
{
mc[i] = Point2f(mu[i].m10 / mu[i].m00, mu[i].m01 / mu[i].m00);
}
/// 繪製輪廓
Mat drawing = Mat::zeros(matSrc.size(), CV_8UC1);
for (int i = 0; i < contours.size(); i++)
{
Scalar color = Scalar(255);
drawContours(drawing, contours, i, color, 2, 8, hierarchy, 0, Point());
circle(drawing, mc[i], 4, Scalar(128), -1, 8, 0); //中心用灰點表示
}
imshow("outImage",drawing);
waitKey();
return 0;
}
篩選最大連通域中心
#include "stdafx.h"
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
//otsu算法實現函數
// Otsu's method: compute the optimal global binarization threshold for a
// single-channel 8-bit grayscale image. Returns the gray level (0-255) that
// maximizes the between-class variance. `image` is not modified.
int Otsu(Mat &image)
{
	int width = image.cols;
	int height = image.rows;
	int pixelCount[256] = { 0 };      // histogram: pixel count per gray level
	float pixelPro[256] = { 0.0f };   // normalized histogram (probabilities)
	int pixelSum = width * height;
	int threshold = 0;
	uchar* data = (uchar*)image.data;

	// Build the gray-level histogram. Index rows via image.step because
	// Mat rows may be padded beyond `width` bytes.
	for (int i = 0; i < height; i++)
	{
		for (int j = 0; j < width; j++)
		{
			pixelCount[data[i * image.step + j]]++;
		}
	}

	// Normalize counts to probabilities.
	for (int i = 0; i < 256; i++)
	{
		pixelPro[i] = (float)pixelCount[i] / (float)pixelSum;
	}

	// Global mean gray level.
	float u = 0;
	for (int i = 0; i < 256; i++)
	{
		u += i * pixelPro[i];
	}

	// Classic Otsu sweep: for every candidate threshold i, class 0 is the
	// levels [0, i] (background) and class 1 is (i, 255] (foreground).
	// Running sums make this O(256) instead of the naive O(256*256).
	float w0 = 0, u0tmp = 0;   // class-0 weight and weighted gray-level sum
	float deltaMax = 0;
	for (int i = 0; i < 256; i++)
	{
		w0 += pixelPro[i];
		u0tmp += i * pixelPro[i];
		float w1 = 1.0f - w0;
		// Skip degenerate splits where one class is empty; the original
		// code divided by zero here, producing NaN intermediates.
		if (w0 <= 0 || w1 <= 0)
			continue;
		float u0 = u0tmp / w0;         // class-0 mean gray level
		float u1 = (u - u0tmp) / w1;   // class-1 mean gray level
		// Between-class variance; algebraically equal to w0*w1*(u0-u1)^2.
		float deltaTmp = w0 * (u0 - u) * (u0 - u) + w1 * (u1 - u) * (u1 - u);
		// Keep the threshold with the largest between-class variance.
		if (deltaTmp > deltaMax)
		{
			deltaMax = deltaTmp;
			threshold = i;
		}
	}
	// Return the best threshold.
	return threshold;
}
int main()
{
Mat matSrc = imread("1.png", 0);
GaussianBlur(matSrc, matSrc, Size(5, 5), 0);
vector<vector<Point> > contours;//contours的類型,雙重的vector
vector<Vec4i> hierarchy;//Vec4i是指每一個vector元素中有四個int型數據。
//閾值
threshold(matSrc, matSrc, 100, 255, THRESH_BINARY);
imshow("threshold", matSrc);
//尋找輪廓,這裏注意,findContours的輸入參數要求是二值圖像,二值圖像的來源大致有兩種,第一種用threshold,第二種用canny
findContours(matSrc.clone(), contours, hierarchy,CV_RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, Point(0, 0));
//計算最大連通域中心點
int index;
double area, maxArea(0);
for (int i = 0; i < contours.size(); i++)
{
area = contourArea(Mat(contours[i]));
if (area > maxArea)
{
maxArea = area;
index = i;
}
}
// 計算矩
vector<Moments> mu(contours.size());
mu[index] = moments(contours[index], false);
// 計算中心矩:
vector<Point2f> mc(contours.size());
mc[index] = Point2f(mu[index].m10 / mu[index].m00, mu[index].m01 / mu[index].m00);
// 繪製輪廓
Mat drawing = Mat::zeros(matSrc.size(), CV_8UC1);
Scalar color = Scalar(255);
drawContours(drawing, contours, index, color, 2, 8, hierarchy, 0, Point());
circle(drawing, mc[index], 4, Scalar(128), -1, 8, 0); //中心用灰點表示
imshow("outImage",drawing);
waitKey();
return 0;
}