傳送帶視頻流圖像拼接算法改進

接上篇:https://blog.csdn.net/iamqianrenzhan/article/details/89648491
原來的算法爲了儘可能減少拼接次數,每塊圖像都儘可能取大。但由於每幀圖像的採集間隔和傳送帶運動速度不固定,有時會出現匹配靶標過少或靶標信息循環重複的情況,導致拼接不上或拼接錯誤。本文對原來的算法進行改進,以期達到更好的穩定性。

算法的思想如下:

在處理速度滿足的情況下,儘可能多的進行拼接,極致的情況時對任意相鄰兩幀圖片進行平移計算並進行拼接。這樣在使用時就更能把面陣相機抽象爲一個線陣相機,每次返回幾行或者幾十行圖像。

代碼備份:

首先還是要對圖像進行校正:

// Calibration for the fixed camera: four measured corner points in the
// distorted source image (order appears to be TL, TR, BR, BL — confirm
// against the rig if the calibration is redone).
Point2f PerspectivePoints00[4] = { Point2f(32, 41),
	Point2f(1255, 63), Point2f(1233, 908) , Point2f(33, 898) };
// Corresponding corner positions in the rectified (fronto-parallel) plane.
Point2f PerspectivePoints11[4] = { Point2f(20, 30),
	Point2f(1260, 30), Point2f(1260, 880), Point2f(20, 880) };
// 3x3 homography mapping the distorted view onto the rectified plane;
// reused by warpPerspective on every frame.
Mat m = getPerspectiveTransform(PerspectivePoints00, PerspectivePoints11);

然後對每一幀圖像:

// Rectify each incoming frame in place with the precomputed homography m.
warpPerspective(frame, frame, m, Size(frame.cols, frame.rows));

需要對每一幀圖片進行預處理

一般是轉爲灰度圖像,濾波,二值化,找輪廓。

// Preprocess: grayscale -> blur -> binarize -> contours.
cvtColor(src_all, src_all, COLOR_RGB2GRAY);
imshow("1.原圖", src_all);
Mat gray_all;
// Light Gaussian blur to suppress sensor noise before thresholding.
GaussianBlur(src_all, gray_all, Size(3, 3), 0);
threshold(gray_all, gray_all, 100, 255, THRESH_BINARY); // anything above ~90 works; 100 adds margin

// Find all contours (full hierarchy, every boundary pixel kept).
findContours(gray_all, contours_all, hierarchy_all, RETR_TREE, CHAIN_APPROX_NONE, Point(0, 0));

靶標的定位,並輸出看處理結果

// Keep only the contours that look like the printed target markers:
// perimeter length, area, and centroid position must all fall inside
// empirically tuned bounds (adjust these per installation).
contours_new.clear();
for (const auto &candidate : contours_all)
{
	Point centerpoint;
	double area = contourArea(candidate);
	GetContourCenter(candidate, centerpoint);
	bool perimeterOk = candidate.size() > 450 && candidate.size() < 550;
	bool areaOk      = area > 11000 && area < 14000;
	bool positionOk  = centerpoint.x > 1000
		&& centerpoint.y > 100 && centerpoint.y < 1024 - 100;
	if (perimeterOk && areaOk && positionOk)
		contours_new.push_back(candidate);
}
// Debug visualization of the accepted contours.
Mat temp2 = frame.clone();
drawContours(temp2, contours_new, -1, CV_RGB(255, 0, 0), 10);

//imshow("輪廓", temp2);

//imshow("輪廓", temp2);

靶標的識別:

用到的結構體:

// One detected target marker within a frame.
struct targetObject {
	vector<Point> points; // the marker's contour points
	int message = 0;  // 0 = no target; 1,2,3,4 encode the marker's identity/orientation class
};
// A frame kept for stitching, with its detected targets and the
// translation measured against the previous key frame.
struct keyFrame{
	Mat frame;               // rectified image data
	targetObject targets[4]; // at most 4 markers per frame
	int offsety;             // vertical shift vs. previous frame (belt direction)
	int offsetx;             // horizontal shift vs. previous frame
};

用到的函數:

void GetContourCenter(vector<Point> contour, Point &p)
{
	//重心法抓中心點
	double avg_px = 0, avg_py = 0;
	for (int i = 0; i < contour.size(); i++)
	{
		avg_px += contour[i].x;
		avg_py += contour[i].y;
	}
	p.x = avg_px / contour.size();
	p.y = avg_py / contour.size();
}

識別代碼:

//靶標識別
tempframe.frame = frame.clone();
for (int i = 0; i < contours_new.size(); i++)
{
	Rect rect = boundingRect(contours_new[i]);
	Point point(rect.x + rect.width / 2, rect.y + rect.height / 2);
	Point centerpoint;
	//GetContourCenter(contours_new[i], centerpoint);
	Moments m = moments(contours_new[i]);
	centerpoint.x = m.m10 / m.m00;
	centerpoint.y = m.m01 / m.m00;

	tempframe.targets[i].points = contours_new[i];
	if (point.x > centerpoint.x) //1,4
	{
		if (point.y > centerpoint.y)
			tempframe.targets[i].message = 4;
		else
			tempframe.targets[i].message = 1;
	}
	else
	{
		if (point.y > centerpoint.y)
			tempframe.targets[i].message = 3;
		else
			tempframe.targets[i].message = 2;
	}
}
for (int i = contours_new.size(); i < 4; i++)
{
	tempframe.targets[i].message = 0;
}

靶標平移計算:

用到的函數:

// Vertically concatenate A on top of B into a newly allocated matrix.
// Preconditions: same column count and same element type.
// (The original assert was commented out because the macro name was
// wrong — the OpenCV macro is CV_Assert.  cv::vconcat would also do
// this in one call.)
cv::Mat mergeRows(const cv::Mat &A, const cv::Mat &B)
{
	CV_Assert(A.cols == B.cols && A.type() == B.type());
	int totalRows = A.rows + B.rows;
	cv::Mat mergedDescriptors(totalRows, A.cols, A.type());
	// Copy A into the top rows, B into the remaining rows.
	cv::Mat submat = mergedDescriptors.rowRange(0, A.rows);
	A.copyTo(submat);
	submat = mergedDescriptors.rowRange(A.rows, totalRows);
	B.copyTo(submat);
	return mergedDescriptors;
}

首先計算總共有多少個靶標匹配上:

vector<int> offsety;
vector<int> offsetx;
for (int i = 1; i < 5; i++)
{
	int j1, j2;
	bool isexist1 = false;
	bool isexist2 = false;
	bool isexist = false;
	for (j1 = 0; j1 < 4; j1++)
		if (tempframe.targets[j1].message == i) {
			isexist1 = true; break;
		}
	for (j2 = 0; j2 < 4; j2++)
		if (keyframe.targets[j2].message == i) {
			isexist2 = true; break;
		}
	isexist = isexist1 & isexist2;
	if (isexist)
	{
		//計算這兩組點的平移量
		Point centerpoint1;
		Point centerpoint2;
		GetContourCenter(tempframe.targets[j1].points, centerpoint1);
		GetContourCenter(keyframe.targets[j2].points, centerpoint2);
		int y = centerpoint1.y - centerpoint2.y;
		int x = centerpoint1.x - centerpoint2.x;
		if (y > 0)
		{
			offsety.push_back(y);
			offsetx.push_back(x);
		}
	}
}

如果匹配的靶標數量大於1,才繼續處理:

// Stitch step: only proceed when 2 or 3 markers matched between the
// adjacent frames, so a single bad match cannot drive the offset.
if (offsety.size() > 1)
{
	// Sanity check: all per-marker y-offsets should agree; a spread of
	// more than 50 px means at least one match is wrong.  (Only logs —
	// the average below is still used.)
	for (int i = 0; i < offsety.size(); i++)
		for (int j = 0; j < offsety.size(); j++)
			if (abs(offsety[i] - offsety[j]) > 50)
			{
				cout << "全爲正但有錯誤" << endl;
			}
	// Average the matched y-offsets to get the belt displacement.
	int sum = 0;
	for (int i = 0; i < offsety.size(); i++)
	{
		cout << offsety[i] << " ";
		sum += offsety[i];
	}
	cout << endl << endl << endl << endl;
	keyframe.offsety = sum / offsety.size();


	Mat tem = tempframe.frame.clone();

	// Take only the strip of new content revealed since the key frame.
	// NOTE(review): the original comment said this crops the key frame
	// to drop the part duplicated in the previous frame (1024-x), but
	// the code actually takes the TOP offsety rows of the NEW frame
	// (tem) — confirm which end of the frame holds the new content.
	keyframe.frame = tem(Rect(0, 0, keyframe.frame.cols, keyframe.offsety));
	//imshow("結果", keyframe.frame);

	// If the accumulated result has enough rows, flush it to disk and
	// restart accumulation with the current strip; otherwise prepend
	// the strip onto the accumulator.
	if (resultMat.rows > 1000)
	{
		//getpic(resultMat);
		// Save resultMat for downstream processing.
		string filename = to_string(count) + ".bmp";
		imwrite(filename, resultMat);

		resultMat = keyframe.frame.clone();

		newpic = true; // a finished composite is available this iteration
	}
	else
	{
		// New strip goes on top: newest belt content is stacked above
		// the previously accumulated rows.
		resultMat = mergeRows(keyframe.frame.clone(), resultMat.clone());
		newpic = false;
	}
		

	//imshow("結果", keyframe.frame);
	//resultMat = mergeRows(keyframe.frame.clone(), resultMat.clone());
	
}

結果處理:

// Hook: runs once per flushed composite image (newpic is set when a
// full-height result was just written out above).
if (newpic)
{
	// write the processing algorithm here
}
發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章