記錄 關於Video——stitching的問題

環境:

#include <android/log.h>
#define TAG "myDemo-jni" // log tag shown in Android logcat output
#define LOGD(...)  __android_log_print(ANDROID_LOG_DEBUG,TAG,__VA_ARGS__) // debug-level logging macro wrapping __android_log_print
//__android_log_print(ANDROID_LOG_INFO, "Loaded cascade classifier false", "Loaded cascade classifier false");


//鐠嬪囉鏁pencv鎼達拷
#include <opencv2/core/core.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/stitching/stitcher.hpp>
#include <opencv2/features2d/features2d.hpp> 
#include <opencv2/nonfree/nonfree.hpp>
#include <opencv2/legacy/legacy.hpp>


//鐠嬪囉鏁lfeat鎼達拷
//extern "C" {
//#include <vl/generic.h>
//#include <vl/dsift.h>
//#include <vl/sift.h>
//}


// project headers
#include "Image.h"
#include "ImageFeature.h"
#include "feasTracking_fast.h"
//#include "ImageProcessing.h"
#include "project.h"
#include "BPFlow.h"
#include "vl_imwbackward.h"
#include "mosaicAll_inpainting.h"
#include "detectObject.h"
// standard library headers
#include <vector>
#include <math.h>
//#include <windows.h>
#include <stdio.h>


// basic helper-function headers
#include "basic_function.h"
#include "computeDisp_.h"
//#include "face_detect.h"


using namespace std;
using namespace cv;


//vector<Mat> Keyframes;
//vector<Mat> H_ ;
extern string sensorPath_;


///#define OPEN_FROM_FILE
#define LOAD_FRAME


//const char *faceCascadeFilename = sensorPath_+"//"+"lbpcascade_frontalface.xml";
//const string faceCascadeFilename = sensorPath_+"//"+"lbpcascade_frontalface.xml";
//const char* video_addr = "videos\\2.avi";
const int resize_factor = 1;  // frame downscale factor; 1 = keep original size — usage not visible in this chunk, TODO confirm
const double PI = 3.14159265; // circle constant for angle computations elsewhere in the project
const bool try_use_gpu = true; // presumably passed to the OpenCV stitcher to request GPU acceleration — verify against caller
//float ratio = 0.5;


//Mat key_frame1, key_frame2;
//Mat H1, H2,H_temp,mid,ref_,mask,mos;
//vector<Mat> homography;
//int fuse_width =40,limit=2;


//Mat mid;
//Mat ,ref_;


// Estimates the dominant inter-frame motion as the mean displacement of
// matched feature points, with one pass of vertical-outlier rejection
// (points whose |dy| exceeds twice the average magnitude are dropped, as
// they are likely mistracked).
//
// prev_points / cur_points: matched point sets from optical-flow tracking;
//   assumed to be the same size (pairing guaranteed by the caller).
// Returns the averaged (dx, dy), truncated to integers.
//
// Fixes over the previous version:
//  - empty input no longer divides by zero (returns (0,0));
//  - if the outlier pass rejects every point (possible when mixed-sign dy
//    average to ~0), fall back to the unfiltered average instead of
//    dividing by zero;
//  - displacements are accumulated in float, not truncated per-term to int.
Point motionFlow(vector<Point2f> &prev_points, vector<Point2f> &cur_points)
{
	const int sz = (int)cur_points.size();
	if (sz == 0 || prev_points.size() != cur_points.size())
		return Point(0, 0); // no usable data: report zero motion

	// First pass: plain average of all displacement vectors.
	float sum_x = 0.f, sum_y = 0.f;
	for (int i = 0; i < sz; i++)
	{
		sum_x += cur_points[i].x - prev_points[i].x;
		sum_y += cur_points[i].y - prev_points[i].y;
	}
	float ave_x = sum_x / sz;
	float ave_y = sum_y / sz;

	// Second pass: subtract points with anomalous vertical displacement.
	int sz_tmp = sz;
	for (int i = 0; i < sz; i++)
	{
		float dy = cur_points[i].y - prev_points[i].y;
		if (fabs(dy) > fabs(ave_y) * 2)
		{
			sum_x -= cur_points[i].x - prev_points[i].x;
			sum_y -= dy;
			sz_tmp--;
		}
	}
	// Recompute only if at least one point survived the filter.
	if (sz_tmp > 0)
	{
		ave_x = sum_x / sz_tmp;
		ave_y = sum_y / sz_tmp;
	}

	return Point((int)ave_x, (int)ave_y);
}





// Selects key frames from a candidate frame sequence and estimates the
// homographies between them, for video stitching.
//
// Strategy: detect features (goodFeaturesToTrack corners + SIFT keypoints)
// on a downscaled segment-start frame, track them forward with pyramidal
// LK optical flow, and cut a key frame when tracking quality degrades
// (low inlier ratio, too few surviving points, large accumulated vertical
// motion, or end of sequence). Repeats until three key frames are chosen.
//
// path              : output directory root used for debug imwrite() dumps
// frames_set_reduce : candidate input frames (read-only)
// frames            : [out] selected key frames (three are expected)
// H_                : [out] homographies scaled via size_ratio1/2
// mid               : [out] middle/reference frame
// H_s               : [out] homographies at unit scale
// cov2              : per-segment thresholds — fraction of frame height the
//                     average vertical motion may reach before a cut
// Returns 1 on completion.
int feasTracking_fast(string path,const vector<Mat> &frames_set_reduce,vector<Mat> &frames,vector<Mat> &H_,Mat &mid,vector<Mat> &H_s,float cov2[3])
{
Mat curFrame = frames_set_reduce[0];
int size_vec[2] = {90,160};   // low-resolution tracking size (rows, cols)
float inlierRat = 0.47;       // minimum homography inlier ratio before cutting
float size_ratio1 = (float)size_vec[0]/curFrame.rows;  // row scale: tracking size -> full size
float size_ratio2 = (float)size_vec[1]/curFrame.cols;  // col scale: tracking size -> full size
int finish = 0;
int count = 0;                // number of key-frame segments completed (0..2)
int i = 0;                    // index of the candidate frame being examined
Size size;
int cur_ind;                  // index where the current tracking segment started
Mat curFrame_,cur_frame_gray;
vector<Point2f> initPoints,initPoints_prev,visiblePoints,tmp_points, initPoints_last , visiblePoints_last;
Mat H, H1;
Mat nextFrame, nextFrame_;
vector<uchar> status;
vector<float> err;
float ratio1 = 1;             // fraction of tracked points that are homography inliers
int inliers = 0;
Point motion_vector;          // average motion between segment start and current frame
while (1)
   {
if (finish == 1) break;
size = Size(160,90);
curFrame = frames_set_reduce[i];
curFrame_ = imresize(curFrame,size);
cvtColor(curFrame_,cur_frame_gray, CV_RGB2GRAY);
// Detect corners plus SIFT keypoints on the downscaled segment-start frame.
ClearVector(tmp_points);
goodFeaturesToTrack(cur_frame_gray,tmp_points,120,0.01,2);
ClearVector(initPoints);
initPoints = tmp_points;
SIFT sift;
Mat mascara,des;
vector<KeyPoint> key_points;
sift(cur_frame_gray,mascara,key_points,des);
ClearVector(tmp_points);
keyPoint2Point2f(key_points,tmp_points);
vector_push_back(initPoints,tmp_points);


ClearVector(initPoints_prev);
initPoints_prev = initPoints;


cur_ind = i;
i++;


// Track the segment-start points forward frame by frame until the
// key-frame cut condition fires.
while(i<frames_set_reduce.size())
{
frames_set_reduce[i].copyTo(nextFrame);
nextFrame_ = imresize(nextFrame,size);
calcOpticalFlowPyrLK(curFrame_,nextFrame_,initPoints_prev,visiblePoints,status,err,Size(21,21),3);
keep_track_point(status,err,initPoints,visiblePoints);


if (!visiblePoints.empty())
{
motion_vector = motionFlow(initPoints, visiblePoints);
inliers = initPoints.size();
if (count > 1)
{
H1 = homographyEstimation(initPoints,visiblePoints,size_ratio1,size_ratio2,inliers);
}


ratio1 = inliers*1.0/initPoints.size();
}


// Key-frame cut condition: inlier ratio dropped, sequence ended, average
// vertical motion exceeds the coverage threshold, or too few points left.
if (ratio1 < inlierRat || (i+1) == frames_set_reduce.size() || (abs(motion_vector.y) > nextFrame_.rows*cov2[count])|| visiblePoints.size() < 10 || visiblePoints.empty())
{
// Tracking degraded sharply: step back one frame and use the previous
// frame (with the point sets saved for it) as the key frame instead.
if (ratio1 < (inlierRat - 0.015) || visiblePoints.size() < 10 || visiblePoints.empty())
{
ClearVector(initPoints);
initPoints = initPoints_last;
ClearVector(visiblePoints);
visiblePoints = visiblePoints_last;
if (count == 0)
{
frames.push_back(frames_set_reduce[0]);
mid = frames_set_reduce[i-1];
}
else
{
if ((i - cur_ind)>1)
{
frames.push_back(frames_set_reduce[i-1]);
imwrite(path +"input2"+"//"+"key2.jpg",frames[1]);
imwrite(path +"input2"+"//"+"key3.jpg",frames_set_reduce[i-1]);
}
}
i = i-1;
}
else
{
if (count ==0)
{
frames.push_back(frames_set_reduce[0]);
mid = frames_set_reduce[i];
}
else
{
// BUG FIX: push a deep copy. cv::Mat is a shallow reference-counted
// header, so push_back(nextFrame) made frames.back() share nextFrame's
// pixel buffer; the copyTo(nextFrame) in a later iteration then reused
// and overwrote that buffer, leaving the stored key frame identical to
// a later frame (the reported frames[1] == frames[2] symptom).
// clone() decouples the stored key frame from the tracking buffer.
frames.push_back(nextFrame.clone());
}
}


// Estimate the segment homography. For the first segment the point
// order is reversed so the transform maps toward the reference frame.
if (count == 0)
{
H = homographyEstimation(visiblePoints,initPoints,size_ratio1,size_ratio2,inliers);
H1 = homographyEstimation(visiblePoints,initPoints,1,1,inliers);
}
else
{
H = homographyEstimation(initPoints,visiblePoints,size_ratio1,size_ratio2,inliers);
H1 = homographyEstimation(initPoints,visiblePoints,1,1,inliers);
}
// Store the homographies only if the segment actually advanced.
if (count == 0 || (i - cur_ind)>0)
{
H_.push_back(H);
H_s.push_back(H1);
}
// Stop after three key-frame segments or when the input is exhausted.
if ((i+1) >= frames_set_reduce.size() || count == 2)
{
finish = 1;
}
count = count + 1;
break;
}
// Remember this frame's point sets so the step-back branch above can
// restore them on the next iteration.
ClearVector(initPoints_last);
initPoints_last = initPoints;
ClearVector(visiblePoints_last);
visiblePoints_last = visiblePoints;


// Advance: tracked points become the starting points of the next step,
// and the current resized frame becomes the previous one.
swap(initPoints_prev,visiblePoints);
swap(curFrame_,nextFrame_);
i = i +1;
}


}
// Debug dump of the selected key frames (assumes exactly 3 were collected).
imwrite(path +"input2"+"//"+"000.jpg",frames[0]);
imwrite(path +"input2"+"//"+"001.jpg",frames[1]);
imwrite(path +"input2"+"//"+"002.jpg",frames[2]);
return 1;
}




#pragma endregion

此函數對D:\Genymotion拷貝\2這組圖片有問題


問題:

keyframe[1]在 frames.push_back(nextFrame); 這個函數後是對的,但不知爲什麼到後來這裏的keyframe[1]與keyframe[2]結果一樣了。(原因:cv::Mat 的賦值與 push_back 只複製淺層 header,frames[1] 與 nextFrame 共享同一塊像素數據;之後下一輪的 copyTo(nextFrame) 會重用並覆寫該緩衝區,所以已存入的關鍵幀被改掉。改成 frames.push_back(nextFrame.clone()); 做深拷貝即可。)

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章