創建一個Trackbar
// 函數定義
int cv::createTrackbar( const String & trackbarname,
                        const String & winname,
                        int * value,
                        int count,
                        TrackbarCallback onChange = 0,
                        void * userdata = 0 );
// trackbarname trackbar的名字
// winname trackbar所在窗口的名字
// value 滑塊的值
// count 滑塊最大允許值
// onChange 指向一個函數,每次改變滑塊的值,自動調用這個函數,要求函數的原型爲 void function_name(int, void *);
// userdata 傳遞給回調函數的用戶數據指針
// 一個例子, on_trackbar爲調用的函數
createTrackbar( "Trackbar", "Linear Blend", &alpha_slider, alpha_slider_max, on_trackbar );
Reading Geospatial Raster files with GDAL
dem = cv::imread(argv[2], cv::IMREAD_LOAD_GDAL | cv::IMREAD_ANYDEPTH );
一些簡單處理參照Reading Geospatial Raster files with GDAL opencv官方例子
opencv讀取視頻或調用攝像頭
視頻流實質也就是連續若干幀的圖片,在opencv中讀取視頻的方法也是如此,先聲明一個視頻流對象,然後逐幀的取出圖片
VideoCapture source(sourceVideo);
VideoCapture test;
test.open(testVideo);// 這兩種表達方式含義一致, 都是打開一個視頻文件,生成視頻流對象
// VideoCapture本身除了讀取視頻外也可以調用攝像頭,此時參數應該爲設備的ID號
// 在聲明了視頻流後可以通過>>操作逐幀取出圖片
Mat sourceImage, testImage;
source >> sourceImage;
test >> testImage;
// 可以通過get函數獲取視頻相關屬性
// Size爲opencv默認數據類型, 構造函數爲Size(width, height);
Size ssource = Size((int) source.get(CAP_PROP_FRAME_WIDTH),
(int) source.get(CAP_PROP_FRAME_HEIGHT));
Size stest = Size((int) test.get(CAP_PROP_FRAME_WIDTH),
(int) test.get(CAP_PROP_FRAME_HEIGHT));
cout<<"width="<<ssource.width<<"Height="<<ssource.height<<"number="<<source.get(CAP_PROP_FRAME_COUNT);
一個輸入兩個視頻文件並計算ssim和psnr的例子
#include<opencv2/core.hpp>//opencv中基礎模塊
#include<opencv2/highgui.hpp>//opencv I/O
#include<opencv2/imgproc.hpp> //gaussian blur
#include<opencv2/video.hpp>
#include<iostream>
#include<string>
#include<iomanip> // 控制輸出精度
#include<sstream>// string to number 轉換
using namespace std;
using namespace cv;
double getPSNR(const Mat& I1, const Mat& I2);//計算PSNR值
Scalar getSSIM( const Mat& i1, const Mat& i2);// 計算SSIM值
int main(int agrc, char **argv)
{
const string sourceVideo = argv[1], testVideo = argv[2];
int frameNumber = -1; // 視頻幀數
//聲明一個視頻流的對象
VideoCapture source(sourceVideo);
VideoCapture test;
test.open(testVideo);// 這兩種表達方式含義一致
if(!source.isOpened())
{
cout<<"can't open source video"<<endl;
return -1;
}
if(!test.isOpened())
{
cout<<"can't open test video"<<endl;
return -1;
}
// 獲取視頻屬性
// Size爲opencv默認數據類型, 構造函數爲Size(width, height);
Size ssource = Size((int) source.get(CAP_PROP_FRAME_WIDTH),
(int) source.get(CAP_PROP_FRAME_HEIGHT));
Size stest = Size((int) test.get(CAP_PROP_FRAME_WIDTH),
(int) test.get(CAP_PROP_FRAME_HEIGHT));
cout<<"width="<<ssource.width<<"Height="<<ssource.height<<"number="<<source.get(CAP_PROP_FRAME_COUNT);
if(ssource!=stest)
{
cout<<"source video and test video have different size"<<endl;
return -1;
}
const char* WIN_SOURCE = "source video";
const char* WIN_TEST = "test video";
namedWindow(WIN_SOURCE, WINDOW_AUTOSIZE);
namedWindow(WIN_TEST, WINDOW_AUTOSIZE);
moveWindow(WIN_SOURCE, 500, 0);
moveWindow(WIN_TEST, 600+ssource.width, 0);
Mat sourceImage, testImage;
double psnrV;
Scalar ssimV;//存儲三通道的ssim
for(;;)
{
// 從視頻流中獲取每一幀圖像
source >> sourceImage;
test >> testImage;
if(sourceImage.empty()||testImage.empty())
{
cout<<"over"<<endl;
break;
}
frameNumber++;
cout<<"frame="<<frameNumber+1<<endl;
psnrV = getPSNR(sourceImage, sourceImage);
cout<<setiosflags(ios::fixed)<<setprecision(3)<<psnrV<<endl;
ssimV = getSSIM(sourceImage, sourceImage);
cout<<"SSIM"
<<"R"<<setiosflags(ios::fixed)<<setprecision(3)<<ssimV[2]*100<<"%"
<<"G"<<setiosflags(ios::fixed)<<setprecision(3)<<ssimV[1]*100<<"%"
<<"B"<<setiosflags(ios::fixed)<<setprecision(3)<<ssimV[0]*100<<"%"<<endl;
imshow(WIN_SOURCE, sourceImage);
imshow(WIN_TEST, testImage);
char c=(char)waitKey(500);
if(c==27)
{
break;
}
}
return 0;
}
// 對照着具體公式即可很容易理解該部分代碼
double getPSNR(const Mat& I1, const Mat& I2)
{
Mat s1;
absdiff(I1, I2, s1); // |I1 - I2|
s1.convertTo(s1, CV_32F); // cannot make a square on 8 bits
s1 = s1.mul(s1); // |I1 - I2|^2
Scalar s = sum(s1); // sum elements per channel
double sse = s.val[0] + s.val[1] + s.val[2]; // sum channels
if( sse <= 1e-10) // for small values return zero
return 0;
else
{
double mse = sse / (double)(I1.channels() * I1.total());
double psnr = 10.0 * log10((255 * 255) / mse);
return psnr;
}
}
// Computes the mean SSIM (structural similarity) between two same-sized
// images, one value per channel, using an 11x11 Gaussian window with
// sigma = 1.5. For typical 8-bit inputs each returned value lies in
// [0, 1]; higher means more similar (1 = identical).
Scalar getSSIM( const Mat& i1, const Mat& i2)
{
// Stabilizing constants: C1 = (0.01*255)^2, C2 = (0.03*255)^2, chosen
// for an 8-bit dynamic range.
const double C1 = 6.5025, C2 = 58.5225;
/***************************** INITS **********************************/
int d = CV_32F;
Mat I1, I2;
i1.convertTo(I1, d); // cannot calculate on one byte large values
i2.convertTo(I2, d);
Mat I2_2 = I2.mul(I2); // I2^2
Mat I1_2 = I1.mul(I1); // I1^2
Mat I1_I2 = I1.mul(I2); // I1 * I2
/*************************** END INITS **********************************/
// Local means: Gaussian-weighted average of each image.
Mat mu1, mu2; // PRELIMINARY COMPUTING
GaussianBlur(I1, mu1, Size(11, 11), 1.5);
GaussianBlur(I2, mu2, Size(11, 11), 1.5);
Mat mu1_2 = mu1.mul(mu1);
Mat mu2_2 = mu2.mul(mu2);
Mat mu1_mu2 = mu1.mul(mu2);
// Local variances and covariance: E[X^2] - E[X]^2 and E[XY] - E[X]E[Y].
Mat sigma1_2, sigma2_2, sigma12;
GaussianBlur(I1_2, sigma1_2, Size(11, 11), 1.5);
sigma1_2 -= mu1_2;
GaussianBlur(I2_2, sigma2_2, Size(11, 11), 1.5);
sigma2_2 -= mu2_2;
GaussianBlur(I1_I2, sigma12, Size(11, 11), 1.5);
sigma12 -= mu1_mu2;
// SSIM formula: numerator in t3, denominator in t1 (t1/t2 are reused
// as scratch buffers to avoid extra allocations).
Mat t1, t2, t3;
t1 = 2 * mu1_mu2 + C1;
t2 = 2 * sigma12 + C2;
t3 = t1.mul(t2); // t3 = ((2*mu1_mu2 + C1).*(2*sigma12 + C2))
t1 = mu1_2 + mu2_2 + C1;
t2 = sigma1_2 + sigma2_2 + C2;
t1 = t1.mul(t2); // t1 =((mu1_2 + mu2_2 + C1).*(sigma1_2 + sigma2_2 + C2))
// Per-pixel SSIM map, then its per-channel mean as the summary score.
Mat ssim_map;
divide(t3, t1, ssim_map); // ssim_map = t3./t1;
Scalar mssim = mean(ssim_map); // mssim = average of ssim map
return mssim;
}
opencv進行簡單的視頻生成
一個視頻文件的內容可以從後綴名中得到,主要包括 video feeds, audio feeds or other tracks, video track存儲方式是由編碼器決定的,opencv主要面對圖像處理,因而其中僅支持.avi文件,也僅能生成.avi文件
- 要生成視頻文件首先要建立一個視頻流對象,包括視頻名稱,編碼格式,fps, 視頻尺寸等,我們再次以建立一個與輸入文件相同格式相同尺寸的視頻爲例,如果已知要存儲的視頻編碼格式,和fps,尺寸就只需要構造一個VideoWriter對象即可,
// 獲取輸入視頻源
VideoCapture inputVideo(source);
// CAP_PROP_FOURCC代表編碼格式,但get屬性獲得是double類型8字節,編碼器類型4字節,存儲在低四字節,故轉換爲int類型
int ex = static_cast<int>(inputVideo.get(CAP_PROP_FOURCC)); // Get Codec Type- Int form
// 通過位運算將數字轉換爲對應的字符串,每1個字節,8位代表一個字符,故通過&運算分別取出每個字節,並在最後加上'\0'
char EXT[] = {(char)(ex & 0XFF) , (char)((ex & 0XFF00) >> 8),(char)((ex & 0XFF0000) >> 16),(char)((ex & 0XFF000000) >> 24), 0};
// 獲取視頻尺寸
Size S = Size((int) inputVideo.get(CAP_PROP_FRAME_WIDTH), // Acquire input size
(int) inputVideo.get(CAP_PROP_FRAME_HEIGHT));
VideoWriter outputVideo;
outputVideo.open(NAME, ex, inputVideo.get(CAP_PROP_FPS), S, true);
- 將每一幀圖片寫入視頻流對象中即可
// res即爲要存儲的幀圖片
outputVideo.write(res); //or
outputVideo << res;