1.1 顯示圖片
讀入圖片,顯示圖片
#include<opencv2\highgui.hpp>
// Path of the demo image. String literals must bind to const char* in ISO C++.
const char *path = "D:\\Coder\\vs\\1_OpenCV\\3096.jpg";

/**
 * Load an image from disk and display it until any key is pressed.
 * @return 0 on success, 1 if the image could not be read.
 */
int main()
{
	// IMREAD_UNCHANGED (== -1): load the file as-is, keeping any alpha channel.
	cv::Mat img = cv::imread(path, cv::IMREAD_UNCHANGED);
	if (img.empty())
		return 1;                      // file missing or decode failure
	cv::namedWindow("test", cv::WINDOW_AUTOSIZE);
	cv::imshow("test", img);
	cv::waitKey(0);                    // block until a key press
	cv::destroyWindow("test");
	return 0;
}
1.2 視頻
讀入視頻,顯示視頻
#include<opencv2/highgui/highgui.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include<iostream>
// Source video path and window title. const char* is required for literals in ISO C++.
const char* videopath = "D:/Coder/vs/1_OpenCV/videoTest.mp4";
const char* windowName = "test";

/**
 * Play a video file frame by frame until the stream ends or a key is pressed.
 * @return 0 on normal exit, -1 if the video cannot be opened.
 */
int main()
{
	cv::namedWindow(windowName, cv::WINDOW_AUTOSIZE);
	cv::VideoCapture cap;
	cap.open(videopath);
	if (!cap.isOpened())
		return -1;                     // fail fast instead of looping on empty frames
	cv::Mat frame;
	for (;;)
	{
		cap >> frame;
		if (frame.empty())
			break;                     // end of stream
		cv::imshow(windowName, frame);
		// Show the frame, then wait up to 33 ms (~30 FPS) for user input; if no
		// key arrives, continue with the next frame. For the true playback rate,
		// query cv::VideoCapture (CAP_PROP_FPS) instead of hard-coding 33 ms.
		// NOTE(review): on this build waitKey() was observed to return 255 when
		// no key is pressed (it is commonly -1) — hence the [0, 255) window.
		int keynum = cv::waitKey(33);
		if (keynum >= 0 && keynum < 255)
			break;                     // a real key press stops playback
	}
	// All OpenCV objects release their memory automatically at end of scope.
	return 0;
}
1.3 視頻跳轉
#include<iostream>
#include<opencv2\highgui.hpp>
#include<fstream>
#include<opencv2\imgproc.hpp>
// Shared state between main() and the trackbar callback.
const char* videopath = "D:/Coder/vs/1_OpenCV/videoTest.mp4";
const char* windowName = "test";
int g_slider_position = 0;   // current trackbar position (frame index)
int g_run = 1;               // >0: frames left to show (single step); <0: free-run
int g_dontset = 0;           // start out in single step mode; suppresses the
                             // callback fired by our own setTrackbarPos() call
cv::VideoCapture g_cap;

/**
 * Trackbar callback: seek the capture to the requested frame.
 * When the event came from the user (not from setTrackbarPos in main),
 * drop back to single-step mode so exactly one frame is shown.
 */
void onTrackbarSlide(int pos, void *)
{
	g_cap.set(cv::CAP_PROP_POS_FRAMES, pos);
	if (!g_dontset)
	{
		g_run = 1;       // user moved the slider: show one frame, then pause
	}
	g_dontset = 0;       // re-arm for the next event
}
// Video player with a seek trackbar. Keys: 's' = single step, 'r' = run, ESC = quit.
// Relies on the globals above (g_cap, g_run, g_dontset, onTrackbarSlide).
int main(int argc, char **argv)
{
cv::namedWindow(windowName, cv::WINDOW_AUTOSIZE);
g_cap.open(videopath);
// Query stream metadata for the trackbar range and the banner line.
int frames = (int)g_cap.get(cv::CAP_PROP_FRAME_COUNT);
int tmpw = (int)g_cap.get(cv::CAP_PROP_FRAME_WIDTH);
int tmph = (int)g_cap.get(cv::CAP_PROP_FRAME_HEIGHT);
std::cout << "Video has " << frames << " frames of dimensions(" << tmpw << ", " << tmph << ")." << std::endl;
cv::createTrackbar("position", windowName, &g_slider_position, frames, onTrackbarSlide);
cv::Mat frame;
for (;;)
{
// g_run > 0 means "show that many more frames"; g_run < 0 means run freely.
if (g_run != 0)
{
g_cap >> frame;
if (frame.empty()) break;
int current_pos = (int)g_cap.get(cv::CAP_PROP_POS_FRAMES);
// Set g_dontset BEFORE moving the trackbar so the callback triggered by
// setTrackbarPos() does not force us back into single-step mode.
g_dontset = 1;
cv::setTrackbarPos("position", windowName, current_pos);
cv::imshow(windowName, frame);
g_run -= 1;
}
char c = (char)cv::waitKey(10);
std::cout << (int)c << std::endl;
if (c == 's')
{
// Single-step: advance exactly one frame.
g_run = 1;
std::cout << "single step, run = " << g_run << std::endl;
}
if (c == 'r')
{
// Run mode: negative value never reaches 0, so playback continues.
g_run = -1;
std::cout << "Run mode, run = " << g_run << std::endl;
}
if (c == 27)   // ESC quits
{
break;
}
}
return 0;
}
1.4 打開攝像頭獲取視頻
//1.9 攝像頭中讀取
#include<iostream>
#include<opencv2\highgui.hpp> // provides camera/video capture (the original single "/" was a syntax error)
std::string nameWin = "test";

/**
 * Open the default camera (device 0) and show live frames until a key is pressed.
 * @return 0 on normal exit, -1 if the camera cannot be opened.
 */
int main(int argc, char ** argv)
{
	cv::namedWindow(nameWin, cv::WINDOW_AUTOSIZE);
	cv::VideoCapture cap;
	cap.open(0);                        // 0 selects the default camera
	if (!cap.isOpened())
	{
		std::cerr << "Couldn't open capture." << std::endl;
		return -1;
	}
	cv::Mat frame;
	for (;;)
	{
		cap >> frame;
		if (frame.empty())              // guard: imshow on an empty Mat would throw
			break;
		cv::imshow(nameWin, frame);
		if ((char)cv::waitKey(33) >= 0) // any key ends the preview (~30 FPS poll)
			break;
	}
	cap.release();
	cv::destroyAllWindows();
	return 0;
}
1.5 將視頻存入AVI文件
#include <opencv2/opencv.hpp>
#include <iostream>
// Output AVI path and input video path. const char* is required for string
// literals in ISO C++.
const char *writerPath = "D:\\Coder\\vs\\1_OpenCV\\test.avi";
const char* videopath = "D:/Coder/vs/1_OpenCV/videoTest.mp4";

/// Print usage for the "read a video, write out a log-polar version" example.
void help(char** argv) {
	std::cout << "\n"
		<< "Read in a video, write out a log polar of it\n"
		<< argv[0] << " <path/video> <path/video_output>\n"
		<< "For example:\n"
		<< argv[0] << " ../tree.avi ../vout.avi\n"
		<< "\nThen read it with:\n ./example_02-10 ../vout.avi\n"
		<< std::endl;
}
// Read a video, show each frame and its log-polar transform, and write the
// transformed frames to an AVI file (Motion-JPEG).
// NOTE(review): CV_CAP_PROP_*, CV_FOURCC and CV_WARP_FILL_OUTLIERS are legacy
// pre-OpenCV-4 names; newer builds use cv::CAP_PROP_*, cv::VideoWriter::fourcc
// and cv::WARP_FILL_OUTLIERS — confirm against the OpenCV version in use.
int main(int argc, char** argv) {
cv::namedWindow("Example 2-11", cv::WINDOW_AUTOSIZE);
cv::namedWindow("Log_Polar", cv::WINDOW_AUTOSIZE);
// ( Note: could capture from a camera by giving a camera id as an int.)
//
cv::VideoCapture capture;
capture.open(videopath);
// Mirror the input's frame rate and size in the writer so playback speed matches.
double fps = capture.get(CV_CAP_PROP_FPS);
cv::Size size(
(int)capture.get(CV_CAP_PROP_FRAME_WIDTH),
(int)capture.get(CV_CAP_PROP_FRAME_HEIGHT)
);
cv::VideoWriter writer;
writer.open(writerPath, CV_FOURCC('M', 'J', 'P', 'G'), fps, size);// when capturing from a camera, fps must be set manually
cv::Mat logpolar_frame, bgr_frame;
for (;;) {
capture >> bgr_frame;
if (bgr_frame.empty()) break; // end if done
cv::imshow("Example 2-11", bgr_frame);
cv::logPolar(
bgr_frame, // Input color frame
logpolar_frame, // Output log-polar frame
cv::Point2f( // Centerpoint for log-polar transformation
bgr_frame.cols / 2, // x
bgr_frame.rows / 2 // y
),
40, // Magnitude (scale parameter)
CV_WARP_FILL_OUTLIERS // Fill outliers with 'zero'
);
cv::imshow("Log_Polar", logpolar_frame);
writer << logpolar_frame;
char c = cv::waitKey(10);
if (c == 27) break; // allow the user to break out
}
// Flush and close the output file before the capture.
writer.release();
capture.release();
}
5.1 cv::addWeighted()
void cv::addWeighted(
cv::InputArray src1,
double alpha,
cv::InputArray src2,
double beta,
double gamma,
cv::OutputArray dst,
int dtype = -1
);
dst = saturate( src1*alpha + src2*beta + gamma );
// Example 5-1. Complete program to alpha-blend the ROI starting at (0,0) in src2 with the ROI starting at (x,y) in src1
// alphablend <imageA> <image B> <x> <y> <<alpha> <beta>
//
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace cv;
using namespace std;
// Print usage/help text for the alpha-blend example.
void help(const char **argv) {
	cout << "\n\n";
	cout << "This program alpha blends the first image onto the other \n";
	cout << "alpha is the blending of the first image and beta onto the second\n";
	cout << "Call:\n";
	cout << argv[0] << " <path/blend_this_img> <path/onto_this_img> <where_to_blend_x> <where_to_blend_y> <alpha> <beta>\n\n";
	cout << "Example:\n";
	cout << " ./example_05-01 ../faceTemplate.jpg ../faces.png 230 155 0.8 0.2\n";
	cout << endl;
}
const char *path1 = "D:/Coder/vs/1_OpenCV/3096.jpg";
const char *path2 = "D:/Coder/vs/1_OpenCV/1.jpg";
int main(int argc, const char** argv)
{
/*help(argv);
if (argc != 7) {
cout << "ERROR: Wrong # of parameters (7), you had " << argc << "\n" << endl;
return -1;
}*/
// Using the first two arguments, open up the image to be copied onto
// (src1), and the image that will be copied from (src2).
//
cv::Mat src1 = cv::imread(path1, 1);
cv::Mat src2 = cv::imread(path2, 1);
cv::imshow("1", src1);
cv::imshow("2", src2);
cv::waitKey(0);
int from_w = src1.size().width;
int from_h = src1.size().height;
int to_w = src2.size().width;
int to_h = src2.size().height;
std::cout << "< " << from_w << " , " << from_h << " > " << std::endl;
std::cout << " < " << to_w << " , " << to_h << " > " << std::endl;
//if (argc == 7 && !src1.empty() && !src2.empty())
// Four more arguments tell where in src1 to paste the chunk taken from
// src2. Note that the width and height also specify what portion of
// src2 to actually use.
//
int x = atoi("100");
int y = atoi("50");
// Make sure we don't exceed bounds:
if ((x < 0) || (y < 0) || (x > to_w - 1) || (y > to_h - 1) || (x + from_w > to_w - 1) || (y + from_h > to_h)) {
cout << "\nError, at (x,y) (" << x << ", " << y << "), your input image [w,h] [" << from_w << ", "
<< from_h << "] doesn't fit within the blend to image [w,h] [" << to_w << ", " << to_h << "]\n" << endl;
return -1;
}
// Two more arguments set the blending coefficients.
//
double alpha = (double)atof("0.7");
double beta = (double)atof("0.3");
cv::Mat roi1(src1, cv::Rect(0, 0, from_w - 1, from_h - 1)); //Just take the whole thing
cv::Mat roi2(src2, cv::Rect(x, y, from_w - 1, from_h - 1));
// Blend together the image src2 onto the image src1
// at the specified location.
//
cv::addWeighted(roi1, alpha, roi2, beta, 0.0, roi2);
// Create a window to shoow the result and show it.
//
cv::namedWindow("Alpha Blend", 1);
cv::imshow("Alpha Blend", src2);
// Leave the window up and runnnig until the user hits a key
//
cv::waitKey(0);
return 0;
}
綜合
#include<iostream>
#include<string>
#include<sstream>
#include<Windows.h>
using namespace std;
#include<opencv2\highgui.hpp>
#include<opencv2\core.hpp>
using namespace cv;
const string path = "E:\\Codes\\A_VS\\1_OpenCV\\nature_monte.bmp";

/**
 * Demo: center an image window on the screen and resize it, then open ten
 * cascaded windows showing the same photo.
 * @return 0 on success, -1 if the image file is missing.
 */
int main()
{
	// Full physical screen size in pixels.
	// (SM_CXFULLSCREEN / SM_CYFULLSCREEN would give the work area, i.e. the
	// screen minus the taskbar and similar regions.)
	//int cx = GetSystemMetrics(SM_CXFULLSCREEN);
	//int cy = GetSystemMetrics(SM_CYFULLSCREEN);
	int cx = GetSystemMetrics(SM_CXSCREEN);
	int cy = GetSystemMetrics(SM_CYSCREEN);
	/*
	MFC equivalent:
	HDC hDC = ::GetDC(HWND(NULL));          // screen DC
	int x = ::GetDeviceCaps(hDC,HORZRES);   // width
	int y = ::GetDeviceCaps(hDC,VERTRES);   // height
	::ReleaseDC(HWND(NULL),hDC);            // release DC
	*/
	Mat img = imread(path, IMREAD_ANYCOLOR);
	if (img.empty())
	{
		cout << "img missing!" << endl;
		return -1;
	}
	// WINDOW_NORMAL makes the window resizable. The original used
	// WINDOW_AUTOSIZE, which turns the resizeWindow() call below into a no-op
	// (only non-AUTOSIZE windows can be resized).
	namedWindow("nature_monte", WINDOW_NORMAL);
	// Newly created windows stack on top of each other; moveWindow() can place
	// a window anywhere on the desktop. Here: center it on the screen.
	/*
	void moveWindow(const String& winname, int x, int y);
	@param winname Window name.
	@param x       Horizontal position.
	@param y       Vertical position.
	*/
	moveWindow("nature_monte", (cx - img.cols) / 2, (cy - img.rows) / 2);
	imshow("nature_monte", img);
	/*
	void resizeWindow(const String& winname, int width, int height);
	@param winname Window name.
	@param width  The new window width.
	@param height The new window height.
	The size refers to the image area only; toolbars are not counted.
	*/
	resizeWindow("nature_monte", 512, 512);
	waitKey(0);
	destroyWindow("nature_monte");
	// Open ten overlapping windows, each offset 20 px diagonally.
	Mat photo = imread(path, IMREAD_COLOR);
	for (int i = 0; i < 10; i++)
	{
		ostringstream ss;
		ss << "Photo" << i;                 // unique window title per iteration
		namedWindow(ss.str(), WINDOW_NORMAL);
		moveWindow(ss.str(), 20 * i, 20 * i);
		imshow(ss.str(), photo);
	}
	waitKey(0);
	destroyAllWindows();
	return 0;
}