顏色空間縮減
如果圖像矩陣存儲的是單通道像素,那麼像素有256種可能取值,但是如果是三通道的圖像,那麼像素就有256×256×256種可能性,如此多的顏色會對我們處理產生較大的影響。實際上,僅用顏色中有代表性的很小部分就可以達到同樣的效果了,這時候顏色空間縮減就顯得尤爲重要。顏色空間縮減的基本原理是:將現有顏色空間數除以某一特定值,以得到較少的顏色數,比如顏色值0~9取0, 10~19取1,以此類推。在OpenCV中訪問像素有三種方式,分別是基於指針訪問(C操作符[])、基於迭代器iterator訪問和動態地址計算。
代碼實現(基於C++)
#include "stdafx.h"
#include<opencv2\highgui\highgui.hpp>
#include<opencv2\core\core.hpp>
#include<opencv2\imgproc\imgproc.hpp>
using namespace std;
using namespace cv;
// 採用指針方式訪問像素
// Pointer-based pixel access: quantize every channel value into buckets of
// size `div`, mapping v -> v / div * div + div / 2 (the center of its bucket).
// input:  source image (8-bit, any channel count)
// output: quantized copy of the input
// div:    bucket size, e.g. 32 keeps 8 levels per channel
void colorReduce_1(Mat &input, Mat &output, int div)
{
    output = input.clone();
    int rowNum = output.rows;
    // Treat each row as a flat array of channel values.
    int colNum = output.cols * output.channels();
    for (int i = 0; i < rowNum; i++)
    {
        // ptr() returns the address of the first element of row i.
        uchar *data = output.ptr<uchar>(i);
        for (int j = 0; j < colNum; j++)
        {
            // BUG FIX: was "data[j] / div * div / 2", which halves the
            // quantized value instead of recentering it; "+ div / 2"
            // matches colorReduce_2 and colorReduce_3.
            data[j] = data[j] / div * div + div / 2;
        }
    }
}
// Iterator-based pixel access: walk the image as Vec3b pixels and quantize
// each BGR component to the center of its div-sized bucket.
// Note: assumes a 3-channel 8-bit image (Vec3b).
void colorReduce_2(Mat &input, Mat &output, int div)
{
    output = input.clone();
    Mat_<Vec3b>::iterator pixel = output.begin<Vec3b>();
    const Mat_<Vec3b>::iterator last = output.end<Vec3b>();
    while (pixel != last)
    {
        Vec3b &bgr = *pixel;
        for (int channel = 0; channel < 3; ++channel)
        {
            bgr[channel] = bgr[channel] / div * div + div / 2;
        }
        ++pixel;
    }
}
// Dynamic-address pixel access via Mat::at(): for each (row, col) fetch a
// reference to the Vec3b pixel and quantize its three BGR components.
// at<Vec3b>(i, j) computes the element address at call time (with type
// checks in debug builds), so the caller must know the element type.
void colorReduce_3(Mat &input, Mat &output, int div)
{
    output = input.clone();
    for (int row = 0; row < output.rows; ++row)
    {
        for (int col = 0; col < output.cols; ++col)
        {
            // Grab the pixel once instead of calling at<>() per channel.
            Vec3b &pixel = output.at<Vec3b>(row, col);
            pixel[0] = pixel[0] / div * div + div / 2;
            pixel[1] = pixel[1] / div * div + div / 2;
            pixel[2] = pixel[2] / div * div + div / 2;
        }
    }
}
// Demo driver: run all three color-reduction implementations on 1.jpg and
// display the original plus the three (identical) results.
int _tmain(int argc, _TCHAR* argv[])
{
    namedWindow("原始圖像");
    namedWindow("指針訪問");
    namedWindow("迭代器訪問");
    namedWindow("動態地址訪問");
    Mat img = imread("1.jpg");
    // imread does not throw: it returns an empty Mat when the file is
    // missing, and the reducers would then touch invalid data.
    if (img.empty())
    {
        return -1;
    }
    // No pre-allocation needed: each colorReduce_* clones its input into
    // the output, so the previous create() calls were dead work.
    Mat dst1, dst2, dst3;
    colorReduce_1(img, dst1, 32);
    colorReduce_2(img, dst2, 32);
    colorReduce_3(img, dst3, 32);
    imshow("原始圖像", img);
    imshow("指針訪問", dst1);
    imshow("迭代器訪問", dst2);
    imshow("動態地址訪問", dst3);
    waitKey(0);
    return 0;
}
代碼實現(基於Python)
# coding=UTF-8
import numpy as np
import cv2
def colorReduce_1(img, dst, div):
    """Quantize every channel of img into buckets of size div.

    Each value v becomes v // div * div + div // 2 (the center of its
    bucket), written element-wise into dst.

    :param img: source image as a numpy array of shape (h, w, channels)
    :param dst: pre-allocated output array, same shape/dtype as img
    :param div: bucket size, e.g. 32 keeps 8 levels per channel
    :return: None (result is written into dst)
    """
    m, n, c = np.shape(img)
    for i in range(m):
        for j in range(n):
            # BUG FIX: "/" is true division in Python 3, so the original
            # "img[i, j] / div * div" was a no-op; "//" restores the
            # intended integer quantization.
            dst[i, j] = img[i, j] // div * div + div // 2
if __name__ == '__main__':
    cv2.namedWindow('原圖')
    cv2.namedWindow('效果圖')
    img = cv2.imread('1.jpg')
    # cv2.imread returns None (not an exception) when the file is missing.
    if img is None:
        raise SystemExit('cannot read 1.jpg')
    dst = np.zeros(img.shape, np.uint8)
    colorReduce_1(img, dst, 32)
    cv2.imshow('原圖', img)
    cv2.imshow('效果圖', dst)
    cv2.waitKey(0)
ROI
在圖像處理中,ROI是感興趣區域,在圖像中選擇一塊感興趣的區域,以便我們進一步處理。使用ROI可以減少處理時間,增加精度,給圖像處理帶來便利。
OpenCV定義ROI有兩種方式,第一種方式如下:
Mat ROI;
ROI = image(Rect(x, y, cols, rows));
前兩個參數是ROI左上角座標,後兩個參數是矩形的寬和高。另一種方式就是指定感興趣的行或者列。
Mat ROI;
ROI = image(Range(y, y + rows), Range(x, x + cols));
下面我們通過一個實例來實現ROI:
代碼實現(基於C++)
#include "stdafx.h"
#include<opencv2\highgui\highgui.hpp>
#include<opencv2\imgproc\imgproc.hpp>
#include<opencv2\core\core.hpp>
using namespace std;
using namespace cv;
// ROI demo: blend a logo into a rectangular region of the source picture.
int _tmain(int argc, _TCHAR* argv[])
{
    Mat image = imread("dota.jpg");
    Mat logo = imread("logo.jpg");
    // imread returns an empty Mat on failure; without this check
    // logo.cols/logo.rows below would be 0 or the blend would crash.
    if (image.empty() || logo.empty())
    {
        return -1;
    }
    // The ROI is a header sharing pixels with `image`, so writing the
    // blended result into it modifies the original picture in place.
    Mat roi = image(Rect(200, 250, logo.cols, logo.rows));
    addWeighted(roi, 0.3, logo, 0.5, 0.0, roi);
    imshow("ROI疊加到原圖上", image);
    waitKey(0);
    return 0;
}
代碼實現(基於Python)
# coding=UTF-8
import numpy as np
import cv2
if __name__ == '__main__':
    image = cv2.imread('dota.jpg')
    logo = cv2.imread('logo.jpg')
    # cv2.imread returns None when a file is missing instead of raising.
    if image is None or logo is None:
        raise SystemExit('cannot read dota.jpg / logo.jpg')
    height, width = logo.shape[:2]
    # NOTE(review): this slice starts at row 200 / col 250, while the C++
    # version uses Rect(200, 250, ...) i.e. x=200, y=250 — confirm intent.
    roi = image[200: 200 + height, 250: 250 + width]
    # Assign the result back explicitly: passing a non-contiguous slice as
    # the dst argument of addWeighted is not guaranteed to write in place.
    image[200: 200 + height, 250: 250 + width] = cv2.addWeighted(roi, 0.3, logo, 0.5, 0.0)
    cv2.imshow('', image)
    cv2.waitKey(0)
通道分離/合併
有時候爲了更好的觀察圖像的特徵,需要對圖像的RGB的三個顏色通道的分量分別進行顯示和處理,這時候可以用上通道分離函數split(),該函數的C++版本有兩個原型:
void split(const Mat &src, Mat *mvbegin);
void split(InputArray m, OutputArrayOfArrays mv);
可以理解爲第一個參數是輸入的多通道圖像,第二個參數是輸出數組或者輸出的vector容器。需要注意的是OpenCV中存儲的順序是BGR而不是RGB。
通道合併函數merge()函數是split()函數的逆向操作,將多個單通道圖像合併成一個多通道圖像,C++中函數原型爲:
void merge(const Mat *mv, size_t count, OutputArray dst);
void merge(InputArrayOfArrays mv, OutputArray dst);
第一個參數是輸入的矩陣或者vector容器的陣列,這裏所有矩陣必須是一樣的尺度和深度;第二個參數是當第一個參數是C數組的時候,代表矩陣的個數,這個值必須大於1;第三個參數是輸出矩陣。
下面用一個綜合程序來實現分離和合並:
多通道分離和合並(基於C++的實現):
#include "stdafx.h"
#include<opencv2\highgui\highgui.hpp>
#include<opencv2\core\core.hpp>
#include<opencv2\imgproc\imgproc.hpp>
using namespace std;
using namespace cv;
int _tmain(int argc, _TCHAR* argv[])
{
Mat logoImage;
Mat image;
Mat blueChannel, greenChannel, redChannel;
vector<Mat>channel;
// 藍色部分
logoImage = imread("logo.jpg", 0);
image = imread("dota.jpg");
split(image, channel);
// Mat::at()返回一個引用到指定的數組元素(通道)
blueChannel = channel.at(0);
addWeighted(blueChannel(Rect(200, 250, logoImage.cols, logoImage.rows)), 1.0,
logoImage, 0.5, 0.0, blueChannel(Rect(200, 250, logoImage.cols, logoImage.rows)));
merge(channel, image);
imshow("遊戲原畫+logo藍色通道", image);
//綠色部分
logoImage = imread("logo.jpg", 0);
image = imread("dota.jpg");
split(image, channel);
greenChannel = channel.at(1);
addWeighted(greenChannel(Rect(200, 250, logoImage.cols, logoImage.rows)), 1.0,
logoImage, 0.5, 0.0, greenChannel(Rect(200, 250, logoImage.cols, logoImage.rows)));
merge(channel, image);
imshow("遊戲原畫+logo綠色通道", image);
// 紅色部分
logoImage = imread("logo.jpg", 0);
image = imread("dota.jpg");
split(image, channel);
redChannel = channel.at(2);
addWeighted(redChannel(Rect(200, 250, logoImage.cols, logoImage.rows)), 1.0,
logoImage, 0.5, 0.0, redChannel(Rect(200, 250, logoImage.cols, logoImage.rows)));
merge(channel, image);
imshow("遊戲原畫+logo紅色通道", image);
waitKey(0);
return 0;
}
多通道分離和合並(基於Python的實現):
# coding=UTF-8
import numpy as np
import cv2
if __name__ == '__main__':
    # --- Channel split/merge with cv2.split / cv2.merge ---
    def _blend_into_channel(index, title):
        """Blend the grayscale logo into channel `index` (0=B, 1=G, 2=R) of
        dota.jpg, re-merge, and show the result in window `title`."""
        image = cv2.imread('dota.jpg')
        logo = cv2.imread('logo.jpg', 0)  # 0 -> load as single channel
        if image is None or logo is None:
            raise SystemExit('cannot read dota.jpg / logo.jpg')
        h, w = logo.shape[:2]
        channels = list(cv2.split(image))
        roi = channels[index][200: 200 + h, 250: 250 + w]
        # Assign the result back explicitly: the slice is non-contiguous,
        # so relying on addWeighted's dst argument to write in place is
        # not guaranteed.
        channels[index][200: 200 + h, 250: 250 + w] = cv2.addWeighted(roi, 1, logo, 0.5, 0.0)
        cv2.imshow(title, cv2.merge(channels))

    _blend_into_channel(0, '遊戲原畫+logo藍色通道')
    _blend_into_channel(1, '遊戲原畫+logo綠色通道')
    # BUG FIX: the red result was titled "...綠色通道", which reused the
    # green window and hid its image.
    _blend_into_channel(2, '遊戲原畫+logo紅色通道')

    # --- Channel split/merge with plain numpy indexing ---
    def _blend_into_plane(index):
        """Same demo, but extracting channels by numpy slicing and
        re-stacking with np.dstack instead of cv2.split / cv2.merge."""
        image = cv2.imread('dota.jpg')
        logo = cv2.imread('logo.jpg', 0)
        if image is None or logo is None:
            raise SystemExit('cannot read dota.jpg / logo.jpg')
        h, w = logo.shape[:2]
        # Copy each channel into its own contiguous 2-D array.
        planes = [image[:, :, k].copy() for k in range(3)]
        # NOTE(review): this section used row 250 / col 200, the transpose
        # of the cv2.split section above — kept as in the original.
        roi = planes[index][250: 250 + h, 200: 200 + w]
        planes[index][250: 250 + h, 200: 200 + w] = cv2.addWeighted(roi, 1.0, logo, 0.5, 0.0)
        # Original showed all three numpy results in the same '' window.
        cv2.imshow('', np.dstack(planes))

    _blend_into_plane(0)
    _blend_into_plane(1)
    _blend_into_plane(2)
    cv2.waitKey(0)