前言
2-D下常見的矩陣變換如下圖所示,本文主要介紹平移、旋轉、放縮、剪切、反射,不包含透視變換。
圖像座標系和二維笛卡爾座標系之間存在一定差異,做仿射變換時,需要注意座標系轉換帶來的影響。
上圖是經典的二維笛卡爾座標系,下圖是圖像座標系。
仿射變換:平移、旋轉、放縮、剪切、反射
以上各種仿射變換都是可以通過變換矩陣來實現的
OpenCV進行各種變換用的函數是warpAffine
python中使用
cv2.warpAffine
cpp的CPU版本
cv::warpAffine
Cuda版本使用
cv::cuda::warpAffine
使用該函數需要注意四個參數:第一個是輸入的原圖,第二個是目標圖,第三個是仿射變換矩陣(一個兩行三列的矩陣),第四個是目標圖像的大小。
筆者閱讀了Cuda版本的仿射變換,發現OpenCV的Cuda版本的仿射變換調用了CUDA的NPP庫。而對於使用者而言,只要詳細閱讀相關文檔,就能夠很好地使用庫。但對於開發者而言,先博而後淵,有了淵博的知識終能成爲一個優秀的開發者。
使用實例
// C++ example
// Rotate by `angle` (in radians) and translate by (x0, y0).
// The resulting matrix is H = [ cos(a) -sin(a) x0 ; sin(a) cos(a) y0 ].
// NOTE(review): with this H the rotation is about the coordinate origin
// (the top-left corner in image coordinates), not about the image center.
cv::Mat H = cv::Mat::zeros(2,3,CV_64FC1);
H.at<double>(0,0) = cos(angle);
H.at<double>(0,1) = -sin(angle);
H.at<double>(0,2) = x0;   // translation along x
H.at<double>(1,0) = sin(angle);
H.at<double>(1,1) = cos(angle);
H.at<double>(1,2) = y0;   // translation along y
// src, dst, 2x3 affine matrix, output size (remaining parameters use defaults)
cv::cuda::warpAffine(d_src,d_dst,H,_size);
作者說
關於OpenCV的環境搭建,讀者可以參見win10+ Opencv4+Cuda10+vs2017環境搭建;通過此鏈接可直接下載相關三方庫。
OpenCV源碼分析系列的文章旨在介紹經典OpenCV庫的同時介紹一定的圖像處理知識,歡迎讀者關注這個系列的文章。
查閱相關資料可以通過OpenCV的官方文檔。
附錄
OpenCV仿射變換的源代碼
// GPU affine warp: transforms `_src` with the 2x3 matrix `_M` and writes a
// `dsize`-sized result into `_dst`. `flags` packs the interpolation mode
// together with the optional WARP_INVERSE_MAP bit; `borderMode`/`borderValue`
// control how out-of-image pixels are produced. Work is submitted on `stream`.
// The call dispatches to NVIDIA's NPP library when the (depth, channels,
// interpolation) combination supports it; otherwise it falls back to
// OpenCV's own CUDA kernel (warpAffine_gpu).
void cv::cuda::warpAffine(InputArray _src, OutputArray _dst, InputArray _M, Size dsize, int flags, int borderMode, Scalar borderValue, Stream& stream)
{
GpuMat src = _src.getGpuMat();
Mat M = _M.getMat();
// An affine transform matrix is always 2 rows x 3 columns.
CV_Assert( M.rows == 2 && M.cols == 3 );
// The low bits of `flags` hold the interpolation constant; this mask drops
// the WARP_INVERSE_MAP bit, which is tested separately below.
const int interpolation = flags & INTER_MAX;
// Supported inputs: depth up to 32-bit float, at most 4 channels.
CV_Assert( src.depth() <= CV_32F && src.channels() <= 4 );
CV_Assert( interpolation == INTER_NEAREST || interpolation == INTER_LINEAR || interpolation == INTER_CUBIC );
CV_Assert( borderMode == BORDER_REFLECT101 || borderMode == BORDER_REPLICATE || borderMode == BORDER_CONSTANT || borderMode == BORDER_REFLECT || borderMode == BORDER_WRAP );
_dst.create(dsize, src.type());
GpuMat dst = _dst.getGpuMat();
// If `src` is a ROI of a larger GpuMat, recover the parent allocation's
// size and the ROI's offset within it (used by the fallback kernel below).
Size wholeSize;
Point ofs;
src.locateROI(wholeSize, ofs);
// useNppTab[depth][channels-1][interpolation] -- true when NPP provides a
// WarpAffine primitive for that combination (see the indexing at `useNpp`).
static const bool useNppTab[6][4][3] =
{
{
{false, false, true},
{false, false, false},
{false, true, true},
{false, false, false}
},
{
{false, false, false},
{false, false, false},
{false, false, false},
{false, false, false}
},
{
{false, true, true},
{false, false, false},
{false, true, true},
{false, false, false}
},
{
{false, false, false},
{false, false, false},
{false, false, false},
{false, false, false}
},
{
{false, true, true},
{false, false, false},
{false, true, true},
{false, false, true}
},
{
{false, true, true},
{false, false, false},
{false, true, true},
{false, false, true}
}
};
// NPP is only eligible for a constant border and when `src` is not an ROI
// (no offset into a parent allocation), and only for table-approved combos.
bool useNpp = borderMode == BORDER_CONSTANT && ofs.x == 0 && ofs.y == 0 && useNppTab[src.depth()][src.channels() - 1][interpolation];
// NPP bug on float data -- force the fallback kernel for CV_32F inputs.
useNpp = useNpp && src.depth() != CV_32F;
if (useNpp)
{
// NPP path. funcs[inverse?][depth][channels-1]: row 0 holds the forward
// nppiWarpAffine_* primitives, row 1 the nppiWarpAffineBack_* (inverse-map)
// primitives; null entries are unsupported combinations.
typedef void (*func_t)(const cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, double coeffs[][3], int flags, cudaStream_t stream);
static const func_t funcs[2][6][4] =
{
{
{NppWarp<CV_8U, nppiWarpAffine_8u_C1R>::call, 0, NppWarp<CV_8U, nppiWarpAffine_8u_C3R>::call, NppWarp<CV_8U, nppiWarpAffine_8u_C4R>::call},
{0, 0, 0, 0},
{NppWarp<CV_16U, nppiWarpAffine_16u_C1R>::call, 0, NppWarp<CV_16U, nppiWarpAffine_16u_C3R>::call, NppWarp<CV_16U, nppiWarpAffine_16u_C4R>::call},
{0, 0, 0, 0},
{NppWarp<CV_32S, nppiWarpAffine_32s_C1R>::call, 0, NppWarp<CV_32S, nppiWarpAffine_32s_C3R>::call, NppWarp<CV_32S, nppiWarpAffine_32s_C4R>::call},
{NppWarp<CV_32F, nppiWarpAffine_32f_C1R>::call, 0, NppWarp<CV_32F, nppiWarpAffine_32f_C3R>::call, NppWarp<CV_32F, nppiWarpAffine_32f_C4R>::call}
},
{
{NppWarp<CV_8U, nppiWarpAffineBack_8u_C1R>::call, 0, NppWarp<CV_8U, nppiWarpAffineBack_8u_C3R>::call, NppWarp<CV_8U, nppiWarpAffineBack_8u_C4R>::call},
{0, 0, 0, 0},
{NppWarp<CV_16U, nppiWarpAffineBack_16u_C1R>::call, 0, NppWarp<CV_16U, nppiWarpAffineBack_16u_C3R>::call, NppWarp<CV_16U, nppiWarpAffineBack_16u_C4R>::call},
{0, 0, 0, 0},
{NppWarp<CV_32S, nppiWarpAffineBack_32s_C1R>::call, 0, NppWarp<CV_32S, nppiWarpAffineBack_32s_C3R>::call, NppWarp<CV_32S, nppiWarpAffineBack_32s_C4R>::call},
{NppWarp<CV_32F, nppiWarpAffineBack_32f_C1R>::call, 0, NppWarp<CV_32F, nppiWarpAffineBack_32f_C3R>::call, NppWarp<CV_32F, nppiWarpAffineBack_32f_C4R>::call}
}
};
// Pre-fill the destination with the border color -- presumably pixels the
// NPP primitive does not write then keep `borderValue` (constant border).
dst.setTo(borderValue, stream);
// Copy M into a plain double[2][3] (row-major) as expected by the NPP call.
double coeffs[2][3];
Mat coeffsMat(2, 3, CV_64F, (void*)coeffs);
M.convertTo(coeffsMat, coeffsMat.type());
const func_t func = funcs[(flags & WARP_INVERSE_MAP) != 0][src.depth()][src.channels() - 1];
CV_Assert(func != 0);
func(src, dst, coeffs, interpolation, StreamAccessor::getStream(stream));
}
else
{
// Fallback path: OpenCV's own CUDA kernel.
using namespace cv::cuda::device::imgproc;
typedef void (*func_t)(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, float coeffs[2 * 3], PtrStepSzb dst, int interpolation,
int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
// funcs[depth][channels-1]; null entries (commented-out instantiations)
// are unsupported combinations and trip the assert below.
static const func_t funcs[6][4] =
{
{warpAffine_gpu<uchar> , 0 /*warpAffine_gpu<uchar2>*/ , warpAffine_gpu<uchar3> , warpAffine_gpu<uchar4> },
{0 /*warpAffine_gpu<schar>*/, 0 /*warpAffine_gpu<char2>*/ , 0 /*warpAffine_gpu<char3>*/, 0 /*warpAffine_gpu<char4>*/},
{warpAffine_gpu<ushort> , 0 /*warpAffine_gpu<ushort2>*/, warpAffine_gpu<ushort3> , warpAffine_gpu<ushort4> },
{warpAffine_gpu<short> , 0 /*warpAffine_gpu<short2>*/ , warpAffine_gpu<short3> , warpAffine_gpu<short4> },
{0 /*warpAffine_gpu<int>*/ , 0 /*warpAffine_gpu<int2>*/ , 0 /*warpAffine_gpu<int3>*/ , 0 /*warpAffine_gpu<int4>*/ },
{warpAffine_gpu<float> , 0 /*warpAffine_gpu<float2>*/ , warpAffine_gpu<float3> , warpAffine_gpu<float4> }
};
const func_t func = funcs[src.depth()][src.channels() - 1];
CV_Assert(func != 0);
// The kernel consumes the dst->src (inverse) mapping as floats: pass M
// through unchanged when the caller already supplied the inverse map
// (WARP_INVERSE_MAP), otherwise invert the forward matrix first.
float coeffs[2 * 3];
Mat coeffsMat(2, 3, CV_32F, (void*)coeffs);
if (flags & WARP_INVERSE_MAP)
M.convertTo(coeffsMat, coeffsMat.type());
else
{
cv::Mat iM;
invertAffineTransform(M, iM);
iM.convertTo(coeffsMat, coeffsMat.type());
}
// Border color converted to float, as required by the kernel signature.
Scalar_<float> borderValueFloat;
borderValueFloat = borderValue;
// Pass the whole parent buffer plus the ROI offset so border handling can
// read pixels outside the ROI but inside the parent allocation.
func(src, PtrStepSzb(wholeSize.height, wholeSize.width, src.datastart, src.step), ofs.x, ofs.y, coeffs,
dst, interpolation, borderMode, borderValueFloat.val, StreamAccessor::getStream(stream), deviceSupports(FEATURE_SET_COMPUTE_20));
}
}