// Document image skew correction, method 2: line-detection-based deskew.
// Principle: detect straight lines in the text block and use their average
// inclination angle to rotate the image back to horizontal.
// Scope: to avoid interference from stray lines in the background, first crop
// the image to a region containing table/ruling lines, run line detection on
// that region only, and use the detected lines' angle to deskew the image.
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <vector>
#include <numeric>
#define MY_SKEW 14
// Image rotation with canvas expansion (no cropping of rotated content).
// Mat img          : input image, 1 or 3 channels
// Mat & imgout     : output image (enlarged to hold the rotated content)
// int degree       : rotation angle in degrees; positive = clockwise
// int border_value : fill value for pixels that fall outside the input
// Returns 0 on success, 1 if the input image is empty.
int rotateImage1(Mat img,Mat & imgout, int degree,int border_value)
{
	if (img.empty())
		return 1;
	// warpAffine treats positive angles as counter-clockwise; negate so a
	// positive `degree` argument means clockwise rotation.
	degree = -degree;
	double angle = degree * CV_PI / 180.; // radians
	double a = sin(angle), b = cos(angle);
	int width = img.cols;
	int height = img.rows;
	// Bounding box of the rotated rectangle: w' = w|cos| + h|sin|,
	// h' = h|cos| + w|sin|.  (The previous version subtracted the second
	// term, yielding a cropped or degenerate canvas for nonzero angles.)
	int width_rotate = int(width * fabs(b) + height * fabs(a));
	int height_rotate = int(height * fabs(b) + width * fabs(a));
	if (width_rotate <= 20 || height_rotate <= 20)
	{
		width_rotate = 20;
		height_rotate = 20;
	}
	// 2x3 affine matrix for rotation about the image centre.
	// getRotationMatrix2D replaces the deprecated cv2DRotationMatrix/CvMat
	// C-API path; it returns a CV_64F matrix.
	Point2f center(width / 2.0f, height / 2.0f);
	Mat map_matrix = getRotationMatrix2D(center, degree, 1.0);
	// Add a translation so the rotated content is centred in the enlarged
	// output canvas.
	map_matrix.at<double>(0, 2) += (width_rotate - width) / 2;
	map_matrix.at<double>(1, 2) += (height_rotate - height) / 2;
	// Pixels mapped from outside the source are filled with border_value
	// (BORDER_CONSTANT). Interpolation: bilinear, matching the old flag `1`.
	int channels = img.channels();
	if (channels == 3)
		warpAffine(img, imgout, map_matrix, Size(width_rotate, height_rotate),
		           INTER_LINEAR, BORDER_CONSTANT,
		           Scalar(border_value, border_value, border_value));
	else
		warpAffine(img, imgout, map_matrix, Size(width_rotate, height_rotate),
		           INTER_LINEAR, BORDER_CONSTANT, Scalar(border_value));
	return 0;
}
//投影傾斜校正:增值稅傾斜矯正方法舉例
// const Mat rgbimgin :圖像輸入,三通道
// Mat & rgbimgout :矯正後的圖像輸出
// int &theta :圖像傾斜的角度
int skew_correction_line(const Mat rgbimgin, Mat & rgbimgout, int &theta)
{
if (rgbimgin.empty() || rgbimgin.channels() != 3)
{
return 1;
}
Mat imgout_crop = rgbimgin.clone();
Mat imgout;
float zoom_ratio = 400.0 / imgout_crop.rows;
resize(imgout_crop, imgout, Size(0, 0), zoom_ratio, zoom_ratio, 1);
Mat Gray;
cvtColor(imgout, Gray, COLOR_RGB2GRAY);
medianBlur(Gray, Gray, 3);
Mat Bin;
adaptiveThreshold(Gray, Bin, 255, CV_ADAPTIVE_THRESH_GAUSSIAN_C, CV_THRESH_BINARY, 111, 5.0);
Bin = 255 - Bin;
vector<Vec4i> lines;
HoughLinesP(Bin, lines, 1, CV_PI / 180, 100, 100, 4);
if (lines.size() <= 0)
{
theta = 0;
rgbimgout = rgbimgin.clone();
return 0;
}
Mat Lineimg(Bin.rows, Bin.cols, CV_8UC1, Scalar::all(255));
int result = 0;
for (size_t i = 0; i < lines.size(); i++)
{
Vec4i l = lines[i];
line(Lineimg, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0), 1, CV_AA);
float param = float(int(l[1]) - int(l[3])) / float(abs(l[2] - l[0]));
int tt = atan(param) * 180 / PI;
if (tt > 45)
tt = tt - 90;
if (tt < -45)
tt = 90 + tt;
result = result + tt;
}
for (size_t i = 0; i < lines.size(); i++)
{
Vec4i l = lines[i];
line(imgout, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(186, 88, 255), 1, CV_AA);
}
theta = result / int(lines.size());
rotateImage1(rgbimgin, rgbimgout, theta, 0);
return 0;
}