這是第一篇博客,想把之前寫的一些東西整理成技術博客,陸續的搬運過來吧。介紹一下一直在做的Stereo match 的基本原理:
圖1.2 cones_left.jpg | 圖1.2 cones_right.jpg |
爲了模擬人眼對立體場景的捕捉和對不同景物遠近的識別能力,立體匹配算法要求採用兩個攝像頭代替人眼,通過獲取兩幅非常接近的圖片以獲取景深(視差:Disparity),從而計算出不同景物與攝像頭的距離,得到景深圖。
圖2:不同攝像頭中同一個像素對應的位置 | 圖3:相似三角形原理 |
具體計算方法爲:
當採取兩個同一水平線上的攝像頭進行拍攝的時候,同一物體將在兩個攝像機內被拍攝到,在兩個攝像機內部,這個物體相對於攝像機中心點位置有不同的座標,如上圖2所示。Xleft是該物體在左攝像機內相對位置,Xright是該物體在右攝像機內相對位置。兩個攝像機相距S,焦距爲f,物體P距離攝像機z,z也就是景深。當我們將兩幅圖像重疊在一起的時候,左攝像機上P的投影和右攝像機上P的投影位置有一個距離|Xleft|+|Xright|,這個距離稱爲Disparity(記爲d),根據相似三角形圖3可以得到z=Sf/d。也就是隻要計算得到了d的值,就可以計算得到深度圖。而在計算d的值的過程中需要對兩幅圖像進行匹配,尋找到物體P在兩幅圖像中的相對位置。在對圖像進行匹配的過程中,需要用到cost
computation,即通過尋找同一水平線上兩幅圖像上的點的最小誤差來確定這兩個點是否是同一個物體所成的像。由於一個點所能提供的信息太少,因此往往需要擴大對比的範圍。
OpenCV2源碼:
- // OpenCVTest.cpp : 定義控制檯應用程序的入口點。
- //
- #include "stdafx.h"
- #include <stdio.h>
- /*
- * stereo_match.cpp
- * calibration
- *
- * Created by Victor Eruhimov on 1/18/10.
- * Copyright 2010 Argus Corp. All rights reserved.
- *
- */
- #include "opencv2/calib3d/calib3d.hpp"
- #include "opencv2/imgproc/imgproc.hpp"
- #include "opencv2/highgui/highgui.hpp"
- #include "opencv2/contrib/contrib.hpp"
- using namespace cv;
// Write the command-line usage text for the stereo_match demo to stdout.
static void print_help()
{
    fputs("\nDemo stereo matching converting L and R images into disparity and point clouds\n",
          stdout);
    fputs("\nUsage: stereo_match <left_image> <right_image> [--algorithm=bm|sgbm|hh|var] [--blocksize=<block_size>]\n"
          "[--max-disparity=<max_disparity>] [--scale=scale_factor>] [-i <intrinsic_filename>] [-e <extrinsic_filename>]\n"
          "[--no-display] [-o <disparity_image>] [-p <point_cloud_file>]\n",
          stdout);
}
- static void saveXYZ(const char* filename, const Mat& mat)
- {
- const double max_z = 1.0e4;
- FILE* fp = fopen(filename, "wt");
- for (int y = 0; y < mat.rows; y++)
- {
- for (int x = 0; x < mat.cols; x++)
- {
- Vec3f point = mat.at<Vec3f>(y, x);
- if (fabs(point[2] - max_z) < FLT_EPSILON || fabs(point[2]) > max_z) continue;
- fprintf(fp, "%f %f %f\n", point[0], point[1], point[2]);
- }
- }
- fclose(fp);
- }
- int _tmain(int argc, _TCHAR* argv[])
- {
- const char* algorithm_opt = "--algorithm=";
- const char* maxdisp_opt = "--max-disparity=";
- const char* blocksize_opt = "--blocksize=";
- const char* nodisplay_opt = "--no-display";
- const char* scale_opt = "--scale=";
- //if (argc < 3)
- //{
- // print_help();
- // return 0;
- //}
- const char* img1_filename = 0;
- const char* img2_filename = 0;
- const char* intrinsic_filename = 0;
- const char* extrinsic_filename = 0;
- const char* disparity_filename = 0;
- const char* point_cloud_filename = 0;
- enum { STEREO_BM = 0, STEREO_SGBM = 1, STEREO_HH = 2, STEREO_VAR = 3 };
- int alg = STEREO_SGBM;
- int SADWindowSize = 0, numberOfDisparities = 0;
- bool no_display = false;
- float scale = 1.f;
- StereoBM bm;
- StereoSGBM sgbm;
- StereoVar var;
- //------------------------------
- /*img1_filename = "tsukuba_l.png";
- img2_filename = "tsukuba_r.png";*/
- img1_filename = "01.jpg";
- img2_filename = "02.jpg";
- int color_mode = alg == STEREO_BM ? 0 : -1;
- Mat img1 = imread(img1_filename, color_mode);
- Mat img2 = imread(img2_filename, color_mode);
- Size img_size = img1.size();
- Rect roi1, roi2;
- Mat Q;
- numberOfDisparities = numberOfDisparities > 0 ? numberOfDisparities : ((img_size.width / 8) + 15) & -16;
- bm.state->roi1 = roi1;
- bm.state->roi2 = roi2;
- bm.state->preFilterCap = 31;
- bm.state->SADWindowSize = SADWindowSize > 0 ? SADWindowSize : 9;
- bm.state->minDisparity = 0;
- bm.state->numberOfDisparities = numberOfDisparities;
- bm.state->textureThreshold = 10;
- bm.state->uniquenessRatio = 15;
- bm.state->speckleWindowSize = 100;
- bm.state->speckleRange = 32;
- bm.state->disp12MaxDiff = 1;
- sgbm.preFilterCap = 63;
- sgbm.SADWindowSize = SADWindowSize > 0 ? SADWindowSize : 3;
- int cn = img1.channels();
- sgbm.P1 = 8 * cn*sgbm.SADWindowSize*sgbm.SADWindowSize;
- sgbm.P2 = 32 * cn*sgbm.SADWindowSize*sgbm.SADWindowSize;
- sgbm.minDisparity = 0;
- sgbm.numberOfDisparities = numberOfDisparities;
- sgbm.uniquenessRatio = 10;
- sgbm.speckleWindowSize = bm.state->speckleWindowSize;
- sgbm.speckleRange = bm.state->speckleRange;
- sgbm.disp12MaxDiff = 1;
- sgbm.fullDP = alg == STEREO_HH;
- var.levels = 3; // ignored with USE_AUTO_PARAMS
- var.pyrScale = 0.5; // ignored with USE_AUTO_PARAMS
- var.nIt = 25;
- var.minDisp = -numberOfDisparities;
- var.maxDisp = 0;
- var.poly_n = 3;
- var.poly_sigma = 0.0;
- var.fi = 15.0f;
- var.lambda = 0.03f;
- var.penalization = var.PENALIZATION_TICHONOV; // ignored with USE_AUTO_PARAMS
- var.cycle = var.CYCLE_V; // ignored with USE_AUTO_PARAMS
- var.flags = var.USE_SMART_ID | var.USE_AUTO_PARAMS | var.USE_INITIAL_DISPARITY | var.USE_MEDIAN_FILTERING;
- Mat disp, disp8;
- //Mat img1p, img2p, dispp;
- //copyMakeBorder(img1, img1p, 0, 0, numberOfDisparities, 0, IPL_BORDER_REPLICATE);
- //copyMakeBorder(img2, img2p, 0, 0, numberOfDisparities, 0, IPL_BORDER_REPLICATE);
- int64 t = getTickCount();
- if (alg == STEREO_BM)
- bm(img1, img2, disp);
- else if (alg == STEREO_VAR) {
- var(img1, img2, disp);
- }
- else if (alg == STEREO_SGBM || alg == STEREO_HH)
- sgbm(img1, img2, disp);//------
- t = getTickCount() - t;
- printf("Time elapsed: %fms\n", t * 1000 / getTickFrequency());
- //disp = dispp.colRange(numberOfDisparities, img1p.cols);
- if (alg != STEREO_VAR)
- disp.convertTo(disp8, CV_8U, 255 / (numberOfDisparities*16.));
- else
- disp.convertTo(disp8, CV_8U);
- if (!no_display)
- {
- namedWindow("left", 1);
- imshow("left", img1);
- namedWindow("right", 1);
- imshow("right", img2);
- namedWindow("disparity", 0);
- imshow("disparity", disp8);
- imwrite("result.bmp", disp8);
- printf("press any key to continue...");
- fflush(stdout);
- waitKey();
- printf("\n");
- }
- return 0;
- }