Calibrating a Stereo Camera with OpenCV and Configuring ORB-SLAM

Calibration program

//
// Created by gavyn on 20-4-27.
//
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <vector>
#include <string>
#include <algorithm>
#include <iostream>
#include <iterator>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <opencv2/opencv.hpp>
#include "cv.h"
#include <cv.hpp>
using namespace std;
using namespace cv;
const int imageWidth = 640;
const int imageHeight = 480;
const int boardWidth = 9;//number of inner corners per row
const int boardHeight = 6;//number of inner corners per column
const int boardCorner = boardWidth * boardHeight;//total number of inner corners
const int squareSize = 10;//size of a chessboard square, in mm
const int frameNumber = 13;//number of image pairs used for calibration
const Size boardSize = Size(boardWidth, boardHeight);

Size imageSize = Size(imageWidth, imageHeight);//image size
Mat R, T, E, F;//R rotation, T translation, E essential matrix, F fundamental matrix
vector<Mat> rvecs;//rotation vectors
vector<Mat> tvecs;//translation vectors
vector<vector<Point2f>> imagePointL;//corner coordinates of all left-camera images
vector<vector<Point2f>> imagePointR;//corner coordinates of all right-camera images
vector<vector<Point3f>> objRealPoint;//physical (world) coordinates of the corners for each image
vector<Point2f> cornerL;//corner coordinates of one left-camera image
vector<Point2f> cornerR;//corner coordinates of one right-camera image

Mat rgbImageL, grayImageL;
Mat rgbImageR, grayImageR;
Mat Rl, Rr, Pl, Pr, Q;//rectification rotations R, projection matrices P, disparity-to-depth matrix Q (explained in detail below)
Mat mapLx, mapLy, mapRx, mapRy;//rectification lookup tables for remap
Rect validROIL, validROIR;//after rectification the images are cropped; validROI is the remaining valid region

//Fill in here the parameters obtained from the monocular calibration of each camera.
/*Pre-calibrated intrinsic matrix of the left camera:
fx 0 cx
0 fy cy
0 0  1*/
Mat cameraMatrixL = (Mat_<double>(3, 3) << 296.65731645541695, 0, 343.1975436071541,
        0, 300.71016643747646, 246.01183552967473,
        0, 0, 1);
Mat distCoeffL = (Mat_<double>(4, 1) << -0.23906272129552558, 0.03436102573634348, 0.001517498429211239, -0.005280695866378259);//distortion coefficients of the left camera from monocular calibration
/*Pre-calibrated intrinsic matrix of the right camera:
fx 0 cx
0 fy cy
0 0  1*/
Mat cameraMatrixR = (Mat_<double>(3, 3) << 296.92709649579353, 0, 313.1873142211607,
        0, 300.0649937238372, 217.0722185756087,
        0, 0, 1);
Mat distCoeffR = (Mat_<double>(4, 1) << -0.23753878535018613, 0.03338842944635466, 0.0026030620085220105, -0.0008840126895030034);//distortion coefficients of the right camera from monocular calibration



void calRealPoint(vector<vector<Point3f>>& obj, int boardwidth, int boardheight, int imgNumber, int squaresize)
{
    vector<Point3f> imgpoint;
    for (int rowIndex = 0; rowIndex < boardheight; rowIndex++)
    {
        for (int colIndex = 0; colIndex < boardwidth; colIndex++)
        {
            imgpoint.push_back(Point3f(rowIndex * squaresize, colIndex * squaresize, 0));
        }
    }
    for (int imgIndex = 0; imgIndex < imgNumber; imgIndex++)
    {
        obj.push_back(imgpoint);
    }
}

//Extrinsic parameters
// R  -- rotation of the right camera relative to the left camera
// T  -- translation of the right camera relative to the left camera
// R1,R2 -- rectification (rotation) transforms of the left/right cameras, 3x3
// P1,P2 -- projection matrices of the left/right cameras in the rectified coordinate system, 3x4
// Q  -- disparity-to-depth mapping matrix, used here to compute the 3D coordinates of a single target point
//    Mat R1, R2, P1, P2, Q;//obtained from stereoRectify()

void outputCameraParam(void)
{	/* save and print the data */
    FileStorage fs("intrinsics.yml", FileStorage::WRITE);  //open the file storage for writing
    if (fs.isOpened())
    {
        fs << "cameraMatrixL" << cameraMatrixL << "cameraDistcoeffL" << distCoeffL << "cameraMatrixR" << cameraMatrixR << "cameraDistcoeffR" << distCoeffR;
        fs.release();
        cout << "cameraMatrixL=:" << cameraMatrixL << endl << "cameraDistcoeffL=:" << distCoeffL << endl << "cameraMatrixR=:" << cameraMatrixR << endl << "cameraDistcoeffR=:" << distCoeffR << endl;
    }
    else
    {
        cout << "Error: can not save the intrinsics!!!!!" << endl;
    }
    fs.open("extrinsics.yml", FileStorage::WRITE);
    if (fs.isOpened())
    {
        fs << "R" << R << "T" << T << "Rl" << Rl << "Rr" << Rr << "Pl" << Pl << "Pr" << Pr << "Q" << Q;
        cout << "R=" << R << endl << "T=" << T << endl << "Rl=" << Rl << endl << "Rr=" << Rr << endl << "Pl=" << Pl << endl << "Pr=" << Pr << endl << "Q=" << Q << endl;
        fs.release();
    }
    else
        cout << "Error: can not save the extrinsic parameters\n";
}


int main(int argc, char* argv[])
{
    Mat img;
    int goodFrameCount = 1;
    cout << "Total Images:" << frameNumber << endl;
    while (goodFrameCount < frameNumber)
    {
        cout <<"Current image :" << goodFrameCount << endl;
        string 	filenamel,filenamer;
        char filename[100];

        /* read the left image */
        sprintf(filename, "./data/left%02d.jpg", goodFrameCount + 1);
        rgbImageL = imread(filename, CV_LOAD_IMAGE_COLOR);
        cvtColor(rgbImageL, grayImageL, CV_BGR2GRAY);

        /* read the right image */
        sprintf(filename, "./data/right%02d.jpg", goodFrameCount + 1);
        rgbImageR = imread(filename, CV_LOAD_IMAGE_COLOR);
        cvtColor(rgbImageR, grayImageR, CV_BGR2GRAY);

        bool isFindL, isFindR;
        isFindL = findChessboardCorners(rgbImageL, boardSize, cornerL);
        isFindR = findChessboardCorners(rgbImageR, boardSize, cornerR);
        if (isFindL == true && isFindR == true)
            //if all corners were found in both images, this pair can be used
        {
            cornerSubPix(grayImageL, cornerL, Size(5, 5), Size(-1, -1), TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 20, 0.1));
            drawChessboardCorners(rgbImageL, boardSize, cornerL, isFindL);
            imshow("chessboardL", rgbImageL);
            imagePointL.push_back(cornerL);
            cornerSubPix(grayImageR, cornerR, Size(5, 5), Size(-1, -1), TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 20, 0.1));
            drawChessboardCorners(rgbImageR, boardSize, cornerR, isFindR);
            imshow("chessboardR", rgbImageR);
            imagePointR.push_back(cornerR);
            cout << "The image " << goodFrameCount << " is good" << endl;
            goodFrameCount++;
        }
        else
        {
            cout << "The image "<< goodFrameCount <<"is bad please try again" << endl;
            goodFrameCount++;
        }

        if (waitKey(10) == 'q')
        {
            break;
        }
    }
    /*	compute the physical 3D coordinates of the corners, based on the actual size of the calibration squares	*/
    calRealPoint(objRealPoint, boardWidth, boardHeight, (int)imagePointL.size(), squareSize);//one set of object points per accepted image pair
    cout << "cal real successful" << endl;
    /*	run the stereo calibration; since both cameras have already been calibrated individually, use flag = CALIB_USE_INTRINSIC_GUESS	*/
    double rms = stereoCalibrate(objRealPoint, imagePointL, imagePointR,
                                 cameraMatrixL, distCoeffL,
                                 cameraMatrixR, distCoeffR,
                                 Size(imageWidth, imageHeight), R, T, E, F, CV_CALIB_USE_INTRINSIC_GUESS,
                                 TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 80, 1e-5));
    cout << "Stereo Calibration done with RMS error = " << rms << endl;
    stereoRectify(cameraMatrixL, distCoeffL, cameraMatrixR, distCoeffR, imageSize, R, T, Rl, Rr, Pl, Pr, Q,		CALIB_ZERO_DISPARITY, -1, imageSize, &validROIL, &validROIR);

    initUndistortRectifyMap(cameraMatrixL, distCoeffL, Rl, Pl, imageSize, CV_32FC1, mapLx, mapLy);
    initUndistortRectifyMap(cameraMatrixR, distCoeffR, Rr, Pr, imageSize, CV_32FC1, mapRx, mapRy);


    Mat rectifyImageL, rectifyImageR;
    cvtColor(grayImageL, rectifyImageL, CV_GRAY2BGR);
    cvtColor(grayImageR, rectifyImageR, CV_GRAY2BGR);

    imshow("Rectify Before", rectifyImageL);

    /*
    After remap, the left and right images are coplanar and row-aligned.
    */
    remap(rectifyImageL, rectifyImageL, mapLx, mapLy, INTER_LINEAR);
    remap(rectifyImageR, rectifyImageR, mapRx, mapRy, INTER_LINEAR);

    imshow("ImageL", rectifyImageL);
    imshow("ImageR", rectifyImageR);

    /* save and print the data */
    outputCameraParam();

    /*
        Display the rectification result:
        draw the left and right images side by side on one canvas.
        Only the last image pair is shown here, not all of them.
    */
    Mat canvas;
    double sf;
    int w, h;
    sf = 600. / MAX(imageSize.width, imageSize.height);
    w = cvRound(imageSize.width * sf);
    h = cvRound(imageSize.height * sf);
    canvas.create(h, w * 2, CV_8UC3);

    /* draw the left image on the canvas */
    Mat canvasPart = canvas(Rect(w*0, 0, w, h));								//take part of the canvas
    resize(rectifyImageL, canvasPart, canvasPart.size(), 0, 0, INTER_AREA);		//scale the image to the size of canvasPart
    Rect vroiL(cvRound(validROIL.x*sf), cvRound(validROIL.y*sf),				//the valid (cropped) region, scaled to the canvas
               cvRound(validROIL.width*sf), cvRound(validROIL.height*sf));
    rectangle(canvasPart, vroiL, Scalar(0, 0, 255), 3, 8);						//draw a rectangle around it

    cout << "Painted ImageL" << endl;

    /* draw the right image on the canvas */
    canvasPart = canvas(Rect(w, 0, w, h));										//take the other half of the canvas
    resize(rectifyImageR, canvasPart, canvasPart.size(), 0, 0, INTER_LINEAR);
    Rect vroiR(cvRound(validROIR.x * sf), cvRound(validROIR.y*sf),
               cvRound(validROIR.width * sf), cvRound(validROIR.height * sf));
    rectangle(canvasPart, vroiR, Scalar(0, 255, 0), 3, 8);

    cout << "Painted ImageR" << endl;

    /* draw horizontal lines so the row alignment can be checked */
    for (int i = 0; i < canvas.rows;i+=16)
        line(canvas, Point(0, i), Point(canvas.cols, i), Scalar(0, 255, 0), 1, 8);

    imshow("rectified", canvas);

    cout << "wait key" << endl;
    waitKey(0);
    system("pause");
    return 0;

}
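
The Q matrix computed above is the disparity-to-depth mapping. As a minimal, hypothetical sketch (not part of the program above), a rectified grayscale pair, here called rectL/rectR (illustrative names, e.g. obtained by remapping grayImageL/grayImageR), can be turned into 3D points like this:

// Compute a disparity map with block matching and reproject it to 3D using Q.
Ptr<StereoBM> bm = StereoBM::create(64, 21);       // numDisparities (multiple of 16), blockSize
Mat disp16S, disp32F, xyz;
bm->compute(rectL, rectR, disp16S);                // StereoBM expects 8-bit single-channel images
disp16S.convertTo(disp32F, CV_32F, 1.0 / 16.0);    // StereoBM returns disparity scaled by 16
reprojectImageTo3D(disp32F, xyz, Q, true);         // xyz(y, x) = (X, Y, Z) in the units of T (mm here)

Each valid pixel of xyz then holds the 3D point in the left rectified camera frame.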

The stereo calibration function stereoCalibrate()

stereoCalibrate() calibrates a stereo rig, i.e. both cameras at the same time. Besides the intrinsic and extrinsic parameter matrices of the two cameras, the calibration also yields their relative pose R, T.
(Figure: the extrinsic parameter file produced by OpenCV calibration and rectification, and the meaning of each parameter.)

double stereoCalibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1,
             InputArrayOfArrays imagePoints2, InputOutputArray cameraMatrix1,InputOutputArray distCoeffs1, 
             InputOutputArray cameraMatrix2, InputOutputArray distCoeffs2, Size imageSize, OutputArray R,
             OutputArray T, OutputArray E, OutputArray F, int
             flags=CALIB_FIX_INTRINSIC , TermCriteria criteria=
             TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6))
  • objectPoints - a vector<vector<Point3f>> holding the positions of the calibration corners in the world coordinate system
  • imagePoints1 - a vector<vector<Point2f>> holding the sub-pixel coordinates of the calibration corners as observed by the first camera
  • imagePoints2 - a vector<vector<Point2f>> holding the sub-pixel coordinates of the calibration corners as observed by the second camera
  • cameraMatrix1 - input/output camera matrix of the first camera. If any of CV_CALIB_USE_INTRINSIC_GUESS, CV_CALIB_FIX_ASPECT_RATIO, CV_CALIB_FIX_INTRINSIC, or CV_CALIB_FIX_FOCAL_LENGTH is set, some or all of its elements must be initialized beforehand
  • distCoeffs1 - input/output distortion coefficients of the first camera; the length of the output vector depends on the flags, i.e. on the distortion model used
  • cameraMatrix2 - input/output camera matrix of the second camera, with the same meaning as for the first camera
  • distCoeffs2 - input/output distortion coefficients of the second camera; the length of the output vector depends on the flags, i.e. on the distortion model used
  • imageSize - size of the images
  • R - output rotation matrix between the first and the second camera
  • T - output translation vector between the first and the second camera
  • E - output essential matrix
  • F - output fundamental matrix (a small sanity check using F is sketched after this list)
  • flags - CV_CALIB_FIX_INTRINSIC: keep the input cameraMatrix and distCoeffs fixed and estimate only R, T, E and F. CV_CALIB_USE_INTRINSIC_GUESS: iterate starting from the cameraMatrix and distCoeffs supplied by the user. CV_CALIB_FIX_PRINCIPAL_POINT: do not change the principal points during the optimization. CV_CALIB_FIX_FOCAL_LENGTH: do not change the focal lengths during the optimization.
    The flags argument can be zero or a combination of the following values:
    § CV_CALIB_FIX_INTRINSIC - fix cameraMatrix and distCoeffs of both cameras, so that only the R, T, E and F matrices are estimated
    § CV_CALIB_USE_INTRINSIC_GUESS - optimize some or all of the intrinsic parameters according to the other flags; the initial values are supplied by the user
    § CV_CALIB_FIX_PRINCIPAL_POINT - fix the principal points during the optimization
    § CV_CALIB_FIX_FOCAL_LENGTH - fix fx and fy
    § CV_CALIB_FIX_ASPECT_RATIO - optimize fy while keeping the ratio fx/fy fixed
    § CV_CALIB_SAME_FOCAL_LENGTH - enforce the same focal length for both cameras
    § CV_CALIB_ZERO_TANGENT_DIST - set the tangential distortion coefficients of each camera to zero and keep them fixed
    § CV_CALIB_FIX_K1,…,CV_CALIB_FIX_K6 - do not change the corresponding radial distortion coefficient during the optimization; if CV_CALIB_USE_INTRINSIC_GUESS is set, the value from distCoeffs is used, otherwise it is set to zero
    § CV_CALIB_RATIONAL_MODEL - enable the coefficients k4, k5 and k6; for backward compatibility this extra flag must be set explicitly to make the function use the rational model and return 8 coefficients. If the flag is not set, the function computes and returns only 5 distortion coefficients
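
For reference, a common sanity check on the returned F (similar to what OpenCV's stereo_calib.cpp sample does) is the mean epipolar error: each detected corner should lie close to the epipolar line induced by its counterpart in the other image. The sketch below uses the names from the program above (imagePointL/imagePointR, cameraMatrixL/R, distCoeffL/R, F); treat it as illustrative rather than the author's code.

// Mean epipolar error |x'^T F x| over all corner pairs, as a rough quality check.
double err = 0; int npoints = 0;
for (size_t i = 0; i < imagePointL.size(); i++)
{
    vector<Point2f> ptsL = imagePointL[i], ptsR = imagePointR[i];
    // Undistort the corners but keep pixel coordinates by passing the camera matrix as P.
    undistortPoints(ptsL, ptsL, cameraMatrixL, distCoeffL, Mat(), cameraMatrixL);
    undistortPoints(ptsR, ptsR, cameraMatrixR, distCoeffR, Mat(), cameraMatrixR);
    vector<Vec3f> linesInR, linesInL;
    computeCorrespondEpilines(ptsL, 1, F, linesInR);   // epilines of left points, drawn in the right image
    computeCorrespondEpilines(ptsR, 2, F, linesInL);   // epilines of right points, drawn in the left image
    for (size_t j = 0; j < ptsL.size(); j++)
    {
        err += fabs(ptsL[j].x*linesInL[j][0] + ptsL[j].y*linesInL[j][1] + linesInL[j][2])
             + fabs(ptsR[j].x*linesInR[j][0] + ptsR[j].y*linesInR[j][1] + linesInR[j][2]);
        npoints++;
    }
}
cout << "average epipolar error = " << err / npoints << endl;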


Calibration results

%YAML:1.0
---
Dl: !!opencv-matrix
   rows: 5
   cols: 1
   dt: d
   data: [ -7.5171058455674650e-02, -3.2559794563034683e-01,
       -1.0720364428814924e-02, 1.1357788200056862e-02,
       3.8727489810368249e-01 ]
cameraMatrixL: !!opencv-matrix
   rows: 3
   cols: 3
   dt: d
   data: [ 7.6297203793051915e+02, 0., 6.5854816905699784e+02, 0.,
       7.6978060487140294e+02, 3.8389901185247084e+02, 0., 0., 1. ]
Rl: !!opencv-matrix
   rows: 3
   cols: 3
   dt: d
   data: [ -8.8948356613028312e-01, 4.5329336993156871e-01,
       -5.7828248808314289e-02, -4.4965299661862007e-01,
       -8.9076100668543179e-01, -6.6007663196415073e-02,
       -8.1431985214960617e-02, -3.2710086286008251e-02,
       9.9614199893344146e-01 ]
Pl: !!opencv-matrix
   rows: 3
   cols: 4
   dt: d
   data: [ 7.6978060487140294e+02, 0., 7.6973354816436768e+02, 0., 0.,
       7.6978060487140294e+02, 3.1559518086910248e+02, 0., 0., 0., 1.,
       0. ]
R: !!opencv-matrix
   rows: 3
   cols: 3
   dt: d
   data: [ 9.9913947755714794e-01, -2.8393806911744292e-02,
       3.0234022489374214e-02, 2.4181389451658181e-02,
       9.9101139915951941e-01, 1.3157380871616811e-01,
       -3.3698142248744782e-02, -1.3072948582837060e-01,
       9.9084521331236852e-01 ]
T: !!opencv-matrix
   rows: 3
   cols: 1
   dt: d
   data: [ 2.1077615034348923e+01, -9.8022446654328519e+00,
       2.0202621830724499e+00 ]
Dr: !!opencv-matrix
   rows: 5
   cols: 1
   dt: d
   data: [ -2.8614681108029399e-01, 6.7003821811925124e-01,
       -3.1740286502597061e-02, 2.2416628349849026e-02,
       -6.9201496590439349e-01 ]
cameraMatrixR: !!opencv-matrix
   rows: 3
   cols: 3
   dt: d
   data: [ 7.6297203793051915e+02, 0., 6.1706332062650586e+02, 0.,
       7.6978060487140294e+02, 3.5318750921719538e+02, 0., 0., 1. ]
Rr: !!opencv-matrix
   rows: 3
   cols: 3
   dt: d
   data: [ -9.0333725055428238e-01, 4.2010126529507569e-01,
       -8.6583708967138920e-02, -4.2596964126874864e-01,
       -9.0231242543261825e-01, 6.6197821922472672e-02,
       -5.0315767691682188e-02, 9.6680989896578362e-02,
       9.9404281080555801e-01 ]
Pr: !!opencv-matrix
   rows: 3
   cols: 4
   dt: d
   data: [ 7.6978060487140294e+02, 0., 7.6973354816436768e+02,
       -1.7961330876626693e+04, 0., 7.6978060487140294e+02,
       3.1559518086910248e+02, 0., 0., 0., 1., 0. ]
Q: !!opencv-matrix
   rows: 4
   cols: 4
   dt: d
   data: [ 1., 0., 0., -7.6973354816436768e+02, 0., 1., 0.,
       -3.1559518086910248e+02, 0., 0., 0., 7.6978060487140294e+02, 0.,
       0., 4.2857659611021823e-02, 0. ]

Modify Euroc.yaml

%YAML:1.0

#--------------------------------------------------------------------------------------------
# Camera Parameters. Adjust them!
#--------------------------------------------------------------------------------------------

# Camera calibration and distortion parameters (OpenCV) 
Camera.fx: 7.6978060487140294e+02
Camera.fy: 7.6978060487140294e+02
Camera.cx: 7.6973354816436768e+02
Camera.cy: 3.1559518086910248e+02

Camera.k1: 0
Camera.k2: 0
Camera.p1: 0
Camera.p2: 0


# Camera frames per second 
Camera.fps: 25.0

# stereo baseline times fx
Camera.bf: 1.7961330876626693e+01
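# Note (assumption on how bf was derived): stereoRectify stores -fx*baseline in
# P2[0][3]; in the extrinsics above that entry is -1.7961330876626693e+04, with
# the baseline in mm (squareSize was given in mm). With the baseline converted
# to metres, bf = fx * baseline[m] = 769.78 * 0.0233 ≈ 17.96, the value above.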

# Color order of the images (0: BGR, 1: RGB. It is ignored if images are grayscale)
Camera.RGB: 1

# Close/Far threshold. Baseline times.
ThDepth: 100

#--------------------------------------------------------------------------------------------
# Stereo Rectification. Only if you need to pre-rectify the images.
# Camera.fx, .fy, etc must be the same as in LEFT.P
#--------------------------------------------------------------------------------------------
RIGHT.height: 800
RIGHT.width: 1280
RIGHT.D: !!opencv-matrix
   rows: 1
   cols: 5
   dt: d
   data: [ -7.5171058455674650e-02, -3.2559794563034683e-01, -1.0720364428814924e-02, 1.1357788200056862e-02, 3.8727489810368249e-01 ]
RIGHT.K: !!opencv-matrix
   rows: 3
   cols: 3
   dt: d
   data:  [ 7.6297203793051915e+02, 0., 6.5854816905699784e+02, 0.,
       7.6978060487140294e+02, 3.8389901185247084e+02, 0., 0., 1. ]
RIGHT.R:  !!opencv-matrix
   rows: 3
   cols: 3
   dt: d
   data: [ -8.8948356613028312e-01, 4.5329336993156871e-01,
       -5.7828248808314289e-02, -4.4965299661862007e-01,
       -8.9076100668543179e-01, -6.6007663196415073e-02,
       -8.1431985214960617e-02, -3.2710086286008251e-02,
       9.9614199893344146e-01 ]
RIGHT.P:  !!opencv-matrix
   rows: 3
   cols: 4
   dt: d
   data: [ 7.6978060487140294e+02, 0., 7.6973354816436768e+02, -1.7961330876626693e+01, 0.,
       7.6978060487140294e+02, 3.1559518086910248e+02, 0., 0., 0., 1.,
       0. ]

LEFT.height: 800
LEFT.width: 1280
LEFT.D: !!opencv-matrix
   rows: 1
   cols: 5
   dt: d
   data: [ -2.8614681108029399e-01, 6.7003821811925124e-01,
       -3.1740286502597061e-02, 2.2416628349849026e-02,
       -6.9201496590439349e-01 ]
LEFT.K: !!opencv-matrix
   rows: 3
   cols: 3
   dt: d
   data: [ 7.6297203793051915e+02, 0., 6.1706332062650586e+02, 0.,
       7.6978060487140294e+02, 3.5318750921719538e+02, 0., 0., 1. ]
LEFT.R:  !!opencv-matrix
   rows: 3
   cols: 3
   dt: d
   data: [ -9.0333725055428238e-01, 4.2010126529507569e-01,
       -8.6583708967138920e-02, -4.2596964126874864e-01,
       -9.0231242543261825e-01, 6.6197821922472672e-02,
       -5.0315767691682188e-02, 9.6680989896578362e-02,
       9.9404281080555801e-01 ]
LEFT.P:  !!opencv-matrix
   rows: 3
   cols: 4
   dt: d
   data: [ 7.6978060487140294e+02, 0., 7.6973354816436768e+02,
       0., 0., 7.6978060487140294e+02,
       3.1559518086910248e+02, 0., 0., 0., 1., 0. ]

#--------------------------------------------------------------------------------------------
# ORB Parameters
#--------------------------------------------------------------------------------------------

# ORB Extractor: Number of features per image
ORBextractor.nFeatures: 4200

# ORB Extractor: Scale factor between levels in the scale pyramid 	
ORBextractor.scaleFactor: 1.2

# ORB Extractor: Number of levels in the scale pyramid	
ORBextractor.nLevels: 8

# ORB Extractor: Fast threshold
# Image is divided in a grid. At each cell FAST are extracted imposing a minimum response.
# Firstly we impose iniThFAST. If no corners are detected we impose a lower value minThFAST
# You can lower these values if your images have low contrast			
ORBextractor.iniThFAST: 20
ORBextractor.minThFAST: 4

#--------------------------------------------------------------------------------------------
# Viewer Parameters
#--------------------------------------------------------------------------------------------
Viewer.KeyFrameSize: 0.05
Viewer.KeyFrameLineWidth: 1
Viewer.GraphLineWidth: 0.9
Viewer.PointSize: 2
Viewer.CameraSize: 0.08
Viewer.CameraLineWidth: 3
Viewer.ViewpointX: 0
Viewer.ViewpointY: -0.7
Viewer.ViewpointZ: -1.8
Viewer.ViewpointF: 500
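
For context, the LEFT.* / RIGHT.* rectification blocks above are consumed by ORB-SLAM2's stereo Euroc example (Examples/Stereo/stereo_euroc.cc) roughly as sketched below. Variable names follow that example; the settings path and image names are placeholders, and the exact code may differ between ORB-SLAM versions.

// Sketch: how ORB-SLAM2 pre-rectifies each stereo frame from these settings.
cv::FileStorage fsSettings("Euroc.yaml", cv::FileStorage::READ);
cv::Mat K_l, K_r, D_l, D_r, R_l, R_r, P_l, P_r;
fsSettings["LEFT.K"] >> K_l;   fsSettings["RIGHT.K"] >> K_r;
fsSettings["LEFT.D"] >> D_l;   fsSettings["RIGHT.D"] >> D_r;
fsSettings["LEFT.R"] >> R_l;   fsSettings["RIGHT.R"] >> R_r;
fsSettings["LEFT.P"] >> P_l;   fsSettings["RIGHT.P"] >> P_r;
int rows_l = fsSettings["LEFT.height"],  cols_l = fsSettings["LEFT.width"];
int rows_r = fsSettings["RIGHT.height"], cols_r = fsSettings["RIGHT.width"];

// Build the rectification maps once, from K, D, R and the 3x3 part of P.
cv::Mat M1l, M2l, M1r, M2r;
cv::initUndistortRectifyMap(K_l, D_l, R_l, P_l.rowRange(0,3).colRange(0,3),
                            cv::Size(cols_l, rows_l), CV_32F, M1l, M2l);
cv::initUndistortRectifyMap(K_r, D_r, R_r, P_r.rowRange(0,3).colRange(0,3),
                            cv::Size(cols_r, rows_r), CV_32F, M1r, M2r);

// For every incoming frame: rectify both images, then pass them to the SLAM
// system, e.g. SLAM.TrackStereo(imLeftRect, imRightRect, timestamp) in ORB-SLAM2.
cv::Mat imLeft = cv::imread("left.png"), imRight = cv::imread("right.png");
cv::Mat imLeftRect, imRightRect;
cv::remap(imLeft,  imLeftRect,  M1l, M2l, cv::INTER_LINEAR);
cv::remap(imRight, imRightRect, M1r, M2r, cv::INTER_LINEAR);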

