GLSL矩陣變換詳解(二、旋轉變換、平移變換以及QMatrix4x4)
的基礎上再增加對攝像機位置、姿態的設置功能,以及成像區域的定義功能。
QMatrix4x4::lookAt(camera, center, upDirection)定義了攝像機的位置與姿態,屬於view matrix操作的範疇。三個輸入變量都是QVector3D類型。camera是攝像機在世界座標系的座標。center是攝像機的光軸上的任一點,也是世界座標系座標,它與camera連線決定了光軸的方向。這樣獲取影像後,還需要確定照片正上方的指向。upDirection決定了這一方向。這一方向同樣在世界座標系中表述。顯然,這一方向不能與攝像機光軸平行。
QMatrix4x4::ortho(left, right, bottom, top, near, far)定義了一塊長方體(視景體),屬於projection 操作的範疇。長方體的正面與camera的光軸垂直。near表示長方體正面與camera的距離,far表示長方體背面與camera的距離。其他量的意義見下圖。
其中l表示left,r表示right,t表示top,b表示bottom
根據https://en.wikibooks.org/wiki/GLSL_Programming/Vertex_Transformations，變換依次爲model matrix、view matrix、projection matrix。由於頂點以列向量右乘（即 m = projection * view * model，鏈中最右側的矩陣最先作用於頂點），代碼裏先寫的矩陣實際上最後作用。因此，在下面的實例中，我把ortho放在最前面，其次是lookAt，最後纔是translate和rotate。
h文件:
#ifndef MAINWINDOW_H
#define MAINWINDOW_H
#include <QOpenGLWidget>
#include <QOpenGLFunctions>
#include <QOpenGLTexture>
#include <QOpenGLShader>
#include <QOpenGLShaderProgram>

/// Minimal textured-quad viewer demonstrating the model-view-projection
/// pipeline (ortho + lookAt + translate/rotate) built with QMatrix4x4.
class MainWindow : public QOpenGLWidget, protected QOpenGLFunctions
{
Q_OBJECT
public:
explicit MainWindow(QWidget *parent = nullptr);
~MainWindow();
GLuint m_uiVertLoc = 0;                      // attribute location of "pos" in the vertex shader
GLuint m_uiTexNum = 0;                       // texture unit index bound in paintGL()
QOpenGLTexture * m_pTextures = nullptr;      // owned; released/deleted in the destructor
QOpenGLShaderProgram * m_pProgram = nullptr; // owned; deleted in the destructor
GLfloat * m_pVertices = nullptr;             // owned array of 18 floats (2 triangles x 3 vertices)
protected:
void initializeGL() override;
void paintGL() override;
void resizeGL(int w, int h) override;
};
#endif // MAINWINDOW_H
cpp文件:
#include "mainwindow.h"
/// Constructs the widget. All owning pointers start as nullptr so the
/// destructor is safe even if initializeGL() was never invoked (the
/// original left them indeterminate, making the deletes in ~MainWindow UB).
MainWindow::MainWindow(QWidget *parent)
    : QOpenGLWidget(parent)
    , m_uiVertLoc(0)
    , m_uiTexNum(0)
    , m_pTextures(nullptr)
    , m_pProgram(nullptr)
    , m_pVertices(nullptr)
{
}
/// Frees the GL resources. Per the QOpenGLWidget docs, the widget's context
/// must be made current before destroying textures/programs in the
/// destructor; the original skipped this and also dereferenced a possibly
/// never-initialized texture pointer.
MainWindow::~MainWindow()
{
    makeCurrent();
    if (m_pTextures) {
        m_pTextures->release();
        delete m_pTextures;
    }
    delete m_pProgram;
    delete [] m_pVertices;
    doneCurrent();
}
/// One-time GL setup: uploads the quad geometry, compiles/links the
/// shaders, loads the texture and sets the static uniforms.
void MainWindow::initializeGL()
{
    initializeOpenGLFunctions();
    m_uiTexNum = 0;
    // Two triangles forming the unit square [0,1]x[0,1] in the z = 0 plane.
    // The xy coordinates double as texture coordinates in the vertex shader.
    GLfloat arrVertices[18] = {0.0, 1.0, 0.0,
    0.0, 0.0, 0.0,
    1.0, 0.0, 0.0,
    1.0, 0.0, 0.0,
    1.0, 1.0, 0.0,
    0.0, 1.0, 0.0};
    // Single allocation: the original allocated this array twice in a row
    // and leaked the first 18-float block.
    m_pVertices = new GLfloat[18];
    memcpy(m_pVertices, arrVertices, 18 * sizeof(GLfloat));
    // Vertex shader: applies the combined MVP matrix and forwards xy as UVs.
    QOpenGLShader *vshader = new QOpenGLShader(QOpenGLShader::Vertex, this);
    const char *vsrc =
    "#version 330\n"
    "in vec3 pos;\n"
    "out vec2 texCoord;\n"
    "uniform mat4 mat4MVP;\n"
    "void main()\n"
    "{\n"
    "    gl_Position = mat4MVP * vec4(pos, 1.0);\n"
    "    texCoord = pos.xy;\n"
    "}\n";
    vshader->compileSourceCode(vsrc);
    // Fragment shader: samples the texture. (Fixed the misplaced semicolon
    // that was written after the newline in "Tex\n;".)
    QOpenGLShader *fshader = new QOpenGLShader(QOpenGLShader::Fragment, this);
    const char *fsrc =
    "#version 330\n"
    "out vec4 color;\n"
    "in vec2 texCoord;\n"
    "uniform sampler2D Tex;\n"
    "void main()\n"
    "{\n"
    "    color = texture(Tex, texCoord);\n"
    //"    color = vec4(1.0, 0.0, 0.0, 0.0);\n"   // debug: solid red
    "}\n";
    fshader->compileSourceCode(fsrc);
    m_pProgram = new QOpenGLShaderProgram;
    m_pProgram->addShader(vshader);
    m_pProgram->addShader(fshader);
    m_pProgram->link();
    m_pProgram->bind();
    // Wire the vertex array to the "pos" attribute (3 floats per vertex).
    m_uiVertLoc = m_pProgram->attributeLocation("pos");
    m_pProgram->enableAttributeArray(m_uiVertLoc);
    m_pProgram->setAttributeArray(m_uiVertLoc, m_pVertices, 3, 0);
    // Texture is flipped vertically because QImage's origin is top-left
    // while GL texture coordinates start at the bottom-left.
    m_pTextures = new QOpenGLTexture(QImage(QString("earth.bmp")).mirrored());
    m_pTextures->setMinificationFilter(QOpenGLTexture::Nearest);
    m_pTextures->setMagnificationFilter(QOpenGLTexture::Linear);
    m_pTextures->setWrapMode(QOpenGLTexture::Repeat);
    m_pProgram->setUniformValue("Tex", m_uiTexNum);
    glEnable(GL_DEPTH_TEST);
    glClearColor(0,0,0,1);
}
void MainWindow::paintGL()
{
//QMatrix4x4在聲明時被默認爲單位矩陣
QMatrix4x4 m1, m2, m3, m;
//m1.viewport(0,0,m_iWidth, m_iHeight,-15,15);//useless
m1.ortho(-1.0f, +1.0f, -1.0f, 1.0f, 0.0f, 15.0f);//right//generate projection matrix
m2.lookAt(QVector3D(10,0,10), QVector3D(0,0,0), QVector3D(0,1,0));//generate view matrix, right
qDebug()<<m2;
m3.translate(0,-0.707,0.0);//right, generate model matrices
m3.rotate(45, 0.0f, 0.0f, 1.0f);//right, generate model matrices
m = m1 * m2 * m3;
m_pProgram->setUniformValue("mat4MVP", m);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
m_pTextures->bind(m_uiTexNum);
glDrawArrays(GL_TRIANGLES, 0, 6);
m_pTextures->release();
}
/// Keeps the GL viewport in sync with the widget size so normalized
/// device coordinates always map onto the full widget area.
void MainWindow::resizeGL(int w, int h)
{
    glViewport(0, 0, w, h);
}
效果:
分析:
上圖展現的是攝像機和紋理圖在世界座標系的位置。從攝像機到紋理中心的距離爲14.14。而ortho(-1,1,-1,1,0,15)函數決定了從攝像機沿光軸方向走出15個單位內的物體都被成像，由於14.14 < 15，紋理整體落在視景體內，所以紋理能被完整看到。
再將攝像機移動到更遠的位置(20,0,10),此時由於攝像機視角明顯傾斜,圖像在X方向與Y方向比例差距就明顯了。
現在取一個極限值：22.36 = sqrt(20 * 20 + 10 * 10)，這是攝像機到紋理中心的距離。將這個值作爲視景體背面到攝像機的距離（即far），則紋理恰好有一半顯示不出來了——超出視景體背面的那一半。