Shadertoy Tutorial Part 7 - Color and Drawing Multiple Objects in a 3D Scene

Note: This series is translated from Nathan Vaughn's Shaders Language Tutorial with the author's permission. If you repost or reprint it, please obtain the consent of both the author and the translator, and credit the original link prominently in the article. If this article helps you, click the donation link to buy the author a cup of coffee.

Hello, friends! Welcome to Part 7 of the Shadertoy tutorial series. This time we'll add some color to the 3D scene, and we'll also learn how to draw multiple objects in it, such as a floor.

Drawing Multiple 3D Objects

In the previous tutorial, we learned how to draw a sphere with Shadertoy, but so far our scene can only render that one shape.

Let's refactor the earlier code so that an sdScene function is responsible for returning the closest shape in the scene.

const int MAX_MARCHING_STEPS = 255;
const float MIN_DIST = 0.0;
const float MAX_DIST = 100.0;
const float PRECISION = 0.001;

float sdSphere(vec3 p, float r )
{
  vec3 offset = vec3(0, 0, -2);
  return length(p - offset) - r;
}

float sdScene(vec3 p) {
  return sdSphere(p, 1.);
}

float rayMarch(vec3 ro, vec3 rd, float start, float end) {
  float depth = start;

  for (int i = 0; i < MAX_MARCHING_STEPS; i++) {
    vec3 p = ro + depth * rd;
    float d = sdScene(p);
    depth += d;
    if (d < PRECISION || depth > end) break;
  }

  return depth;
}

vec3 calcNormal(in vec3 p) {
    vec2 e = vec2(1.0, -1.0) * 0.0005; // epsilon
    float r = 1.; // radius of sphere
    return normalize(
      e.xyy * sdScene(p + e.xyy) +
      e.yyx * sdScene(p + e.yyx) +
      e.yxy * sdScene(p + e.yxy) +
      e.xxx * sdScene(p + e.xxx));
}

void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
  vec2 uv = (fragCoord-.5*iResolution.xy)/iResolution.y;
  vec3 backgroundColor = vec3(0.835, 1, 1);

  vec3 col = vec3(0);
  vec3 ro = vec3(0, 0, 3); // ray origin that represents camera position
  vec3 rd = normalize(vec3(uv, -1)); // ray direction

  float d = rayMarch(ro, rd, MIN_DIST, MAX_DIST); // distance to sphere

  if (d > MAX_DIST) {
    col = backgroundColor; // ray didn't hit anything
  } else {
    vec3 p = ro + rd * d; // point on sphere we discovered from ray marching
    vec3 normal = calcNormal(p);
    vec3 lightPosition = vec3(2, 2, 7);
    vec3 lightDirection = normalize(lightPosition - p);

    // Calculate diffuse reflection by taking the dot product of 
    // the normal and the light direction.
    float dif = clamp(dot(normal, lightDirection), 0.3, 1.);

    // Multiply the diffuse reflection value by an orange color and add a bit
    // of the background color to the sphere to blend it more with the background.
    col = dif * vec3(1, 0.58, 0.29) + backgroundColor * .2;
  }

  // Output to screen
  fragColor = vec4(col, 1.0);
}
 

Notice how we replaced the calls to sdSphere with sdScene. To add more objects to the scene, we can use the min function so that sdScene returns whichever object is closest.

float sdScene(vec3 p) {
  float sphereLeft = sdSphere(p, 1.);
  float sphereRight = sdSphere(p, 1.);
  return min(sphereLeft, sphereRight);
}

Right now, both spheres are in exactly the same position. Let's add an offset parameter to the sdSphere function:

float sdSphere(vec3 p, float r, vec3 offset )
{
  return length(p - offset) - r;
}

Then we give each sphere a slightly different offset:

float sdScene(vec3 p) {
  float sphereLeft = sdSphere(p, 1., vec3(-2.5, 0, -2));
  float sphereRight = sdSphere(p, 1., vec3(2.5, 0, -2));
  return min(sphereLeft, sphereRight);
}

The complete code now looks like this:

const int MAX_MARCHING_STEPS = 255;
const float MIN_DIST = 0.0;
const float MAX_DIST = 100.0;
const float PRECISION = 0.001;

float sdSphere(vec3 p, float r, vec3 offset )
{
  return length(p - offset) - r;
}

float sdScene(vec3 p) {
  float sphereLeft = sdSphere(p, 1., vec3(-2.5, 0, -2));
  float sphereRight = sdSphere(p, 1., vec3(2.5, 0, -2));
  return min(sphereLeft, sphereRight);
}

float rayMarch(vec3 ro, vec3 rd, float start, float end) {
  float depth = start;

  for (int i = 0; i < MAX_MARCHING_STEPS; i++) {
    vec3 p = ro + depth * rd;
    float d = sdScene(p);
    depth += d;
    if (d < PRECISION || depth > end) break;
  }

  return depth;
}

vec3 calcNormal(in vec3 p) {
    vec2 e = vec2(1.0, -1.0) * 0.0005; // epsilon
    float r = 1.; // radius of sphere
    return normalize(
      e.xyy * sdScene(p + e.xyy) +
      e.yyx * sdScene(p + e.yyx) +
      e.yxy * sdScene(p + e.yxy) +
      e.xxx * sdScene(p + e.xxx));
}

void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
  vec2 uv = (fragCoord-.5*iResolution.xy)/iResolution.y;
  vec3 backgroundColor = vec3(0.835, 1, 1);

  vec3 col = vec3(0);
  vec3 ro = vec3(0, 0, 3); // ray origin that represents camera position
  vec3 rd = normalize(vec3(uv, -1)); // ray direction

  float d = rayMarch(ro, rd, MIN_DIST, MAX_DIST); // distance to sphere

  if (d > MAX_DIST) {
    col = backgroundColor; // ray didn't hit anything
  } else {
    vec3 p = ro + rd * d; // point on sphere we discovered from ray marching
    vec3 normal = calcNormal(p);
    vec3 lightPosition = vec3(2, 2, 7);
    vec3 lightDirection = normalize(lightPosition - p);

    // Calculate diffuse reflection by taking the dot product of 
    // the normal and the light direction.
    float dif = clamp(dot(normal, lightDirection), 0.3, 1.);

    // Multiply the diffuse reflection value by an orange color and add a bit
    // of the background color to the sphere to blend it more with the background.
    col = dif * vec3(1, 0.58, 0.29) + backgroundColor * .2;
  }

  // Output to screen
  fragColor = vec4(col, 1.0);
}

Run the code above and you'll see two symmetric orange spheres, one on each side.

Adding a Floor

We can add a floor one unit below the spheres like this:

float sdFloor(vec3 p) {
  return p.y + 1.;
}

p.y + 1 can also be written as p.y - (-1), which means the floor is offset one unit downward, i.e. it sits at the plane y = -1. Next, we call the min function again to add the floor to our scene:

float sdScene(vec3 p) {
  float sphereLeft = sdSphere(p, 1., vec3(-2.5, 0, -2));
  float sphereRight = sdSphere(p, 1., vec3(2.5, 0, -2));
  float res = min(sphereLeft, sphereRight);
  res = min(res, sdFloor(p));
  return res;
}

Adding Color

There are many ways to add color to 3D shapes in Shadertoy. One approach is to modify our SDFs so that they return both the distance to the shape and its color, changing the return type from float to vec4. The first component of the vec4 holds the "signed distance", and the remaining three components hold the color. This means we have to update the return values throughout the code. The complete code looks like this:

const int MAX_MARCHING_STEPS = 255;
const float MIN_DIST = 0.0;
const float MAX_DIST = 100.0;
const float PRECISION = 0.001;

vec4 sdSphere(vec3 p, float r, vec3 offset, vec3 col )
{
  float d = length(p - offset) - r;
  return vec4(d, col);
}

vec4 sdFloor(vec3 p, vec3 col) {
  float d = p.y + 1.;
  return vec4(d, col);
}

vec4 minWithColor(vec4 obj1, vec4 obj2) {
  if (obj2.x < obj1.x) return obj2; // The x component of the object holds the "signed distance" value
  return obj1;
}

vec4 sdScene(vec3 p) {
  vec4 sphereLeft = sdSphere(p, 1., vec3(-2.5, 0, -2), vec3(0, .8, .8));
  vec4 sphereRight = sdSphere(p, 1., vec3(2.5, 0, -2), vec3(1, 0.58, 0.29));
  vec4 co = minWithColor(sphereLeft, sphereRight); // co = closest object containing "signed distance" and color
  co = minWithColor(co, sdFloor(p, vec3(0, 1, 0)));
  return co;
}

vec4 rayMarch(vec3 ro, vec3 rd, float start, float end) {
  float depth = start;
  vec4 co; // closest object

  for (int i = 0; i < MAX_MARCHING_STEPS; i++) {
    vec3 p = ro + depth * rd;
    co = sdScene(p);
    depth += co.x;
    if (co.x < PRECISION || depth > end) break;
  }
  
  vec3 col = vec3(co.yzw);

  return vec4(depth, col);
}

vec3 calcNormal(in vec3 p) {
    vec2 e = vec2(1.0, -1.0) * 0.0005; // epsilon
    return normalize(
      e.xyy * sdScene(p + e.xyy).x +
      e.yyx * sdScene(p + e.yyx).x +
      e.yxy * sdScene(p + e.yxy).x +
      e.xxx * sdScene(p + e.xxx).x);
}

void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
  vec2 uv = (fragCoord-.5*iResolution.xy)/iResolution.y;
  vec3 backgroundColor = vec3(0.835, 1, 1);

  vec3 col = vec3(0);
  vec3 ro = vec3(0, 0, 3); // ray origin that represents camera position
  vec3 rd = normalize(vec3(uv, -1)); // ray direction

  vec4 co = rayMarch(ro, rd, MIN_DIST, MAX_DIST); // closest object

  if (co.x > MAX_DIST) {
    col = backgroundColor; // ray didn't hit anything
  } else {
    vec3 p = ro + rd * co.x; // point on sphere or floor we discovered from ray marching
    vec3 normal = calcNormal(p);
    vec3 lightPosition = vec3(2, 2, 7);
    vec3 lightDirection = normalize(lightPosition - p);

    // Calculate diffuse reflection by taking the dot product of 
    // the normal and the light direction.
    float dif = clamp(dot(normal, lightDirection), 0.3, 1.);

    // Multiply the diffuse reflection value by an orange color and add a bit
    // of the background color to the sphere to blend it more with the background.
    col = dif * co.yzw + backgroundColor * .2;
  }

  // Output to screen
  fragColor = vec4(col, 1.0);
}  

To get everything to compile cleanly, we had to change the code in several places. The first change is the return type of the SDFs, from float to vec4:

vec4 sdSphere(vec3 p, float r, vec3 offset, vec3 col )
{
  float d = length(p - offset) - r;
  return vec4(d, col);
}

vec4 sdFloor(vec3 p, vec3 col) {
  float d = p.y + 1.;
  return vec4(d, col);
}

These functions now accept a new color parameter, but that breaks our use of the min function, because min can only compare float values. So we need to write our own min-like function to handle this:

vec4 minWithColor(vec4 obj1, vec4 obj2) {
  if (obj2.x < obj1.x) return obj2;
  return obj1;
}

vec4 sdScene(vec3 p) {
  vec4 sphereLeft = sdSphere(p, 1., vec3(-2.5, 0, -2), vec3(0, .8, .8));
  vec4 sphereRight = sdSphere(p, 1., vec3(2.5, 0, -2), vec3(1, 0.58, 0.29));
  vec4 co = minWithColor(sphereLeft, sphereRight); // co = closest object containing "signed distance" and color
  co = minWithColor(co, sdFloor(p, vec3(0, 1, 0)));
  return co;
}

The minWithColor function behaves much like min. It accepts vec4 values that hold both the color we want to apply and the "signed distance" returned by the SDF, and returns whichever object is closer. Next, we also need to modify the ray marching function:

vec4 rayMarch(vec3 ro, vec3 rd, float start, float end) {
  float depth = start;
  vec4 co; // closest object

  for (int i = 0; i < MAX_MARCHING_STEPS; i++) {
    vec3 p = ro + depth * rd;
    co = sdScene(p);
    depth += co.x;
    if (co.x < PRECISION || depth > end) break;
  }
  
  vec3 col = vec3(co.yzw);

  return vec4(depth, col);
}

We also need to modify the calcNormal function so that it only uses the x component of the value returned by sdScene:

vec3 calcNormal(in vec3 p) {
    vec2 e = vec2(1.0, -1.0) * 0.0005; // epsilon
    return normalize(
      e.xyy * sdScene(p + e.xyy).x +
      e.yyx * sdScene(p + e.yyx).x +
      e.yxy * sdScene(p + e.yxy).x +
      e.xxx * sdScene(p + e.xxx).x);
}

Finally, we modify the mainImage function to use our new code:

void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
  vec2 uv = (fragCoord-.5*iResolution.xy)/iResolution.y;
  vec3 backgroundColor = vec3(0.835, 1, 1);

  vec3 col = vec3(0);
  vec3 ro = vec3(0, 0, 3); // ray origin that represents camera position
  vec3 rd = normalize(vec3(uv, -1)); // ray direction

  vec4 co = rayMarch(ro, rd, MIN_DIST, MAX_DIST); // closest object

  if (co.x > MAX_DIST) {
    col = backgroundColor; // ray didn't hit anything
  } else {
    vec3 p = ro + rd * co.x; // point on sphere or floor we discovered from ray marching
    vec3 normal = calcNormal(p);
    vec3 lightPosition = vec3(2, 2, 7);
    vec3 lightDirection = normalize(lightPosition - p);

    // Calculate diffuse reflection by taking the dot product of 
    // the normal and the light direction.
    float dif = clamp(dot(normal, lightDirection), 0.3, 1.);

    // Multiply the diffuse reflection value by an orange color and add a bit
    // of the background color to the sphere to blend it more with the background.
    col = dif * co.yzw + backgroundColor * .2;
  }

  // Output to screen
  fragColor = vec4(col, 1.0);
}

We use co.x to read the "signed distance" and co.yzw to read the color. This approach lets us pack several values into a vec4, much like storing them in an array in other languages. GLSL does support arrays, but they're not as flexible as in a language like JavaScript: you have to know how many values the array holds, and every value must have the same type.
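
For example, here is a minimal sketch of GLSL array syntax (illustrative only; the function name closestOfThree is made up for this example and isn't part of the tutorial code):

float closestOfThree() {
  float d[3] = float[3](1.0, 2.5, 0.25); // length is fixed at compile time; every element is a float
  float m = d[0];
  for (int i = 1; i < 3; i++) {
    m = min(m, d[i]); // min works here because all the elements share one type
  }
  return m; // 0.25
}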

If storing the distance and the color together in a vec4 doesn't feel like a good approach to you, you can use structs instead. Structs are another way to organize your GLSL code, and their syntax is similar to C++. If you're familiar with C++ or JavaScript, you can think of a struct as a blend of classes and objects. Let's see what that means.

A struct can hold properties. Let's declare a Surface struct:

struct Surface {
  float signedDistance;
  vec3 color;
};

Here we create a function whose return type is Surface, and inside it we construct a new instance of the struct:

// This function's return value is of type "Surface"
Surface sdSphere(vec3 p, float r, vec3 offset, vec3 col)
{
  float d = length(p - offset) - r;
  return Surface(d, col); // We're initializing a new "Surface" struct here and then returning it
}

We access the struct's fields using dot notation:

Surface minWithColor(Surface obj1, Surface obj2) {
  if (obj2.sd < obj1.sd) return obj2; // The sd component of the struct holds the "signed distance" value
  return obj1;
}

With this new knowledge about structs, we can rewrite our code:

const int MAX_MARCHING_STEPS = 255;
const float MIN_DIST = 0.0;
const float MAX_DIST = 100.0;
const float PRECISION = 0.001;

struct Surface {
    float sd; // signed distance value
    vec3 col; // color
};

Surface sdSphere(vec3 p, float r, vec3 offset, vec3 col)
{
  float d = length(p - offset) - r;
  return Surface(d, col);
}

Surface sdFloor(vec3 p, vec3 col) {
  float d = p.y + 1.;
  return Surface(d, col);
}

Surface minWithColor(Surface obj1, Surface obj2) {
  if (obj2.sd < obj1.sd) return obj2; // The sd component of the struct holds the "signed distance" value
  return obj1;
}

Surface sdScene(vec3 p) {
  Surface sphereLeft = sdSphere(p, 1., vec3(-2.5, 0, -2), vec3(0, .8, .8));
  Surface sphereRight = sdSphere(p, 1., vec3(2.5, 0, -2), vec3(1, 0.58, 0.29));
  Surface co = minWithColor(sphereLeft, sphereRight); // co = closest object containing "signed distance" and color
  co = minWithColor(co, sdFloor(p, vec3(0, 1, 0)));
  return co;
}

Surface rayMarch(vec3 ro, vec3 rd, float start, float end) {
  float depth = start;
  Surface co; // closest object

  for (int i = 0; i < MAX_MARCHING_STEPS; i++) {
    vec3 p = ro + depth * rd;
    co = sdScene(p);
    depth += co.sd;
    if (co.sd < PRECISION || depth > end) break;
  }
  
  co.sd = depth;
  
  return co;
}

vec3 calcNormal(in vec3 p) {
    vec2 e = vec2(1.0, -1.0) * 0.0005; // epsilon
    return normalize(
      e.xyy * sdScene(p + e.xyy).sd +
      e.yyx * sdScene(p + e.yyx).sd +
      e.yxy * sdScene(p + e.yxy).sd +
      e.xxx * sdScene(p + e.xxx).sd);
}

void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
  vec2 uv = (fragCoord-.5*iResolution.xy)/iResolution.y;
  vec3 backgroundColor = vec3(0.835, 1, 1);

  vec3 col = vec3(0);
  vec3 ro = vec3(0, 0, 3); // ray origin that represents camera position
  vec3 rd = normalize(vec3(uv, -1)); // ray direction

  Surface co = rayMarch(ro, rd, MIN_DIST, MAX_DIST); // closest object

  if (co.sd > MAX_DIST) {
    col = backgroundColor; // ray didn't hit anything
  } else {
    vec3 p = ro + rd * co.sd; // point on sphere or floor we discovered from ray marching
    vec3 normal = calcNormal(p);
    vec3 lightPosition = vec3(2, 2, 7);
    vec3 lightDirection = normalize(lightPosition - p);

    // Calculate diffuse reflection by taking the dot product of 
    // the normal and the light direction.
    float dif = clamp(dot(normal, lightDirection), 0.3, 1.);

    // Multiply the diffuse reflection value by an orange color and add a bit
    // of the background color to the sphere to blend it more with the background.
    col = dif * co.col + backgroundColor * .2;
  }

  // Output to screen
  fragColor = vec4(col, 1.0);
}

This code produces the same image as the vec4 version we wrote earlier. In my opinion, using a struct is the easier approach and keeps the code cleaner. We're also not limited to the four slots of a vec4; organize the data however you like.
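
For example, here is a hypothetical sketch (an assumption for illustration, not code from the original tutorial) that adds an extra field to the Surface struct, something a single vec4 could not hold:

struct Surface {
  float sd;  // signed distance
  vec3 col;  // diffuse color
  int id;    // hypothetical object id, e.g. for choosing a material later
};

Surface sdSphere(vec3 p, float r, vec3 offset, vec3 col, int id)
{
  float d = length(p - offset) - r;
  return Surface(d, col, id); // struct constructors take the fields in declaration order
}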

Drawing a Checkered Floor

If you want the floor to have a checkered tile pattern, adjust the floor color like this:

Surface sdScene(vec3 p) {
  Surface sphereLeft = sdSphere(p, 1., vec3(-2.5, 0, -2), vec3(0, .8, .8));
  Surface sphereRight = sdSphere(p, 1., vec3(2.5, 0, -2), vec3(1, 0.58, 0.29));
  Surface co = minWithColor(sphereLeft, sphereRight);

  vec3 floorColor = vec3(1. + 0.7*mod(floor(p.x) + floor(p.z), 2.0));
  co = minWithColor(co, sdFloor(p, floorColor));
  return co;
}

Drawing tiles on the floor helps viewers judge depth in the 3D scene. The mod function shows up frequently when you want to create repeating patterns or tile a surface.
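
As a rough sketch of why this produces a checkerboard (the helper name checker below is just for illustration), mod(floor(p.x) + floor(p.z), 2.0) flips between 0 and 1 every time either coordinate crosses an integer boundary:

// Returns 0. or 1., alternating every 1x1 tile on the xz-plane.
// p.x = 0.3,  p.z = 0.7 -> floor sum = 0  -> mod(0., 2.)  = 0.
// p.x = 1.3,  p.z = 0.7 -> floor sum = 1  -> mod(1., 2.)  = 1.
// p.x = -0.5, p.z = 0.2 -> floor sum = -1 -> mod(-1., 2.) = 1. (GLSL mod stays non-negative for a positive modulus)
float checker(vec3 p) {
  return mod(floor(p.x) + floor(p.z), 2.0);
}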

Conclusion

In this tutorial, we learned how to draw multiple objects in a 3D scene and give each of them its own color. We covered two techniques for coloring objects in the scene, but there are plenty of other approaches out there! Using a struct helps keep the code organized.

Resources

Ray Marching with Unique Colors
Khronos: Arrays
Khronos: Structs
Khronos: mod
