【Pixel Shader】SDF Modeling and the Raymarching Algorithm

References

Ray Marching and Signed Distance Functions
IQ's blog: Distance Functions
ShaderToy Combination SDF
GLSL ES Language: Vector and Matrix Constructors

SDFs (Signed Distance Functions) describe geometry with distance functions. Combined with raymarching, they let a pixel shader alone render what looks like modeled geometry, with no actual mesh input; this is the technique behind most of the modeling-style works on ShaderToy. For the basics of SDFs and raymarching, see the first reference above.
This post implements a basic SDF + raymarching setup in a Unity shader.
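
The core of raymarching, condensed from the shortestDistanceToSurface function in the full listing below: starting at the eye, repeatedly evaluate the scene SDF at the current point and step forward by exactly that distance, until the ray either gets within a small epsilon of the surface (a hit) or exceeds the maximum march distance (a miss). A minimal sketch in Cg/HLSL (the function name and constants here are placeholders):

float march(float3 eye, float3 dir) { // dir must be normalized
    float depth = 0.0;
    for (int i = 0; i < 255; i++) {
        float dist = sdScene(eye + depth * dir); // distance to the nearest surface
        if (dist < 0.0001) return depth;         // close enough: count it as a hit
        depth += dist;                           // safe to advance this far without passing through anything
        if (depth >= 100.0) break;               // marched past the far limit
    }
    return 100.0;                                // miss: return the far limit
}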

Result

(demo: a rotating rounded box smoothly blended with a sphere bobbing up and down)

Pitfalls

The biggest pitfall I hit was computing the view2world matrix. The reference code is all written in GLSL, and GLSL's matrix constructors fill in column-major order, i.e. when you write the following in GLSL:

mat4 m = mat4(1.0,   2.0,    3.0,    4.0,
              5.0,   6.0,    7.0,    8.0,
              9.0,   10.0,  11.0,    12.0,
              13.0,  14.0,  15.0,    16.0);

the matrix you get is:

 1   5   9  13
 2   6  10  14
 3   7  11  15
 4   8  12  16

Similarly, the following code:

mat4 m = mat4(v1, v2, v3, v4); // v1, v2, v3, v4 are all of type vec4

constructs a 4x4 matrix m whose columns, from left to right, are v1, v2, v3, v4.

When porting to a Unity shader, however, ShaderLab uses Cg/HLSL, whose matrix constructors are row-major. If you miss this detail and use the construction order from the GLSL code directly, as in the following:

fixed3x3 viewMatrix(float3 eye, float3 center, float3 up) {
    fixed3 f = normalize(center - eye);
    fixed3 s = normalize(cross(f, up));
    fixed3 u = cross(s, f);
    return fixed3x3 (s, u, -f);
}

then what you get is the transpose of the intended matrix.

The correct version is therefore:

fixed3x3 viewMatrix(float3 eye, float3 center, float3 up) {
	// Based on gluLookAt man page
	fixed3 f = normalize(center - eye);
	fixed3 s = normalize(cross(f, up));
	fixed3 u = cross(s, f);
	return fixed3x3(s.x, u.x, -f.x, s.y, u.y, -f.y, s.z, u.z, -f.z);
}

The same issue needs attention when constructing the rotation matrices around the X, Y, and Z axes.
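
An equivalent way to write the fix (not from the reference code, just a sketch of an alternative) is to keep the GLSL-style vector constructor and transpose the result, since Cg/HLSL lays the three vectors out as rows exactly where GLSL would have placed them as columns:

fixed3x3 viewMatrix(float3 eye, float3 center, float3 up) {
    fixed3 f = normalize(center - eye);
    fixed3 s = normalize(cross(f, up));
    fixed3 u = cross(s, f);
    // fixed3x3(s, u, -f) places s, u, -f in the rows; transposing moves them
    // into the columns, matching GLSL's mat3(s, u, -f).
    return transpose(fixed3x3(s, u, -f));
}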

Unity Shader Code

Shader "Custom/SDFTest"
{
	Properties
	{

    }

	SubShader{
		Pass {
			CGPROGRAM

			#pragma vertex vert
			#pragma fragment frag

			#include "UnityCG.cginc"

			struct v2f {
				float4 pos : SV_POSITION;
				float4 scrPos : TEXCOORD0;
			};

			v2f vert(appdata_full v) {
				v2f o;
				o.pos = UnityObjectToClipPos(v.vertex);
				o.scrPos = ComputeScreenPos(o.pos);

				return o;
			}

			// Constants
			#define MAX_MARCHING_STEPS 255 // maximum number of raymarching steps
			#define MIN_DIST 0.0           // distance at which marching starts
			#define MAX_DIST 100.0         // farthest distance to march before giving up
			#define EPSILON 0.0001         // small threshold for hits and gradient estimation

			float opUnion(float d1, float d2)
			{
				return min(d1, d2);
			}

			float opSubtraction(float d1, float d2)
			{
				return max(-d1, d2);
			}

			float opIntersection(float d1, float d2)
			{
				return max(d1, d2);
			}

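			// Smooth boolean operators: polynomial smooth min/max that blend the two
			// distance fields within a band of width k instead of taking a hard min/max.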
			float opSmoothUnion(float d1, float d2, float k)
			{
				float h = max(k - abs(d1 - d2), 0.0);
				return min(d1, d2) - h * h * 0.25 / k;
				//float h = clamp( 0.5 + 0.5*(d2-d1)/k, 0.0, 1.0 );
				//return mix( d2, d1, h ) - k*h*(1.0-h);
			}

			float opSmoothSubtraction(float d1, float d2, float k)
			{
				float h = max(k - abs(-d1 - d2), 0.0);
				return max(-d1, d2) + h * h * 0.25 / k;
				//float h = clamp( 0.5 - 0.5*(d2+d1)/k, 0.0, 1.0 );
				//return mix( d2, -d1, h ) + k*h*(1.0-h);
			}

			float opSmoothIntersection(float d1, float d2, float k)
			{
				float h = max(k - abs(d1 - d2), 0.0);
				return max(d1, d2) + h * h * 0.25 / k;
				//float h = clamp( 0.5 - 0.5*(d2-d1)/k, 0.0, 1.0 );
				//return mix( d2, d1, h ) + k*h*(1.0-h);
			}

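			// Note: the three rotation matrices below keep the GLSL constructor order, so in
			// Cg they are the transpose of the standard rotation (i.e. a rotation by -theta);
			// in this demo that only flips the direction of the spin.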
			/**
			 * Rotation matrix around the X axis.
			 */
			fixed3x3 rotateX(float theta) {
				fixed c = cos(theta);
				fixed s = sin(theta);
				return fixed3x3(
					fixed3(1, 0, 0),
					fixed3(0, c, s),
					fixed3(0, -s, c)
				);
			}

			/**
			 * Rotation matrix around the Y axis.
			 */
			fixed3x3 rotateY(float theta) {
				fixed c = cos(theta);
				fixed s = sin(theta);
				return fixed3x3(
					fixed3(c, 0, -s),
					fixed3(0, 1, 0),
					fixed3(s, 0, c)
				);
			}

			/**
			 * Rotation matrix around the Z axis.
			 */
			fixed3x3 rotateZ(float theta) {
				fixed c = cos(theta);
				fixed s = sin(theta);
				return fixed3x3(
					fixed3(c, s, 0),
					fixed3(-s, c, 0),
					fixed3(0, 0, 1)
				);
			}

			// Rounded box SDF
			float sdRoundBox(float3 p, float3 b, float r)
			{
				float3 q = abs(p) - b;
				return length(max(q, 0.0)) + min(max(q.x, max(q.y, q.z)), 0.0) - r;
			}

			// Sphere SDF
			float sdSphere(float3 p, float r)
			{
				return length(p) - r;
			}

			// Scene SDF
			float sdScene(float3 p) {
				p /= 0.8; // uniformly scale the whole scene by 0.8
				p = mul(rotateY(_Time[1] * 0.5), p); // rotate the whole scene around the Y axis
				float d1 = sdRoundBox(p, float3(2.0, 1.0, 2.0), 0.1);
				float d2 = sdSphere(p - float3(0.0, 1.0 + 1.0 * sin(_Time[1]), 0.0), 1.0); // sphere bobbing up and down
				return opSmoothUnion(d1, d2, 0.5) * 0.8; // smooth union; * 0.8 compensates for the uniform scale in the first step
			}
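			// Note on the scaling trick used in sdScene: for a uniform scale s, the scaled
			// field is sdf(p / s) * s; evaluating at p / s alone would overestimate distances
			// by a factor of 1 / s, which is why the result is multiplied back by 0.8.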

			/**
			 * Return the shortest distance from the eyepoint to the scene surface along
			 * the marching direction. If no part of the surface is found between start and end,
			 * return end.
			 *
			 * eye: the eye point, acting as the origin of the ray
			 * marchingDirection: the normalized direction to march in
			 * start: the starting distance away from the eye
			 * end: the max distance away from the eye to march before giving up
			 */
			float shortestDistanceToSurface(float3 eye, float3 marchingDirection, float start, float end) {
				float depth = start;
				for (int i = 0; i < MAX_MARCHING_STEPS; i++) {
					float dist = sdScene(eye + depth * marchingDirection);
					if (dist < EPSILON) {
						return depth;
					}
					depth += dist;
					if (depth >= end) {
						return end;
					}
				}
				return end;
			}

			/**
			 * Return the normalized direction to march in from the eye point for a single pixel.
			 *
			 * fieldOfView: vertical field of view in degrees
			 * size: resolution of the output image
			 * fragCoord: the x,y coordinate of the pixel in the output image
			 */
			fixed3 rayDirection(float fieldOfView, float2 size, float2 fragCoord) {
				float2 xy = fragCoord - size / 2.0;
				float z = size.y / tan(radians(fieldOfView) / 2.0);
				return normalize(float3(xy, -z));
			}

			/**
			 * Return a transform matrix that will transform a ray from view space
			 * to world coordinates, given the eye point, the camera target, and an up vector.
			 *
			 * This assumes that the center of the camera is aligned with the negative z axis in
			 * view space when calculating the ray marching direction. See rayDirection.
			 */
			fixed3x3 viewMatrix(float3 eye, float3 center, float3 up) {
				// Based on gluLookAt man page
				fixed3 f = normalize(center - eye);
				fixed3 s = normalize(cross(f, up));
				fixed3 u = cross(s, f);
				return fixed3x3(s.x, u.x, -f.x, s.y, u.y, -f.y, s.z, u.z, -f.z);
			}

			/**
			 * Using the gradient of the SDF, estimate the normal on the surface at point p.
			 */
			fixed3 estimateNormal(float3 p) {
				return normalize(float3(
					sdScene(float3(p.x + EPSILON, p.y, p.z)) - sdScene(float3(p.x - EPSILON, p.y, p.z)),
					sdScene(float3(p.x, p.y + EPSILON, p.z)) - sdScene(float3(p.x, p.y - EPSILON, p.z)),
					sdScene(float3(p.x, p.y, p.z + EPSILON)) - sdScene(float3(p.x, p.y, p.z - EPSILON))
					));
			}

			/**
			 * Lighting contribution of a single point light source via Phong illumination.
			 *
			 * The vec3 returned is the RGB color of the light's contribution.
			 *
			 * k_a: Ambient color
			 * k_d: Diffuse color
			 * k_s: Specular color
			 * alpha: Shininess coefficient
			 * p: position of point being lit
			 * eye: the position of the camera
			 * lightPos: the position of the light
			 * lightIntensity: color/intensity of the light
			 *
			 * See https://en.wikipedia.org/wiki/Phong_reflection_model#Description
			 */
			fixed3 phongContribForLight(fixed3 k_d, fixed3 k_s, float alpha, float3 p, float3 eye,
				float3 lightPos, fixed3 lightIntensity) {
				fixed3 N = estimateNormal(p);
				fixed3 L = normalize(lightPos - p);
				fixed3 V = normalize(eye - p);
				fixed3 R = normalize(reflect(-L, N));

				fixed dotLN = dot(L, N);
				fixed dotRV = dot(R, V);

				if (dotLN < 0.0) {
					// Light not visible from this point on the surface
					return fixed3(0.0, 0.0, 0.0);
				}

				if (dotRV < 0.0) {
					// Light reflection in opposite direction as viewer, apply only diffuse
					// component
					return lightIntensity * (k_d * dotLN);
				}
				return lightIntensity * (k_d * dotLN + k_s * pow(dotRV, alpha));
			}

			/**
			 * Lighting via Phong illumination.
			 *
			 * The vec3 returned is the RGB color of that point after lighting is applied.
			 * k_a: Ambient color
			 * k_d: Diffuse color
			 * k_s: Specular color
			 * alpha: Shininess coefficient
			 * p: position of point being lit
			 * eye: the position of the camera
			 *
			 * See https://en.wikipedia.org/wiki/Phong_reflection_model#Description
			 */
			float3 phongIllumination(fixed3 k_a, fixed3 k_d, fixed3 k_s, float alpha, float3 p, float3 eye) {
				const fixed3 ambientLight = 0.5 * fixed3(1.0, 1.0, 1.0);
				fixed3 color = ambientLight * k_a;

				float3 lightPos = float3(4.0, 5.0, 4.0);
				fixed3 lightIntensity = fixed3(0.4, 0.4, 0.4);

				color += phongContribForLight(k_d, k_s, alpha, p, eye,
					lightPos,
					lightIntensity);
				return color;
			}


			fixed4 frag(v2f i) : SV_Target{
				float2 scr_coords = (i.scrPos.xy / i.scrPos.w) * _ScreenParams.xy; // the fragment's pixel coordinates on screen
				
				// Camera parameters
				float3 eyePos = float3(5.0, 7.0, 6.0);
				float3 centerPos = float3(0.0, 0.0, 0.0);
				float3 up = float3(0.0, 1.0, 0.0);
				float fov = 60.0;

				fixed3 dir = rayDirection(fov, _ScreenParams.xy, scr_coords); // raymarching direction for this pixel (view space)
				fixed3x3 view2world = viewMatrix(eyePos, centerPos, up); // view-to-world transform for direction vectors
				fixed3 worldDir = mul(view2world, dir); // transform the marching direction from view space to world space
				float dist = shortestDistanceToSurface(eyePos, worldDir, MIN_DIST, MAX_DIST);

				if (dist > MAX_DIST - EPSILON) {
					// Didn't hit anything
					return fixed4(0.0, 0.0, 0.0, 1.0);
				}

				float3 p = eyePos + dist * worldDir; // the point where the ray hits the scene surface

				// Phong shading parameters
				fixed3 K_a = fixed3(0.2, 0.2, 0.2); // ambient color
				fixed3 K_d = fixed3(0.7, 0.2, 0.2); // diffuse color
				fixed3 K_s = fixed3(1.0, 1.0, 1.0); // specular color
				float shininess = 10.0;

				fixed3 color = phongIllumination(K_a, K_d, K_s, shininess, p, eyePos);
				return fixed4(color, 1.0);
			}
			ENDCG
		}
	}
}