Vulkan - Ray Tracing (Compute Shader Implementation)

Ray Tracing

This post implements a simple GPU ray tracer with shadows and reflections using a compute shader. Vulkan's recently released ray tracing extension VK_KHR_ray_tracing and the earlier VK_NV_ray_tracing extension are not covered here; they may be explored in detail in a future post. The compute shader ray tracing code in this post follows an article by the graphics programmer Inigo Quilez.
This part focuses on what has to be added on top of the usual setup for the compute shader, and on the ray tracing logic inside the compute shader itself. Basics such as the UniformBuffer, Pipeline and CommandBuffer creation are not repeated here; see my earlier posts if you want those details. The image below shows the result with a single light source and 10 reflection bounces in the scene:
[Figure: ray-traced scene with a single light source and 10 reflection bounces]

1. Creating the Compute Pipeline and Ray Tracing Buffers

First, the structures commonly used for the compute shader part:

	vks::Texture textureComputeTarget; // Storage image that the compute shader writes to and that the fragment shader samples for display

	// Resources for the graphics part
	struct {
		VkDescriptorSetLayout descriptorSetLayout;	// Binding layout for the shaders that display the ray-traced image
		VkDescriptorSet descriptorSetPreCompute;	// Image display shader bindings before the compute shader has written the image
		VkDescriptorSet descriptorSet;				// Image display shader bindings after compute shader image manipulation
		VkPipeline pipeline;						// Pipeline that displays the ray-traced image
		VkPipelineLayout pipelineLayout;			// Layout of the graphics pipeline
	} graphics;

	// Resources for the compute part
	struct {
		struct {
			vks::Buffer spheres;					// (Shader) storage buffer containing the scene's spheres
			vks::Buffer planes;						// (Shader) storage buffer containing the scene's planes
		} storageBuffers;
		vks::Buffer uniformBuffer;					// Uniform buffer containing the scene data
		VkQueue queue;								// Separate queue for compute commands (its queue family may differ from the one used for graphics)
		VkCommandPool commandPool;					// Separate command pool (the queue family may differ from the one used for graphics)
		VkCommandBuffer commandBuffer;				// Command buffer storing the dispatch commands and barriers
		VkFence fence;								// Synchronization fence to avoid rewriting the compute command buffer while it is still in use
		VkDescriptorSetLayout descriptorSetLayout;	// Compute shader binding layout
		VkDescriptorSet descriptorSet;				// Compute shader bindings
		VkPipelineLayout pipelineLayout;			// Layout of the compute pipeline
		VkPipeline pipeline;						// Compute pipeline that generates the ray-traced image
		struct UBOCompute {							// Compute shader uniform block object
			glm::vec3 lightPos;
			float aspectRatio;						// Aspect ratio of the viewport
			glm::vec4 fogColor = glm::vec4(0.0f);
			struct {
				glm::vec3 pos = glm::vec3(0.0f, 0.0f, 4.0f);
				glm::vec3 lookat = glm::vec3(0.0f, 0.5f, 0.0f);
				float fov = 10.0f;
			} camera;
		} ubo;
	} compute;

	// SSBO sphere declaration
	struct Sphere {									// The shader uses std140 layout, so members are aligned to vec4 boundaries (vec3 + float pairs plus explicit padding)
		glm::vec3 pos;
		float radius;
		glm::vec3 diffuse;
		float specular;
		uint32_t id;								// Id used to identify the sphere during ray tracing
		glm::ivec3 _pad;
	};

	// SSBO plane declaration
	struct Plane {
		glm::vec3 normal;
		float distance;
		glm::vec3 diffuse;
		float specular;
		uint32_t id;
		glm::ivec3 _pad;
	};

First we add a prepareStorageBuffers function that creates and fills the SSBOs (shader storage buffers) used by the compute shader.

	// Set up and fill the compute shader storage buffers containing the ray tracing scene primitives
	void prepareStorageBuffers()
	{
		// Sphere definitions
		std::vector<Sphere> spheres;
		spheres.push_back(newSphere(glm::vec3(1.75f, -0.5f, 0.0f), 1.0f, glm::vec3(0.0f, 1.0f, 0.0f), 32.0f));
		spheres.push_back(newSphere(glm::vec3(0.0f, 1.0f, -0.5f), 1.0f, glm::vec3(1.0f, 0.0f, 0.0f), 32.0f));
		spheres.push_back(newSphere(glm::vec3(-1.75f, -0.75f, -0.5f), 1.25f, glm::vec3(0.0f, 0.0f, 1.0f), 32.0f));
		VkDeviceSize storageBufferSize = spheres.size() * sizeof(Sphere);

		// Staging buffer
		vks::Buffer stagingBuffer;

		vulkanDevice->createBuffer(
			VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
			VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
			&stagingBuffer,
			storageBufferSize,
			spheres.data());

		vulkanDevice->createBuffer(
			// The SSBO will be used as a storage buffer for the compute pipeline and as a vertex buffer in the graphics pipeline
			VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
			VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
			&compute.storageBuffers.spheres,
			storageBufferSize);

		// Copy the sphere data from the staging buffer to the device-local storage buffer
		VkCommandBuffer copyCmd = vulkanDevice->createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true);
		VkBufferCopy copyRegion = {};
		copyRegion.size = storageBufferSize;
		vkCmdCopyBuffer(copyCmd, stagingBuffer.buffer, compute.storageBuffers.spheres.buffer, 1, &copyRegion);
		vulkanDevice->flushCommandBuffer(copyCmd, queue, true);

		stagingBuffer.destroy();

		// Plane definitions
		std::vector<Plane> planes;
		const float roomDim = 4.0f;
		planes.push_back(newPlane(glm::vec3(0.0f, 1.0f, 0.0f), roomDim, glm::vec3(0.7647f), 32.0f));
		planes.push_back(newPlane(glm::vec3(0.0f, -1.0f, 0.0f), roomDim, glm::vec3(0.7647f), 32.0f));
		planes.push_back(newPlane(glm::vec3(0.0f, 0.0f, 1.0f), roomDim, glm::vec3(0.7647f), 32.0f));
		planes.push_back(newPlane(glm::vec3(0.0f, 0.0f, -1.0f), roomDim, glm::vec3(0.7647f), 32.0f));
		planes.push_back(newPlane(glm::vec3(-1.0f, 0.0f, 0.0f), roomDim, glm::vec3(0.7647f), 32.0f));
		planes.push_back(newPlane(glm::vec3(1.0f, 0.0f, 0.0f), roomDim, glm::vec3(0.7647f), 32.0f));
		storageBufferSize = planes.size() * sizeof(Plane);

		// Staging buffer
		vulkanDevice->createBuffer(
			VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
			VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
			&stagingBuffer,
			storageBufferSize,
			planes.data());

		vulkanDevice->createBuffer(
			// The SSBO will be used as a storage buffer for the compute pipeline and as a vertex buffer in the graphics pipeline
			VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
			VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
			&compute.storageBuffers.planes,
			storageBufferSize);

		// Copy the plane data from the staging buffer to the device-local storage buffer
		copyCmd = vulkanDevice->createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true);
		copyRegion.size = storageBufferSize;
		vkCmdCopyBuffer(copyCmd, stagingBuffer.buffer, compute.storageBuffers.planes.buffer, 1, &copyRegion);
		vulkanDevice->flushCommandBuffer(copyCmd, queue, true);

		stagingBuffer.destroy();
	}
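
The newSphere and newPlane helpers called above are not listed in this post; they simply fill the SSBO structs defined earlier and hand out a running id that the shader later uses to identify the hit object. A minimal sketch (the currentId member counter is an assumption, following the usual pattern for this kind of sample):

	uint32_t currentId = 0;	// Running id handed out to every primitive so the shader can identify it

	Sphere newSphere(glm::vec3 pos, float radius, glm::vec3 diffuse, float specular)
	{
		Sphere sphere;
		sphere.id = currentId++;
		sphere.pos = pos;
		sphere.radius = radius;
		sphere.diffuse = diffuse;
		sphere.specular = specular;
		return sphere;
	}

	Plane newPlane(glm::vec3 normal, float distance, glm::vec3 diffuse, float specular)
	{
		Plane plane;
		plane.id = currentId++;
		plane.normal = normal;
		plane.distance = distance;
		plane.diffuse = diffuse;
		plane.specular = specular;
		return plane;
	}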

The graphics part of this sample only needs to display the image that the compute shader has already written, so the graphics vertex and fragment shaders are very simple:

// Vertex shader
#version 450

layout (location = 0) out vec2 outUV;

out gl_PerVertex 
{
	vec4 gl_Position;
};

void main() 
{
	// Fullscreen triangle trick: the three vertex indices generate UVs (0,0), (2,0) and (0,2), covering the whole screen without any vertex buffer
	outUV = vec2((gl_VertexIndex << 1) & 2, gl_VertexIndex & 2);
	gl_Position = vec4(outUV * 2.0f - 1.0f, 0.0f, 1.0f);
}

// Fragment shader
#version 450

layout (binding = 0) uniform sampler2D samplerColor;

layout (location = 0) in vec2 inUV;

layout (location = 0) out vec4 outFragColor;

void main() 
{
  outFragColor = texture(samplerColor, vec2(inUV.s, 1.0 - inUV.t));
}
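
Since the vertex shader derives everything from gl_VertexIndex, the graphics pass needs no vertex or index buffers at all. The post does not show the graphics command buffer, but recording this display pass typically boils down to a three-vertex draw along the lines of the following sketch (drawCmdBuffers[i] is an assumed name for the per-swapchain-image command buffer):

	// Sketch: recording the fullscreen display pass inside the render pass
	vkCmdBindDescriptorSets(drawCmdBuffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS, graphics.pipelineLayout, 0, 1, &graphics.descriptorSet, 0, nullptr);
	vkCmdBindPipeline(drawCmdBuffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS, graphics.pipeline);
	// Three vertices, one instance: positions and UVs come from gl_VertexIndex
	vkCmdDraw(drawCmdBuffers[i], 3, 1, 0, 0);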

Next we prepare a texture target that the compute shader writes its result into:

	// Prepare a texture target that is used to store the compute shader's output
	void prepareTextureTarget(vks::Texture *tex, uint32_t width, uint32_t height, VkFormat format)
	{
		// Get device properties for the requested texture format
		VkFormatProperties formatProperties;
		vkGetPhysicalDeviceFormatProperties(physicalDevice, format, &formatProperties);
		// Check if the requested image format supports image storage operations
		assert(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT);

		// Prepare the target texture
		tex->width = width;
		tex->height = height;

		VkImageCreateInfo imageCreateInfo = vks::initializers::imageCreateInfo();
		imageCreateInfo.imageType = VK_IMAGE_TYPE_2D;
		imageCreateInfo.format = format;
		imageCreateInfo.extent = { width, height, 1 };
		imageCreateInfo.mipLevels = 1;
		imageCreateInfo.arrayLayers = 1;
		imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
		imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
		imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
		// The image will be sampled in the fragment shader and used as a storage target in the compute shader
		imageCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT;
		imageCreateInfo.flags = 0;

		VkMemoryAllocateInfo memAllocInfo = vks::initializers::memoryAllocateInfo();
		VkMemoryRequirements memReqs;

		VK_CHECK_RESULT(vkCreateImage(device, &imageCreateInfo, nullptr, &tex->image));
		vkGetImageMemoryRequirements(device, tex->image, &memReqs);
		memAllocInfo.allocationSize = memReqs.size;
		memAllocInfo.memoryTypeIndex = vulkanDevice->getMemoryType(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
		VK_CHECK_RESULT(vkAllocateMemory(device, &memAllocInfo, nullptr, &tex->deviceMemory));
		VK_CHECK_RESULT(vkBindImageMemory(device, tex->image, tex->deviceMemory, 0));

		VkCommandBuffer layoutCmd = vulkanDevice->createCommandBuffer(VK_COMMAND_BUFFER_LEVEL_PRIMARY, true);

		tex->imageLayout = VK_IMAGE_LAYOUT_GENERAL;
		vks::tools::setImageLayout(
			layoutCmd, 
			tex->image,
			VK_IMAGE_ASPECT_COLOR_BIT, 
			VK_IMAGE_LAYOUT_UNDEFINED,
			tex->imageLayout);

		vulkanDevice->flushCommandBuffer(layoutCmd, queue, true);

		// Create the sampler
		VkSamplerCreateInfo sampler = vks::initializers::samplerCreateInfo();
		sampler.magFilter = VK_FILTER_LINEAR;
		sampler.minFilter = VK_FILTER_LINEAR;
		sampler.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
		sampler.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
		sampler.addressModeV = sampler.addressModeU;
		sampler.addressModeW = sampler.addressModeU;
		sampler.mipLodBias = 0.0f;
		sampler.maxAnisotropy = 1.0f;
		sampler.compareOp = VK_COMPARE_OP_NEVER;
		sampler.minLod = 0.0f;
		sampler.maxLod = 0.0f;
		sampler.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE;
		VK_CHECK_RESULT(vkCreateSampler(device, &sampler, nullptr, &tex->sampler));

		// Create the image view
		VkImageViewCreateInfo view = vks::initializers::imageViewCreateInfo();
		view.viewType = VK_IMAGE_VIEW_TYPE_2D;
		view.format = format;
		view.components = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A };
		view.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
		view.image = tex->image;
		VK_CHECK_RESULT(vkCreateImageView(device, &view, nullptr, &tex->view));

		// Initialize the descriptor for later use
		tex->descriptor.imageLayout = tex->imageLayout;
		tex->descriptor.imageView = tex->view;
		tex->descriptor.sampler = tex->sampler;
		tex->device = vulkanDevice;
	}

Next, we create the compute pipeline that generates the ray-traced image:

	// Prepare the compute pipeline that generates the ray-traced image
	void prepareCompute()
	{
		// Get a compute-capable device queue
		// The createLogicalDevice function looks for a compute-capable queue and prefers queue families that only support compute
		// Depending on the implementation, this may result in different queue family indices for graphics and compute,
		// which requires proper synchronization (see the memory barriers in buildComputeCommandBuffer)
		VkDeviceQueueCreateInfo queueCreateInfo = {};
		queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
		queueCreateInfo.pNext = NULL;
		queueCreateInfo.queueFamilyIndex = vulkanDevice->queueFamilyIndices.compute;
		queueCreateInfo.queueCount = 1;
		vkGetDeviceQueue(device, vulkanDevice->queueFamilyIndices.compute, 0, &compute.queue);

		std::vector<VkDescriptorSetLayoutBinding> setLayoutBindings = {
			// Binding 0: Storage image (ray-traced output)
			vks::initializers::descriptorSetLayoutBinding(
				VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
				VK_SHADER_STAGE_COMPUTE_BIT,
				0),
			// Binding 1: Uniform buffer block
			vks::initializers::descriptorSetLayoutBinding(
				VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
				VK_SHADER_STAGE_COMPUTE_BIT,
				1),
			// Binding 2: Shader storage buffer for the spheres
			vks::initializers::descriptorSetLayoutBinding(
				VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
				VK_SHADER_STAGE_COMPUTE_BIT,
				2),
			// Binding 3: Shader storage buffer for the planes
			vks::initializers::descriptorSetLayoutBinding(
				VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
				VK_SHADER_STAGE_COMPUTE_BIT,
				3)
		};

		VkDescriptorSetLayoutCreateInfo descriptorLayout =
			vks::initializers::descriptorSetLayoutCreateInfo(
				setLayoutBindings.data(),
				setLayoutBindings.size());

		VK_CHECK_RESULT(vkCreateDescriptorSetLayout(device, &descriptorLayout, nullptr,	&compute.descriptorSetLayout));

		VkPipelineLayoutCreateInfo pPipelineLayoutCreateInfo =
			vks::initializers::pipelineLayoutCreateInfo(
				&compute.descriptorSetLayout,
				1);

		VK_CHECK_RESULT(vkCreatePipelineLayout(device, &pPipelineLayoutCreateInfo, nullptr, &compute.pipelineLayout));

		VkDescriptorSetAllocateInfo allocInfo =
			vks::initializers::descriptorSetAllocateInfo(
				descriptorPool,
				&compute.descriptorSetLayout,
				1);

		VK_CHECK_RESULT(vkAllocateDescriptorSets(device, &allocInfo, &compute.descriptorSet));

		std::vector<VkWriteDescriptorSet> computeWriteDescriptorSets =
		{
			// Binding 0: Output storage image
			vks::initializers::writeDescriptorSet(
				compute.descriptorSet,
				VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
				0,
				&textureComputeTarget.descriptor),
			// Binding 1: Uniform buffer block
			vks::initializers::writeDescriptorSet(
				compute.descriptorSet,
				VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
				1,
				&compute.uniformBuffer.descriptor),
			// Binding 2: Shader storage buffer for the spheres
			vks::initializers::writeDescriptorSet(
				compute.descriptorSet,
				VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
				2,
				&compute.storageBuffers.spheres.descriptor),
			// Binding 3: Shader storage buffer for the planes
			vks::initializers::writeDescriptorSet(
				compute.descriptorSet,
				VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
				3,
				&compute.storageBuffers.planes.descriptor)
		};

		vkUpdateDescriptorSets(device, computeWriteDescriptorSets.size(), computeWriteDescriptorSets.data(), 0, NULL);

		// Create the compute shader pipeline
		VkComputePipelineCreateInfo computePipelineCreateInfo =
			vks::initializers::computePipelineCreateInfo(
				compute.pipelineLayout,
				0);

		computePipelineCreateInfo.stage = loadShader(getAssetPath() + "shaders/computeraytracing/raytracing.comp.spv", VK_SHADER_STAGE_COMPUTE_BIT);
		VK_CHECK_RESULT(vkCreateComputePipelines(device, pipelineCache, 1, &computePipelineCreateInfo, nullptr, &compute.pipeline));

		// Separate command pool, as the queue family used for compute may differ from the graphics queue family
		VkCommandPoolCreateInfo cmdPoolInfo = {};
		cmdPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
		cmdPoolInfo.queueFamilyIndex = vulkanDevice->queueFamilyIndices.compute;
		cmdPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
		VK_CHECK_RESULT(vkCreateCommandPool(device, &cmdPoolInfo, nullptr, &compute.commandPool));

		// Create a command buffer for compute operations
		VkCommandBufferAllocateInfo cmdBufAllocateInfo =
			vks::initializers::commandBufferAllocateInfo(
				compute.commandPool,
				VK_COMMAND_BUFFER_LEVEL_PRIMARY,
				1);

		VK_CHECK_RESULT(vkAllocateCommandBuffers(device, &cmdBufAllocateInfo, &compute.commandBuffer));

		// Fence for compute command buffer synchronization
		VkFenceCreateInfo fenceCreateInfo = vks::initializers::fenceCreateInfo(VK_FENCE_CREATE_SIGNALED_BIT);
		VK_CHECK_RESULT(vkCreateFence(device, &fenceCreateInfo, nullptr, &compute.fence));

		// Build a single command buffer containing the compute dispatch commands (similar to the image processing example in the previous post)
		buildComputeCommandBuffer();
	}
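
The buildComputeCommandBuffer function called at the end is not listed here. What it records is essentially a single dispatch, one workgroup per 16x16 pixel tile to match the local_size declared in the compute shader. A minimal sketch, assuming the framework's vks::initializers helper and omitting the queue-family ownership barriers that a full implementation inserts when the compute and graphics queue families differ:

	void buildComputeCommandBuffer()
	{
		VkCommandBufferBeginInfo cmdBufInfo = vks::initializers::commandBufferBeginInfo();
		VK_CHECK_RESULT(vkBeginCommandBuffer(compute.commandBuffer, &cmdBufInfo));

		vkCmdBindPipeline(compute.commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, compute.pipeline);
		vkCmdBindDescriptorSets(compute.commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, compute.pipelineLayout, 0, 1, &compute.descriptorSet, 0, nullptr);

		// One workgroup per 16x16 pixel tile (matches local_size_x / local_size_y in the compute shader)
		vkCmdDispatch(compute.commandBuffer, textureComputeTarget.width / 16, textureComputeTarget.height / 16, 1);

		vkEndCommandBuffer(compute.commandBuffer);
	}

Each frame this command buffer is then submitted to compute.queue, guarded by compute.fence (wait for and reset the fence, then vkQueueSubmit), so it is never re-submitted while still executing.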

With this, all the pipelines, buffers and other resources we need are in place. Finally, let's take a detailed look at the ray tracing compute shader.

2. The Ray Tracing Compute Shader

First, here is the complete ray tracing compute shader:

#version 450

layout (local_size_x = 16, local_size_y = 16) in;
layout (binding = 0, rgba8) uniform writeonly image2D resultImage;

#define EPSILON 0.0001
// Maximum ray length
#define MAXLEN 1000.0
// Shadow intensity
#define SHADOW 0.7
// Number of reflection bounces
#define RAYBOUNCES 20
// Enable reflections
#define REFLECTIONS true
// Reflection strength
#define REFLECTIONSTRENGTH 0.4
// Reflection falloff
#define REFLECTIONFALLOFF 0.6

struct Camera 
{
	vec3 pos;   
	vec3 lookat;
	float fov; 
};

layout (binding = 1) uniform UBO 
{
	vec3 lightPos;
	float aspectRatio;
	vec4 fogColor;
	Camera camera;
	mat4 rotMat;
} ubo;

struct Sphere 
{
	vec3 pos;
	float radius;
	vec3 diffuse;
	float specular;
	int id;
};

struct Plane
{
	vec3 normal;
	float distance;
	vec3 diffuse;
	float specular;
	int id;
};

layout (std140, binding = 2) buffer Spheres
{
	Sphere spheres[ ];
};

layout (std140, binding = 3) buffer Planes
{
	Plane planes[ ];
};

// Reflect a ray around a surface normal
void reflectRay(inout vec3 rayD, in vec3 normal)
{
	rayD = rayD + 2.0 * -dot(normal, rayD) * normal;
}

// Diffuse lighting
float lightDiffuse(vec3 normal, vec3 lightDir) 
{
	return clamp(dot(normal, lightDir), 0.1, 1.0);
}
// Specular lighting
float lightSpecular(vec3 normal, vec3 lightDir, float specularFactor)
{
	vec3 viewVec = normalize(ubo.camera.pos);
	vec3 halfVec = normalize(lightDir + viewVec);
	return pow(clamp(dot(normal, halfVec), 0.0, 1.0), specularFactor);
}

// Ray-sphere intersection
float sphereIntersect(in vec3 rayO, in vec3 rayD, in Sphere sphere)
{
	vec3 oc = rayO - sphere.pos;
	float b = 2.0 * dot(oc, rayD);
	float c = dot(oc, oc) - sphere.radius*sphere.radius;
	float h = b*b - 4.0*c;
	if (h < 0.0) 
	{
		return -1.0;
	}
	float t = (-b - sqrt(h)) / 2.0;

	return t;
}

vec3 sphereNormal(in vec3 pos, in Sphere sphere)
{
	return (pos - sphere.pos) / sphere.radius;
}

// Ray-plane intersection
float planeIntersect(vec3 rayO, vec3 rayD, Plane plane)
{
	float d = dot(rayD, plane.normal);

	if (d == 0.0)
		return 0.0;

	float t = -(plane.distance + dot(rayO, plane.normal)) / d;

	if (t < 0.0)
		return 0.0;

	return t;
}

// Intersect a ray against all scene objects
int intersect(in vec3 rayO, in vec3 rayD, inout float resT)
{
	int id = -1;

	for (int i = 0; i < spheres.length(); i++)
	{
		float tSphere = sphereIntersect(rayO, rayD, spheres[i]);
		if ((tSphere > EPSILON) && (tSphere < resT))
		{
			id = spheres[i].id;
			resT = tSphere;
		}
	}	

	for (int i = 0; i < planes.length(); i++)
	{
		float tplane = planeIntersect(rayO, rayD, planes[i]);
		if ((tplane > EPSILON) && (tplane < resT))
		{
			id = planes[i].id;
			resT = tplane;
		}	
	}
	
	return id;
}

// Shadow calculation
float calcShadow(in vec3 rayO, in vec3 rayD, in int objectId, inout float t)
{
	for (int i = 0; i < spheres.length(); i++)
	{
		if (spheres[i].id == objectId)
			continue;
		float tSphere = sphereIntersect(rayO, rayD, spheres[i]);
		if ((tSphere > EPSILON) && (tSphere < t))
		{
			t = tSphere;
			return SHADOW;
		}
	}		
	return 1.0;
}

vec3 fog(in float t, in vec3 color)
{
	return mix(color, ubo.fogColor.rgb, clamp(sqrt(t*t)/20.0, 0.0, 1.0));
}

vec3 renderScene(inout vec3 rayO, inout vec3 rayD, inout int id)
{
	vec3 color = vec3(0.0);
	float t = MAXLEN;

	// Get the id of the closest intersected object
	int objectID = intersect(rayO, rayD, t);
	
	if (objectID == -1)
	{
		return color;
	}
	
	vec3 pos = rayO + t * rayD;
	vec3 lightVec = normalize(ubo.lightPos - pos);				
	vec3 normal;

	// Planes
	for (int i = 0; i < planes.length(); i++)
	{
		if (objectID == planes[i].id)
		{
			normal = planes[i].normal;
			float diffuse = lightDiffuse(normal, lightVec);
			float specular = lightSpecular(normal, lightVec, planes[i].specular);
			color = diffuse * planes[i].diffuse + specular;	
		}
	}
	
	// Spheres
	for (int i = 0; i < spheres.length(); i++)
	{
		if (objectID == spheres[i].id)
		{
			normal = sphereNormal(pos, spheres[i]);	
			float diffuse = lightDiffuse(normal, lightVec);
			float specular = lightSpecular(normal, lightVec, spheres[i].specular);
			color = diffuse * spheres[i].diffuse + specular;	
		}
	}

	if (id == -1)
		return color;

	id = objectID;

	// Shadows
	t = length(ubo.lightPos - pos);
	color *= calcShadow(pos, lightVec, id, t);
	
	// Fog
	//color = fog(t, color);	
	
	// Reflect the ray for the next bounce
	reflectRay(rayD, normal);
	rayO = pos;	
	
	return color;
}

void main()
{
	// Query the dimensions of the output image
	ivec2 dim = imageSize(resultImage);
	// gl_GlobalInvocationID is the global index of the current compute invocation, i.e. the pixel this thread works on
	vec2 uv = vec2(gl_GlobalInvocationID.xy) / dim;

	vec3 rayO = ubo.camera.pos;
	vec3 rayD = normalize(vec3((-1.0 + 2.0 * uv) * vec2(ubo.aspectRatio, 1.0), -1.0));
		
	// primary ray
	int id = 0;
	vec3 finalColor = renderScene(rayO, rayD, id);
	
	// Reflections enabled?
	if (REFLECTIONS)
	{
		float reflectionStrength = REFLECTIONSTRENGTH;
		for (int i = 0; i < RAYBOUNCES; i++)
		{
			// Secondary (reflection) ray
			vec3 reflectionColor = renderScene(rayO, rayD, id);
			finalColor = (1.0 - reflectionStrength) * finalColor + reflectionStrength * mix(reflectionColor, finalColor, 1.0 - reflectionStrength);			
			reflectionStrength *= REFLECTIONFALLOFF;
		}
	}
	// Write the final color to the output texel
	imageStore(resultImage, ivec2(gl_GlobalInvocationID.xy), vec4(finalColor, 0.0));
}

First, let's look at the basic principle of ray tracing:
[Figure: ray tracing principle]
For every pixel we cast a ray, compute its intersections with the objects in the scene, and gather the colors at the intersection points to obtain that pixel's final color.
Next we look at the two basic intersection problems this requires; only elementary math is needed to follow this part:

2.1 Ray-Sphere Intersection

A point that lies both on the sphere and on the ray is an intersection point, so we write the ray in parametric form, substitute it into the sphere equation and solve the two simultaneously.
This yields a quadratic equation in the ray parameter; taking the smaller non-negative root gives the nearest intersection point, which is exactly what the sphereIntersect function in the compute shader computes.
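
In brief, matching the variables used in sphereIntersect (ray origin $\mathbf{o}$ = rayO, normalized direction $\mathbf{d}$ = rayD, sphere center $\mathbf{c}$ and radius $r$):

$$\mathbf{p}(t) = \mathbf{o} + t\,\mathbf{d}, \qquad \lVert \mathbf{p}(t) - \mathbf{c} \rVert^2 = r^2$$

Substituting the ray into the sphere equation and writing $\mathbf{oc} = \mathbf{o} - \mathbf{c}$ gives (since $\lVert\mathbf{d}\rVert = 1$):

$$t^2 + \underbrace{2\,(\mathbf{oc}\cdot\mathbf{d})}_{b}\,t + \underbrace{\mathbf{oc}\cdot\mathbf{oc} - r^2}_{c} = 0, \qquad h = b^2 - 4c$$

If the discriminant $h$ is negative the ray misses the sphere; otherwise the nearer root $t = \tfrac{-b - \sqrt{h}}{2}$ is the closest hit, which is precisely the value sphereIntersect returns.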

2.2 Ray-Plane Intersection

From the plane's definition (a unit normal and a signed distance from the origin) we can write down the plane equation, substitute the parametric ray into it and solve for the ray parameter.
The resulting solution is exactly what the planeIntersect function in the compute shader computes.
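
In brief, with the plane described by its unit normal $\mathbf{n}$ and distance value $\text{dist}$ (points $\mathbf{p}$ on the plane satisfy $\mathbf{n}\cdot\mathbf{p} + \text{dist} = 0$), substituting the ray $\mathbf{p}(t) = \mathbf{o} + t\,\mathbf{d}$ gives:

$$\mathbf{n}\cdot(\mathbf{o} + t\,\mathbf{d}) + \text{dist} = 0 \quad\Longrightarrow\quad t = -\,\frac{\text{dist} + \mathbf{n}\cdot\mathbf{o}}{\mathbf{n}\cdot\mathbf{d}}$$

If $\mathbf{n}\cdot\mathbf{d} = 0$ the ray is parallel to the plane and there is no hit, and a negative $t$ means the plane lies behind the ray origin; planeIntersect returns 0 in both of these cases.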

2.3 Computation Flow

First, let's look at the main parameters of the ray tracing compute shader; each of them is explained below:

// Intersection precision
#define EPSILON 0.0001
// Maximum ray length
#define MAXLEN 1000.0
// Shadow intensity
#define SHADOW 0.7
// Number of reflection bounces
#define RAYBOUNCES 3
// Enable reflections
#define REFLECTIONS false
// Reflection strength
#define REFLECTIONSTRENGTH 0.4
// Reflection falloff
#define REFLECTIONFALLOFF 0.6

Overall, we first shoot a ray from the camera towards each pixel position and call renderScene once for the primary pass. If we only perform this basic pass and do not let rays bounce off the objects they hit (i.e. REFLECTIONS set to false), running the sample looks like this:
[Figure: result with reflections disabled]
This is essentially equivalent to the shadowing we have implemented before; a single primary-ray pass also naturally solves the visibility problem that rasterization handles with depth testing.
Next we let rays reflect, i.e. a ray is perfectly reflected after hitting an object, which is the core idea of Whitted-style ray tracing. On top of that we account for energy loss (REFLECTIONFALLOFF 0.6) and reflection strength (REFLECTIONSTRENGTH 0.4), and we manually cap the number of bounces (RAYBOUNCES 3).
In the compute shader's main function we can see how these secondary rays are handled:

	// Reflections enabled?
	if (REFLECTIONS)
	{
		float reflectionStrength = REFLECTIONSTRENGTH;
		for (int i = 0; i < RAYBOUNCES; i++)
		{
			// Secondary (reflection) ray
			vec3 reflectionColor = renderScene(rayO, rayD, id);
			finalColor = (1.0 - reflectionStrength) * finalColor + reflectionStrength * mix(reflectionColor, finalColor, 1.0 - reflectionStrength);			
			reflectionStrength *= REFLECTIONFALLOFF;
		}
	}
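
Working out what the blend line above actually computes: since GLSL's mix(a, b, t) equals (1 - t)·a + t·b, each loop iteration with $s$ = reflectionStrength reduces to

$$\text{finalColor} \leftarrow (1-s)\,\text{finalColor} + s\big(s\,\text{reflectionColor} + (1-s)\,\text{finalColor}\big) = (1-s^2)\,\text{finalColor} + s^2\,\text{reflectionColor}$$

so every new bounce is blended in with weight $s^2$, and because $s$ is multiplied by REFLECTIONFALLOFF after each iteration, later bounces contribute progressively less.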

With the reflection strength and its falloff applied this way, the bounce contributions are accumulated into the final color, which is then stored in the corresponding texel for the graphics shaders to sample and display. Running with reflections enabled gives the following result:
[Figure: result with reflections enabled]
The differently colored spheres of the scene are now clearly reflected on the surrounding walls.

Finally, note that the number of bounces is capped manually here. In a truly physical setting we would keep following reflected rays recursively until they reach a light source, but then the running time could not be bounded, so we fix the bounce count instead. With the falloff taken into account, around 7 bounces already gives a very good ray-traced result; below is a comparison of different bounce counts:

[Figure: comparison of results with different reflection bounce counts]
