
Merge branch 'master' of https://github.com/Unity-Technologies/ScriptableRenderLoop

# Conflicts:
#	Assets/ScriptableRenderLoop/HDRenderLoop/HDRenderLoop.cs
Julien Ignace, 8 years ago
Commit 0065b8b6
25 files changed, 457 insertions and 262 deletions
  1. Assets/BasicRenderLoopTutorial/BasicRenderLoop.cs (6 changes)
  2. Assets/ScriptableRenderLoop/HDRenderLoop/HDRenderLoop.cs (205 changes)
  3. Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/Lighting.hlsl (6 changes)
  4. Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/Resources/Deferred.shader (13 changes)
  5. Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/SinglePass/SinglePass.cs (1 change)
  6. Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/SinglePass/SinglePassLoop.hlsl (2 changes)
  7. Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/TilePass/ClusteredUtils.hlsl (4 changes)
  8. Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/TilePass/Resources/scrbound.compute (2 changes)
  9. Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/TilePass/ShaderBase.hlsl (4 changes)
  10. Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/TilePass/TilePass.cs (114 changes)
  11. Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/TilePass/TilePass.hlsl (38 changes)
  12. Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/TilePass/TilePassLoop.hlsl (157 changes)
  13. Assets/ScriptableRenderLoop/HDRenderLoop/Material/Builtin/BuiltinData.hlsl (3 changes)
  14. Assets/ScriptableRenderLoop/HDRenderLoop/Material/Lit/Lit.hlsl (14 changes)
  15. Assets/ScriptableRenderLoop/HDRenderLoop/Material/Lit/LitData.hlsl (24 changes)
  16. Assets/ScriptableRenderLoop/HDRenderLoop/Material/Material.hlsl (36 changes)
  17. Assets/ScriptableRenderLoop/HDRenderLoop/Material/Unlit/Unlit.hlsl (2 changes)
  18. Assets/ScriptableRenderLoop/HDRenderLoop/Material/Unlit/UnlitData.hlsl (9 changes)
  19. Assets/ScriptableRenderLoop/HDRenderLoop/ShaderPass/ShaderPassForward.hlsl (2 changes)
  20. Assets/ScriptableRenderLoop/HDRenderLoop/ShaderPass/ShaderPassForwardUnlit.hlsl (2 changes)
  21. Assets/ScriptableRenderLoop/HDRenderLoop/Utilities.cs (37 changes)
  22. Assets/ScriptableRenderLoop/ShaderLibrary/API/D3D11.hlsl (4 changes)
  23. Assets/ScriptableRenderLoop/ShaderLibrary/AreaLighting.hlsl (8 changes)
  24. Assets/ScriptableRenderLoop/ShaderLibrary/Common.hlsl (23 changes)
  25. Assets/ScriptableRenderLoop/ShaderLibrary/ImageBasedLighting.hlsl (3 changes)

Assets/BasicRenderLoopTutorial/BasicRenderLoop.cs (6 changes)


// per-camera built-in shader variables).
loop.SetupCameraProperties (camera);
// clear depth buffer
var cmd = new CommandBuffer();
cmd.ClearRenderTarget(true, false, Color.black);
loop.ExecuteCommandBuffer(cmd);
cmd.Release();
// Setup global lighting shader variables
SetupLightShaderVariables (cull.visibleLights, loop);

Assets/ScriptableRenderLoop/HDRenderLoop/HDRenderLoop.cs (205 changes)


RenderTargetIdentifier m_CameraDepthBufferRT;
RenderTargetIdentifier m_VelocityBufferRT;
RenderTargetIdentifier m_DistortionBufferRT;
public class LightList
{
public List<DirectionalLightData> directionalLights;

m_SkyRenderer = new SkyRenderer();
m_SkyRenderer.Rebuild();
m_FinalPassMaterial = Utilities.CreateEngineMaterial("Hidden/HDRenderLoop/FinalPass");
m_DebugViewMaterialGBuffer = Utilities.CreateEngineMaterial("Hidden/HDRenderLoop/DebugViewMaterialGBuffer");
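Utilities.CreateEngineMaterial is used for every internal pass material here. A minimal sketch of what such a helper typically does, assuming it simply wraps Shader.Find and keeps the material out of the saved scene:

public static Material CreateEngineMaterial(string shaderPath)
{
    // Look the hidden shader up by name and make sure the material is never serialized.
    var mat = new Material(Shader.Find(shaderPath))
    {
        hideFlags = HideFlags.HideAndDontSave
    };
    return mat;
}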

// We clear only the depth buffer, no need to clear the various color buffer as we overwrite them.
// Clear depth/stencil and init buffers
using (new Utilities.ProfilingSample("InitGBuffers and clear Depth/Stencil", renderLoop))
{
var cmd = new CommandBuffer();
{
var cmd = new CommandBuffer();
// Init buffer
// With the scriptable render loop we must allocate the depth and color buffers ourselves (we must be independent of the backbuffer for now, hope to fix that later).
// We also manage the HDR format ourselves, here allocating fp16 directly.
// With the scriptable render loop we can allocate temporary RTs in a command buffer; they are not released by ExecuteCommandBuffer.
// These temporary surfaces are released automatically at the end of the scriptable render loop if not released explicitly.
int w = camera.pixelWidth;
int h = camera.pixelHeight;
cmd.GetTemporaryRT(m_CameraColorBuffer, w, h, 0, FilterMode.Point, RenderTextureFormat.ARGBHalf, RenderTextureReadWrite.Linear);
cmd.GetTemporaryRT(m_CameraDepthBuffer, w, h, 24, FilterMode.Point, RenderTextureFormat.Depth);
if (!debugParameters.useForwardRenderingOnly)
{
m_gbufferManager.InitGBuffers(w, h, cmd);
}
renderLoop.ExecuteCommandBuffer(cmd);
cmd.Dispose();
// Init buffer
// With the scriptable render loop we must allocate the depth and color buffers ourselves (we must be independent of the backbuffer for now, hope to fix that later).
// We also manage the HDR format ourselves, here allocating fp16 directly.
// With the scriptable render loop we can allocate temporary RTs in a command buffer; they are not released by ExecuteCommandBuffer.
// These temporary surfaces are released automatically at the end of the scriptable render loop if not released explicitly.
int w = camera.pixelWidth;
int h = camera.pixelHeight;
Utilities.SetRenderTarget(renderLoop, m_CameraColorBufferRT, m_CameraDepthBufferRT, ClearFlag.ClearDepth);
cmd.GetTemporaryRT(m_CameraColorBuffer, w, h, 0, FilterMode.Point, RenderTextureFormat.ARGBHalf, RenderTextureReadWrite.Linear);
cmd.GetTemporaryRT(m_CameraDepthBuffer, w, h, 24, FilterMode.Point, RenderTextureFormat.Depth);
if (!debugParameters.useForwardRenderingOnly)
{
m_gbufferManager.InitGBuffers(w, h, cmd);
renderLoop.ExecuteCommandBuffer(cmd);
cmd.Dispose();
// TEMP: As we are in development and do not have all the setup passes yet, we still clear the color in the emissive buffer and GBuffer, but this will be removed later.
Utilities.SetRenderTarget(renderLoop, m_CameraColorBufferRT, m_CameraDepthBufferRT, ClearFlag.ClearDepth);
}
// TEMP: As we are in development and do not have all the setup passes yet, we still clear the color in the emissive buffer and GBuffer, but this will be removed later.
// Clear HDR target
// Clear HDR target
{
{
}
}
// Clear GBuffers
// Clear GBuffers
{
{
}
// END TEMP
// END TEMP
}
}
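Condensed, the temporary render-target pattern the comments above describe looks like this (same identifiers as the hunk; the real pass also initializes the GBuffer targets):

var cmd = new CommandBuffer { name = "InitGBuffers and clear Depth/Stencil" };
int w = camera.pixelWidth, h = camera.pixelHeight;
// Allocate the HDR color buffer (fp16) and the depth buffer ourselves, independent of the backbuffer.
cmd.GetTemporaryRT(m_CameraColorBuffer, w, h, 0, FilterMode.Point, RenderTextureFormat.ARGBHalf, RenderTextureReadWrite.Linear);
cmd.GetTemporaryRT(m_CameraDepthBuffer, w, h, 24, FilterMode.Point, RenderTextureFormat.Depth);
renderLoop.ExecuteCommandBuffer(cmd); // does NOT release the temporary RTs
cmd.Dispose();
// The surfaces live until the end of the scriptable render loop unless released explicitly,
// e.g. cmd.ReleaseTemporaryRT(m_CameraColorBuffer).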
void RenderOpaqueRenderList(CullResults cull, Camera camera, RenderLoop renderLoop, string passName, RendererConfiguration rendererConfiguration = 0)

using (new Utilities.ProfilingSample("Depth Prepass", renderLoop))
{
// TODO: Must do opaque then alpha masked for performance!
// TODO: front to back for opaque and by material for opaque tested when we split in two
// TODO: Must do opaque then alpha masked for performance!
// TODO: front to back for opaque and by material for opaque tested when we split in two
RenderOpaqueRenderList(cull, camera, renderLoop, "DepthOnly");
}
RenderOpaqueRenderList(cull, camera, renderLoop, "DepthOnly");
}
}
void RenderGBuffer(CullResults cull, Camera camera, RenderLoop renderLoop)

using (new Utilities.ProfilingSample("GBuffer Pass", renderLoop))
{
// setup GBuffer for rendering
// setup GBuffer for rendering
// render opaque objects into GBuffer
RenderOpaqueRenderList(cull, camera, renderLoop, "GBuffer", Utilities.kRendererConfigurationBakedLighting);
}
// render opaque objects into GBuffer
RenderOpaqueRenderList(cull, camera, renderLoop, "GBuffer", Utilities.kRendererConfigurationBakedLighting);
}
}
// This pass is used in the case of forward opaque and deferred rendering. We need to render forward objects before the tile lighting pass.

using (new Utilities.ProfilingSample("Single Pass - Deferred Lighting Pass", renderLoop))
{
// Bind material data
m_LitRenderLoop.Bind();
m_SinglePassLightLoop.RenderDeferredLighting(camera, renderLoop, m_CameraColorBuffer);
// m_TilePassLightLoop.RenderDeferredLighting(camera, renderLoop, );
}
// Bind material data
m_LitRenderLoop.Bind();
m_SinglePassLightLoop.RenderDeferredLighting(camera, renderLoop, m_CameraColorBuffer);
// m_TilePassLightLoop.RenderDeferredLighting(camera, renderLoop, m_CameraColorBuffer);
}
}
void RenderSky(Camera camera, RenderLoop renderLoop)

{
using (new Utilities.ProfilingSample("Forward Pass", renderLoop))
{
// Bind material data
m_LitRenderLoop.Bind();
// Bind material data
m_LitRenderLoop.Bind();
if (debugParameters.useForwardRenderingOnly)
{
RenderOpaqueRenderList(cullResults, camera, renderLoop, "Forward");
}
RenderTransparentRenderList(cullResults, camera, renderLoop, "Forward", Utilities.kRendererConfigurationBakedLighting);
if (debugParameters.useForwardRenderingOnly)
{
RenderOpaqueRenderList(cullResults, camera, renderLoop, "Forward");
RenderTransparentRenderList(cullResults, camera, renderLoop, "Forward", Utilities.kRendererConfigurationBakedLighting);
}
}
void RenderForwardUnlit(CullResults cullResults, Camera camera, RenderLoop renderLoop)

// Bind material data
m_LitRenderLoop.Bind();
// Bind material data
m_LitRenderLoop.Bind();
RenderOpaqueRenderList(cullResults, camera, renderLoop, "ForwardUnlit");
RenderTransparentRenderList(cullResults, camera, renderLoop, "ForwardUnlit");
}
RenderOpaqueRenderList(cullResults, camera, renderLoop, "ForwardUnlit");
RenderTransparentRenderList(cullResults, camera, renderLoop, "ForwardUnlit");
}
}
void RenderVelocity(CullResults cullResults, Camera camera, RenderLoop renderLoop)

// warning CS0162: Unreachable code detected // warning CS0429: Unreachable expression code detected
#pragma warning disable 162, 429
// If opaque velocity has been rendered during the GBuffer pass, no need to render it here
if ((ShaderConfig.VelocityInGbuffer == 0) || debugParameters.useForwardRenderingOnly)
return ;
#pragma warning disable 162, 429
// If opaque velocity has been rendered during the GBuffer pass, no need to render it here
if ((ShaderConfig.VelocityInGbuffer == 0) || debugParameters.useForwardRenderingOnly)
return ;
int w = camera.pixelWidth;
int h = camera.pixelHeight;
int w = camera.pixelWidth;
int h = camera.pixelHeight;
cmd.GetTemporaryRT(m_VelocityBuffer, w, h, 0, FilterMode.Point, Builtin.RenderLoop.GetVelocityBufferFormat(), Builtin.RenderLoop.GetVelocityBufferReadWrite());
cmd.SetRenderTarget(m_VelocityBufferRT, m_CameraDepthBufferRT);
renderLoop.ExecuteCommandBuffer(cmd);
cmd.Dispose();
cmd.GetTemporaryRT(m_VelocityBuffer, w, h, 0, FilterMode.Point, Builtin.RenderLoop.GetVelocityBufferFormat(), Builtin.RenderLoop.GetVelocityBufferReadWrite());
cmd.SetRenderTarget(m_VelocityBufferRT, m_CameraDepthBufferRT);
renderLoop.ExecuteCommandBuffer(cmd);
cmd.Dispose();
RenderOpaqueRenderList(cullResults, camera, renderLoop, "MotionVectors");
#pragma warning restore 162, 429
}
RenderOpaqueRenderList(cullResults, camera, renderLoop, "MotionVectors");
#pragma warning restore 162, 429
}
}
void RenderDistortion(CullResults cullResults, Camera camera, RenderLoop renderLoop)

int w = camera.pixelWidth;
int h = camera.pixelHeight;
int w = camera.pixelWidth;
int h = camera.pixelHeight;
cmd.GetTemporaryRT(m_DistortionBuffer, w, h, 0, FilterMode.Point, Builtin.RenderLoop.GetDistortionBufferFormat(), Builtin.RenderLoop.GetDistortionBufferReadWrite());
cmd.SetRenderTarget(m_DistortionBufferRT, m_CameraDepthBufferRT);
renderLoop.ExecuteCommandBuffer(cmd);
cmd.Dispose();
cmd.GetTemporaryRT(m_DistortionBuffer, w, h, 0, FilterMode.Point, Builtin.RenderLoop.GetDistortionBufferFormat(), Builtin.RenderLoop.GetDistortionBufferReadWrite());
cmd.SetRenderTarget(m_DistortionBufferRT, m_CameraDepthBufferRT);
renderLoop.ExecuteCommandBuffer(cmd);
cmd.Dispose();
// Only transparent objects can render distortion vectors
RenderTransparentRenderList(cullResults, camera, renderLoop, "DistortionVectors");
}
// Only transparent objects can render distortion vectors
RenderTransparentRenderList(cullResults, camera, renderLoop, "DistortionVectors");
}
}

{
// Those could be tweakable for the neutral tonemapper, but in the case of the LookDev we don't need that
const float blackIn = 0.02f;
const float whiteIn = 10.0f;
const float blackOut = 0.0f;
const float whiteOut = 10.0f;
const float whiteLevel = 5.3f;
const float whiteClip = 10.0f;
const float dialUnits = 20.0f;
const float halfDialUnits = dialUnits * 0.5f;
// Those could be tweakable for the neutral tonemapper, but in the case of the LookDev we don't need that
const float blackIn = 0.02f;
const float whiteIn = 10.0f;
const float blackOut = 0.0f;
const float whiteOut = 10.0f;
const float whiteLevel = 5.3f;
const float whiteClip = 10.0f;
const float dialUnits = 20.0f;
const float halfDialUnits = dialUnits * 0.5f;
// converting from artist dial units to easy shader-lerps (0-1)
var tonemapCoeff1 = new Vector4((blackIn * dialUnits) + 1.0f, (blackOut * halfDialUnits) + 1.0f, (whiteIn / dialUnits), (1.0f - (whiteOut / dialUnits)));
var tonemapCoeff2 = new Vector4(0.0f, 0.0f, whiteLevel, whiteClip / halfDialUnits);
// converting from artist dial units to easy shader-lerps (0-1)
var tonemapCoeff1 = new Vector4((blackIn * dialUnits) + 1.0f, (blackOut * halfDialUnits) + 1.0f, (whiteIn / dialUnits), (1.0f - (whiteOut / dialUnits)));
var tonemapCoeff2 = new Vector4(0.0f, 0.0f, whiteLevel, whiteClip / halfDialUnits);
m_FinalPassMaterial.SetVector("_ToneMapCoeffs1", tonemapCoeff1);
m_FinalPassMaterial.SetVector("_ToneMapCoeffs2", tonemapCoeff2);
m_FinalPassMaterial.SetVector("_ToneMapCoeffs1", tonemapCoeff1);
m_FinalPassMaterial.SetVector("_ToneMapCoeffs2", tonemapCoeff2);
m_FinalPassMaterial.SetFloat("_EnableToneMap", debugParameters.enableTonemap ? 1.0f : 0.0f);
m_FinalPassMaterial.SetFloat("_Exposure", debugParameters.exposure);
m_FinalPassMaterial.SetFloat("_EnableToneMap", debugParameters.enableTonemap ? 1.0f : 0.0f);
m_FinalPassMaterial.SetFloat("_Exposure", debugParameters.exposure);
// Resolve our HDR texture to CameraTarget.
cmd.Blit(m_CameraColorBufferRT, BuiltinRenderTextureType.CameraTarget, m_FinalPassMaterial, 0);
renderLoop.ExecuteCommandBuffer(cmd);
cmd.Dispose();
}
// Resolve our HDR texture to CameraTarget.
cmd.Blit(m_CameraColorBufferRT, BuiltinRenderTextureType.CameraTarget, m_FinalPassMaterial, 0);
renderLoop.ExecuteCommandBuffer(cmd);
cmd.Dispose();
}
}
// Function to prepare the light structures for GPU lighting

// build per tile light lists
m_SinglePassLightLoop.PrepareLightsForGPU(cullResults, camera, m_lightList);
m_TilePassLightLoop.PrepareLightsForGPU(cullResults, camera, m_lightList);
//m_TilePassLightLoop.PrepareLightsForGPU(cullResults, camera, m_lightList);
}
void Resize(Camera camera)

ShadowOutput shadows;
using (new Utilities.ProfilingSample("Shadow Pass", renderLoop))
{
m_ShadowPass.Render(renderLoop, cullResults, out shadows);
m_ShadowPass.Render(renderLoop, cullResults, out shadows);
}
renderLoop.SetupCameraProperties(camera); // Need to call SetupCameraProperties again after m_ShadowPass.Render

PrepareLightsForGPU(cullResults, camera, ref shadows, ref m_lightList);
m_TilePassLightLoop.BuildGPULightLists(camera, renderLoop, m_lightList, m_CameraDepthBuffer);
//m_TilePassLightLoop.BuildGPULightLists(camera, renderLoop, m_lightList, m_CameraDepthBuffer);
PushGlobalParams(camera, renderLoop, m_lightList);
}
RenderDeferredLighting(camera, renderLoop);
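Pieced together from the hunks above, the per-camera sequence is roughly the following sketch (the tile-pass BuildGPULightLists call is still commented out in this commit):

ShadowOutput shadows;
m_ShadowPass.Render(renderLoop, cullResults, out shadows);                 // render shadow maps first
renderLoop.SetupCameraProperties(camera);                                  // shadow rendering changed the camera state, so set it up again
PrepareLightsForGPU(cullResults, camera, ref shadows, ref m_lightList);    // flatten Unity lights/shadows into the GPU LightList
//m_TilePassLightLoop.BuildGPULightLists(camera, renderLoop, m_lightList, m_CameraDepthBuffer);
PushGlobalParams(camera, renderLoop, m_lightList);                         // bind the light/shadow buffers for all shaders
RenderDeferredLighting(camera, renderLoop);                                // resolve lighting for the GBuffer content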

Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/Lighting.hlsl (6 changes)


#include "Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/LightDefinition.cs.hlsl"
#ifdef LIGHTLOOP_SINGLE_PASS
#ifdef LIGHTLOOP_SINGLE_PASS
#elif LIGHTLOOP_TILED_PASS
#include "Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/TilePass/TilePass.hlsl"
#endif
// Shadows use the sampling functions defined in the header above and must be included before Material.hlsl

// The LightLoop uses the BSDF evaluation functions for each light type defined in Material.hlsl
#ifdef LIGHTLOOP_SINGLE_PASS
#include "Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/SinglePass/SinglePassLoop.hlsl"
#elif LIGHTLOOP_TILED_PASS
#include "Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/TilePass/TilePassLoop.hlsl"
#endif

Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/Resources/Deferred.shader (13 changes)


#pragma fragment FragDeferred
// Choose the supported lighting architecture in the case of deferred rendering
#pragma multi_compile LIGHTLOOP_SINGLE_PASS
#pragma multi_compile LIGHTLOOP_SINGLE_PASS LIGHTLOOP_TILE_PASS
// TODO: This must be on the lightloop side and included here....
#pragma multi_compile USE_FPTL_LIGHTLIST USE_CLUSTERED_LIGHTLIST
#pragma multi_compile __ ENABLE_DEBUG
// TODO: Workflow problem here, I would like to only generate variants for the LIGHTLOOP_TILE_PASS case, not the LIGHTLOOP_SINGLE_PASS case. This must be on the lightloop side and included here.... (Can we condition
#pragma multi_compile LIGHTLOOP_TILE_DIRECT LIGHTLOOP_TILE_INDIRECT LIGHTLOOP_TILE_ALL
#pragma multi_compile USE_FPTL_LIGHTLIST USE_CLUSTERED_LIGHTLIST
#pragma multi_compile _ ENABLE_DEBUG
//-------------------------------------------------------------------------------------
// Include

Coordinate coord = GetCoordinate(unPositionSS.xy, _ScreenSize.zw);
// No need to manage inverse depth, this is handled by the projection matrix
float depth = LOAD_TEXTURE2D(_CameraDepthTexture, uint3(coord.unPositionSS, 0)).x;
float depth = LOAD_TEXTURE2D(_CameraDepthTexture, coord.unPositionSS).x;
float3 positionWS = UnprojectToWorld(depth, coord.positionSS, _InvViewProjMatrix);
float3 V = GetWorldSpaceNormalizeViewDir(positionWS);

float3 diffuseLighting;
float3 specularLighting;
LightLoop(V, positionWS, preLightData, bsdfData, bakeDiffuseLighting, diffuseLighting, specularLighting);
LightLoop(V, positionWS, coord, preLightData, bsdfData, bakeDiffuseLighting, diffuseLighting, specularLighting);
return float4(diffuseLighting + specularLighting, 1.0);
}

Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/SinglePass/SinglePass.cs (1 change)


s_PunctualShadowList = new ComputeBuffer(HDRenderLoop.k_MaxShadowOnScreen, System.Runtime.InteropServices.Marshal.SizeOf(typeof(PunctualShadowData)));
m_DeferredMaterial = Utilities.CreateEngineMaterial("Hidden/HDRenderLoop/Deferred");
m_DeferredMaterial.EnableKeyword("LIGHTLOOP_SINGLE_PASS");
}
public void OnDisable()
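These ComputeBuffers are filled on the CPU and bound globally for the shader light loop (the binding shows up in TilePass.PushGlobalParams below). A minimal sketch of that round trip; maxPunctualLights, punctualLights and the PunctualLightData struct are hypothetical stand-ins:

// The stride must match the HLSL struct layout exactly, hence Marshal.SizeOf on the C# mirror struct.
var lightBuffer = new ComputeBuffer(maxPunctualLights, System.Runtime.InteropServices.Marshal.SizeOf(typeof(PunctualLightData)));
// Each frame: upload the CPU-side array and expose the buffer to every shader.
lightBuffer.SetData(punctualLights);                       // PunctualLightData[]
Shader.SetGlobalBuffer("_PunctualLightList", lightBuffer);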

Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/SinglePass/SinglePassLoop.hlsl (2 changes)


// ----------------------------------------------------------------------------
// bakeDiffuseLighting is part of the prototype so a user is able to implement a "base pass" with GI and multipass direct light (aka old unity rendering path)
void LightLoop( float3 V, float3 positionWS, PreLightData prelightData, BSDFData bsdfData, float3 bakeDiffuseLighting,
void LightLoop( float3 V, float3 positionWS, Coordinate coord, PreLightData prelightData, BSDFData bsdfData, float3 bakeDiffuseLighting,
out float3 diffuseLighting,
out float3 specularLighting)
{

Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/TilePass/ClusteredUtils.hlsl (4 changes)


#ifndef __CLUSTEREDUTILS_H__
#define __CLUSTEREDUTILS_H__
#ifndef FLT_EPSILON
#define FLT_EPSILON 1.192092896e-07f
#endif
float GetScaleFromBase(float base)
{
const float C = (float)(1 << g_iLog2NumClusters);

Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/TilePass/Resources/scrbound.compute (2 changes)


StructuredBuffer<SFiniteLightBound> g_data : register( t0 );
#define FLT_EPSILON 1.192092896e-07F // smallest such that 1.0+FLT_EPSILON != 1.0
#define NR_THREADS 64
// output buffer

Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/TilePass/ShaderBase.hlsl (4 changes)


float FetchDepth(Texture2D depthTexture, uint2 pixCoord)
{
return 1 - LOAD_TEXTURE2D(depthTexture, uint3(pixCoord.xy, 0)).x;
return 1.0 - LOAD_TEXTURE2D(depthTexture, pixCoord.xy).x;
return 1 - LOAD_TEXTURE2D_MSAA(depthTexture, uint3(pixCoord.xy, 0), sampleIdx).x;
return 1.0 - LOAD_TEXTURE2D_MSAA(depthTexture, pixCoord.xy, sampleIdx).x;
}
#endif

Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/TilePass/TilePass.cs (114 changes)


{
string GetKeyword()
{
return "LIGHTLOOP_SINGLE_PASS";
return "LIGHTLOOP_TILE_PASS";
}
public const int MaxNumLights = 1024;

private static ComputeBuffer s_GlobalLightListAtomic;
// clustered light list specific buffers and data end
const int k_TileSize = 16;
SFiniteLightBound[] m_boundData;
SFiniteLightData[] m_lightData;
int m_lightCount;

Material m_DeferredMaterial;
Material m_DeferredReflectionMaterial;
const int k_TileSize = 16;
int GetNumTileX(Camera camera)
{
return (camera.pixelWidth + (k_TileSize - 1)) / k_TileSize;
}
int GetNumTileY(Camera camera)
{
return (camera.pixelHeight + (k_TileSize - 1)) / k_TileSize;
}
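For illustration, with the fixed 16x16 tiles a 1920x1080 camera gives:

int tilesX = (1920 + 15) / 16; // 120 tiles across
int tilesY = (1080 + 15) / 16; // 68 tiles down, i.e. 8160 tiles in total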
// Local function
void ClearComputeBuffers()
{

s_EnvLightList = new ComputeBuffer(HDRenderLoop.k_MaxEnvLightsOnSCreen, System.Runtime.InteropServices.Marshal.SizeOf(typeof(EnvLightData)));
s_PunctualShadowList = new ComputeBuffer(HDRenderLoop.k_MaxShadowOnScreen, System.Runtime.InteropServices.Marshal.SizeOf(typeof(PunctualShadowData)));
// m_DeferredMaterial = Utilities.CreateEngineMaterial("Hidden/HDRenderLoop/TileDeferred");
// m_DeferredReflectionMaterial = Utilities.CreateEngineMaterial("Hidden/HDRenderLoop/TileDeferredReflection");
m_DeferredMaterial = Utilities.CreateEngineMaterial("Hidden/HDRenderLoop/Deferred");
m_DeferredMaterial.EnableKeyword("LIGHTLOOP_TILE_PASS");
m_DeferredMaterial.EnableKeyword("LIGHTLOOP_TILE_DIRECT");
m_DeferredReflectionMaterial = Utilities.CreateEngineMaterial("Hidden/HDRenderLoop/Deferred");
m_DeferredReflectionMaterial.EnableKeyword("LIGHTLOOP_TILE_PASS");
m_DeferredReflectionMaterial.EnableKeyword("LIGHTLOOP_TILE_INDIRECT");
}
public void OnDisable()

s_EnvLightList = null;
s_PunctualShadowList.Release();
s_PunctualShadowList = null;
/*
*/
}
public bool NeedResize()

}
}
// TEMP: These functions should be implemented on the C++ side; for now do it in C#
private static void SetMatrixCS(CommandBuffer cmd, ComputeShader shadercs, string name, Matrix4x4 mat)
{
var data = new float[16];
for (int c = 0; c < 4; c++)
for (int r = 0; r < 4; r++)
data[4 * c + r] = mat[r, c];
cmd.SetComputeFloatParams(shadercs, name, data);
}
private static void SetMatrixArrayCS(CommandBuffer cmd, ComputeShader shadercs, string name, Matrix4x4[] matArray)
{
int numMatrices = matArray.Length;
var data = new float[numMatrices * 16];
for (int n = 0; n < numMatrices; n++)
for (int c = 0; c < 4; c++)
for (int r = 0; r < 4; r++)
data[16 * n + 4 * c + r] = matArray[n][r, c];
cmd.SetComputeFloatParams(shadercs, name, data);
}
private static void SetVectorArrayCS(CommandBuffer cmd, ComputeShader shadercs, string name, Vector4[] vecArray)
{
int numVectors = vecArray.Length;
var data = new float[numVectors * 4];
for (int n = 0; n < numVectors; n++)
for (int i = 0; i < 4; i++)
data[4 * n + i] = vecArray[n][i];
cmd.SetComputeFloatParams(shadercs, name, data);
}
static Matrix4x4 GetFlipMatrix()
{
Matrix4x4 flip = Matrix4x4.identity;

cmd.DispatchCompute(buildPerVoxelLightListShader, s_ClearVoxelAtomicKernel, 1, 1, 1);
cmd.SetComputeIntParam(buildPerVoxelLightListShader, "g_iNrVisibLights", m_lightCount);
SetMatrixCS(cmd, buildPerVoxelLightListShader, "g_mScrProjection", projscr);
SetMatrixCS(cmd, buildPerVoxelLightListShader, "g_mInvScrProjection", invProjscr);
Utilities.SetMatrixCS(cmd, buildPerVoxelLightListShader, "g_mScrProjection", projscr);
Utilities.SetMatrixCS(cmd, buildPerVoxelLightListShader, "g_mInvScrProjection", invProjscr);
cmd.SetComputeIntParam(buildPerVoxelLightListShader, "g_iLog2NumClusters", k_Log2NumClusters);

cmd.SetComputeBufferParam(buildPerVoxelLightListShader, s_GenListPerVoxelKernel, "g_logBaseBuffer", s_PerTileLogBaseTweak);
}
var numTilesX = (camera.pixelWidth + 15) / 16;
var numTilesY = (camera.pixelHeight + 15) / 16;
var numTilesX = GetNumTileX(camera);
var numTilesY = GetNumTileY(camera);
cmd.DispatchCompute(buildPerVoxelLightListShader, s_GenListPerVoxelKernel, numTilesX, numTilesY, 1);
}

var h = camera.pixelHeight;
var numTilesX = (w + 15) / 16;
var numTilesY = (h + 15) / 16;
var numTilesX = GetNumTileX(camera);
var numTilesY = GetNumTileY(camera);
var numBigTilesX = (w + 63) / 64;
var numBigTilesY = (h + 63) / 64;

var invProjh = projh.inverse;
cmd.SetComputeIntParam(buildScreenAABBShader, "g_iNrVisibLights", m_lightCount);
SetMatrixCS(cmd, buildScreenAABBShader, "g_mProjection", projh);
SetMatrixCS(cmd, buildScreenAABBShader, "g_mInvProjection", invProjh);
Utilities.SetMatrixCS(cmd, buildScreenAABBShader, "g_mProjection", projh);
Utilities.SetMatrixCS(cmd, buildScreenAABBShader, "g_mInvProjection", invProjh);
cmd.SetComputeBufferParam(buildScreenAABBShader, s_GenAABBKernel, "g_vBoundsBuffer", s_AABBBoundsBuffer);
cmd.DispatchCompute(buildScreenAABBShader, s_GenAABBKernel, (m_lightCount + 7) / 8, 1, 1);
}

{
cmd.SetComputeIntParams(buildPerBigTileLightListShader, "g_viDimensions", new int[2] { w, h });
cmd.SetComputeIntParam(buildPerBigTileLightListShader, "g_iNrVisibLights", m_lightCount);
SetMatrixCS(cmd, buildPerBigTileLightListShader, "g_mScrProjection", projscr);
SetMatrixCS(cmd, buildPerBigTileLightListShader, "g_mInvScrProjection", invProjscr);
Utilities.SetMatrixCS(cmd, buildPerBigTileLightListShader, "g_mScrProjection", projscr);
Utilities.SetMatrixCS(cmd, buildPerBigTileLightListShader, "g_mInvScrProjection", invProjscr);
/*
SetMatrixCS(cmd, buildPerTileLightListShader, "g_mScrProjection", projscr);
SetMatrixCS(cmd, buildPerTileLightListShader, "g_mInvScrProjection", invProjscr);
Utilities.SetMatrixCS(cmd, buildPerTileLightListShader, "g_mScrProjection", projscr);
Utilities.SetMatrixCS(cmd, buildPerTileLightListShader, "g_mInvScrProjection", invProjscr);
cmd.SetComputeTextureParam(buildPerTileLightListShader, s_GenListPerTileKernel, "g_depth_tex", new RenderTargetIdentifier(cameraDepthBuffer));
cmd.SetComputeBufferParam(buildPerTileLightListShader, s_GenListPerTileKernel, "g_vLightList", s_LightList);
if (enableBigTilePrepass) cmd.SetComputeBufferParam(buildPerTileLightListShader, s_GenListPerTileKernel, "g_vBigTileLightList", s_BigTileLightList);

{
VoxelLightListGeneration(cmd, camera, projscr, invProjscr, cameraDepthBuffer);
}
*/
loop.ExecuteCommandBuffer(cmd);
cmd.Dispose();
}

Shader.SetGlobalBuffer("_AreaLightList", s_AreaLightList);
Shader.SetGlobalBuffer("_PunctualShadowList", s_PunctualShadowList);
Shader.SetGlobalBuffer("_EnvLightList", s_EnvLightList);
/*
var cmd = new CommandBuffer { name = "Push Global Parameters" };
cmd.SetGlobalFloat("_NumTileX", (float)GetNumTileX(camera));
cmd.SetGlobalFloat("_NumTileY", (float)GetNumTileY(camera));
if (enableBigTilePrepass)
cmd.SetGlobalBuffer("g_vBigTileLightList", s_BigTileLightList);

}
}
cmd.SetGlobalFloat("g_nNumDirLights", numDirLights);
cmd.SetGlobalBuffer("g_dirLightData", s_DirLightList);
*/
/*
var bUseClusteredForDeferred = !usingFptl; // doesn't work on reflections yet but will soon
var cmd = new CommandBuffer();

}
else
{*/
// cmd.Blit(0, colorBuffer, m_DeferredMaterial, 0);
// cmd.Blit(0, colorBuffer, m_DeferredReflectionMaterial, 0);
cmd.Blit(0, colorBuffer, m_DeferredMaterial, 0);
cmd.Blit(0, colorBuffer, m_DeferredReflectionMaterial, 0);
// renderLoop.ExecuteCommandBuffer(cmd);
// cmd.Dispose();
renderLoop.ExecuteCommandBuffer(cmd);
cmd.Dispose();
}
}
}

Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/TilePass/TilePass.hlsl (38 changes)


// uses the optimized single layered light list for opaques only
#if defined (LIGHTLOOP_TILE_DIRECT) || defined(LIGHTLOOP_TILE_ALL)
#define PROCESS_DIRECTIONAL_LIGHT
#define PROCESS_PUNCTUAL_LIGHT
#define PROCESS_AREA_LIGHT
#endif
#ifdef USE_FPTL_LIGHTLIST
#define OPAQUES_ONLY
#if defined (LIGHTLOOP_TILE_INDIRECT) || defined(LIGHTLOOP_TILE_ALL)
#define PROCESS_ENV_LIGHT
uint _NumTileX;
uint _NumTileY;
#define TILE_SIZE 16 // This is fixed
#define DWORD_PER_TILE 16 // See dwordsPerTile in TilePass.cs; we have room for 31 lights and a light count, all stored on 16 bits (ushort)
// these uniforms are only needed for when OPAQUES_ONLY is NOT defined
// but there's a problem with our front-end compilation of compute shaders with multiple kernels causing it to error
//#ifdef USE_CLUSTERED_LIGHTLIST
float g_fClustScale;
float g_fClustBase;
float g_fNearPlane;
float g_fFarPlane;
int g_iLog2NumClusters; // We need to always define these to keep constant buffer layouts compatible
uint g_isLogBaseBufferEnabled;
uint g_isOpaquesOnlyEnabled;
//#endif
#ifdef USE_CLUSTERED_LIGHTLIST
Buffer<uint> g_vLayeredOffsetsBuffer;
Buffer<float> g_logBaseBuffer;
#endif
// TODO: Need to correctly define the shadow framework, WIP
#include "../SinglePass/SinglePass.hlsl"

Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/TilePass/TilePassLoop.hlsl (157 changes)


// LightLoop
// ----------------------------------------------------------------------------
// Calculate the offset into the global light index list for the current light category
int GetTileOffset(Coordinate coord, uint lightCategory)
{
uint2 tileIndex = coord.unPositionSS / TILE_SIZE;
return (tileIndex.y + lightCategory * _NumTileY) * _NumTileX + tileIndex.x;
}
void GetCountAndStartOpaque(Coordinate coord, uint lightCategory, float linearDepth, out uint start, out uint lightCount)
{
const int tileOffset = GetTileOffset(coord, lightCategory);
// The first entry inside a tile is the number of lights for the lightCategory (thus the +0)
lightCount = g_vLightListGlobal[DWORD_PER_TILE * tileOffset + 0] & 0xffff;
start = tileOffset;
}
uint FetchIndexOpaque(uint tileOffset, uint lightIndex)
{
const uint lightIndexPlusOne = lightIndex + 1; // Add +1 as the first slot is reserved to store the number of lights
// Light indices are stored on 16 bits
return (g_vLightListGlobal[DWORD_PER_TILE * tileOffset + (lightIndexPlusOne >> 1)] >> ((lightIndexPlusOne & 1) * DWORD_PER_TILE)) & 0xffff;
}
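The layout implied by GetCountAndStartOpaque/FetchIndexOpaque: each tile owns DWORD_PER_TILE (16) uints per light category, the low 16 bits of the first uint hold the light count, and the remaining 16-bit halves hold up to 31 light indices. An illustrative C# mirror of the unpacking (not code from the repository):

static class FptlTileList
{
    const int DWORD_PER_TILE = 16;

    public static uint GetLightCount(uint[] lightList, int tileOffset)
    {
        return lightList[DWORD_PER_TILE * tileOffset] & 0xffff;   // first entry: number of lights for the category
    }

    public static uint FetchIndexOpaque(uint[] lightList, int tileOffset, int lightIndex)
    {
        int slot = lightIndex + 1;                                 // slot 0 stores the light count
        uint word = lightList[DWORD_PER_TILE * tileOffset + (slot >> 1)];
        return (word >> ((slot & 1) * 16)) & 0xffff;               // even slots in the low half, odd slots in the high half
    }
}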
#ifdef USE_FPTL_LIGHTLIST
void GetCountAndStart(Coordinate coord, uint lightCategory, float linearDepth, out uint start, out uint lightCount)
{
GetCountAndStartOpaque(coord, lightCategory, linearDepth, start, lightCount);
}
uint FetchIndex(uint tileOffset, uint lightIndex)
{
return FetchIndexOpaque(tileOffset, lightIndex);
}
#else
#include "ClusteredUtils.hlsl"
void GetCountAndStart(Coordinate coord, uint lightCategory, float linearDepth, out uint start, out uint lightCount)
{
if(g_isOpaquesOnlyEnabled)
{
GetCountAndStartOpaque(coord, lightCategory, linearDepth, start, lightCount);
}
else
{
uint2 tileIndex = coord.unPositionSS / TILE_SIZE;
float logBase = g_fClustBase;
if (g_isLogBaseBufferEnabled)
{
logBase = g_logBaseBuffer[tileIndex.y * _NumTileX + tileIndex.x];
}
int clustIdx = SnapToClusterIdxFlex(linearDepth, logBase, g_isLogBaseBufferEnabled != 0);
int nrClusters = (1 << g_iLog2NumClusters);
const int idx = ((lightCategory * nrClusters + clustIdx) * _NumTileY + tileIndex.y) * _NumTileX + tileIndex.x;
uint dataPair = g_vLayeredOffsetsBuffer[idx];
start = dataPair & 0x7ffffff;
lightCount = (dataPair >> 27) & 31;
}
}
uint FetchIndex(uint tileOffset, uint lightIndex)
{
if(g_isOpaquesOnlyEnabled)
return FetchIndexOpaque(tileOffset, lightIndex);
else
return g_vLightListGlobal[tileOffset + lightIndex];
}
float GetLinearDepth(float zDptBufSpace) // 0 is near 1 is far
{
// todo (simplify): m22 is zero and m23 is +1/-1 (depends on left/right hand proj)
float m22 = g_mInvScrProjection[2].z, m23 = g_mInvScrProjection[2].w;
float m32 = g_mInvScrProjection[3].z, m33 = g_mInvScrProjection[3].w;
return (m22 * zDptBufSpace + m23) / (m32 * zDptBufSpace + m33);
//float3 vP = float3(0.0f,0.0f,zDptBufSpace);
//float4 v4Pres = mul(g_mInvScrProjection, float4(vP,1.0));
//return v4Pres.z / v4Pres.w;
}
#endif
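In the clustered path each cell of g_vLayeredOffsetsBuffer packs a start offset and a light count into one uint: 27 bits of offset and 5 bits of count, which GetCountAndStart above unpacks. The producer-side packing would be the mirror operation (illustrative C#, not the compute shader's actual code):

static class ClusteredLightList
{
    public static uint PackOffsetAndCount(uint start, uint count)
    {
        // low 27 bits: offset into the global light index list; high 5 bits: light count (max 31)
        return (start & 0x7ffffff) | ((count & 31u) << 27);
    }

    public static void Unpack(uint dataPair, out uint start, out uint count)
    {
        start = dataPair & 0x7ffffff;
        count = (dataPair >> 27) & 31;
    }
}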
void LightLoop(float3 V, float3 positionWS, PreLightData prelightData, BSDFData bsdfData, float3 bakeDiffuseLighting,
out float3 diffuseLighting,
out float3 specularLighting)
void LightLoop( float3 V, float3 positionWS, Coordinate coord, PreLightData prelightData, BSDFData bsdfData, float3 bakeDiffuseLighting,
out float3 diffuseLighting,
out float3 specularLighting)
#if USE_CLUSTERED_LIGHTLIST
// TODO: Think more about the design, is it OK to do that? Hope the compiler can optimize it out as we do it before the LightLoop call, else we need to pass it as an argument...
float depth = LOAD_TEXTURE2D(_CameraDepthTexture, coord.unPositionSS).x;
float linearDepth = GetLinearDepth(depth); // View space linear depth
#else
float linearDepth = 0.0; // unused
#endif
LightLoopContext context;
ZERO_INITIALIZE(LightLoopContext, context);

int i = 0; // Declare once to avoid the D3D11 compiler warning.
#ifdef PROCESS_DIRECTIONAL_LIGHT
EvaluateBSDF_Directional(context, V, positionWS, prelightData, _DirectionalLightList[i], bsdfData,
localDiffuseLighting, localSpecularLighting);
EvaluateBSDF_Directional( context, V, positionWS, prelightData, _DirectionalLightList[i], bsdfData,
localDiffuseLighting, localSpecularLighting);
#endif
for (i = 0; i < _PunctualLightCount; ++i)
#ifdef PROCESS_PUNCTUAL_LIGHT
uint punctualLightStart;
uint punctualLightCount;
GetCountAndStart(coord, DIRECT_LIGHT, linearDepth, punctualLightStart, punctualLightCount);
for (i = 0; i < punctualLightCount; ++i)
EvaluateBSDF_Punctual(context, V, positionWS, prelightData, _PunctualLightList[i], bsdfData,
localDiffuseLighting, localSpecularLighting);
EvaluateBSDF_Punctual( context, V, positionWS, prelightData, _PunctualLightList[FetchIndex(punctualLightStart, i)], bsdfData,
localDiffuseLighting, localSpecularLighting);
#endif
for (i = 0; i < _AreaLightCount; ++i)
#ifdef PROCESS_AREA_LIGHT
/*
// TODO: Area lights are where the sorting is important (Morten approach with while loop)
uint areaLightStart;
uint areaLightCount;
GetCountAndStart(coord, LightCategory.AreaLight, linearDepth, areaLightStart, areaLightCount);
for (i = 0; i < areaLightCount; ++i)
EvaluateBSDF_Area(context, V, positionWS, prelightData, _AreaLightList[i], bsdfData,
localDiffuseLighting, localSpecularLighting);
EvaluateBSDF_Area( context, V, positionWS, prelightData, _AreaLightList[FetchIndex(areaLightStart, i)], bsdfData,
localDiffuseLighting, localSpecularLighting);
*/
#endif
#ifdef PROCESS_ENV_LIGHT
uint envLightStart;
uint envLightCount;
GetCountAndStart(coord, REFLECTION_LIGHT, linearDepth, envLightStart, envLightCount);
float3 iblDiffuseLighting = float3(0.0, 0.0, 0.0);
float3 iblSpecularLighting = float3(0.0, 0.0, 0.0);
for (i = 0; i < envLightCount; ++i)
{
float3 localDiffuseLighting, localSpecularLighting;
float2 weight;
context.sampleReflection = SINGLE_PASS_CONTEXT_SAMPLE_REFLECTION_PROBES;
EvaluateBSDF_Env(context, V, positionWS, prelightData, _EnvLightList[FetchIndex(envLightStart, i)], bsdfData, localDiffuseLighting, localSpecularLighting, weight);
iblDiffuseLighting = lerp(iblDiffuseLighting, localDiffuseLighting, weight.x); // Should be removed by the compiler if it is smart, as everything is constant 0
iblSpecularLighting = lerp(iblSpecularLighting, localSpecularLighting, weight.y);
}
#endif
// TODO
#if ENABLE_DEBUG
// c = OverlayHeatMap(pixCoord & 15, numLightsProcessed, c);
#endif
}

Assets/ScriptableRenderLoop/HDRenderLoop/Material/Builtin/BuiltinData.hlsl (3 changes)


result = builtinData.bakeDiffuseLighting;
break;
case DEBUGVIEW_BUILTIN_BUILTINDATA_EMISSIVE_COLOR:
result = builtinData.emissiveColor; needLinearToSRGB = true;
// emissiveColor is premultiplied by the emissive intensity
result = (builtinData.emissiveColor / builtinData.emissiveIntensity); needLinearToSRGB = true;
break;
case DEBUGVIEW_BUILTIN_BUILTINDATA_EMISSIVE_INTENSITY:
result = builtinData.emissiveIntensity.xxx;

Assets/ScriptableRenderLoop/HDRenderLoop/Material/Lit/Lit.hlsl (14 changes)


TEXTURE2D(_LtcDisneyDiffuseMatrix); // RGBA
TEXTURE2D(_LtcMultiGGXFresnelDisneyDiffuse); // RGB, A unused
static const float3x3 _identity3x3 = {1.0, 0.0, 0.0,
0.0, 1.0, 0.0,
0.0, 0.0, 1.0};
//-----------------------------------------------------------------------------
// Helper functions/variable specific to this material
//-----------------------------------------------------------------------------

GetPreIntegratedFGD(iblNdotV, bsdfData.perceptualRoughness, bsdfData.fresnel0, preLightData.specularFGD, preLightData.diffuseFGD);
// #if SHADERPASS == SHADERPASS_GBUFFER
// preLightData.ambientOcclusion = LOAD_TEXTURE2D(_AmbientOcclusion, uint3(coord.unPositionSS, 0)).x;
// preLightData.ambientOcclusion = LOAD_TEXTURE2D(_AmbientOcclusion, coord.unPositionSS).x;
// #endif
// Area light specific

float3 GetBakedDiffuseLigthing(SurfaceData surfaceData, BuiltinData builtinData, BSDFData bsdfData, PreLightData preLightData)
{
// Premultiply baked diffuse lighting information with the DisneyDiffuse pre-integration
return builtinData.bakeDiffuseLighting * preLightData.diffuseFGD * surfaceData.ambientOcclusion * bsdfData.diffuseColor + builtinData.emissiveColor * builtinData.emissiveIntensity;
return builtinData.bakeDiffuseLighting * preLightData.diffuseFGD * surfaceData.ambientOcclusion * bsdfData.diffuseColor + builtinData.emissiveColor;
}
//-----------------------------------------------------------------------------

// we want to take some of that into account too.
lightTransportData.diffuseColor = bsdfData.diffuseColor + bsdfData.fresnel0 * bsdfData.roughness * 0.5 * surfaceData.metallic;
lightTransportData.emissiveColor = builtinData.emissiveColor * builtinData.emissiveIntensity;
lightTransportData.emissiveColor = builtinData.emissiveColor;
return lightTransportData;
}

// Evaluate the diffuse part.
{
#ifdef LIT_DIFFUSE_LAMBERT_BRDF
ltcValue = LTCEvaluate(P1, P2, B, _identity3x3);
ltcValue = LTCEvaluate(P1, P2, B, k_identity3x3);
#else
ltcValue = LTCEvaluate(P1, P2, B, preLightData.ltcXformDisneyDiffuse);
#endif

{
#ifdef LIT_DIFFUSE_LAMBERT_BRDF
ltcValue = LTCEvaluate(matL, V, bsdfData.normalWS, preLightData.NdotV, lightData.twoSided,
_identity3x3);
k_identity3x3);
#else
ltcValue = LTCEvaluate(matL, V, bsdfData.normalWS, preLightData.NdotV, lightData.twoSided,
preLightData.ltcXformDisneyDiffuse);

Assets/ScriptableRenderLoop/HDRenderLoop/Material/Lit/LitData.hlsl (24 changes)


// Note that the data input above can be used to sample into the lightmap (e.g. the normal)
builtinData.bakeDiffuseLighting = SampleBakedGI(input.positionWS, surfaceData.normalWS, input.texCoord1, input.texCoord2);
// Emissive intensity is only used here, but is part of BuiltinData to enforce UI parameters, as we want the users to fill in one color and one intensity
builtinData.emissiveIntensity = _EmissiveIntensity; // We still store intensity here so we can reuse it with debug code
builtinData.emissiveColor = SAMPLE_TEXTURE2D(_EmissiveColorMap, sampler_EmissiveColorMap, input.texCoord0).rgb * _EmissiveColor;
builtinData.emissiveColor = SAMPLE_TEXTURE2D(_EmissiveColorMap, sampler_EmissiveColorMap, input.texCoord0).rgb * _EmissiveColor * builtinData.emissiveIntensity;
builtinData.emissiveColor = _EmissiveColor;
builtinData.emissiveColor = _EmissiveColor * builtinData.emissiveIntensity;
builtinData.emissiveColor = surfaceData.baseColor * SAMPLE_TEXTURE2D(_MaskMap, sampler_MaskMap, input.texCoord0).bbb;
builtinData.emissiveColor = surfaceData.baseColor * (SAMPLE_TEXTURE2D(_MaskMap, sampler_MaskMap, input.texCoord0).b * builtinData.emissiveIntensity).xxx;
builtinData.emissiveIntensity = _EmissiveIntensity;
builtinData.velocity = CalculateVelocity(input.positionCS, input.previousPositionCS);

// Note that the data input above can be used to sample into the lightmap (e.g. the normal)
builtinData.bakeDiffuseLighting = SampleBakedGI(input.positionWS, surfaceData.normalWS, input.texCoord1, input.texCoord2);
// Emissive intensity is only used here, but is part of BuiltinData to enforce UI parameters, as we want the users to fill in one color and one intensity
PROP_DECL(float, emissiveIntensity);
PROP_ASSIGN(emissiveIntensity, _EmissiveIntensity, r);
PROP_BLEND_SCALAR(emissiveIntensity, weights);
builtinData.emissiveIntensity = emissiveIntensity; // We still store intensity here so we can reuse it with debug code
// If we choose an emissive color, we have a dedicated texture for it and don't use the MaskMap
PROP_DECL(float3, emissiveColor);
#ifdef _EMISSIVE_COLOR

PROP_ASSIGN_VALUE(emissiveColor, float3(0.0, 0.0, 0.0));
#endif
PROP_BLEND_COLOR(emissiveColor, weights);
builtinData.emissiveColor = emissiveColor;
PROP_DECL(float, emissiveIntensity);
PROP_ASSIGN(emissiveIntensity, _EmissiveIntensity, r);
PROP_BLEND_SCALAR(emissiveIntensity, weights);
builtinData.emissiveIntensity = emissiveIntensity;
builtinData.emissiveColor = emissiveColor * builtinData.emissiveIntensity;
builtinData.velocity = CalculateVelocity(input.positionCS, input.previousPositionCS);

Assets/ScriptableRenderLoop/HDRenderLoop/Material/Material.hlsl (36 changes)


TEXTURE2D(MERGE_NAME(NAME, 0)); \
TEXTURE2D(MERGE_NAME(NAME, 1));
#define FETCH_GBUFFER(NAME, TEX, UV) \
GBufferType0 MERGE_NAME(NAME, 0) = LOAD_TEXTURE2D(MERGE_NAME(TEX, 0), uint3(UV, 0)); \
GBufferType1 MERGE_NAME(NAME, 1) = LOAD_TEXTURE2D(MERGE_NAME(TEX, 1), uint3(UV, 0));
#define FETCH_GBUFFER(NAME, TEX, unCoord2) \
GBufferType0 MERGE_NAME(NAME, 0) = LOAD_TEXTURE2D(MERGE_NAME(TEX, 0), unCoord2); \
GBufferType1 MERGE_NAME(NAME, 1) = LOAD_TEXTURE2D(MERGE_NAME(TEX, 1), unCoord2);
#define ENCODE_INTO_GBUFFER(SURFACE_DATA, BAKE_DIFFUSE_LIGHTING, NAME) EncodeIntoGBuffer(SURFACE_DATA, BAKE_DIFFUSE_LIGHTING, MERGE_NAME(NAME,0), MERGE_NAME(NAME,1))
#define DECODE_FROM_GBUFFER(NAME, BSDF_DATA, BAKE_DIFFUSE_LIGHTING) DecodeFromGBuffer(MERGE_NAME(NAME,0), MERGE_NAME(NAME,1), BSDF_DATA, BAKE_DIFFUSE_LIGHTING)

TEXTURE2D(MERGE_NAME(NAME, 1)); \
TEXTURE2D(MERGE_NAME(NAME, 2));
#define FETCH_GBUFFER(NAME, TEX, UV) \
GBufferType0 MERGE_NAME(NAME, 0) = LOAD_TEXTURE2D(MERGE_NAME(TEX, 0), uint3(UV, 0)); \
GBufferType1 MERGE_NAME(NAME, 1) = LOAD_TEXTURE2D(MERGE_NAME(TEX, 1), uint3(UV, 0)); \
GBufferType2 MERGE_NAME(NAME, 2) = LOAD_TEXTURE2D(MERGE_NAME(TEX, 2), uint3(UV, 0));
#define FETCH_GBUFFER(NAME, TEX, unCoord2) \
GBufferType0 MERGE_NAME(NAME, 0) = LOAD_TEXTURE2D(MERGE_NAME(TEX, 0), unCoord2); \
GBufferType1 MERGE_NAME(NAME, 1) = LOAD_TEXTURE2D(MERGE_NAME(TEX, 1), unCoord2); \
GBufferType2 MERGE_NAME(NAME, 2) = LOAD_TEXTURE2D(MERGE_NAME(TEX, 2), unCoord2);
#define ENCODE_INTO_GBUFFER(SURFACE_DATA, BAKE_DIFFUSE_LIGHTING, NAME) EncodeIntoGBuffer(SURFACE_DATA, BAKE_DIFFUSE_LIGHTING, MERGE_NAME(NAME,0), MERGE_NAME(NAME,1), MERGE_NAME(NAME,2))
#define DECODE_FROM_GBUFFER(NAME, BSDF_DATA, BAKE_DIFFUSE_LIGHTING) DecodeFromGBuffer(MERGE_NAME(NAME,0), MERGE_NAME(NAME,1), MERGE_NAME(NAME,2), BSDF_DATA, BAKE_DIFFUSE_LIGHTING)

TEXTURE2D(MERGE_NAME(NAME, 2)); \
TEXTURE2D(MERGE_NAME(NAME, 3));
#define FETCH_GBUFFER(NAME, TEX, UV) \
GBufferType0 MERGE_NAME(NAME, 0) = LOAD_TEXTURE2D(MERGE_NAME(TEX, 0), uint3(UV, 0)); \
GBufferType1 MERGE_NAME(NAME, 1) = LOAD_TEXTURE2D(MERGE_NAME(TEX, 1), uint3(UV, 0)); \
GBufferType2 MERGE_NAME(NAME, 2) = LOAD_TEXTURE2D(MERGE_NAME(TEX, 2), uint3(UV, 0)); \
GBufferType3 MERGE_NAME(NAME, 3) = LOAD_TEXTURE2D(MERGE_NAME(TEX, 3), uint3(UV, 0));
#define FETCH_GBUFFER(NAME, TEX, unCoord2) \
GBufferType0 MERGE_NAME(NAME, 0) = LOAD_TEXTURE2D(MERGE_NAME(TEX, 0), unCoord2); \
GBufferType1 MERGE_NAME(NAME, 1) = LOAD_TEXTURE2D(MERGE_NAME(TEX, 1), unCoord2); \
GBufferType2 MERGE_NAME(NAME, 2) = LOAD_TEXTURE2D(MERGE_NAME(TEX, 2), unCoord2); \
GBufferType3 MERGE_NAME(NAME, 3) = LOAD_TEXTURE2D(MERGE_NAME(TEX, 3), unCoord2);
#define ENCODE_INTO_GBUFFER(SURFACE_DATA, BAKE_DIFFUSE_LIGHTING, NAME) EncodeIntoGBuffer(SURFACE_DATA, BAKE_DIFFUSE_LIGHTING, MERGE_NAME(NAME, 0), MERGE_NAME(NAME, 1), MERGE_NAME(NAME, 2), MERGE_NAME(NAME, 3))
#define DECODE_FROM_GBUFFER(NAME, BSDF_DATA, BAKE_DIFFUSE_LIGHTING) DecodeFromGBuffer(MERGE_NAME(NAME, 0), MERGE_NAME(NAME, 1), MERGE_NAME(NAME, 2), MERGE_NAME(NAME, 3), BSDF_DATA, BAKE_DIFFUSE_LIGHTING)

TEXTURE2D(MERGE_NAME(NAME, 3)); \
TEXTURE2D(MERGE_NAME(NAME, 4));
#define FETCH_GBUFFER(NAME, TEX, UV) \
GBufferType0 MERGE_NAME(NAME, 0) = LOAD_TEXTURE2D(MERGE_NAME(TEX, 0), uint3(UV, 0)); \
GBufferType1 MERGE_NAME(NAME, 1) = LOAD_TEXTURE2D(MERGE_NAME(TEX, 1), uint3(UV, 0)); \
GBufferType2 MERGE_NAME(NAME, 2) = LOAD_TEXTURE2D(MERGE_NAME(TEX, 2), uint3(UV, 0)); \
GBufferType3 MERGE_NAME(NAME, 3) = LOAD_TEXTURE2D(MERGE_NAME(TEX, 3), uint3(UV, 0)); \
GBufferType4 MERGE_NAME(NAME, 4) = LOAD_TEXTURE2D(MERGE_NAME(TEX, 4), uint3(UV, 0));
#define FETCH_GBUFFER(NAME, TEX, unCoord2) \
GBufferType0 MERGE_NAME(NAME, 0) = LOAD_TEXTURE2D(MERGE_NAME(TEX, 0), unCoord2); \
GBufferType1 MERGE_NAME(NAME, 1) = LOAD_TEXTURE2D(MERGE_NAME(TEX, 1), unCoord2); \
GBufferType2 MERGE_NAME(NAME, 2) = LOAD_TEXTURE2D(MERGE_NAME(TEX, 2), unCoord2); \
GBufferType3 MERGE_NAME(NAME, 3) = LOAD_TEXTURE2D(MERGE_NAME(TEX, 3), unCoord2); \
GBufferType4 MERGE_NAME(NAME, 4) = LOAD_TEXTURE2D(MERGE_NAME(TEX, 4), unCoord2);
#define ENCODE_INTO_GBUFFER(SURFACE_DATA, BAKE_DIFFUSE_LIGHTING, NAME) EncodeIntoGBuffer(SURFACE_DATA, BAKE_DIFFUSE_LIGHTING, MERGE_NAME(NAME, 0), MERGE_NAME(NAME, 1), MERGE_NAME(NAME, 2), MERGE_NAME(NAME, 3), MERGE_NAME(NAME, 4))
#define DECODE_FROM_GBUFFER(NAME, BSDF_DATA, BAKE_DIFFUSE_LIGHTING) DecodeFromGBuffer(MERGE_NAME(NAME, 0), MERGE_NAME(NAME, 1), MERGE_NAME(NAME, 2), MERGE_NAME(NAME, 3), MERGE_NAME(NAME, 4), BSDF_DATA, BAKE_DIFFUSE_LIGHTING)

Assets/ScriptableRenderLoop/HDRenderLoop/Material/Unlit/Unlit.hlsl (2 changes)


LighTransportData lightTransportData;
lightTransportData.diffuseColor = float3(0.0, 0.0, 0.0);
lightTransportData.emissiveColor = builtinData.emissiveColor * builtinData.emissiveIntensity;
lightTransportData.emissiveColor = builtinData.emissiveColor;
return lightTransportData;
}

Assets/ScriptableRenderLoop/HDRenderLoop/Material/Unlit/UnlitData.hlsl (9 changes)


builtinData.bakeDiffuseLighting = float3(0.0, 0.0, 0.0);
// Emissive intensity is only used here, but is part of BuiltinData to enforce UI parameters, as we want the users to fill in one color and one intensity
builtinData.emissiveIntensity = _EmissiveIntensity;
builtinData.emissiveColor = SAMPLE_TEXTURE2D(_EmissiveColorMap, sampler_EmissiveColorMap, input.texCoord0).rgb * _EmissiveColor;
builtinData.emissiveColor = SAMPLE_TEXTURE2D(_EmissiveColorMap, sampler_EmissiveColorMap, input.texCoord0).rgb * _EmissiveColor * builtinData.emissiveIntensity;
builtinData.emissiveColor = _EmissiveColor;
builtinData.emissiveColor = _EmissiveColor * builtinData.emissiveIntensity;
builtinData.emissiveIntensity = _EmissiveIntensity;
builtinData.velocity = float2(0.0, 0.0);

Assets/ScriptableRenderLoop/HDRenderLoop/ShaderPass/ShaderPassForward.hlsl (2 changes)


float3 diffuseLighting;
float3 specularLighting;
float3 bakeDiffuseLighting = GetBakedDiffuseLigthing(surfaceData, builtinData, bsdfData, preLightData);
LightLoop(V, positionWS, preLightData, bsdfData, bakeDiffuseLighting, diffuseLighting, specularLighting);
LightLoop(V, positionWS, coord, preLightData, bsdfData, bakeDiffuseLighting, diffuseLighting, specularLighting);
return float4(diffuseLighting + specularLighting, builtinData.opacity);
}

Assets/ScriptableRenderLoop/HDRenderLoop/ShaderPass/ShaderPassForwardUnlit.hlsl (2 changes)


BSDFData bsdfData = ConvertSurfaceDataToBSDFData(surfaceData);
// TODO: we must not access bsdfData here, it breaks the genericity of the code!
return float4(bsdfData.color + builtinData.emissiveColor * builtinData.emissiveIntensity, builtinData.opacity);
return float4(bsdfData.color + builtinData.emissiveColor, builtinData.opacity);
}

Assets/ScriptableRenderLoop/HDRenderLoop/Utilities.cs (37 changes)


{
return new Vector4(camera.pixelWidth, camera.pixelHeight, 1.0f / camera.pixelWidth, 1.0f / camera.pixelHeight);
}
// TEMP: These functions should be implemented on the C++ side; for now do it in C#
public static void SetMatrixCS(CommandBuffer cmd, ComputeShader shadercs, string name, Matrix4x4 mat)
{
var data = new float[16];
for (int c = 0; c < 4; c++)
for (int r = 0; r < 4; r++)
data[4 * c + r] = mat[r, c];
cmd.SetComputeFloatParams(shadercs, name, data);
}
public static void SetMatrixArrayCS(CommandBuffer cmd, ComputeShader shadercs, string name, Matrix4x4[] matArray)
{
int numMatrices = matArray.Length;
var data = new float[numMatrices * 16];
for (int n = 0; n < numMatrices; n++)
for (int c = 0; c < 4; c++)
for (int r = 0; r < 4; r++)
data[16 * n + 4 * c + r] = matArray[n][r, c];
cmd.SetComputeFloatParams(shadercs, name, data);
}
public static void SetVectorArrayCS(CommandBuffer cmd, ComputeShader shadercs, string name, Vector4[] vecArray)
{
int numVectors = vecArray.Length;
var data = new float[numVectors * 4];
for (int n = 0; n < numVectors; n++)
for (int i = 0; i < 4; i++)
data[4 * n + i] = vecArray[n][i];
cmd.SetComputeFloatParams(shadercs, name, data);
}
}
}
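These helpers flatten matrices into the column-major float arrays that SetComputeFloatParams expects. A usage sketch mirroring the TilePass.cs calls above (shader and parameter names as used there):

var cmd = new CommandBuffer { name = "Build per-tile light list" };
// Push the screen-space projection and its inverse to the light-list compute shader.
Utilities.SetMatrixCS(cmd, buildPerTileLightListShader, "g_mScrProjection", projscr);
Utilities.SetMatrixCS(cmd, buildPerTileLightListShader, "g_mInvScrProjection", invProjscr);
renderLoop.ExecuteCommandBuffer(cmd);
cmd.Dispose();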

Assets/ScriptableRenderLoop/ShaderLibrary/API/D3D11.hlsl (4 changes)


#define SAMPLER2D_HALF SAMPLER2D
#define SAMPLER2D_FLOAT SAMPLER2D
#define LOAD_TEXTURE2D(textureName, unCoord3) textureName.Load(unCoord3)
#define LOAD_TEXTURE2D_MSAA(textureName, unCoord3, sampleIndex) textureName.Load(unCoord3, sampleIndex)
#define LOAD_TEXTURE2D(textureName, unCoord2) textureName.Load(int3(unCoord2, 0))
#define LOAD_TEXTURE2D_MSAA(textureName, unCoord2, sampleIndex) textureName.Load(unCoord2, sampleIndex)

Assets/ScriptableRenderLoop/ShaderLibrary/AreaLighting.hlsl (8 changes)


float LineFpo(float tLDDL, float lrcpD, float rcpD)
{
// Compute: ((l / d) / (d * d + l * l)) + (1.0 / (d * d)) * atan(l / d).
return tLDDL + sq(rcpD) * atan(lrcpD);
return tLDDL + Square(rcpD) * atan(lrcpD);
}
float LineFwt(float tLDDL, float l)

float d = length(normal);
float l1rcpD = l1 * rcp(d);
float l2rcpD = l2 * rcp(d);
float tLDDL1 = l1rcpD * rcp(sq(d) + sq(l1));
float tLDDL2 = l2rcpD * rcp(sq(d) + sq(l2));
float tLDDL1 = l1rcpD * rcp(Square(d) + Square(l1));
float tLDDL2 = l2rcpD * rcp(Square(d) + Square(l2));
float intWt = LineFwt(tLDDL2, l2) - LineFwt(tLDDL1, l1);
float intP0 = LineFpo(tLDDL2, l2rcpD, rcp(d)) - LineFpo(tLDDL1, l1rcpD, rcp(d));
return intP0 * normal.z + intWt * tangent.z;

if (P2.z <= 0.0)
{
// Convention: 'P2' is above the horizon.
swap(P1, P2);
Swap(P1, P2);
}
// Recompute the length and the tangent in the new coordinate system.

Assets/ScriptableRenderLoop/ShaderLibrary/Common.hlsl (23 changes)


}
#endif // INTRINSIC_MINMAX3
float sq(float x)
float Square(float x)
void swap(inout float a, inout float b)
void Swap(inout float a, inout float b)
void swap(inout float2 a, inout float2 b)
void Swap(inout float2 a, inout float2 b)
void swap(inout float3 a, inout float3 b)
void Swap(inout float3 a, inout float3 b)
void swap(inout float4 a, inout float4 b)
void Swap(inout float4 a, inout float4 b)
{
float4 t = a; a = b; b = t;
}

return x * x * (3.0 - (2.0 * x));
}
const float3x3 k_identity3x3 = {1.0, 0.0, 0.0,
0.0, 1.0, 0.0,
0.0, 0.0, 1.0};
const float4x4 k_identity4x4 = {1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0 };
// ----------------------------------------------------------------------------
// World position reconstruction / transformation
// ----------------------------------------------------------------------------

// Normalized coordinates
float2 positionSS;
// Unnormalized coordinates
int2 unPositionSS;
uint2 unPositionSS;
};
// This function is used to provide an easy way to sample into a screen texture, either from a pixel or a compute shader.

#endif
coord.positionSS *= invScreenSize;
coord.unPositionSS = int2(unPositionSS);
coord.unPositionSS = uint2(unPositionSS);
return coord;
}

Assets/ScriptableRenderLoop/ShaderLibrary/ImageBasedLighting.hlsl (3 changes)


// For now disabled
#if 0
float m = PerceptualRoughnessToRoughness(perceptualRoughness); // m is the real roughness parameter
const float fEps = 1.192092896e-07F; // smallest such that 1.0+FLT_EPSILON != 1.0 (+1e-4h is NOT good here. is visibly very wrong)
float n = (2.0 / max(fEps, m*m)) - 2.0; // remap to spec power. See eq. 21 in --> https://dl.dropboxusercontent.com/u/55891920/papers/mm_brdf.pdf
float n = (2.0 / max(FLT_EPSILON, m*m)) - 2.0; // remap to spec power. See eq. 21 in --> https://dl.dropboxusercontent.com/u/55891920/papers/mm_brdf.pdf
n /= 4.0; // remap from the n_dot_h formulation to n_dot_r. See section "Pre-convolved Cube Maps vs Path Tracers" --> https://s3.amazonaws.com/docs.knaldtech.com/knald/1.0.0/lys_power_drops.html
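As a quick sanity check of the remap above (assuming PerceptualRoughnessToRoughness squares its input): perceptualRoughness = 0.5 gives m = 0.25, so n = 2 / (0.25 * 0.25) - 2 = 30 as an n_dot_h specular power, and n / 4 = 7.5 after the n_dot_r remap.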
