
Merge pull request #44 from Unity-Technologies/pixel-depth-offset

Prepare for depth pixel offset
main
GitHub · 8 years ago
Current commit: 9690c7b7
33 files changed, 352 insertions and 234 deletions
  1. Assets/ScriptableRenderLoop/HDRenderLoop/Debug/DebugViewMaterial.cs (3 changes)
  2. Assets/ScriptableRenderLoop/HDRenderLoop/Debug/DebugViewMaterial.cs.hlsl (4 changes)
  3. Assets/ScriptableRenderLoop/HDRenderLoop/Debug/Resources/DebugViewMaterialGBuffer.shader (13 changes)
  4. Assets/ScriptableRenderLoop/HDRenderLoop/Debug/Resources/DebugViewTiles.shader (46 changes)
  5. Assets/ScriptableRenderLoop/HDRenderLoop/HDRenderLoop.asset (1 change)
  6. Assets/ScriptableRenderLoop/HDRenderLoop/HDRenderLoop.asset.meta (2 changes)
  7. Assets/ScriptableRenderLoop/HDRenderLoop/HDRenderLoop.cs (61 changes)
  8. Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/Resources/Deferred.shader (18 changes)
  9. Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/TilePass/TilePass.cs (50 changes)
  10. Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/TilePass/TilePassLoop.hlsl (64 changes)
  11. Assets/ScriptableRenderLoop/HDRenderLoop/Material/Attributes.hlsl (25 changes)
  12. Assets/ScriptableRenderLoop/HDRenderLoop/Material/Builtin/BuiltinData.cs (4 changes)
  13. Assets/ScriptableRenderLoop/HDRenderLoop/Material/Builtin/BuiltinData.cs.hlsl (2 changes)
  14. Assets/ScriptableRenderLoop/HDRenderLoop/Material/Builtin/BuiltinData.hlsl (3 changes)
  15. Assets/ScriptableRenderLoop/HDRenderLoop/Material/Lit/Lit.hlsl (29 changes)
  16. Assets/ScriptableRenderLoop/HDRenderLoop/Material/Lit/LitData.hlsl (23 changes)
  17. Assets/ScriptableRenderLoop/HDRenderLoop/Material/Lit/LitDepthPass.hlsl (8 changes)
  18. Assets/ScriptableRenderLoop/HDRenderLoop/Material/Lit/LitMetaPass.hlsl (8 changes)
  19. Assets/ScriptableRenderLoop/HDRenderLoop/Material/Lit/LitSharePass.hlsl (8 changes)
  20. Assets/ScriptableRenderLoop/HDRenderLoop/Material/Lit/LitSurfaceData.hlsl (12 changes)
  21. Assets/ScriptableRenderLoop/HDRenderLoop/Material/Lit/LitVelocityPass.hlsl (8 changes)
  22. Assets/ScriptableRenderLoop/HDRenderLoop/Material/Unlit/Unlit.shader (6 changes)
  23. Assets/ScriptableRenderLoop/HDRenderLoop/Material/Unlit/UnlitData.hlsl (4 changes)
  24. Assets/ScriptableRenderLoop/HDRenderLoop/Material/Unlit/UnlitSharePass.hlsl (6 changes)
  25. Assets/ScriptableRenderLoop/HDRenderLoop/ShaderPass/ShaderPassDebugViewMaterial.hlsl (9 changes)
  26. Assets/ScriptableRenderLoop/HDRenderLoop/ShaderPass/ShaderPassDepthOnly.hlsl (9 changes)
  27. Assets/ScriptableRenderLoop/HDRenderLoop/ShaderPass/ShaderPassForward.hlsl (16 changes)
  28. Assets/ScriptableRenderLoop/HDRenderLoop/ShaderPass/ShaderPassForwardUnlit.hlsl (9 changes)
  29. Assets/ScriptableRenderLoop/HDRenderLoop/ShaderPass/ShaderPassGBuffer.hlsl (14 changes)
  30. Assets/ScriptableRenderLoop/HDRenderLoop/ShaderPass/ShaderPassLightTransport.hlsl (9 changes)
  31. Assets/ScriptableRenderLoop/HDRenderLoop/ShaderPass/ShaderPassVelocity.hlsl (9 changes)
  32. Assets/ScriptableRenderLoop/HDRenderLoop/Sky/Resources/SkyProcedural.shader (11 changes)
  33. Assets/ScriptableRenderLoop/ShaderLibrary/Common.hlsl (92 changes)

Assets/ScriptableRenderLoop/HDRenderLoop/Debug/DebugViewMaterial.cs (3 changes)


VertexNormalWS,
VertexColor,
VertexColorAlpha,
// caution if you add something here, it must start below
};
// Number must be contiguous

Depth = DebugViewVarying.VertexColor + 1,
Depth = DebugViewVarying.VertexColorAlpha + 1,
BakeDiffuseLighting,
}
}

Assets/ScriptableRenderLoop/HDRenderLoop/Debug/DebugViewMaterial.cs.hlsl (4 changes)


//
// UnityEngine.Experimental.ScriptableRenderLoop.Attributes.DebugViewGbuffer: static fields
//
#define DEBUGVIEWGBUFFER_DEPTH (9)
#define DEBUGVIEWGBUFFER_BAKE_DIFFUSE_LIGHTING (10)
#define DEBUGVIEWGBUFFER_DEPTH (10)
#define DEBUGVIEWGBUFFER_BAKE_DIFFUSE_LIGHTING (11)
#endif

Assets/ScriptableRenderLoop/HDRenderLoop/Debug/Resources/DebugViewMaterialGBuffer.shader (13 changes)


#include "Assets/ScriptableRenderLoop/HDRenderLoop/Debug/DebugViewMaterial.cs.hlsl"
#include "Assets/ScriptableRenderLoop/HDRenderLoop/Material/Material.hlsl"
float4x4 _InvViewProjMatrix;
DECLARE_GBUFFER_TEXTURE(_GBufferTexture);

float4 Frag(Varyings input) : SV_Target
{
float4 unPositionSS = input.positionCS; // as input we have the vpos
Coordinate coord = GetCoordinate(unPositionSS.xy, _ScreenSize.zw);
float depth = LOAD_TEXTURE2D(_CameraDepthTexture, coord.unPositionSS).x;
// input.positionCS is SV_Position
PositionInputs posInput = GetPositionInput(input.positionCS.xy, _ScreenSize.zw);
float depth = LOAD_TEXTURE2D(_CameraDepthTexture, posInput.unPositionSS).x;
UpdatePositionInput(depth, _InvViewProjMatrix, GetWorldToViewMatrix(), posInput);
FETCH_GBUFFER(gbuffer, _GBufferTexture, coord.unPositionSS);
FETCH_GBUFFER(gbuffer, _GBufferTexture, posInput.unPositionSS);
BSDFData bsdfData;
float3 bakeDiffuseLighting;
DECODE_FROM_GBUFFER(gbuffer, bsdfData, bakeDiffuseLighting);

if (_DebugViewMaterial == DEBUGVIEWGBUFFER_DEPTH)
{
float linearDepth = frac(LinearEyeDepth(depth, _ZBufferParams) * 0.1);
float linearDepth = frac(posInput.depthVS * 0.1);
result = linearDepth.xxx;
}
else if (_DebugViewMaterial == DEBUGVIEWGBUFFER_BAKE_DIFFUSE_LIGHTING)

Assets/ScriptableRenderLoop/HDRenderLoop/Debug/Resources/DebugViewTiles.shader (46 changes)


float4 Frag(float4 positionCS : SV_POSITION) : SV_Target
{
Coordinate coord = GetCoordinate(positionCS.xy, _ScreenSize.zw);
int2 pixelCoord = coord.unPositionSS.xy;
// positionCS is SV_Position
PositionInputs posInput = GetPositionInput(positionCS.xy, _ScreenSize.zw);
float depth = LOAD_TEXTURE2D(_CameraDepthTexture, posInput.unPositionSS).x;
UpdatePositionInput(depth, _InvViewProjMatrix, GetWorldToViewMatrix(), posInput);
int2 pixelCoord = posInput.unPositionSS.xy;
#ifdef USE_CLUSTERED_LIGHTLIST
// Perform same calculation than in deferred.shader
float depth = LOAD_TEXTURE2D(_CameraDepthTexture, coord.unPositionSS).x;
float3 positionWS = UnprojectToWorld(depth, coord.positionSS, _InvViewProjMatrix);
float linearDepth = TransformWorldToView(positionWS).z; // View space linear depth
#else
float linearDepth = 0.0; // unused
#endif
int n = 0;
for (int category = 0; category < LIGHTCATEGORY_COUNT; category++)

{
uint start;
uint count;
GetCountAndStart(coord, category, linearDepth, start, count);
GetCountAndStart(posInput, category, start, count);
float4 result = 0.0f;
float4 result = float4(0.0, 0.0, 0.0, 0.0);
result = OverlayHeatMap(int2(coord.unPositionSS.xy) & (TILE_SIZE - 1), n);
result = OverlayHeatMap(int2(posInput.unPositionSS.xy) & (TILE_SIZE - 1), n);
if(all(mouseTileCoord == tileCoord))
if (all(mouseTileCoord == tileCoord))
float4 result2 = float4(1, 1, 1, border ? 1.0f : 0.5);
float4 result2 = float4(1.0, 1.0, 1.0, border ? 1.0 : 0.5);
if(tileCoord.y < LIGHTCATEGORY_COUNT && tileCoord.x < maxLights + 3)
if (tileCoord.y < LIGHTCATEGORY_COUNT && tileCoord.x < maxLights + 3)
Coordinate mouseCoord = GetCoordinate(_MousePixelCoord, _ScreenSize.zw);
PositionInput mousePosInput = GetPositionInput(_MousePixelCoord, _ScreenSize.zw);
float depthMouse = LOAD_TEXTURE2D(_CameraDepthTexture, mousePosInput.unPositionSS).x;
UpdatePositionInput(depthMouse, _InvViewProjMatrix, GetWorldToViewMatrix(), mousePosInput);
GetCountAndStart(mouseCoord, category, linearDepth, start, count);
GetCountAndStart(mousePosInput, category, start, count);
float4 result2 = float4(.1,.1,.1,.9);
int2 fontCoord = int2(pixelCoord.x, offsetInTile.y);

n = FetchIndex(start, lightListIndex);
}
if(n >= 0)
if (n >= 0)
if(SampleDebugFontNumber(offsetInTile, n))
result2 = float4(0, 0, 0, 1);
if(SampleDebugFontNumber(offsetInTile + 1, n))
result2 = float4(1, 1, 1, 1);
if (SampleDebugFontNumber(offsetInTile, n))
result2 = float4(0.0, 0.0, 0.0, 1.0);
if (SampleDebugFontNumber(offsetInTile + 1, n))
result2 = float4(1.0, 1.0, 1.0, 1.0);
}
result = AlphaBlend(result, result2);

Assets/ScriptableRenderLoop/HDRenderLoop/HDRenderLoop.asset (1 change)


spotCookieSize: 128
pointCookieSize: 512
reflectionCubemapSize: 128
m_Dirty: 0

Assets/ScriptableRenderLoop/HDRenderLoop/HDRenderLoop.asset.meta (2 changes)


fileFormatVersion: 2
guid: 2400b74f5ce370c4481e5dc417d03703
timeCreated: 1481084818
timeCreated: 1481618742
licenseType: Pro
NativeFormatImporter:
userData:

Assets/ScriptableRenderLoop/HDRenderLoop/HDRenderLoop.cs (61 changes)


#if UNITY_EDITOR
UnityEditor.SupportedRenderingFeatures.active = UnityEditor.SupportedRenderingFeatures.Default;
#endif
UnityEngine.Rendering.GraphicsSettings.lightsUseLinearIntensity = previousLightsUseLinearIntensity;
UnityEngine.Rendering.GraphicsSettings.lightsUseCCT = previousLightsUseCCT;
// UnityEngine.Rendering.GraphicsSettings.lightsUseLinearIntensity = previousLightsUseLinearIntensity;
// UnityEngine.Rendering.GraphicsSettings.lightsUseCCT = previousLightsUseCCT;
}
void InitAndClearBuffer(Camera camera, RenderLoop renderLoop)

}
// This pass is use in case of forward opaque and deferred rendering. We need to render forward objects before tile lighting pass
void RenderForwardOpaqueDepth(CullResults cull, Camera camera, RenderLoop renderLoop)
void RenderForwardOnlyDepthPrepass(CullResults cull, Camera camera, RenderLoop renderLoop)
// If we have render a depth prepass, no need for this pass
if (debugParameters.useDepthPrepass)
// If we are forward only we don't need to render ForwardOpaqueDepth object
// But in case we request a prepass we render it
if (debugParameters.useForwardRenderingOnly && !debugParameters.useDepthPrepass)
// TODO: Use the render queue index to only send the forward opaque!
// or use the new MAterial.SetPassEnable ?
RenderOpaqueRenderList(cull, camera, renderLoop, "DepthOnly");
RenderOpaqueRenderList(cull, camera, renderLoop, "ForwardOnlyDepthOnly");
}
}

// Render GBuffer opaque
if (!debugParameters.useForwardRenderingOnly)
{
Vector4 screenSize = Utilities.ComputeScreenSize(camera);
var invViewProj = Utilities.GetViewProjectionMatrix(camera).inverse;
var screenSize = Utilities.ComputeScreenSize(camera);
m_DebugViewMaterialGBuffer.SetMatrix("_InvViewProjMatrix", invViewProj);
// m_gbufferManager.BindBuffers(m_DebugViewMaterialGBuffer);
// TODO: Bind depth textures

}
}
// Render material that are forward opaque only (like eye)
// TODO: Think about hair that could be render both as opaque and transparent...
void RenderForwardOnly(CullResults cullResults, Camera camera, RenderLoop renderLoop)
{
using (new Utilities.ProfilingSample("Forward Only Pass", renderLoop))
{
// Bind material data
m_LitRenderLoop.Bind();
Utilities.SetRenderTarget(renderLoop, m_CameraColorBufferRT, m_CameraDepthBufferRT);
m_lightLoop.RenderForward(camera, renderLoop, true);
RenderOpaqueRenderList(cullResults, camera, renderLoop, "ForwardOnly");
}
}
void RenderForwardUnlit(CullResults cullResults, Camera camera, RenderLoop renderLoop)
{
using (new Utilities.ProfilingSample("Forward Unlit Pass", renderLoop))

Shader.SetGlobalInt("_EnvLightSkyEnabled", 0);
}
var invViewProj = Utilities.GetViewProjectionMatrix(camera).inverse;
var screenSize = Utilities.ComputeScreenSize(camera);
var cmd = new CommandBuffer { name = "Push Global Parameters" };
cmd.SetGlobalVector("_ScreenSize", screenSize);
cmd.SetGlobalMatrix("_InvViewProjMatrix", invViewProj);
renderLoop.ExecuteCommandBuffer(cmd);
cmd.Dispose();
m_lightLoop.PushGlobalParams(camera, renderLoop);
}

RenderDepthPrepass(cullResults, camera, renderLoop);
RenderGBuffer(cullResults, camera, renderLoop);
// Forward opaque with deferred tile require that we fill the depth buffer
// Forward opaque with deferred/cluster tile require that we fill the depth buffer
// TODO: ask Morten why this pass is not before GBuffer ? Will make more sense and avoid
// to do gbuffer pass on unseen mesh.
// TODO: how do we select only the object that must be render forward ?
// this is all object with gbuffer pass disabled ?
//RenderForwardOpaqueDepth(cullResults, camera, renderLoop);
RenderForwardOnlyDepthPrepass(cullResults, camera, renderLoop);
RenderGBuffer(cullResults, camera, renderLoop);
if (debugParameters.debugViewMaterial != 0)
{

}
RenderDeferredLighting(camera, renderLoop);
// TODO: enable this for tile forward opaque
// RenderForward(cullResults, camera, renderLoop, true);
RenderForward(cullResults, camera, renderLoop, true);
RenderForwardOnly(cullResults, camera, renderLoop);
RenderSky(camera, renderLoop);

Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/Resources/Deferred.shader (18 changes)


float4 Frag(Varyings input) : SV_Target
{
float4 unPositionSS = input.positionCS; // as input we have the vpos
Coordinate coord = GetCoordinate(unPositionSS.xy, _ScreenSize.zw);
// No need to manage inverse depth, this is handled by the projection matrix
float depth = LOAD_TEXTURE2D(_CameraDepthTexture, coord.unPositionSS).x;
float3 positionWS = UnprojectToWorld(depth, coord.positionSS, _InvViewProjMatrix);
float3 V = GetWorldSpaceNormalizeViewDir(positionWS);
// input.positionCS is SV_Position
PositionInputs posInput = GetPositionInput(input.positionCS.xy, _ScreenSize.zw);
float depth = LOAD_TEXTURE2D(_CameraDepthTexture, posInput.unPositionSS).x;
UpdatePositionInput(depth, _InvViewProjMatrix, GetWorldToViewMatrix(), posInput);
float3 V = GetWorldSpaceNormalizeViewDir(posInput.positionWS);
FETCH_GBUFFER(gbuffer, _GBufferTexture, coord.unPositionSS);
FETCH_GBUFFER(gbuffer, _GBufferTexture, posInput.unPositionSS);
PreLightData preLightData = GetPreLightData(V, positionWS, coord, bsdfData);
PreLightData preLightData = GetPreLightData(V, posInput, bsdfData);
LightLoop(V, positionWS, coord, preLightData, bsdfData, bakeDiffuseLighting, diffuseLighting, specularLighting);
LightLoop(V, posInput, preLightData, bsdfData, bakeDiffuseLighting, diffuseLighting, specularLighting);
return float4(diffuseLighting + specularLighting, 1.0);
}
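
Because the hunk above interleaves the old Coordinate-based lines with their PositionInputs replacements, the new deferred fragment is easier to follow written out in one piece. The following is a sketch assembled only from the changed lines shown above and in the neighbouring hunks (the local diffuseLighting/specularLighting declarations are not shown by the diff and are filled in here), not the verbatim file:

    // Sketch: the new Deferred.shader fragment flow after this commit
    float4 Frag(Varyings input) : SV_Target
    {
        // input.positionCS is SV_Position
        PositionInputs posInput = GetPositionInput(input.positionCS.xy, _ScreenSize.zw);
        float depth = LOAD_TEXTURE2D(_CameraDepthTexture, posInput.unPositionSS).x;
        UpdatePositionInput(depth, _InvViewProjMatrix, GetWorldToViewMatrix(), posInput);

        float3 V = GetWorldSpaceNormalizeViewDir(posInput.positionWS);

        FETCH_GBUFFER(gbuffer, _GBufferTexture, posInput.unPositionSS);
        BSDFData bsdfData;
        float3 bakeDiffuseLighting;
        DECODE_FROM_GBUFFER(gbuffer, bsdfData, bakeDiffuseLighting);

        PreLightData preLightData = GetPreLightData(V, posInput, bsdfData);

        float3 diffuseLighting, specularLighting;
        LightLoop(V, posInput, preLightData, bsdfData, bakeDiffuseLighting, diffuseLighting, specularLighting);

        return float4(diffuseLighting + specularLighting, 1.0);
    }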

Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/TilePass/TilePass.cs (50 changes)


m_DebugViewTilesMaterial = Utilities.CreateEngineMaterial("Hidden/HDRenderLoop/DebugViewTiles");
m_SingleDeferredMaterial = Utilities.CreateEngineMaterial("Hidden/HDRenderLoop/Deferred");
m_SingleDeferredMaterial.EnableKeyword("LIGHTLOOP_SINGLE_PASS");
m_SingleDeferredMaterial.EnableKeyword("LIGHTLOOP_SINGLE_PASS");
UnityEditor.SceneView.onSceneGUIDelegate += OnSceneGUI;
UnityEditor.SceneView.onSceneGUIDelegate += OnSceneGUI;
{
{
Utilities.SafeRelease(s_DirectionalLightDatas);
Utilities.SafeRelease(s_LightDatas);
Utilities.SafeRelease(s_EnvLightDatas);

loop.ExecuteCommandBuffer(cmd);
cmd.Dispose();
}
#if UNITY_EDITOR
private Vector2 m_mousePosition = Vector2.zero;
private void OnSceneGUI(UnityEditor.SceneView sceneview)
{
m_mousePosition = Event.current.mousePosition;
}
}
#if UNITY_EDITOR
private Vector2 m_mousePosition = Vector2.zero;
private void OnSceneGUI(UnityEditor.SceneView sceneview)
{
m_mousePosition = Event.current.mousePosition;
}
var screenSize = Utilities.ComputeScreenSize(camera);
Vector2 mousePixelCoord = Input.mousePosition;
#if UNITY_EDITOR
if (!UnityEditor.EditorApplication.isPlayingOrWillChangePlaymode)
{
mousePixelCoord = m_mousePosition;
mousePixelCoord.y = (screenSize.y - 1.0f) - mousePixelCoord.y;
}
#endif
var screenSize = Utilities.ComputeScreenSize(camera);
Vector2 mousePixelCoord = Input.mousePosition;
#if UNITY_EDITOR
if (!UnityEditor.EditorApplication.isPlayingOrWillChangePlaymode)
{
mousePixelCoord = m_mousePosition;
mousePixelCoord.y = (screenSize.y - 1.0f) - mousePixelCoord.y;
}
#endif
m_DeferredDirectMaterial.SetMatrix("_InvViewProjMatrix", invViewProj);
m_DeferredDirectMaterial.SetVector("_ScreenSize", screenSize);
m_DeferredDirectMaterial.SetInt("_SrcBlend", (int)UnityEngine.Rendering.BlendMode.One);

Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/TilePass/TilePassLoop.hlsl (64 changes)


#ifdef LIGHTLOOP_TILE_PASS
// Calculate the offset in global light index light for current light category
int GetTileOffset(Coordinate coord, uint lightCategory)
int GetTileOffset(PositionInputs posInput, uint lightCategory)
uint2 tileIndex = coord.unPositionSS / TILE_SIZE;
uint2 tileIndex = posInput.unPositionSS / TILE_SIZE;
void GetCountAndStartTile(Coordinate coord, uint lightCategory, float linearDepth, out uint start, out uint lightCount)
void GetCountAndStartTile(PositionInputs posInput, uint lightCategory, out uint start, out uint lightCount)
const int tileOffset = GetTileOffset(coord, lightCategory);
const int tileOffset = GetTileOffset(posInput, lightCategory);
// The first entry inside a tile is the number of light for lightCategory (thus the +0)
lightCount = g_vLightListGlobal[DWORD_PER_TILE * tileOffset + 0] & 0xffff;

#ifdef USE_FPTL_LIGHTLIST
void GetCountAndStart(Coordinate coord, uint lightCategory, float linearDepth, out uint start, out uint lightCount)
void GetCountAndStart(PositionInputs posInput, uint lightCategory, out uint start, out uint lightCount)
GetCountAndStartTile(coord, lightCategory, linearDepth, start, lightCount);
GetCountAndStartTile(posInput, lightCategory, start, lightCount);
}
uint FetchIndex(uint tileOffset, uint lightIndex)

#include "ClusteredUtils.hlsl"
void GetCountAndStartCluster(Coordinate coord, uint lightCategory, float linearDepth, out uint start, out uint lightCount)
void GetCountAndStartCluster(PositionInputs posInput, uint lightCategory, out uint start, out uint lightCount)
uint2 tileIndex = coord.unPositionSS / TILE_SIZE;
uint2 tileIndex = posInput.unPositionSS / TILE_SIZE;
float logBase = g_fClustBase;
if (g_isLogBaseBufferEnabled)

int clustIdx = SnapToClusterIdxFlex(linearDepth, logBase, g_isLogBaseBufferEnabled != 0);
int clustIdx = SnapToClusterIdxFlex(posInput.depthVS, logBase, g_isLogBaseBufferEnabled != 0);
int nrClusters = (1 << g_iLog2NumClusters);
const int idx = ((lightCategory * nrClusters + clustIdx) * _NumTileY + tileIndex.y) * _NumTileX + tileIndex.x;

return g_vLightListGlobal[tileOffset + lightIndex];
}
void GetCountAndStart(Coordinate coord, uint lightCategory, float linearDepth, out uint start, out uint lightCount)
void GetCountAndStart(PositionInputs posInput, uint lightCategory, out uint start, out uint lightCount)
GetCountAndStartTile(coord, lightCategory, linearDepth, start, lightCount);
GetCountAndStartTile(posInput, lightCategory, start, lightCount);
GetCountAndStartCluster(coord, lightCategory, linearDepth, start, lightCount);
GetCountAndStartCluster(posInput, lightCategory, start, lightCount);
}
uint FetchIndex(uint tileOffset, uint lightIndex)

#endif
// bakeDiffuseLighting is part of the prototype so a user is able to implement a "base pass" with GI and multipass direct light (aka old unity rendering path)
void LightLoop( float3 V, float3 positionWS, Coordinate coord, PreLightData prelightData, BSDFData bsdfData, float3 bakeDiffuseLighting,
void LightLoop( float3 V, PositionInputs posInput, PreLightData prelightData, BSDFData bsdfData, float3 bakeDiffuseLighting,
#ifdef USE_CLUSTERED_LIGHTLIST
float linearDepth = TransformWorldToView(positionWS).z; // View space linear depth
#else
float linearDepth = 0.0; // unused
#endif
LightLoopContext context;
ZERO_INITIALIZE(LightLoopContext, context);

{
float3 localDiffuseLighting, localSpecularLighting;
EvaluateBSDF_Directional( context, V, positionWS, prelightData, _DirectionalLightDatas[i], bsdfData,
EvaluateBSDF_Directional( context, V, posInput, prelightData, _DirectionalLightDatas[i], bsdfData,
localDiffuseLighting, localSpecularLighting);
diffuseLighting += localDiffuseLighting;

// TODO: Convert the for loop below to a while on each type as we know we are sorted!
uint punctualLightStart;
uint punctualLightCount;
GetCountAndStart(coord, LIGHTCATEGORY_PUNCTUAL, linearDepth, punctualLightStart, punctualLightCount);
GetCountAndStart(posInput, LIGHTCATEGORY_PUNCTUAL, punctualLightStart, punctualLightCount);
EvaluateBSDF_Punctual( context, V, positionWS, prelightData, _LightDatas[FetchIndex(punctualLightStart, i)], bsdfData,
EvaluateBSDF_Punctual( context, V, posInput, prelightData, _LightDatas[FetchIndex(punctualLightStart, i)], bsdfData,
localDiffuseLighting, localSpecularLighting);
diffuseLighting += localDiffuseLighting;

// TODO: Convert the for loop below to a while on each type as we know we are sorted!
uint areaLightStart;
uint areaLightCount;
GetCountAndStart(coord, LIGHTCATEGORY_AREA, linearDepth, areaLightStart, areaLightCount);
GetCountAndStart(posInput, LIGHTCATEGORY_AREA, areaLightStart, areaLightCount);
for (i = 0; i < areaLightCount; ++i)
{
float3 localDiffuseLighting, localSpecularLighting;

if(_LightDatas[areaIndex].lightType == GPULIGHTTYPE_LINE)
{
EvaluateBSDF_Line( context, V, positionWS, prelightData, _LightDatas[areaIndex], bsdfData,
EvaluateBSDF_Line( context, V, posInput, prelightData, _LightDatas[areaIndex], bsdfData,
EvaluateBSDF_Area( context, V, positionWS, prelightData, _LightDatas[areaIndex], bsdfData,
EvaluateBSDF_Area( context, V, posInput, prelightData, _LightDatas[areaIndex], bsdfData,
localDiffuseLighting, localSpecularLighting);
}

// The sky is a single cubemap texture separate from the reflection probe texture array (different resolution and compression)
context.sampleReflection = SINGLE_PASS_CONTEXT_SAMPLE_SKY;
EnvLightData envLightSky = InitSkyEnvLightData(0); // The sky data are generated on the fly so the compiler can optimize the code
EvaluateBSDF_Env(context, V, positionWS, prelightData, envLightSky, bsdfData, localDiffuseLighting, localSpecularLighting, weight);
EvaluateBSDF_Env(context, V, posInput, prelightData, envLightSky, bsdfData, localDiffuseLighting, localSpecularLighting, weight);
iblDiffuseLighting = lerp(iblDiffuseLighting, localDiffuseLighting, weight.x); // Should be remove by the compiler if it is smart as all is constant 0
iblSpecularLighting = lerp(iblSpecularLighting, localSpecularLighting, weight.y);
}

GetCountAndStart(coord, LIGHTCATEGORY_ENV, linearDepth, envLightStart, envLightCount);
GetCountAndStart(posInput, LIGHTCATEGORY_ENV, envLightStart, envLightCount);
for (i = 0; i < envLightCount; ++i)
{

EvaluateBSDF_Env(context, V, positionWS, prelightData, _EnvLightDatas[FetchIndex(envLightStart, i)], bsdfData, localDiffuseLighting, localSpecularLighting, weight);
EvaluateBSDF_Env(context, V, posInput, prelightData, _EnvLightDatas[FetchIndex(envLightStart, i)], bsdfData, localDiffuseLighting, localSpecularLighting, weight);
iblDiffuseLighting = lerp(iblDiffuseLighting, localDiffuseLighting, weight.x); // Should be remove by the compiler if it is smart as all is constant 0
iblSpecularLighting = lerp(iblSpecularLighting, localSpecularLighting, weight.y);
}

#else // LIGHTLOOP_SINGLE_PASS
// bakeDiffuseLighting is part of the prototype so a user is able to implement a "base pass" with GI and multipass direct light (aka old unity rendering path)
void LightLoop( float3 V, float3 positionWS, Coordinate coord, PreLightData prelightData, BSDFData bsdfData, float3 bakeDiffuseLighting,
void LightLoop( float3 V, PositionInputs posInput, PreLightData prelightData, BSDFData bsdfData, float3 bakeDiffuseLighting,
out float3 diffuseLighting,
out float3 specularLighting)
{

{
float3 localDiffuseLighting, localSpecularLighting;
EvaluateBSDF_Directional( context, V, positionWS, prelightData, _DirectionalLightDatas[i], bsdfData,
EvaluateBSDF_Directional( context, V, posInput, prelightData, _DirectionalLightDatas[i], bsdfData,
localDiffuseLighting, localSpecularLighting);
diffuseLighting += localDiffuseLighting;

{
float3 localDiffuseLighting, localSpecularLighting;
EvaluateBSDF_Punctual( context, V, positionWS, prelightData, _LightDatas[i], bsdfData,
EvaluateBSDF_Punctual( context, V, posInput, prelightData, _LightDatas[i], bsdfData,
localDiffuseLighting, localSpecularLighting);
diffuseLighting += localDiffuseLighting;

if (_LightDatas[i].lightType == GPULIGHTTYPE_LINE)
{
EvaluateBSDF_Line( context, V, positionWS, prelightData, _LightDatas[i], bsdfData,
EvaluateBSDF_Line( context, V, posInput, prelightData, _LightDatas[i], bsdfData,
EvaluateBSDF_Area( context, V, positionWS, prelightData, _LightDatas[i], bsdfData,
EvaluateBSDF_Area( context, V, posInput, prelightData, _LightDatas[i], bsdfData,
localDiffuseLighting, localSpecularLighting);
}

// The sky is a single cubemap texture separate from the reflection probe texture array (different resolution and compression)
context.sampleReflection = SINGLE_PASS_CONTEXT_SAMPLE_SKY;
EnvLightData envLightSky = InitSkyEnvLightData(0); // The sky data are generated on the fly so the compiler can optimize the code
EvaluateBSDF_Env(context, V, positionWS, prelightData, envLightSky, bsdfData, localDiffuseLighting, localSpecularLighting, weight);
EvaluateBSDF_Env(context, V, posInput, prelightData, envLightSky, bsdfData, localDiffuseLighting, localSpecularLighting, weight);
iblDiffuseLighting = lerp(iblDiffuseLighting, localDiffuseLighting, weight.x); // Should be remove by the compiler if it is smart as all is constant 0
iblSpecularLighting = lerp(iblSpecularLighting, localSpecularLighting, weight.y);
}

float3 localDiffuseLighting, localSpecularLighting;
float2 weight;
context.sampleReflection = SINGLE_PASS_CONTEXT_SAMPLE_REFLECTION_PROBES;
EvaluateBSDF_Env(context, V, positionWS, prelightData, _EnvLightDatas[i], bsdfData, localDiffuseLighting, localSpecularLighting, weight);
EvaluateBSDF_Env(context, V, posInput, prelightData, _EnvLightDatas[i], bsdfData, localDiffuseLighting, localSpecularLighting, weight);
iblDiffuseLighting = lerp(iblDiffuseLighting, localDiffuseLighting, weight.x); // Should be remove by the compiler if it is smart as all is constant 0
iblSpecularLighting = lerp(iblSpecularLighting, localSpecularLighting, weight.y);
}

Assets/ScriptableRenderLoop/HDRenderLoop/Material/Attributes.hlsl (25 changes)


//-------------------------------------------------------------------------------------
// FragInput
// FragInputs
struct FragInput
struct FragInputs
float4 unPositionSS; // This is the position return by VPOS (That is name positionCS in PackedVarying), only xy is use
// Contain value return by SV_POSITION (That is name positionCS in PackedVarying).
// xy: unormalized screen position (offset by 0.5), z: device depth, w: depth in view space
// Note: SV_POSITION is the result of the clip space position provide to the vertex shaders that is transform by the viewport
float4 unPositionSS; // In case depth offset is use, positionWS.w is equal to depth offset
float3 positionWS;
float2 texCoord0;
float2 texCoord1;

float4 vertexColor;
// For velocity
// Note: Z component is not use
float4 positionCS; // This is the clip spae position. Warning, do not confuse with the value of positionCS in PackedVarying which is VPOS and store in unPositionSS
// CAUTION: Only use with velocity currently, null else
// Note: Z component is not use currently
// This is the clip space position. Warning, do not confuse with the value of positionCS in PackedVarying which is SV_POSITION and store in unPositionSS
float4 positionCS;
// end velocity specific
void GetVaryingsDataDebug(uint paramId, FragInput input, inout float3 result, inout bool needLinearToSRGB)
void ApplyDepthOffsetVS(float depthOffset, inout FragInputs fragInput)
{
fragInput.positionCS.w += depthOffset;
fragInput.previousPositionCS.w += depthOffset;
}
void GetVaryingsDataDebug(uint paramId, FragInputs input, inout float3 result, inout bool needLinearToSRGB)
{
switch (paramId)
{
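
The ApplyDepthOffsetVS overload added here only bumps the clip-space w of the two positions FragInputs carries for velocity; nothing in this commit calls it yet. Below is a hypothetical call site, sketched against the velocity-pass hunks further down; only the type and helper names come from the diff, and the placement relative to EncodeVelocity is illustrative:

    // Hypothetical sketch (no caller exists in this commit): a velocity-style pass
    // applying the material's depth offset to its clip-space positions.
    float4 Frag(PackedVaryings packedInput) : SV_Target
    {
        FragInputs input = UnpackVaryings(packedInput);
        PositionInputs posInput = GetPositionInput(input.unPositionSS.xy, _ScreenSize.zw);
        UpdatePositionInput(input.unPositionSS.z, input.unPositionSS.w, input.positionWS, posInput);
        float3 V = GetWorldSpaceNormalizeViewDir(input.positionWS);

        SurfaceData surfaceData;
        BuiltinData builtinData;
        GetSurfaceAndBuiltinData(input, V, posInput, surfaceData, builtinData);

        // New helper from this hunk: add the offset to input.positionCS.w and
        // input.previousPositionCS.w (a real pass would do this before deriving
        // velocity from those positions).
        ApplyDepthOffsetVS(builtinData.depthOffset, input);

        float4 outBuffer;
        EncodeVelocity(builtinData.velocity, outBuffer);
        return outBuffer;
    }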

Assets/ScriptableRenderLoop/HDRenderLoop/Material/Builtin/BuiltinData.cs (4 changes)


public Vector2 distortion;
[SurfaceDataAttributes("Distortion Blur")]
public float distortionBlur; // Define the color buffer mipmap level to use
// Depth
[SurfaceDataAttributes("Depth Offset")]
public float depthOffset; // define the depth in unity unit to add in Z forward direction
};
//-----------------------------------------------------------------------------

Assets/ScriptableRenderLoop/HDRenderLoop/Material/Builtin/BuiltinData.cs.hlsl (2 changes)


#define DEBUGVIEW_BUILTIN_BUILTINDATA_VELOCITY (104)
#define DEBUGVIEW_BUILTIN_BUILTINDATA_DISTORTION (105)
#define DEBUGVIEW_BUILTIN_BUILTINDATA_DISTORTION_BLUR (106)
#define DEBUGVIEW_BUILTIN_BUILTINDATA_DEPTH_OFFSET (107)
//
// UnityEngine.Experimental.ScriptableRenderLoop.Builtin.LighTransportData: static fields

float2 velocity;
float2 distortion;
float distortionBlur;
float depthOffset;
};
// Generated from UnityEngine.Experimental.ScriptableRenderLoop.Builtin.LighTransportData

Assets/ScriptableRenderLoop/HDRenderLoop/Material/Builtin/BuiltinData.hlsl (3 changes)


case DEBUGVIEW_BUILTIN_BUILTINDATA_DISTORTION_BLUR:
result = builtinData.distortionBlur.xxx;
break;
case DEBUGVIEW_BUILTIN_BUILTINDATA_DEPTH_OFFSET:
result = builtinData.depthOffset.xxx * 10.0; // * 10 assuming 1 unity unity is 1m
break;
}
}

Assets/ScriptableRenderLoop/HDRenderLoop/Material/Lit/Lit.hlsl (29 changes)


float2 unPositionSS;
};
PreLightData GetPreLightData(float3 V, float3 positionWS, Coordinate coord, BSDFData bsdfData)
PreLightData GetPreLightData(float3 V, PositionInputs posInput, BSDFData bsdfData)
{
PreLightData preLightData;

preLightData.iblDirWS = GetSpecularDominantDir(bsdfData.normalWS, iblR, bsdfData.roughness);
// #if SHADERPASS == SHADERPASS_GBUFFER
// preLightData.ambientOcclusion = LOAD_TEXTURE2D(_AmbientOcclusion, coord.unPositionSS).x;
// preLightData.ambientOcclusion = LOAD_TEXTURE2D(_AmbientOcclusion, posInput.unPositionSS).x;
// #endif
// Area light specific

preLightData.ltcDisneyDiffuseMagnitude = ltcMagnitude.b;
// Shadow
preLightData.unPositionSS = coord.unPositionSS;
preLightData.unPositionSS = posInput.unPositionSS;
return preLightData;
}

//-----------------------------------------------------------------------------
void EvaluateBSDF_Directional( LightLoopContext lightLoopContext,
float3 V, float3 positionWS, PreLightData preLightData, DirectionalLightData lightData, BSDFData bsdfData,
float3 V, PositionInputs posInput, PreLightData preLightData, DirectionalLightData lightData, BSDFData bsdfData,
float3 positionWS = posInput.positionWS;
float3 L = -lightData.forward; // Lights are pointing backward in Unity
float illuminance = saturate(dot(bsdfData.normalWS, L));

coord = coord * 0.5 + 0.5;
// Tile the texture if the 'repeat' wrap mode is enabled.
if (lightData.tileCookie) coord = frac(coord);
if (lightData.tileCookie)
coord = frac(coord);
float4 cookie = SampleCookie2D(lightLoopContext, coord, lightData.cookieIndex);

//-----------------------------------------------------------------------------
void EvaluateBSDF_Punctual( LightLoopContext lightLoopContext,
float3 V, float3 positionWS, PreLightData preLightData, LightData lightData, BSDFData bsdfData,
float3 V, PositionInputs posInput, PreLightData preLightData, LightData lightData, BSDFData bsdfData,
float3 positionWS = posInput.positionWS;
// All punctual light type in the same formula, attenuation is neutral depends on light type.
// light.positionWS is the normalize light direction in case of directional light and invSqrAttenuationRadius is 0
// mean dot(unL, unL) = 1 and mean GetDistanceAttenuation() will return 1

//-----------------------------------------------------------------------------
void EvaluateBSDF_Line(LightLoopContext lightLoopContext,
float3 V, float3 positionWS,
float3 V, PositionInputs posInput,
float3 positionWS = posInput.positionWS;
#ifdef LIT_DISPLAY_REFERENCE_AREA
IntegrateBSDF_LineRef(V, positionWS, preLightData, lightData, bsdfData,
diffuseLighting, specularLighting);

// #define ELLIPSOIDAL_ATTENUATION
void EvaluateBSDF_Area(LightLoopContext lightLoopContext,
float3 V, float3 positionWS,
float3 V, PositionInputs posInput,
float3 positionWS = posInput.positionWS;
#ifdef LIT_DISPLAY_REFERENCE_AREA
IntegrateBSDF_AreaRef(V, positionWS, preLightData, lightData, bsdfData,
diffuseLighting, specularLighting);

// _preIntegratedFGD and _CubemapLD are unique for each BRDF
void EvaluateBSDF_Env( LightLoopContext lightLoopContext,
float3 V, float3 positionWS, PreLightData preLightData, EnvLightData lightData, BSDFData bsdfData,
float3 V, PositionInputs posInput, PreLightData preLightData, EnvLightData lightData, BSDFData bsdfData,
float3 positionWS = posInput.positionWS;
#ifdef LIT_DISPLAY_REFERENCE_IBL
specularLighting = IntegrateSpecularGGXIBLRef(lightLoopContext, V, lightData, bsdfData);

Assets/ScriptableRenderLoop/HDRenderLoop/Material/Lit/LitData.hlsl (23 changes)


//-------------------------------------------------------------------------------------
#include "../MaterialUtilities.hlsl"
void GetBuiltinData(FragInput input, SurfaceData surfaceData, float alpha, out BuiltinData builtinData)
void GetBuiltinData(FragInputs input, SurfaceData surfaceData, float alpha, out BuiltinData builtinData)
{
// Builtin Data
builtinData.opacity = alpha;

builtinData.distortion = float2(0.0, 0.0);
builtinData.distortionBlur = 0.0;
builtinData.depthOffset = 0.0;
}
// Gather all kind of mapping in one struct, allow to improve code readability

#define ADD_ZERO_IDX(Name) Name
#include "LitSurfaceData.hlsl"
void GetSurfaceAndBuiltinData(FragInput input, out SurfaceData surfaceData, out BuiltinData builtinData)
void GetSurfaceAndBuiltinData(FragInputs input, float3 V, inout PositionInputs posInput, out SurfaceData surfaceData, out BuiltinData builtinData)
{
LayerTexCoord layerTexCoord;
ZERO_INITIALIZE(LayerTexCoord, layerTexCoord);

isTriplanar = true;
#endif
ComputeLayerTexCoord(input, isTriplanar, layerTexCoord);
ApplyDisplacement(input, layerTexCoord);
// Transform view vector in tangent space
float3 viewDirTS = TransformWorldToTangent(V, input.tangentToWorld);
ApplyDisplacement(input, viewDirTS, layerTexCoord);
float alpha = GetSurfaceData(input, layerTexCoord, surfaceData);
GetBuiltinData(input, surfaceData, alpha, builtinData);

#define SURFACEDATA_BLEND_SCALAR(surfaceData, name, mask) BlendLayeredScalar(surfaceData##0.##name, surfaceData##1.##name, surfaceData##2.##name, surfaceData##3.##name, mask);
#define PROP_BLEND_SCALAR(name, mask) BlendLayeredScalar(name##0, name##1, name##2, name##3, mask);
void GetSurfaceAndBuiltinData(FragInput input, out SurfaceData surfaceData, out BuiltinData builtinData)
void GetSurfaceAndBuiltinData(FragInputs input, float3 V, inout PositionInputs posInput, out SurfaceData surfaceData, out BuiltinData builtinData)
{
LayerTexCoord layerTexCoord;
ZERO_INITIALIZE(LayerTexCoord, layerTexCoord);

isTriplanar = true;
#endif
ComputeLayerTexCoord3(input, isTriplanar, layerTexCoord);
ApplyDisplacement0(input, layerTexCoord);
ApplyDisplacement1(input, layerTexCoord);
ApplyDisplacement2(input, layerTexCoord);
ApplyDisplacement3(input, layerTexCoord);
// Transform view vector in tangent space
float3 viewDirTS = TransformWorldToTangent(V, input.tangentToWorld);
ApplyDisplacement0(input, viewDirTS, layerTexCoord);
ApplyDisplacement1(input, viewDirTS, layerTexCoord);
ApplyDisplacement2(input, viewDirTS, layerTexCoord);
ApplyDisplacement3(input, viewDirTS, layerTexCoord);
SurfaceData surfaceData0;
SurfaceData surfaceData1;
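
Read through the interleaving, the signature change in this file is: GetSurfaceAndBuiltinData now takes the view vector V and an inout PositionInputs, and the tangent-space view direction used for parallax is computed once from V rather than re-derived inside each ApplyDisplacement. A sketch of the single-layer path, assembled from the changed lines above with the triplanar selection elided and marked as such:

    // Sketch of the reworked single-layer entry point after this commit
    void GetSurfaceAndBuiltinData(FragInputs input, float3 V, inout PositionInputs posInput,
                                  out SurfaceData surfaceData, out BuiltinData builtinData)
    {
        LayerTexCoord layerTexCoord;
        ZERO_INITIALIZE(LayerTexCoord, layerTexCoord);

        bool isTriplanar = false; // selected by the mapping mode in the real shader
        ComputeLayerTexCoord(input, isTriplanar, layerTexCoord);

        // View vector transformed to tangent space once, then handed to displacement
        // (previously ApplyDisplacement derived it internally from input.positionWS).
        float3 viewDirTS = TransformWorldToTangent(V, input.tangentToWorld);
        ApplyDisplacement(input, viewDirTS, layerTexCoord);

        float alpha = GetSurfaceData(input, layerTexCoord, surfaceData);
        GetBuiltinData(input, surfaceData, alpha, builtinData);
    }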

Assets/ScriptableRenderLoop/HDRenderLoop/Material/Lit/LitDepthPass.hlsl (8 changes)


return output;
}
FragInput UnpackVaryings(PackedVaryings input)
FragInputs UnpackVaryings(PackedVaryings input)
FragInput output;
ZERO_INITIALIZE(FragInput, output);
FragInputs output;
ZERO_INITIALIZE(FragInputs, output);
output.unPositionSS = input.positionCS; // as input we have the vpos
output.unPositionSS = input.positionCS; // input.positionCS is SV_Position
#if NEED_TANGENT_TO_WORLD
output.positionWS.xyz = input.interpolators[0].xyz;

Assets/ScriptableRenderLoop/HDRenderLoop/Material/Lit/LitMetaPass.hlsl (8 changes)


return output;
}
FragInput UnpackVaryings(PackedVaryings input)
FragInputs UnpackVaryings(PackedVaryings input)
FragInput output;
ZERO_INITIALIZE(FragInput, output);
FragInputs output;
ZERO_INITIALIZE(FragInputs, output);
output.unPositionSS = input.positionCS; // as input we have the vpos
output.unPositionSS = input.positionCS; // input.positionCS is SV_Position
output.texCoord0 = input.interpolators[0].xy;
output.texCoord1 = input.interpolators[0].zw;

Assets/ScriptableRenderLoop/HDRenderLoop/Material/Lit/LitSharePass.hlsl (8 changes)


return output;
}
FragInput UnpackVaryings(PackedVaryings input)
FragInputs UnpackVaryings(PackedVaryings input)
FragInput output;
ZERO_INITIALIZE(FragInput, output);
FragInputs output;
ZERO_INITIALIZE(FragInputs, output);
output.unPositionSS = input.positionCS; // as input we have the vpos
output.unPositionSS = input.positionCS; // input.positionCS is SV_Position
output.positionWS.xyz = input.interpolators[0].xyz;
output.tangentToWorld[0] = input.interpolators[1].xyz;
output.tangentToWorld[1] = input.interpolators[2].xyz;

Assets/ScriptableRenderLoop/HDRenderLoop/Material/Lit/LitSurfaceData.hlsl (12 changes)


void ADD_IDX(ComputeLayerTexCoord)(FragInput input, bool isTriplanar, inout LayerTexCoord layerTexCoord)
void ADD_IDX(ComputeLayerTexCoord)(FragInputs input, bool isTriplanar, inout LayerTexCoord layerTexCoord)
{
// TODO: Do we want to manage local or world triplanar/planar
//float3 position = localTriplanar ? TransformWorldToObject(input.positionWS) : input.positionWS;

ADD_IDX(layerTexCoord.details).uvXY = TRANSFORM_TEX(ADD_IDX(layerTexCoord.base).uvXY, ADD_IDX(_DetailMap));
}
void ADD_IDX(ApplyDisplacement)(inout FragInput input, inout LayerTexCoord layerTexCoord)
void ADD_IDX(ApplyDisplacement)(inout FragInputs input, float3 viewDirTS, inout LayerTexCoord layerTexCoord)
// Transform view vector in tangent space
// Hope the compiler can optimize this in case of multiple layer
float3 V = GetWorldSpaceNormalizeViewDir(input.positionWS);
float3 viewDirTS = TransformWorldToTangent(V, input.tangentToWorld);
float height = SAMPLE_LAYER_TEXTURE2D(ADD_IDX(_HeightMap), ADD_ZERO_IDX(sampler_HeightMap), ADD_IDX(layerTexCoord.base)).r * ADD_IDX(_HeightScale) + ADD_IDX(_HeightBias);
float2 offset = ParallaxOffset(viewDirTS, height);

ADD_IDX(layerTexCoord.details).uvZX += offset;
ADD_IDX(layerTexCoord.details).uvXY += offset;
// Only modify tex coord for first layer, this will be use by for builtin data (like lightmap)
// Only modify texcoord for first layer, this will be use by for builtin data (like lightmap)
if (LAYER_INDEX == 0)
{
input.texCoord0 += offset;

}
// Return opacity
float ADD_IDX(GetSurfaceData)(FragInput input, LayerTexCoord layerTexCoord, out SurfaceData surfaceData)
float ADD_IDX(GetSurfaceData)(FragInputs input, LayerTexCoord layerTexCoord, out SurfaceData surfaceData)
{
#ifdef _SMOOTHNESS_TEXTURE_ALBEDO_CHANNEL_A
float alpha = ADD_IDX(_BaseColor).a;

Assets/ScriptableRenderLoop/HDRenderLoop/Material/Lit/LitVelocityPass.hlsl (8 changes)


return output;
}
FragInput UnpackVaryings(PackedVaryings input)
FragInputs UnpackVaryings(PackedVaryings input)
FragInput output;
ZERO_INITIALIZE(FragInput, output);
FragInputs output;
ZERO_INITIALIZE(FragInputs, output);
output.unPositionSS = input.positionCS; // as input we have the vpos
output.unPositionSS = input.positionCS; // input.positionCS is SV_Position
output.positionCS = float4(input.interpolators[0].xy, 0.0, input.interpolators[0].z);
output.previousPositionCS = float4(input.interpolators[1].xy, 0.0, input.interpolators[1].z);

Assets/ScriptableRenderLoop/HDRenderLoop/Material/Unlit/Unlit.shader (6 changes)


return output;
}
FragInput UnpackVaryings(PackedVaryings input)
FragInputs UnpackVaryings(PackedVaryings input)
FragInput output;
ZERO_INITIALIZE(FragInput, output);
FragInputs output;
ZERO_INITIALIZE(FragInputs, output);
output.unPositionSS = input.positionCS;
output.texCoord0 = input.interpolators[0].xy;

Assets/ScriptableRenderLoop/HDRenderLoop/Material/Unlit/UnlitData.hlsl (4 changes)


// Fill SurfaceData/Builtin data function
//-------------------------------------------------------------------------------------
void GetSurfaceAndBuiltinData(FragInput input, out SurfaceData surfaceData, out BuiltinData builtinData)
void GetSurfaceAndBuiltinData(FragInputs input, float3 V, inout PositionInputs posInput, out SurfaceData surfaceData, out BuiltinData builtinData)
{
surfaceData.color = SAMPLE_TEXTURE2D(_ColorMap, sampler_ColorMap, input.texCoord0).rgb * _Color.rgb;
float alpha = SAMPLE_TEXTURE2D(_ColorMap, sampler_ColorMap, input.texCoord0).a * _Color.a;

builtinData.distortion = float2(0.0, 0.0);
builtinData.distortionBlur = 0.0;
builtinData.depthOffset = 0.0;
}

Assets/ScriptableRenderLoop/HDRenderLoop/Material/Unlit/UnlitSharePass.hlsl (6 changes)


return output;
}
FragInput UnpackVaryings(PackedVaryings input)
FragInputs UnpackVaryings(PackedVaryings input)
FragInput output;
ZERO_INITIALIZE(FragInput, output);
FragInputs output;
ZERO_INITIALIZE(FragInputs, output);
output.unPositionSS = input.positionCS;
output.texCoord0.xy = input.interpolators[0].xy;

Assets/ScriptableRenderLoop/HDRenderLoop/ShaderPass/ShaderPassDebugViewMaterial.hlsl (9 changes)


float4 Frag(PackedVaryings packedInput) : SV_Target
{
FragInput input = UnpackVaryings(packedInput);
FragInputs input = UnpackVaryings(packedInput);
// input.unPositionSS is SV_Position
PositionInputs posInput = GetPositionInput(input.unPositionSS.xy, _ScreenSize.zw);
UpdatePositionInput(input.unPositionSS.z, input.unPositionSS.w, input.positionWS, posInput);
float3 V = GetWorldSpaceNormalizeViewDir(input.positionWS);
GetSurfaceAndBuiltinData(input, surfaceData, builtinData);
GetSurfaceAndBuiltinData(input, V, posInput, surfaceData, builtinData);
BSDFData bsdfData = ConvertSurfaceDataToBSDFData(surfaceData);

Assets/ScriptableRenderLoop/HDRenderLoop/ShaderPass/ShaderPassDepthOnly.hlsl (9 changes)


float4 Frag(PackedVaryings packedInput) : SV_Target
{
FragInput input = UnpackVaryings(packedInput);
FragInputs input = UnpackVaryings(packedInput);
// input.unPositionSS is SV_Position
PositionInputs posInput = GetPositionInput(input.unPositionSS.xy, _ScreenSize.zw);
UpdatePositionInput(input.unPositionSS.z, input.unPositionSS.w, input.positionWS, posInput);
float3 V = GetWorldSpaceNormalizeViewDir(input.positionWS);
GetSurfaceAndBuiltinData(input, surfaceData, builtinData);
GetSurfaceAndBuiltinData(input, V, posInput, surfaceData, builtinData);
// TODO: handle cubemap shadow
return float4(0.0, 0.0, 0.0, 0.0);

Assets/ScriptableRenderLoop/HDRenderLoop/ShaderPass/ShaderPassForward.hlsl (16 changes)


float4 Frag(PackedVaryings packedInput) : SV_Target
{
FragInput input = UnpackVaryings(packedInput);
float3 V = GetWorldSpaceNormalizeViewDir(input.positionWS);
float3 positionWS = input.positionWS;
FragInputs input = UnpackVaryings(packedInput);
// input.unPositionSS is SV_Position
PositionInputs posInput = GetPositionInput(input.unPositionSS.xy, _ScreenSize.zw);
UpdatePositionInput(input.unPositionSS.z, input.unPositionSS.w, input.positionWS, posInput);
float3 V = GetWorldSpaceNormalizeViewDir(input.positionWS);
GetSurfaceAndBuiltinData(input, surfaceData, builtinData);
GetSurfaceAndBuiltinData(input, V, posInput, surfaceData, builtinData);
Coordinate coord = GetCoordinate(input.unPositionSS.xy, _ScreenSize.zw);
PreLightData preLightData = GetPreLightData(V, positionWS, coord, bsdfData);
PreLightData preLightData = GetPreLightData(V, posInput, bsdfData);
LightLoop(V, positionWS, coord, preLightData, bsdfData, bakeDiffuseLighting, diffuseLighting, specularLighting);
LightLoop(V, posInput, preLightData, bsdfData, bakeDiffuseLighting, diffuseLighting, specularLighting);
return float4(diffuseLighting + specularLighting, builtinData.opacity);
}
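
The forward pass gets the same treatment. Written out in one piece from the changed lines, the new fragment shader reads roughly as below; this is a sketch, and the bakeDiffuseLighting line reuses the GetBakedDiffuseLigthing call visible in the GBuffer-pass hunk further down rather than anything shown in this hunk:

    // Sketch: the new ShaderPassForward fragment flow after this commit
    float4 Frag(PackedVaryings packedInput) : SV_Target
    {
        FragInputs input = UnpackVaryings(packedInput);

        // input.unPositionSS is SV_Position; its zw feed depthRaw/depthVS directly (forward overload)
        PositionInputs posInput = GetPositionInput(input.unPositionSS.xy, _ScreenSize.zw);
        UpdatePositionInput(input.unPositionSS.z, input.unPositionSS.w, input.positionWS, posInput);
        float3 V = GetWorldSpaceNormalizeViewDir(input.positionWS);

        SurfaceData surfaceData;
        BuiltinData builtinData;
        GetSurfaceAndBuiltinData(input, V, posInput, surfaceData, builtinData);

        BSDFData bsdfData = ConvertSurfaceDataToBSDFData(surfaceData);
        PreLightData preLightData = GetPreLightData(V, posInput, bsdfData);

        float3 bakeDiffuseLighting = GetBakedDiffuseLigthing(surfaceData, builtinData, bsdfData, preLightData);
        float3 diffuseLighting, specularLighting;
        LightLoop(V, posInput, preLightData, bsdfData, bakeDiffuseLighting, diffuseLighting, specularLighting);

        return float4(diffuseLighting + specularLighting, builtinData.opacity);
    }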

Assets/ScriptableRenderLoop/HDRenderLoop/ShaderPass/ShaderPassForwardUnlit.hlsl (9 changes)


float4 Frag(PackedVaryings packedInput) : SV_Target
{
FragInput input = UnpackVaryings(packedInput);
FragInputs input = UnpackVaryings(packedInput);
// input.unPositionSS is SV_Position
PositionInputs posInput = GetPositionInput(input.unPositionSS.xy, _ScreenSize.zw);
UpdatePositionInput(input.unPositionSS.z, input.unPositionSS.w, input.positionWS, posInput);
float3 V = GetWorldSpaceNormalizeViewDir(input.positionWS);
GetSurfaceAndBuiltinData(input, surfaceData, builtinData);
GetSurfaceAndBuiltinData(input, V, posInput, surfaceData, builtinData);
// Not lit here (but emissive is allowed)

Assets/ScriptableRenderLoop/HDRenderLoop/ShaderPass/ShaderPassGBuffer.hlsl (14 changes)


OUTPUT_GBUFFER_VELOCITY(outVelocityBuffer)
)
{
FragInput input = UnpackVaryings(packedInput);
float3 V = GetWorldSpaceNormalizeViewDir(input.positionWS);
float3 positionWS = input.positionWS;
FragInputs input = UnpackVaryings(packedInput);
// input.unPositionSS is SV_Position
PositionInputs posInput = GetPositionInput(input.unPositionSS.xy, _ScreenSize.zw);
UpdatePositionInput(input.unPositionSS.z, input.unPositionSS.w, input.positionWS, posInput);
float3 V = GetWorldSpaceNormalizeViewDir(input.positionWS);
GetSurfaceAndBuiltinData(input, surfaceData, builtinData);
GetSurfaceAndBuiltinData(input, V, posInput, surfaceData, builtinData);
Coordinate coord = GetCoordinate(input.unPositionSS.xy, _ScreenSize.zw);
PreLightData preLightData = GetPreLightData(V, positionWS, coord, bsdfData);
PreLightData preLightData = GetPreLightData(V, posInput, bsdfData);
float3 bakeDiffuseLighting = GetBakedDiffuseLigthing(surfaceData, builtinData, bsdfData, preLightData);
ENCODE_INTO_GBUFFER(surfaceData, bakeDiffuseLighting, outGBuffer);

Assets/ScriptableRenderLoop/HDRenderLoop/ShaderPass/ShaderPassLightTransport.hlsl (9 changes)


float4 Frag(PackedVaryings packedInput) : SV_Target
{
FragInput input = UnpackVaryings(packedInput);
FragInputs input = UnpackVaryings(packedInput);
// input.unPositionSS is SV_Position
PositionInputs posInput = GetPositionInput(input.unPositionSS.xy, _ScreenSize.zw);
// No position and depth in case of light transport
float3 V = float3(0, 0, 1); // No vector view in case of light transport
GetSurfaceAndBuiltinData(input, surfaceData, builtinData);
GetSurfaceAndBuiltinData(input, V, posInput, surfaceData, builtinData);
BSDFData bsdfData = ConvertSurfaceDataToBSDFData(surfaceData);
LighTransportData lightTransportData = GetLightTransportData(surfaceData, builtinData, bsdfData);

Assets/ScriptableRenderLoop/HDRenderLoop/ShaderPass/ShaderPassVelocity.hlsl (9 changes)


float4 Frag(PackedVaryings packedInput) : SV_Target
{
FragInput input = UnpackVaryings(packedInput);
FragInputs input = UnpackVaryings(packedInput);
// input.unPositionSS is SV_Position
PositionInputs posInput = GetPositionInput(input.unPositionSS.xy, _ScreenSize.zw);
UpdatePositionInput(input.unPositionSS.z, input.unPositionSS.w, input.positionWS, posInput);
float3 V = GetWorldSpaceNormalizeViewDir(input.positionWS);
GetSurfaceAndBuiltinData(input, surfaceData, builtinData);
GetSurfaceAndBuiltinData(input, V, posInput, surfaceData, builtinData);
float4 outBuffer;
EncodeVelocity(builtinData.velocity, outBuffer);

Assets/ScriptableRenderLoop/HDRenderLoop/Sky/Resources/SkyProcedural.shader (11 changes)


float3 rotDirY = float3(sinPhi, 0, cosPhi);
dir = float3(dot(rotDirX, dir), dir.y, dot(rotDirY, dir));
Coordinate coord = GetCoordinate(input.positionCS.xy, _ScreenSize.zw);
// input.positionCS is SV_Position
PositionInputs posInput = GetPositionInput(input.positionCS.xy, _ScreenSize.zw);
// If the sky box is too far away (depth set to 0), the resulting look is too foggy.
const float skyDepth = 0.01;

float rawDepth = max(skyDepth, LOAD_TEXTURE2D(_CameraDepthTexture, coord.unPositionSS).r);
float rawDepth = max(skyDepth, LOAD_TEXTURE2D(_CameraDepthTexture, posInput.unPositionSS).r);
float skyTexWeight = (rawDepth > skyDepth) ? 0.0 : 1.0;
#else
float rawDepth = skyDepth;

float3 positionWS = UnprojectToWorld(rawDepth, coord.positionSS, _InvViewProjMatrix);
UpdatePositionInput(rawDepth, _InvViewProjMatrix, GetWorldToViewMatrix(), posInput);
VolundTransferScatter(positionWS, c1, c2, c3);
VolundTransferScatter(posInput.positionWS, c1, c2, c3);
float4 coord1 = float4(c1.rgb + c3.rgb, max(0.f, 1.f - c1.a - c3.a));
float3 coord2 = c2.rgb;

Assets/ScriptableRenderLoop/ShaderLibrary/Common.hlsl (92 changes)


// World position reconstruction / transformation
// ----------------------------------------------------------------------------
struct Coordinate
// Z buffer to linear 0..1 depth
float Linear01Depth(float depth, float4 zBufferParam)
{
return 1.0 / (zBufferParam.x * depth + zBufferParam.y);
}
// Z buffer to linear depth
float LinearEyeDepth(float depth, float4 zBufferParam)
{
return 1.0 / (zBufferParam.z * depth + zBufferParam.w);
}
struct PositionInputs
// Normalize coordinates
float2 positionSS;
// Unormalize coordinates
uint2 unPositionSS;
// Normalize screen position (offset by 0.5)
float2 positionSS;
// Unormalize screen position (offset by 0.5)
uint2 unPositionSS;
float depthRaw; // raw depth from depth buffer
float depthVS;
float4 positionCS;
float3 positionWS;
// else it is current unormalized screen coordinate like return by VPOS
Coordinate GetCoordinate(float2 unPositionSS, float2 invScreenSize)
// else it is current unormalized screen coordinate like return by SV_Position
PositionInputs GetPositionInput(float2 unPositionSS, float2 invScreenSize)
Coordinate coord;
coord.positionSS = unPositionSS;
PositionInputs posInput;
ZERO_INITIALIZE(PositionInputs, posInput);
posInput.positionSS = unPositionSS;
coord.positionSS.xy += float2(0.5, 0.5);
posInput.positionSS.xy += float2(0.5, 0.5);
coord.positionSS *= invScreenSize;
posInput.positionSS *= invScreenSize;
coord.unPositionSS = uint2(unPositionSS);
posInput.unPositionSS = uint2(unPositionSS);
return coord;
return posInput;
// screenPos is screen coordinate in [0..1] (return by Coordinate.positionSS)
// depth must be the depth from the raw depth buffer. This allow to handle all kind of depth automatically with the inverse view projection matrix.
// For information. In Unity Depth is always in range 0..1 (even on OpenGL) but can be reversed.
float3 UnprojectToWorld(float depth, float2 screenPos, float4x4 invViewProjectionMatrix)
// From forward
// depthRaw and depthVS come directly form .zw of SV_Position
void UpdatePositionInput(float depthRaw, float depthVS, float3 positionWS, inout PositionInputs posInput)
float4 positionCS = float4(screenPos.xy * 2.0 - 1.0, depth, 1.0);
float4 hpositionWS = mul(invViewProjectionMatrix, positionCS);
posInput.depthRaw = depthRaw;
posInput.depthVS = depthVS;
return hpositionWS.xyz / hpositionWS.w;
// TODO: We revert for DX but maybe it is not the case of OGL ? Test the define ?
posInput.positionCS = float4((posInput.positionSS - 0.5) * float2(2.0, -2.0), depthRaw, 1.0) * depthVS; // depthVS is SV_Position.w
posInput.positionWS = positionWS;
// Z buffer to linear 0..1 depth
float Linear01Depth(float depth, float4 zBufferParam)
// From deferred or compute shader
// depth must be the depth from the raw depth buffer. This allow to handle all kind of depth automatically with the inverse view projection matrix.
// For information. In Unity Depth is always in range 0..1 (even on OpenGL) but can be reversed.
void UpdatePositionInput(float depth, float4x4 invViewProjectionMatrix, float4x4 worlToViewProjectionMatrix, inout PositionInputs posInput)
return 1.0 / (zBufferParam.x * depth + zBufferParam.y);
posInput.depthRaw = depth;
// TODO: Do we need to flip Y axis here on OGL ?
posInput.positionCS = float4(posInput.positionSS.xy * 2.0 - 1.0, depth, 1.0);
float4 hpositionWS = mul(invViewProjectionMatrix, posInput.positionCS);
posInput.positionWS = hpositionWS.xyz / hpositionWS.w;
// The compiler should optimize this (less expensive than reconstruct depth VS from depth buffer)
posInput.depthVS = mul(worlToViewProjectionMatrix, float4(posInput.positionWS, 1.0)).z;
posInput.positionCS *= posInput.depthVS;
// Z buffer to linear depth
float LinearEyeDepth(float depth, float4 zBufferParam)
// depthOffsetVS is always in the direction of the view vector (V)
void ApplyDepthOffsetVS(float V, float depthOffsetVS, inout PositionInputs posInput)
return 1.0 / (zBufferParam.z * depth + zBufferParam.w);
posInput.depthVS += depthOffsetVS;
// TODO: it is an approx, need a correct value where we use projection matrix to reproject the depth from VS
posInput.depthRaw = posInput.positionCS.z / posInput.depthVS;
posInput.positionCS = float4((posInput.positionSS - 0.5) * float2(2.0, -2.0), posInput.depthRaw, 1.0) * posInput.depthVS;
// Just add the offset along the view vector is sufficiant for world position
posInput.positionWS += V * depthOffsetVS;
}
//-----------------------------------------------------------------------------
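
This last hunk is the core of the commit: PositionInputs replaces Coordinate, UpdatePositionInput gains two overloads (forward passes feed depthRaw/depthVS straight from SV_Position.zw plus the interpolated world position, while deferred/compute passes reconstruct position and view-space depth from the raw depth buffer and the inverse view-projection matrix), and ApplyDepthOffsetVS re-derives depthVS, depthRaw, positionCS and positionWS once a per-pixel offset along the view vector is known. A minimal usage sketch follows; only the helper names come from the hunk, the offset value is a placeholder since no material in this commit produces one yet, and the first argument to ApplyDepthOffsetVS is written as the view direction per the hunk's own comment:

    // Hypothetical use of the depth-offset path prepared above
    PositionInputs posInput = GetPositionInput(input.unPositionSS.xy, _ScreenSize.zw);
    UpdatePositionInput(input.unPositionSS.z, input.unPositionSS.w, input.positionWS, posInput);
    float3 V = GetWorldSpaceNormalizeViewDir(posInput.positionWS);

    float depthOffsetVS = 0.05; // placeholder: e.g. driven by a height map, along V
    ApplyDepthOffsetVS(V, depthOffsetVS, posInput); // updates depthVS, then approximates depthRaw, positionCS and positionWS
    // A pass that supports it could then output posInput.depthRaw to SV_Depth.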
