
Merge remote-tracking branch 'origin/master' into HDRP-GraphicTests

Remy, 7 years ago
Current commit
39c80121
150 changed files with 1163 additions and 1046 deletions
  1. 14  ImageTemplates/LightweightPipeline/Scenes/023_Lighting_Mixed.unity.png.meta
  2. 14  ImageTemplates/LightweightPipeline/Scenes/027_PostProcessing.unity.png.meta
  3. 14  ImageTemplates/LightweightPipeline/Scenes/036_Lighting_Scene_DirectionalBakedDirectional.unity.png.meta
  4. 150  SampleScenes/HDTest/HDRenderLoopTest.unity
  5. 122  ScriptableRenderPipeline/Core/ShaderLibrary/Common.hlsl
  6. 12  ScriptableRenderPipeline/Core/ShaderLibrary/CommonLighting.hlsl
  7. 5  ScriptableRenderPipeline/Core/ShaderLibrary/CommonMaterial.hlsl
  8. 4  ScriptableRenderPipeline/Core/ShaderLibrary/ImageBasedLighting.hlsl
  9. 18  ScriptableRenderPipeline/Core/ShaderLibrary/Macros.hlsl
  10. 2  ScriptableRenderPipeline/Core/ShaderLibrary/NormalSurfaceGradient.hlsl
  11. 6  ScriptableRenderPipeline/Core/ShaderLibrary/Packing.hlsl
  12. 57  ScriptableRenderPipeline/Core/ShaderLibrary/Sampling.hlsl
  13. 98  ScriptableRenderPipeline/Core/ShaderLibrary/Shadow/Shadow.hlsl
  14. 2  ScriptableRenderPipeline/Core/ShaderLibrary/VolumeRendering.hlsl
  15. 2  ScriptableRenderPipeline/Core/Shadow/Resources/DebugDisplayShadowMap.shader
  16. 1  ScriptableRenderPipeline/Core/Shadow/Shadow.cs
  17. 12  ScriptableRenderPipeline/HDRenderPipeline/Camera/HDAdditionalCameraData.cs
  18. 2  ScriptableRenderPipeline/HDRenderPipeline/Camera/HDCamera.cs
  19. 4  ScriptableRenderPipeline/HDRenderPipeline/Debug/DebugDisplayLatlong.shader
  20. 2  ScriptableRenderPipeline/HDRenderPipeline/Debug/DebugFullScreen.shader
  21. 8  ScriptableRenderPipeline/HDRenderPipeline/Debug/DebugViewMaterialGBuffer.shader
  22. 2  ScriptableRenderPipeline/HDRenderPipeline/Debug/DebugViewTiles.shader
  23. 135  ScriptableRenderPipeline/HDRenderPipeline/Editor/HDAssetFactory.cs
  24. 1  ScriptableRenderPipeline/HDRenderPipeline/Editor/HDRenderPipelineInspector.Styles.cs
  25. 3  ScriptableRenderPipeline/HDRenderPipeline/Editor/HDRenderPipelineInspector.cs
  26. 431  ScriptableRenderPipeline/HDRenderPipeline/HDRenderPipeline.cs
  27. 4  ScriptableRenderPipeline/HDRenderPipeline/HDRenderPipelineAsset.cs
  28. 2  ScriptableRenderPipeline/HDRenderPipeline/Lighting/Deferred.shader
  29. 15  ScriptableRenderPipeline/HDRenderPipeline/Lighting/LightDefinition.cs
  30. 20  ScriptableRenderPipeline/HDRenderPipeline/Lighting/LightDefinition.cs.hlsl
  31. 14  ScriptableRenderPipeline/HDRenderPipeline/Lighting/Lighting.hlsl
  32. 4  ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/ClusteredUtils.hlsl
  33. 2  ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/Deferred.compute
  34. 2  ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/DeferredDirectionalShadow.compute
  35. 9  ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/ShadowContext.hlsl
  36. 4  ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/ShadowContext.hlsl.meta
  37. 4  ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/SortingComputeUtils.hlsl
  38. 2  ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/TilePass.cs
  39. 6  ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/TilePassLoop.hlsl
  40. 6  ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/lightlistbuild-bigtile.compute
  41. 6  ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/lightlistbuild-clustered.compute
  42. 2  ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/lightlistbuild.compute
  43. 2  ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/materialflags.compute
  44. 4  ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/scrbound.compute
  45. 17  ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/Shadow.hlsl
  46. 6  ScriptableRenderPipeline/HDRenderPipeline/Material/LayeredLit/LayeredLit.shader
  47. 2  ScriptableRenderPipeline/HDRenderPipeline/Material/LayeredLit/LayeredLitDataDisplacement.hlsl
  48. 10  ScriptableRenderPipeline/HDRenderPipeline/Material/LayeredLit/LayeredLitTessellation.shader
  49. 232  ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/Lit.hlsl
  50. 6  ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/Lit.shader
  51. 2  ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/LitData.hlsl
  52. 2  ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/LitDataDisplacement.hlsl
  53. 10  ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/LitTessellation.shader
  54. 2  ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/Resources/CombineLighting.shader
  55. 2  ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/Resources/CopyStencilBuffer.shader
  56. 4  ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/Resources/PreIntegratedFGD.shader
  57. 99  ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/Resources/SubsurfaceScattering.compute
  58. 2  ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/Resources/SubsurfaceScattering.shader
  59. 16  ScriptableRenderPipeline/HDRenderPipeline/Material/Material.hlsl
  60. 15  ScriptableRenderPipeline/HDRenderPipeline/Material/Unlit/Editor/BaseUnlitUI.cs
  61. 2  ScriptableRenderPipeline/HDRenderPipeline/Material/Unlit/Unlit.shader
  62. 2  ScriptableRenderPipeline/HDRenderPipeline/RenderPipelineResources/ApplyDistorsion.compute
  63. 2  ScriptableRenderPipeline/HDRenderPipeline/RenderPipelineResources/CameraMotionVectors.shader
  64. 2  ScriptableRenderPipeline/HDRenderPipeline/RenderPipelineResources/DepthDownsample.compute
  65. 14  ScriptableRenderPipeline/HDRenderPipeline/RenderPipelineResources/RenderPipelineResources.cs
  66. 2  ScriptableRenderPipeline/HDRenderPipeline/SceneSettings/Resources/DrawSssProfile.shader
  67. 4  ScriptableRenderPipeline/HDRenderPipeline/SceneSettings/Resources/DrawTransmittanceGraph.shader
  68. 2  ScriptableRenderPipeline/HDRenderPipeline/ShaderPass/ShaderPassDepthOnly.hlsl
  69. 2  ScriptableRenderPipeline/HDRenderPipeline/ShaderPass/ShaderPassForward.hlsl
  70. 2  ScriptableRenderPipeline/HDRenderPipeline/ShaderPass/ShaderPassGBuffer.hlsl
  71. 2  ScriptableRenderPipeline/HDRenderPipeline/ShaderPass/ShaderPassLightTransport.hlsl
  72. 8  ScriptableRenderPipeline/HDRenderPipeline/Sky/AtmosphericScattering/AtmosphericScattering.hlsl
  73. 16  ScriptableRenderPipeline/HDRenderPipeline/Sky/BlacksmithlSky/Resources/SkyBlacksmith.shader
  74. 2  ScriptableRenderPipeline/HDRenderPipeline/Sky/BlitCubemap.shader
  75. 4  ScriptableRenderPipeline/HDRenderPipeline/Sky/BuildProbabilityTables.compute
  76. 4  ScriptableRenderPipeline/HDRenderPipeline/Sky/ComputeGgxIblSampleData.compute
  77. 4  ScriptableRenderPipeline/HDRenderPipeline/Sky/GGXConvolve.shader
  78. 6  ScriptableRenderPipeline/HDRenderPipeline/Sky/HDRISky/Resources/SkyHDRI.shader
  79. 2  ScriptableRenderPipeline/HDRenderPipeline/Sky/OpaqueAtmosphericScattering.shader
  80. 6  ScriptableRenderPipeline/HDRenderPipeline/Sky/ProceduralSky/Resources/ProceduralSky.shader
  81. 7  ScriptableRenderPipeline/HDRenderPipeline/Sky/SkyManager.cs
  82. 64  ScriptableRenderPipeline/LightweightPipeline/LightweightPipeline.cs
  83. 10  ScriptableRenderPipeline/LightweightPipeline/Resources/LightweightPipelineAsset.cs
  84. 8  ScriptableRenderPipeline/LightweightPipeline/Resources/LightweightPipelineResource.asset
  85. 13  ScriptableRenderPipeline/LightweightPipeline/Shaders/LightweightInput.cginc
  86. 151  ScriptableRenderPipeline/LightweightPipeline/Shaders/LightweightPassLit.cginc
  87. 25  ScriptableRenderPipeline/LightweightPipeline/Shaders/LightweightStandard.shader
  88. 54  ScriptableRenderPipeline/LightweightPipeline/Shaders/LightweightStandardSimpleLighting.shader
  89. 2  ScriptableRenderPipeline/LightweightPipeline/Shaders/LightweightStandardTerrain.shader
  90. 2  ScriptableRenderPipeline/LightweightPipeline/Shaders/LightweightUnlit.shader
  91. 4  ScriptableRenderPipeline/master-package.json
  92. 2  TestbedPipelines/Fptl/FptlLighting.cs
  93. 31  TestbedPipelines/Fptl/LightingTemplate.hlsl
  94. 7  TestbedPipelines/Fptl/ShadowContext.hlsl
  95. 13  TestbedPipelines/Fptl/ShadowContext.hlsl.meta
  96. 2  TestbedPipelines/OnTileDeferredPipeline/OnTileDeferredRenderPipeline.cs
  97. 30  TestbedPipelines/OnTileDeferredPipeline/Shaders/LightingTemplate.hlsl
  98. 30  TestbedPipelines/OnTileDeferredPipeline/Shaders/UnityStandardForwardMobile.cginc
  99. 2  ScriptableRenderPipeline/Core/Editor/CoreShaderIncludePaths.cs.meta
  100. 8  SampleScenes/Common/Materials.meta

14
ImageTemplates/LightweightPipeline/Scenes/023_Lighting_Mixed.unity.png.meta


fileFormatVersion: 2
guid: fad56126d031e40ffa9d93733bead554
timeCreated: 1509374830
licenseType: Pro
serializedVersion: 4
serializedVersion: 5
mipmaps:
mipMapMode: 0
enableMipMap: 1

spriteTessellationDetail: -1
textureType: 0
textureShape: 1
singleChannelComponent: 0
- buildTarget: DefaultTexturePlatform
- serializedVersion: 2
buildTarget: DefaultTexturePlatform
maxTextureSize: 2048
resizeAlgorithm: 0
textureFormat: -1

sprites: []
outline: []
physicsShape: []
bones: []
spriteID:
vertices: []
indices:
edges: []
weights: []
spritePackingTag:
userData:
assetBundleName:

14
ImageTemplates/LightweightPipeline/Scenes/027_PostProcessing.unity.png.meta


fileFormatVersion: 2
guid: c40bd57fb760b4839a1ef80428356062
timeCreated: 1509377483
licenseType: Pro
serializedVersion: 4
serializedVersion: 5
mipmaps:
mipMapMode: 0
enableMipMap: 1

spriteTessellationDetail: -1
textureType: 0
textureShape: 1
singleChannelComponent: 0
- buildTarget: DefaultTexturePlatform
- serializedVersion: 2
buildTarget: DefaultTexturePlatform
maxTextureSize: 2048
resizeAlgorithm: 0
textureFormat: -1

sprites: []
outline: []
physicsShape: []
bones: []
spriteID:
vertices: []
indices:
edges: []
weights: []
spritePackingTag:
userData:
assetBundleName:

14
ImageTemplates/LightweightPipeline/Scenes/036_Lighting_Scene_DirectionalBakedDirectional.unity.png.meta


fileFormatVersion: 2
guid: f1b41e73e6cf549d4a74497132c3aa3a
timeCreated: 1509466261
licenseType: Pro
serializedVersion: 4
serializedVersion: 5
mipmaps:
mipMapMode: 0
enableMipMap: 1

spriteTessellationDetail: -1
textureType: 0
textureShape: 1
singleChannelComponent: 0
- buildTarget: DefaultTexturePlatform
- serializedVersion: 2
buildTarget: DefaultTexturePlatform
maxTextureSize: 2048
resizeAlgorithm: 0
textureFormat: -1

sprites: []
outline: []
physicsShape: []
bones: []
spriteID:
vertices: []
indices:
edges: []
weights: []
spritePackingTag:
userData:
assetBundleName:

150
SampleScenes/HDTest/HDRenderLoopTest.unity
The file diff is too large to display.

122
ScriptableRenderPipeline/Core/ShaderLibrary/Common.hlsl


// unsigned integer bit field extract implementation
uint BitFieldExtract(uint data, uint numBits, uint offset)
{
uint mask = 0xFFFFFFFFu >> (32u - numBits);
uint mask = UINT_MAX >> (32u - numBits);
return (data >> offset) & mask;
}
#endif // INTRINSIC_BITFIELD_EXTRACT
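For reference, a worked example of the extraction above (illustration only, not part of this commit): with data = 0xABCDu, numBits = 4 and offset = 8, the mask is 0xFu and the result is 0xBu.

// Illustration: (0xABCDu >> 8u) & 0xFu == 0xBu
uint BitFieldExtractExample()
{
    return BitFieldExtract(0xABCDu, 4u, 8u); // returns 0xBu
}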

#endif // INTRINSIC_CUBEMAP_FACE_ID
// ----------------------------------------------------------------------------
// Common math definition and fastmath function
// Common math functions
#define PI 3.14159265359
#define TWO_PI 6.28318530718
#define FOUR_PI 12.56637061436
#define INV_PI 0.31830988618
#define INV_TWO_PI 0.15915494309
#define INV_FOUR_PI 0.07957747155
#define HALF_PI 1.57079632679
#define INV_HALF_PI 0.636619772367
#define INFINITY asfloat(0x7F800000)
#define LOG2_E 1.44269504089
#define FLT_EPSILON 1.192092896e-07 // Smallest positive number, such that 1.0 + FLT_EPSILON != 1.0
#define FLT_MIN 1.175494351e-38 // Minimum representable positive floating-point number
#define FLT_MAX 3.402823466e+38 // Maximum representable floating-point number
#define HFLT_MIN 0.00006103515625 // 2^14 it is the same for 10, 11 and 16bit float. ref: https://www.khronos.org/opengl/wiki/Small_Float_Formats
float DegToRad(float deg)
{
return deg * (PI / 180.0);

// Using pow often result to a warning like this
// "pow(f, e) will not work for negative f, use abs(f) or conditionally handle negative values if you expect them"
// PositivePow remove this warning when you know the value is positive and avoid inf/NAN.
TEMPLATE_2_FLT(PositivePow, base, power, return pow(max(abs(base), FLT_EPSILON), power))
TEMPLATE_2_FLT(PositivePow, base, power, return pow(max(abs(base), FLT_EPS), power))
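A typical call site for PositivePow, sketched for illustration only (the gamma exponent here is an assumption, not something this commit introduces):

// Encode a linear colour with a 1/2.2 exponent without triggering the
// negative-base pow warning mentioned above.
float3 LinearToGamma22Example(float3 linearColor)
{
    return PositivePow(linearColor, 1.0 / 2.2);
}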
// Computes (FastSign(s) * x) using 2x VALU.
// See the comment about FastSign() below.
float FastMulBySignOf(float s, float x, bool ignoreNegZero = true)
{
if (ignoreNegZero)
{
return (s >= 0) ? x : -x;
}
else
{
uint negZero = 0x80000000u;
uint signBit = negZero & asuint(s);
return asfloat(signBit ^ asuint(x));
}
}
// Ref: https://twitter.com/SebAaltonen/status/878250919879639040
// 2 mads (mad_sat and mad), faster than regular sign
float FastSign(float x)
// Returns -1 for negative numbers and 1 for positive numbers.
// 0 can be handled in 2 different ways.
// The IEEE floating point standard defines 0 as signed: +0 and -0.
// However, mathematics typically treats 0 as unsigned.
// Therefore, we treat -0 as +0 by default: FastSign(+0) = FastSign(-0) = 1.
// If (ignoreNegZero = false), FastSign(-0, false) = -1.
// Note that the sign() function in HLSL implements signum, which returns 0 for 0.
float FastSign(float s, bool ignoreNegZero = true)
return saturate(x * FLT_MAX) * 2.0 - 1.0;
return FastMulBySignOf(s, 1.0, ignoreNegZero);
}
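The zero handling described in the comment can be summarised with a small illustration (the input values are chosen here, not taken from the diff):

// FastSign( 3.0)        ==  1.0
// FastSign(-3.0)        == -1.0
// FastSign(-0.0)        ==  1.0   (negative zero treated as +0 by default)
// FastSign(-0.0, false) == -1.0   (sign bit of -0 respected)
// FastMulBySignOf(-2.0, 5.0) == -5.0
float4 FastSignExamples()
{
    return float4(FastSign(3.0), FastSign(-3.0), FastSign(-0.0), FastSign(-0.0, false));
}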
// Orthonormalizes the tangent frame using the Gram-Schmidt process.

// Z buffer to linear depth.
// Correctly handles oblique view frustums. Only valid for projection matrices!
// Ref: An Efficient Depth Linearization Method for Oblique View Frustums, Eq. 6.
float LinearEyeDepth(float2 positionSS, float depthRaw, float4 invProjParam)
float LinearEyeDepth(float2 positionSS, float deviceDepth, float4 invProjParam)
float4 positionCS = float4(positionSS * 2.0 - 1.0, depthRaw, 1.0);
float4 positionCS = float4(positionSS * 2.0 - 1.0, deviceDepth, 1.0);
// Z buffer to linear depth.
// Correctly handles oblique view frustums.
// Typically, this is the cheapest variant, provided you've already computed 'positionWS'.
float LinearEyeDepth(float3 positionWS, float4x4 viewProjMatrix)
{
return mul(viewProjMatrix, float4(positionWS, 1.0)).w;
}
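A hedged usage sketch of the cheaper overload above; the depth texture, sampler and matrix parameters are placeholders supplied by the caller, not identifiers from this commit:

// Reconstruct the world space position from the depth buffer, then recover
// the view space Z with the positionWS overload of LinearEyeDepth.
float LinearEyeDepthFromBuffer(float2 positionSS, Texture2D<float> depthTexture, SamplerState pointClampSampler,
                               float4x4 invViewProjMatrix, float4x4 viewProjMatrix)
{
    float deviceDepth = depthTexture.SampleLevel(pointClampSampler, positionSS, 0);
    float3 positionWS = ComputeWorldSpacePosition(positionSS, deviceDepth, invViewProjMatrix);
    return LinearEyeDepth(positionWS, viewProjMatrix); // view space Z, in [Near, Far]
}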
// ----------------------------------------------------------------------------
// Space transformations
// ----------------------------------------------------------------------------

return positionSS;
}
float4 ComputeClipSpacePosition(float2 positionSS, float depthRaw)
float4 ComputeClipSpacePosition(float2 positionSS, float deviceDepth)
return float4(positionSS * 2.0 - 1.0, depthRaw, 1.0);
return float4(positionSS * 2.0 - 1.0, deviceDepth, 1.0);
float3 ComputeViewSpacePosition(float2 positionSS, float depthRaw, float4x4 invProjMatrix)
float3 ComputeViewSpacePosition(float2 positionSS, float deviceDepth, float4x4 invProjMatrix)
float4 positionCS = ComputeClipSpacePosition(positionSS, depthRaw);
float4 positionCS = ComputeClipSpacePosition(positionSS, deviceDepth);
float4 positionVS = mul(invProjMatrix, positionCS);
// The view space uses a right-handed coordinate system.
positionVS.z = -positionVS.z;

float3 ComputeWorldSpacePosition(float2 positionSS, float depthRaw, float4x4 invViewProjMatrix)
float3 ComputeWorldSpacePosition(float2 positionSS, float deviceDepth, float4x4 invViewProjMatrix)
float4 positionCS = ComputeClipSpacePosition(positionSS, depthRaw);
float4 positionCS = ComputeClipSpacePosition(positionSS, deviceDepth);
float4 hpositionWS = mul(invViewProjMatrix, positionCS);
return hpositionWS.xyz / hpositionWS.w;
}

struct PositionInputs
{
// Normalize screen position (offset by 0.5)
float2 positionSS;
// Unormalize screen position (offset by 0.5)
uint2 unPositionSS;
uint2 unTileCoord;
float depthRaw; // raw depth from depth buffer
float depthVS;
float3 positionWS;
// TODO: improve the naming convention.
// Some options:
// positionNDC, positionSS, tileCoordSS
// pixelCoordUV, pixelCoordSS, tileCoordSS
// pixelCoordSS, pixelIndexSS, tileIndexSS
float3 positionWS; // World space position (could be camera-relative)
float2 positionSS; // Screen space pixel position : [0, 1) (with the half-pixel offset)
uint2 unPositionSS; // Screen space pixel index : [0, NumPixels)
uint2 unTileCoord; // Screen space tile index : [0, NumTiles)
float deviceDepth; // Depth from the depth buffer : [0, 1]
float linearDepth; // View space Z coordinate : [Near, Far]
};
// This function is use to provide an easy way to sample into a screen texture, either from a pixel or a compute shaders.

}
// From forward
// depthRaw and depthVS come directly form .zw of SV_Position
void UpdatePositionInput(float depthRaw, float depthVS, float3 positionWS, inout PositionInputs posInput)
// deviceDepth and linearDepth come directly from .zw of SV_Position
void UpdatePositionInput(float deviceDepth, float linearDepth, float3 positionWS, inout PositionInputs posInput)
posInput.depthRaw = depthRaw;
posInput.depthVS = depthVS;
posInput.positionWS = positionWS;
posInput.deviceDepth = deviceDepth;
posInput.linearDepth = linearDepth;
posInput.positionWS = positionWS;
void UpdatePositionInput(float depthRaw, float4x4 invViewProjMatrix, float4x4 viewProjMatrix, inout PositionInputs posInput)
void UpdatePositionInput(float deviceDepth, float4x4 invViewProjMatrix, float4x4 viewProjMatrix, inout PositionInputs posInput)
posInput.depthRaw = depthRaw;
posInput.positionWS = ComputeWorldSpacePosition(posInput.positionSS, depthRaw, invViewProjMatrix);
posInput.deviceDepth = deviceDepth;
posInput.positionWS = ComputeWorldSpacePosition(posInput.positionSS, deviceDepth, invViewProjMatrix);
posInput.depthVS = mul(viewProjMatrix, float4(posInput.positionWS, 1.0)).w;
posInput.linearDepth = mul(viewProjMatrix, float4(posInput.positionWS, 1.0)).w;
}
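As a sketch of how the renamed fields are meant to be filled in the deferred path (assuming posInput.positionSS and posInput.unPositionSS were already initialised by the caller; the helper that does that is not part of this hunk):

// Fill the renamed depth fields from a depth-buffer value.
void FillPositionInputsExample(float deviceDepth, float4x4 invViewProjMatrix, float4x4 viewProjMatrix,
                               inout PositionInputs posInput)
{
    UpdatePositionInput(deviceDepth, invViewProjMatrix, viewProjMatrix, posInput);
    // posInput.deviceDepth : raw depth-buffer value in [0, 1]
    // posInput.linearDepth : view space Z recovered through viewProjMatrix
    // posInput.positionWS  : reconstructed (possibly camera-relative) world position
}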
// The view direction 'V' points towards the camera.

posInput.positionWS += depthOffsetVS * (-V);
float4 positionCS = mul(viewProjMatrix, float4(posInput.positionWS, 1.0));
posInput.depthVS = positionCS.w;
posInput.depthRaw = positionCS.z / positionCS.w;
float4 positionCS = mul(viewProjMatrix, float4(posInput.positionWS, 1.0));
posInput.linearDepth = positionCS.w;
posInput.deviceDepth = positionCS.z / positionCS.w;
}
// ----------------------------------------------------------------------------

12
ScriptableRenderPipeline/Core/ShaderLibrary/CommonLighting.hlsl


// These clamping function to max of floating point 16 bit are use to prevent INF in code in case of extreme value
float ClampToFloat16Max(float value)
{
return min(value, 65504.0);
return min(value, HALF_MAX);
return min(value, 65504.0);
return min(value, HALF_MAX);
return min(value, 65504.0);
return min(value, HALF_MAX);
return min(value, 65504.0);
return min(value, HALF_MAX);
}
// Ligthing convention

NdotV = dot(N, V);
N = (NdotV >= 0) ? N : (N - 2 * NdotV * V);
N = (NdotV >= 0.0) ? N : (N - 2.0 * NdotV * V);
NdotV = abs(NdotV);
return N;

float x = localZ.x;
float y = localZ.y;
float z = localZ.z;
float sz = z >= 0 ? 1 : -1;
float sz = FastSign(z);
float a = 1 / (sz + z);
float ya = y * a;
float b = x * ya;

5
ScriptableRenderPipeline/Core/ShaderLibrary/CommonMaterial.hlsl


// It can be accomplished by reading the stencil buffer.
// A faster solution (which avoids an extra texture fetch) is to simply make sure that
// all pixels which belong to an SSS material are not black (those that don't always are).
// We choose the blue color channel since it's perceptually the least noticeable.
subsurfaceLighting.r = max(subsurfaceLighting.r, HFLT_MIN);
subsurfaceLighting.b = max(subsurfaceLighting.b, HALF_MIN);
return subsurfaceLighting;
}

return subsurfaceLighting.r > 0;
return subsurfaceLighting.b > 0;
}
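The tag/test convention described above can be illustrated as follows; the two wrapper names are hypothetical, only the use of the blue channel and HALF_MIN comes from this diff:

// Mark a pixel as belonging to an SSS material by keeping .b strictly positive.
float3 TagForSSSExample(float3 subsurfaceLighting)
{
    subsurfaceLighting.b = max(subsurfaceLighting.b, HALF_MIN);
    return subsurfaceLighting;
}

// Later passes can then detect SSS pixels without an extra stencil fetch.
bool IsTaggedForSSSExample(float3 subsurfaceLighting)
{
    return subsurfaceLighting.b > 0;
}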
// MACRO from Legacy Untiy

4
ScriptableRenderPipeline/Core/ShaderLibrary/ImageBasedLighting.hlsl


float m = PerceptualRoughnessToRoughness(perceptualRoughness);
// Remap to spec power. See eq. 21 in --> https://dl.dropboxusercontent.com/u/55891920/papers/mm_brdf.pdf
float n = (2.0 / max(FLT_EPSILON, m * m)) - 2.0;
float n = (2.0 / max(FLT_EPS, m * m)) - 2.0;
n /= (4.0 * max(NdotR, FLT_EPSILON));
n /= (4.0 * max(NdotR, FLT_EPS));
// remap back to square root of real roughness (0.25 include both the sqrt root of the conversion and sqrt for going from roughness to perceptualRoughness)
perceptualRoughness = pow(2.0 / (n + 2.0), 0.25);

18
ScriptableRenderPipeline/Core/ShaderLibrary/Macros.hlsl


#define SAMPLE_TEXTURECUBE_ARRAY_LOD_ABSTRACT(textureName, samplerName, coord3, index, lod) SAMPLE_TEXTURECUBE_ARRAY_LOD(textureName, samplerName, coord3, index, lod)
#endif
#define PI 3.14159265358979323846
#define TWO_PI 6.28318530717958647693
#define FOUR_PI 12.5663706143591729538
#define INV_PI 0.31830988618379067154
#define INV_TWO_PI 0.15915494309189533577
#define INV_FOUR_PI 0.07957747154594766788
#define HALF_PI 1.57079632679489661923
#define INV_HALF_PI 0.63661977236758134308
#define LOG2_E 1.44269504088896340736
#define INFINITY asfloat(0x7F800000)
#define FLT_EPS 1.192092896e-07 // Smallest positive number, such that 1.0 + FLT_EPS != 1.0
#define FLT_MIN 1.175494351e-38 // Minimum representable positive floating-point number
#define FLT_MAX 3.402823466e+38 // Maximum representable floating-point number
#define HALF_MIN 6.103515625e-5 // 2^-14, the same value for 10, 11 and 16-bit: https://www.khronos.org/opengl/wiki/Small_Float_Formats
#define HALF_MAX 65504.0
#define UINT_MAX 0xFFFFFFFFu
#define TEMPLATE_1_FLT(FunctionName, Parameter1, FunctionBody) \
float FunctionName(float Parameter1) { FunctionBody; } \
float2 FunctionName(float2 Parameter1) { FunctionBody; } \

2
ScriptableRenderPipeline/Core/ShaderLibrary/NormalSurfaceGradient.hlsl


float3 SurfaceGradientFromPerturbedNormal(float3 nrmVertexNormal, float3 v)
{
float3 n = nrmVertexNormal;
float s = 1.0 / max(FLT_EPSILON, abs(dot(n, v)));
float s = 1.0 / max(FLT_EPS, abs(dot(n, v)));
return s * (dot(n, v) * n - v);
}

6
ScriptableRenderPipeline/Core/ShaderLibrary/Packing.hlsl


// Packs an integer stored using at most 'numBits' into a [0..1] float.
float PackInt(uint i, uint numBits)
{
uint maxInt = 0xFFFFFFFFu >> (32u - numBits);
uint maxInt = UINT_MAX >> (32u - numBits);
return saturate(i * rcp(maxInt));
}

uint maxInt = 0xFFFFFFFFu >> (32u - numBits);
uint maxInt = UINT_MAX >> (32u - numBits);
return (uint)(f * maxInt + 0.5); // Round instead of truncating
}

float UnpackUIntToFloat(uint src, uint numBits, uint offset)
{
uint maxInt = 0xFFFFFFFFu >> (32u - numBits);
uint maxInt = UINT_MAX >> (32u - numBits);
return float(BitFieldExtract(src, numBits, offset)) * rcp(maxInt);
}
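A worked round trip for the packing helpers (numbers chosen for illustration): with numBits = 3 the mask is UINT_MAX >> 29u == 7, so PackInt(5, 3) stores 5/7 ≈ 0.714, and the rounding shown in the unpack path, (uint)(0.714 * 7 + 0.5), recovers 5.

// Pack a 3-bit value into a normalised float; see the arithmetic above.
float PackIntExample()
{
    return PackInt(5u, 3u); // ≈ 0.714
}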

57
ScriptableRenderPipeline/Core/ShaderLibrary/Sampling.hlsl


return TransformGLtoDX(SphericalToCartesian(phi, cosTheta));
}
// Convert a texel position into normalized position [-1..1]x[-1..1]
float2 CubemapTexelToNVC(uint2 unPositionTXS, uint cubemapSize)
{
return 2.0 * float2(unPositionTXS) / float(max(cubemapSize - 1, 1)) - 1.0;
}
// Map cubemap face to world vector basis
static const float3 CUBEMAP_FACE_BASIS_MAPPING[6][3] =
{
//XPOS face
{
float3(0.0, 0.0, -1.0),
float3(0.0, -1.0, 0.0),
float3(1.0, 0.0, 0.0)
},
//XNEG face
{
float3(0.0, 0.0, 1.0),
float3(0.0, -1.0, 0.0),
float3(-1.0, 0.0, 0.0)
},
//YPOS face
{
float3(1.0, 0.0, 0.0),
float3(0.0, 0.0, 1.0),
float3(0.0, 1.0, 0.0)
},
//YNEG face
{
float3(1.0, 0.0, 0.0),
float3(0.0, 0.0, -1.0),
float3(0.0, -1.0, 0.0)
},
//ZPOS face
{
float3(1.0, 0.0, 0.0),
float3(0.0, -1.0, 0.0),
float3(0.0, 0.0, 1.0)
},
//ZNEG face
{
float3(-1.0, 0.0, 0.0),
float3(0.0, -1.0, 0.0),
float3(0.0, 0.0, -1.0)
}
};
// Convert a normalized cubemap face position into a direction
float3 CubemapTexelToDirection(float2 positionNVC, uint faceId)
{
float3 dir = CUBEMAP_FACE_BASIS_MAPPING[faceId][0] * positionNVC.x
+ CUBEMAP_FACE_BASIS_MAPPING[faceId][1] * positionNVC.y
+ CUBEMAP_FACE_BASIS_MAPPING[faceId][2];
return normalize(dir);
}
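For reference, a small sketch combining the two helpers above; cubemapSize and faceId are caller supplied, and nothing here goes beyond what this hunk defines:

// Map a texel of a given cubemap face to the direction it represents.
float3 CubemapTexelToDirectionExample(uint2 unPositionTXS, uint cubemapSize, uint faceId)
{
    float2 positionNVC = CubemapTexelToNVC(unPositionTXS, cubemapSize);
    return CubemapTexelToDirection(positionNVC, faceId);
}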
//-----------------------------------------------------------------------------
// Sampling function
// Reference : http://www.cs.virginia.edu/~jdl/bib/globillum/mis/shirley96.pdf + PBRT

98
ScriptableRenderPipeline/Core/ShaderLibrary/Shadow/Shadow.hlsl


#ifndef SHADOW_HLSL
#define SHADOW_HLSL
// First ShadowContext.hlsl must declare the specific ShadowContext struct and the loader that goes along with it.
// First ShadowContext.hlsl provides a macro SHADOWCONTEXT_DECLARE that must be used in order to define the specific ShadowContext struct and accompanying loader.
// Second there are two headers for shadow algorithms, whose signatures must match any of the Get...Attenuation function prototypes.
// The first header contains engine defaults, whereas the second header is empty by default. All project specific custom shadow algorithms should go in there or leave empty.
//
// Last there's a dispatcher include. By default the Get...Attenuation functions are rerouted to their default implementations. This can be overridden for each
// shadow type in the dispatcher source. For each overridden shadow type a specific define must be defined to prevent falling back to the default functions.
//
/* Required defines: (define these to the desired numbers - must be in sync with loading and resource setup from C#)
#define SHADOWCONTEXT_MAX_TEX2DARRAY 0
#define SHADOWCONTEXT_MAX_TEXCUBEARRAY 0
#define SHADOWCONTEXT_MAX_SAMPLER 0
#define SHADOWCONTEXT_MAX_COMPSAMPLER 0
*/
/* Default values for optional defines:
#define SHADOW_SUPPORTS_DYNAMIC_INDEXING 0 // Dynamic indexing only works on >= sm 5.1
#define SHADOW_OPTIMIZE_REGISTER_USAGE 0 // Redefine this as 1 in your ShadowContext.hlsl to optimize for register usage over instruction count
// #define SHADOW_DISPATCH_USE_CUSTOM_PUNCTUAL // Enable custom implementations of GetPunctualShadowAttenuation. If not defined, a default implementation will be used.
// #define SHADOW_DISPATCH_USE_CUSTOM_DIRECTIONAL // Enable custom implementations of GetDirectionalShadowAttenuation. If not defined, a default implementation will be used.
*/
#define SHADOW_SUPPORTS_DYNAMIC_INDEXING 0 // only on >= sm 5.1
#define SHADOW_OPTIMIZE_REGISTER_USAGE 0 // redefine this as 1 in your ShadowContext.hlsl to optimize for register usage over instruction count
#ifndef SHADOW_SUPPORTS_DYNAMIC_INDEXING
#define SHADOW_SUPPORTS_DYNAMIC_INDEXING 0
#endif
#ifndef SHADOW_OPTIMIZE_REGISTER_USAGE
#define SHADOW_OPTIMIZE_REGISTER_USAGE 0
#endif
#include "../../../Core/Shadow/ShadowBase.cs.hlsl" // ShadowData definition, auto generated (don't modify)
#include "Shadow/ShadowBase.cs.hlsl" // ShadowData definition, auto generated (don't modify)
// Declares a shadow context struct with members and sampling code based on whether _...Slots > 0
#define SHADOWCONTEXT_DECLARE( _Tex2DArraySlots, _TexCubeArraySlots, _SamplerCompSlots, _SamplerSlots ) \
\
struct ShadowContext \
{ \
StructuredBuffer<ShadowData> shadowDatas; \
StructuredBuffer<int4> payloads; \
SHADOWCONTEXT_DECLARE_TEXTURES( _Tex2DArraySlots, _TexCubeArraySlots, _SamplerCompSlots, _SamplerSlots ) \
}; \
\
SHADOW_DEFINE_SAMPLING_FUNCS( _Tex2DArraySlots, _TexCubeArraySlots, _SamplerCompSlots, _SamplerSlots )
struct ShadowContext
{
StructuredBuffer<ShadowData> shadowDatas;
StructuredBuffer<int4> payloads;
SHADOWCONTEXT_DECLARE_TEXTURES( SHADOWCONTEXT_MAX_TEX2DARRAY, SHADOWCONTEXT_MAX_TEXCUBEARRAY, SHADOWCONTEXT_MAX_COMPSAMPLER, SHADOWCONTEXT_MAX_SAMPLER )
};
// Shadow context definition and initialization, i.e. resource binding (project header, must be kept in sync with C# runtime)
#define SHADOW_CONTEXT_INCLUDE
#include "../../ShadowIncludes.hlsl"
#undef SHADOW_CONTEXT_INCLUDE
SHADOW_DEFINE_SAMPLING_FUNCS( SHADOWCONTEXT_MAX_TEX2DARRAY, SHADOWCONTEXT_MAX_TEXCUBEARRAY, SHADOWCONTEXT_MAX_COMPSAMPLER, SHADOWCONTEXT_MAX_SAMPLER )
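A hedged sketch of what a project-side ShadowContext.hlsl looks like under the new macro-based scheme; the slot counts below are placeholders, not the values HDRP actually uses:

// Slot counts must stay in sync with the C# resource setup.
#define SHADOWCONTEXT_MAX_TEX2DARRAY   4
#define SHADOWCONTEXT_MAX_TEXCUBEARRAY 0
#define SHADOWCONTEXT_MAX_COMPSAMPLER  1
#define SHADOWCONTEXT_MAX_SAMPLER      3

#define SHADOW_OPTIMIZE_REGISTER_USAGE 1   // optional, defaults to 0

// Emits the ShadowContext struct and its sampling functions.
SHADOWCONTEXT_DECLARE( SHADOWCONTEXT_MAX_TEX2DARRAY, SHADOWCONTEXT_MAX_TEXCUBEARRAY, SHADOWCONTEXT_MAX_COMPSAMPLER, SHADOWCONTEXT_MAX_SAMPLER )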
// helper function to extract shadowmap data from the ShadowData struct
void UnpackShadowmapId( uint shadowmapId, out uint texIdx, out uint sampIdx, out float slice )

shadowType = packedShadowType >> 10;
}
// wedge in the actual shadow sampling algorithms
#include "ShadowAlgorithmsCustom.hlsl" // project specific custom algorithms (project can modify this)
// default dispatchers for the individual shadow types (with and without screenspace support)
// point/spot light shadows
float GetPunctualShadowAttenuationDefault( ShadowContext shadowContext, float3 positionWS, float3 normalWS, int shadowDataIndex, float4 L )
{
return EvalShadow_PunctualDepth(shadowContext, positionWS, normalWS, shadowDataIndex, L);
}
float GetPunctualShadowAttenuationDefault( ShadowContext shadowContext, float3 positionWS, float3 normalWS, int shadowDataIndex, float4 L, float2 unPositionSS )
{
return GetPunctualShadowAttenuationDefault( shadowContext, positionWS, normalWS, shadowDataIndex, L );
}
// directional light shadows
float GetDirectionalShadowAttenuationDefault( ShadowContext shadowContext, float3 positionWS, float3 normalWS, int shadowDataIndex, float3 L )
{
return EvalShadow_CascadedDepth_Blend( shadowContext, positionWS, normalWS, shadowDataIndex, L );
}
float GetDirectionalShadowAttenuationDefault( ShadowContext shadowContext, float3 positionWS, float3 normalWS, int shadowDataIndex, float3 L, float2 unPositionSS )
{
return GetDirectionalShadowAttenuationDefault( shadowContext, positionWS, normalWS, shadowDataIndex, L );
}
// include project specific shadow dispatcher. If this file is not empty, it MUST define which default shadows it's overriding
#define SHADOW_DISPATCH_INCLUDE
#include "../../ShadowIncludes.hlsl"
#undef SHADOW_DISPATCH_INCLUDE
// if shadow dispatch is empty we'll fall back to default shadow sampling implementations
return GetPunctualShadowAttenuationDefault( shadowContext, positionWS, normalWS, shadowDataIndex, L );
return EvalShadow_PunctualDepth(shadowContext, positionWS, normalWS, shadowDataIndex, L);
return GetPunctualShadowAttenuationDefault( shadowContext, positionWS, normalWS, shadowDataIndex, L, unPositionSS );
return GetPunctualShadowAttenuation( shadowContext, positionWS, normalWS, shadowDataIndex, L );
return GetDirectionalShadowAttenuationDefault( shadowContext, positionWS, normalWS, shadowDataIndex, L );
return EvalShadow_CascadedDepth_Blend( shadowContext, positionWS, normalWS, shadowDataIndex, L );
return GetDirectionalShadowAttenuationDefault( shadowContext, positionWS, normalWS, shadowDataIndex, L, unPositionSS );
return GetDirectionalShadowAttenuation( shadowContext, positionWS, normalWS, shadowDataIndex, L );
}
#endif
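Sketch of a project dispatcher override as described by the comments above. The body here simply reuses EvalShadow_PunctualDepth for illustration; a real project would substitute its own algorithm, and would normally also provide the matching overload that takes unPositionSS.

// In the project's dispatcher include (active while SHADOW_DISPATCH_INCLUDE is defined):
#define SHADOW_DISPATCH_USE_CUSTOM_PUNCTUAL
float GetPunctualShadowAttenuation( ShadowContext shadowContext, float3 positionWS, float3 normalWS, int shadowDataIndex, float4 L )
{
    return EvalShadow_PunctualDepth( shadowContext, positionWS, normalWS, shadowDataIndex, L );
}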

2
ScriptableRenderPipeline/Core/ShaderLibrary/VolumeRendering.hlsl


// Absorption coefficient from Disney: http://blog.selfshadow.com/publications/s2015-shading-course/burley/s2015_pbs_disney_bsdf_notes.pdf
float3 TransmittanceColorAtDistanceToAbsorption(float3 transmittanceColor, float atDistance)
{
return -log(transmittanceColor + FLT_EPSILON) / max(atDistance, FLT_EPSILON);
return -log(transmittanceColor + FLT_EPS) / max(atDistance, FLT_EPS);
}
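A quick worked example of the formula above (numbers chosen for illustration): a transmittance colour of 0.5 over a distance of 2 units gives an absorption coefficient of -ln(0.5) / 2 ≈ 0.347 per unit.

float3 AbsorptionExample()
{
    return TransmittanceColorAtDistanceToAbsorption(float3(0.5, 0.5, 0.5), 2.0); // ≈ 0.347 in each channel
}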

2
ScriptableRenderPipeline/Core/Shadow/Resources/DebugDisplayShadowMap.shader


#pragma target 4.5
#pragma only_renderers d3d11 ps4 vulkan metal // TEMP: until we go further in dev
#include "../../../Core/ShaderLibrary/Common.hlsl"
#include "ShaderLibrary/Common.hlsl"
float4 _TextureScaleBias;
float _TextureSlice;

1
ScriptableRenderPipeline/Core/Shadow/Shadow.cs


{
m_Shadowmap = new RenderTexture( (int) m_Width, (int) m_Height, (int) m_ShadowmapBits, m_ShadowmapFormat, RenderTextureReadWrite.Linear );
CreateShadowmap( m_Shadowmap );
m_Shadowmap.Create();
}
virtual protected void CreateShadowmap( RenderTexture shadowmap )

12
ScriptableRenderPipeline/HDRenderPipeline/Camera/HDAdditionalCameraData.cs


public class HDAdditionalCameraData : MonoBehaviour
{
public RenderingPathHDRP renderingPath;
Camera m_camera;
void OnEnable()
{
// Be sure legacy HDR option is disable on camera as it cause banding in SceneView. Yes, it is a contradiction, but well, Unity...
// When HDR option is enabled, Unity render in FP16 then convert to 8bit with a stretch copy (this cause banding as it should be convert to sRGB (or other color appropriate color space)), then do a final shader with sRGB conversion
// When LDR, unity render in 8bitSRGB, then do a final shader with sRGB conversion
// What should be done is just in our Post process we convert to sRGB and store in a linear 10bit, but require C++ change...
m_camera = GetComponent<Camera>();
m_camera.allowHDR = false;
}
}
}

2
ScriptableRenderPipeline/HDRenderPipeline/Camera/HDCamera.cs


public Plane[] frustumPlanes;
public Vector4[] frustumPlaneEquations;
public Camera camera;
public PostProcessRenderContext postprocessRenderContext;
public Matrix4x4 viewProjMatrix
{

camera = cam;
frustumPlanes = new Plane[6];
frustumPlaneEquations = new Vector4[6];
postprocessRenderContext = new PostProcessRenderContext();
Reset();
}

4
ScriptableRenderPipeline/HDRenderPipeline/Debug/DebugDisplayLatlong.shader


#pragma vertex Vert
#pragma fragment Frag
#include "../../Core/ShaderLibrary/Common.hlsl"
#include "../../Core/ShaderLibrary/ImageBasedLighting.hlsl"
#include "ShaderLibrary/Common.hlsl"
#include "ShaderLibrary/ImageBasedLighting.hlsl"
TEXTURECUBE(_InputCubemap);
SAMPLERCUBE(sampler_InputCubemap);

2
ScriptableRenderPipeline/HDRenderPipeline/Debug/DebugFullScreen.shader


#pragma vertex Vert
#pragma fragment Frag
#include "../../Core/ShaderLibrary/Common.hlsl"
#include "ShaderLibrary/Common.hlsl"
#include "../Debug/DebugDisplay.cs.hlsl"
#include "../ShaderVariables.hlsl"

8
ScriptableRenderPipeline/HDRenderPipeline/Debug/DebugViewMaterialGBuffer.shader


#pragma multi_compile _ SHADOWS_SHADOWMASK
#include "../../Core/ShaderLibrary/Common.hlsl"
#include "../../Core/ShaderLibrary/Color.hlsl"
#include "ShaderLibrary/Common.hlsl"
#include "ShaderLibrary/Color.hlsl"
// CAUTION: In case deferred lighting need to support various lighting model statically, we will require to do multicompile with different define like UNITY_MATERIAL_LIT
#define UNITY_MATERIAL_LIT // Need to be define before including Material.hlsl

BSDFData bsdfData;
BakeLightingData bakeLightingData;
DECODE_FROM_GBUFFER(posInput.unPositionSS, 0xFFFFFFFF, bsdfData, bakeLightingData.bakeDiffuseLighting);
DECODE_FROM_GBUFFER(posInput.unPositionSS, UINT_MAX, bsdfData, bakeLightingData.bakeDiffuseLighting);
#ifdef SHADOWS_SHADOWMASK
DecodeShadowMask(LOAD_TEXTURE2D(_ShadowMaskTexture, posInput.unPositionSS), bakeLightingData.bakeShadowMask);
#endif

if (_DebugViewMaterial == DEBUGVIEWGBUFFER_DEPTH)
{
float linearDepth = frac(posInput.depthVS * 0.1);
float linearDepth = frac(posInput.linearDepth * 0.1);
result = linearDepth.xxx;
}
// Caution: This value is not the same than the builtin data bakeDiffuseLighting. It also include emissive and multiply by the albedo

2
ScriptableRenderPipeline/HDRenderPipeline/Debug/DebugViewTiles.shader


// Include
//-------------------------------------------------------------------------------------
#include "../../Core/ShaderLibrary/Common.hlsl"
#include "ShaderLibrary/Common.hlsl"
// Note: We have fix as guidelines that we have only one deferred material (with control of GBuffer enabled). Mean a users that add a new
// deferred material must replace the old one here. If in the future we want to support multiple layout (cause a lot of consistency problem),

135
ScriptableRenderPipeline/HDRenderPipeline/Editor/HDAssetFactory.cs


using UnityEngine;
using System.IO;
using UnityEngine;
using UnityEngine.Experimental.Rendering;
using UnityEngine.Experimental.Rendering.HDPipeline;

get { return HDEditorUtils.GetHDRenderPipelinePath() + "RenderPipelineResources/HDRenderPipelineResources.asset"; }
}
static string s_DefaultMaterialPath
{
get { return HDEditorUtils.GetHDRenderPipelinePath() + "RenderPipelineResources/DefaultHDMaterial.mat"; }
}
static string s_DefaultShaderPath
{
get { return HDEditorUtils.GetHDRenderPipelinePath() + "Material/Lit/Lit.shader"; }
}
class DoCreateNewAssetHDRenderPipeline : ProjectWindowCallback.EndNameEditAction
{
public override void Action(int instanceId, string pathName, string resourceFile)
{
var newAsset = CreateInstance<HDRenderPipelineAsset>();
newAsset.name = Path.GetFileName(pathName);
// Load default renderPipelineResources / Material / Shader
newAsset.renderPipelineResources = AssetDatabase.LoadAssetAtPath<RenderPipelineResources>(s_RenderPipelineResourcesPath);
newAsset.defaultDiffuseMaterial = AssetDatabase.LoadAssetAtPath<Material>(s_DefaultMaterialPath);
newAsset.defaultShader = AssetDatabase.LoadAssetAtPath<Shader>(s_DefaultShaderPath);
AssetDatabase.CreateAsset(newAsset, pathName);
ProjectWindowUtil.ShowCreatedAsset(newAsset);
}
}
var instance = ScriptableObject.CreateInstance<HDRenderPipelineAsset>();
AssetDatabase.CreateAsset(instance, HDEditorUtils.GetHDRenderPipelinePath() + "HDRenderPipelineAsset.asset");
// If it exist, load renderPipelineResources
instance.renderPipelineResources = AssetDatabase.LoadAssetAtPath<RenderPipelineResources>(s_RenderPipelineResourcesPath);
var icon = EditorGUIUtility.FindTexture("ScriptableObject Icon");
ProjectWindowUtil.StartNameEditingIfProjectWindowExists(0, ScriptableObject.CreateInstance<DoCreateNewAssetHDRenderPipeline>(), "New HDRenderPipelineAsset.asset", icon, null);
// TODO skybox/cubemap
// Note: move this to a static using once we can target C#6+
static T Load<T>(string path) where T : UnityObject
{
return AssetDatabase.LoadAssetAtPath<T>(path);
}
[MenuItem("Assets/Create/Render Pipeline/High Definition/Render Pipeline Resources", priority = CoreUtils.assetCreateMenuPriority2)]
static void CreateRenderPipelineResources()
class DoCreateNewAssetHDRenderPipelineResources : ProjectWindowCallback.EndNameEditAction
string HDRenderPipelinePath = HDEditorUtils.GetHDRenderPipelinePath();
string PostProcessingPath = HDEditorUtils.GetPostProcessingPath();
string CorePath = HDEditorUtils.GetCorePath();
public override void Action(int instanceId, string pathName, string resourceFile)
{
var newAsset = CreateInstance<RenderPipelineResources>();
newAsset.name = Path.GetFileName(pathName);
var instance = ScriptableObject.CreateInstance<RenderPipelineResources>();
// Load default renderPipelineResources / Material / Shader
string HDRenderPipelinePath = HDEditorUtils.GetHDRenderPipelinePath();
string PostProcessingPath = HDEditorUtils.GetPostProcessingPath();
string CorePath = HDEditorUtils.GetCorePath();
instance.debugDisplayLatlongShader = Load<Shader>(HDRenderPipelinePath + "Debug/DebugDisplayLatlong.Shader");
instance.debugViewMaterialGBufferShader = Load<Shader>(HDRenderPipelinePath + "Debug/DebugViewMaterialGBuffer.Shader");
instance.debugViewTilesShader = Load<Shader>(HDRenderPipelinePath + "Debug/DebugViewTiles.Shader");
instance.debugFullScreenShader = Load<Shader>(HDRenderPipelinePath + "Debug/DebugFullScreen.Shader");
newAsset.debugDisplayLatlongShader = Load<Shader>(HDRenderPipelinePath + "Debug/DebugDisplayLatlong.Shader");
newAsset.debugViewMaterialGBufferShader = Load<Shader>(HDRenderPipelinePath + "Debug/DebugViewMaterialGBuffer.Shader");
newAsset.debugViewTilesShader = Load<Shader>(HDRenderPipelinePath + "Debug/DebugViewTiles.Shader");
newAsset.debugFullScreenShader = Load<Shader>(HDRenderPipelinePath + "Debug/DebugFullScreen.Shader");
instance.deferredShader = Load<Shader>(HDRenderPipelinePath + "Lighting/Deferred.Shader");
instance.subsurfaceScatteringCS = Load<ComputeShader>(HDRenderPipelinePath + "Material/Lit/Resources/SubsurfaceScattering.compute");
instance.volumetricLightingCS = Load<ComputeShader>(HDRenderPipelinePath + "Lighting/Volumetrics/Resources/VolumetricLighting.compute");
instance.gaussianPyramidCS = Load<ComputeShader>(PostProcessingPath + "Shaders/Builtins/GaussianDownsample.compute");
instance.depthPyramidCS = Load<ComputeShader>(HDRenderPipelinePath + "RenderPipelineResources/DepthDownsample.compute");
instance.copyChannelCS = Load<ComputeShader>(CorePath + "Resources/GPUCopy.compute");
instance.applyDistortionCS = Load<ComputeShader>(HDRenderPipelinePath + "RenderPipelineResources/ApplyDistorsion.compute");
newAsset.deferredShader = Load<Shader>(HDRenderPipelinePath + "Lighting/Deferred.Shader");
newAsset.subsurfaceScatteringCS = Load<ComputeShader>(HDRenderPipelinePath + "Material/Lit/Resources/SubsurfaceScattering.compute");
newAsset.gaussianPyramidCS = Load<ComputeShader>(PostProcessingPath + "Shaders/Builtins/GaussianDownsample.compute");
newAsset.depthPyramidCS = Load<ComputeShader>(HDRenderPipelinePath + "RenderPipelineResources/DepthDownsample.compute");
newAsset.copyChannelCS = Load<ComputeShader>(CorePath + "Resources/GPUCopy.compute");
newAsset.applyDistortionCS = Load<ComputeShader>(HDRenderPipelinePath + "RenderPipelineResources/ApplyDistorsion.compute");
instance.clearDispatchIndirectShader = Load<ComputeShader>(HDRenderPipelinePath + "Lighting/TilePass/cleardispatchindirect.compute");
instance.buildDispatchIndirectShader = Load<ComputeShader>(HDRenderPipelinePath + "Lighting/TilePass/builddispatchindirect.compute");
instance.buildScreenAABBShader = Load<ComputeShader>(HDRenderPipelinePath + "Lighting/TilePass/scrbound.compute");
instance.buildPerTileLightListShader = Load<ComputeShader>(HDRenderPipelinePath + "Lighting/TilePass/lightlistbuild.compute");
instance.buildPerBigTileLightListShader = Load<ComputeShader>(HDRenderPipelinePath + "Lighting/TilePass/lightlistbuild-bigtile.compute");
instance.buildPerVoxelLightListShader = Load<ComputeShader>(HDRenderPipelinePath + "Lighting/TilePass/lightlistbuild-clustered.compute");
instance.buildMaterialFlagsShader = Load<ComputeShader>(HDRenderPipelinePath + "Lighting/TilePass/materialflags.compute");
instance.deferredComputeShader = Load<ComputeShader>(HDRenderPipelinePath + "Lighting/TilePass/Deferred.compute");
newAsset.clearDispatchIndirectShader = Load<ComputeShader>(HDRenderPipelinePath + "Lighting/TilePass/cleardispatchindirect.compute");
newAsset.buildDispatchIndirectShader = Load<ComputeShader>(HDRenderPipelinePath + "Lighting/TilePass/builddispatchindirect.compute");
newAsset.buildScreenAABBShader = Load<ComputeShader>(HDRenderPipelinePath + "Lighting/TilePass/scrbound.compute");
newAsset.buildPerTileLightListShader = Load<ComputeShader>(HDRenderPipelinePath + "Lighting/TilePass/lightlistbuild.compute");
newAsset.buildPerBigTileLightListShader = Load<ComputeShader>(HDRenderPipelinePath + "Lighting/TilePass/lightlistbuild-bigtile.compute");
newAsset.buildPerVoxelLightListShader = Load<ComputeShader>(HDRenderPipelinePath + "Lighting/TilePass/lightlistbuild-clustered.compute");
newAsset.buildMaterialFlagsShader = Load<ComputeShader>(HDRenderPipelinePath + "Lighting/TilePass/materialflags.compute");
newAsset.deferredComputeShader = Load<ComputeShader>(HDRenderPipelinePath + "Lighting/TilePass/Deferred.compute");
instance.deferredDirectionalShadowComputeShader = Load<ComputeShader>(HDRenderPipelinePath + "Lighting/TilePass/DeferredDirectionalShadow.compute");
newAsset.deferredDirectionalShadowComputeShader = Load<ComputeShader>(HDRenderPipelinePath + "Lighting/TilePass/DeferredDirectionalShadow.compute");
// SceneSettings
// These shaders don't need to be reference by RenderPipelineResource as they are not use at runtime (only to draw in editor)
// instance.drawSssProfile = UnityEditor.AssetDatabase.LoadAssetAtPath<Shader>(HDRenderPipelinePath + "SceneSettings/DrawSssProfile.shader");
// instance.drawTransmittanceGraphShader = UnityEditor.AssetDatabase.LoadAssetAtPath<Shader>(HDRenderPipelinePath + "SceneSettings/DrawTransmittanceGraph.shader");
// SceneSettings
// These shaders don't need to be reference by RenderPipelineResource as they are not use at runtime (only to draw in editor)
// instance.drawSssProfile = UnityEditor.AssetDatabase.LoadAssetAtPath<Shader>(HDRenderPipelinePath + "SceneSettings/DrawSssProfile.shader");
// instance.drawTransmittanceGraphShader = UnityEditor.AssetDatabase.LoadAssetAtPath<Shader>(HDRenderPipelinePath + "SceneSettings/DrawTransmittanceGraph.shader");
instance.cameraMotionVectors = Load<Shader>(HDRenderPipelinePath + "RenderPipelineResources/CameraMotionVectors.shader");
newAsset.cameraMotionVectors = Load<Shader>(HDRenderPipelinePath + "RenderPipelineResources/CameraMotionVectors.shader");
// Sky
instance.blitCubemap = Load<Shader>(HDRenderPipelinePath + "Sky/BlitCubemap.shader");
instance.buildProbabilityTables = Load<ComputeShader>(HDRenderPipelinePath + "Sky/BuildProbabilityTables.compute");
instance.computeGgxIblSampleData = Load<ComputeShader>(HDRenderPipelinePath + "Sky/ComputeGgxIblSampleData.compute");
instance.GGXConvolve = Load<Shader>(HDRenderPipelinePath + "Sky/GGXConvolve.shader");
instance.opaqueAtmosphericScattering = Load<Shader>(HDRenderPipelinePath + "Sky/OpaqueAtmosphericScattering.shader");
// Sky
newAsset.blitCubemap = Load<Shader>(HDRenderPipelinePath + "Sky/BlitCubemap.shader");
newAsset.buildProbabilityTables = Load<ComputeShader>(HDRenderPipelinePath + "Sky/BuildProbabilityTables.compute");
newAsset.computeGgxIblSampleData = Load<ComputeShader>(HDRenderPipelinePath + "Sky/ComputeGgxIblSampleData.compute");
newAsset.GGXConvolve = Load<Shader>(HDRenderPipelinePath + "Sky/GGXConvolve.shader");
newAsset.opaqueAtmosphericScattering = Load<Shader>(HDRenderPipelinePath + "Sky/OpaqueAtmosphericScattering.shader");
// Skybox/Cubemap is a builtin shader, must use Sahder.Find to access it. It is fine because we are in the editor
instance.skyboxCubemap = Shader.Find("Skybox/Cubemap");
// Skybox/Cubemap is a builtin shader, must use Sahder.Find to access it. It is fine because we are in the editor
newAsset.skyboxCubemap = Shader.Find("Skybox/Cubemap");
AssetDatabase.CreateAsset(instance, s_RenderPipelineResourcesPath);
AssetDatabase.SaveAssets();
AssetDatabase.Refresh();
AssetDatabase.CreateAsset(newAsset, pathName);
ProjectWindowUtil.ShowCreatedAsset(newAsset);
}
// Note: move this to a static using once we can target C#6+
static T Load<T>(string path)
where T : UnityObject
[MenuItem("Assets/Create/Render Pipeline/High Definition/Render Pipeline Resources", priority = CoreUtils.assetCreateMenuPriority1)]
static void CreateRenderPipelineResources()
return AssetDatabase.LoadAssetAtPath<T>(path);
var icon = EditorGUIUtility.FindTexture("ScriptableObject Icon");
ProjectWindowUtil.StartNameEditingIfProjectWindowExists(0, ScriptableObject.CreateInstance<DoCreateNewAssetHDRenderPipelineResources>(), "New HDRenderPipelineResources.asset", icon, null);
}
}
}

1
ScriptableRenderPipeline/HDRenderPipeline/Editor/HDRenderPipelineInspector.Styles.cs


sealed class Styles
{
public readonly GUIContent defaults = new GUIContent("Defaults");
public readonly GUIContent renderPipelineResources = new GUIContent("Render Pipeline Resources", "Set of resources that need to be loaded when creating stand alone");
public readonly GUIContent defaultDiffuseMaterial = new GUIContent("Default Diffuse Material", "Material to use when creating objects");
public readonly GUIContent defaultShader = new GUIContent("Default Shader", "Shader to use when creating materials");

3
ScriptableRenderPipeline/HDRenderPipeline/Editor/HDRenderPipelineInspector.cs


[CustomEditor(typeof(HDRenderPipelineAsset))]
public sealed partial class HDRenderPipelineInspector : HDBaseEditor<HDRenderPipelineAsset>
{
SerializedProperty m_RenderPipelineResources;
SerializedProperty m_DefaultDiffuseMaterial;
SerializedProperty m_DefaultShader;

void InitializeProperties()
{
m_RenderPipelineResources = properties.Find("m_RenderPipelineResources");
m_DefaultDiffuseMaterial = properties.Find("m_DefaultDiffuseMaterial");
m_DefaultShader = properties.Find("m_DefaultShader");

EditorGUILayout.LabelField(s_Styles.defaults, EditorStyles.boldLabel);
EditorGUI.indentLevel++;
EditorGUILayout.PropertyField(m_RenderPipelineResources, s_Styles.renderPipelineResources);
EditorGUILayout.PropertyField(m_DefaultDiffuseMaterial, s_Styles.defaultDiffuseMaterial);
EditorGUILayout.PropertyField(m_DefaultShader, s_Styles.defaultShader);
EditorGUI.indentLevel--;

431
ScriptableRenderPipeline/HDRenderPipeline/HDRenderPipeline.cs


// Renderer Bake configuration can vary depends on if shadow mask is enabled or no
RendererConfiguration m_currentRendererConfigurationBakedLighting = HDUtils.k_RendererConfigurationBakedLighting;
// Various set of material use in render loop
// Various set of material use in render loop
ComputeShader m_SubsurfaceScatteringCS { get { return m_Asset.renderPipelineResources.subsurfaceScatteringCS; } }
int m_SubsurfaceScatteringKernel;
Material m_CombineLightingPass;

int m_GaussianPyramidKernel;
ComputeShader m_DepthPyramidCS { get { return m_Asset.renderPipelineResources.depthPyramidCS; } }
int m_DepthPyramidKernel;
ComputeShader m_applyDistortionCS { get { return m_Asset.renderPipelineResources.applyDistortionCS; } }
int m_applyDistortionKernel;
Material m_CameraMotionVectorsMaterial;

ShaderPassName[] m_ForwardAndForwardOnlyPassNames = { new ShaderPassName(), new ShaderPassName(), HDShaderPassNames.s_SRPDefaultUnlitName};
ShaderPassName[] m_ForwardOnlyPassNames = { new ShaderPassName(), HDShaderPassNames.s_SRPDefaultUnlitName };
ShaderPassName[] m_AllTransparentPassNames = { HDShaderPassNames.s_TransparentDepthPrepassName,
HDShaderPassNames.s_TransparentBackfaceName,
ShaderPassName[] m_AllTransparentPassNames = { HDShaderPassNames.s_TransparentBackfaceName,
ShaderPassName[] m_AllTransparentDebugDisplayPassNames = { HDShaderPassNames.s_TransparentDepthPrepassName,
HDShaderPassNames.s_TransparentBackfaceDebugDisplayName,
ShaderPassName[] m_AllTransparentDebugDisplayPassNames = { HDShaderPassNames.s_TransparentBackfaceDebugDisplayName,
HDShaderPassNames.s_ForwardOnlyDebugDisplayName,
HDShaderPassNames.s_ForwardDebugDisplayName,
HDShaderPassNames.s_TransparentDepthPostpassName,

ShaderPassName[] m_DepthOnlyAndDepthForwardOnlyPassNames = { HDShaderPassNames.s_DepthForwardOnlyName, HDShaderPassNames.s_DepthOnlyName };
ShaderPassName[] m_DepthForwardOnlyPassNames = { HDShaderPassNames.s_DepthForwardOnlyName };
ShaderPassName[] m_DepthOnlyPassNames = { HDShaderPassNames.s_DepthOnlyName };
ShaderPassName[] m_TransparentDepthOnlyPassNames = { HDShaderPassNames.s_TransparentDepthPrepassName };
// Post-processing context and screen-space effects (recycled on every frame to avoid GC alloc)
readonly PostProcessRenderContext m_PostProcessContext;
// Stencil usage in HDRenderPipeline.
// Currently we use only 2 bits to identify the kind of lighting that is expected from the render pipeline

CreateSssMaterials();
// Initialize various compute shader resources
m_applyDistortionKernel = m_applyDistortionCS.FindKernel("KMain");
m_CopyStencilForSplitLighting = CoreUtils.CreateEngineMaterial("Hidden/HDRenderPipeline/CopyStencilBuffer");
m_CopyStencilForSplitLighting.EnableKeyword("EXPORT_HTILE");
m_CopyStencilForSplitLighting.SetInt(HDShaderIDs._StencilRef, (int)StencilLightingUsage.SplitLighting);

m_SkyManager.Build(asset.renderPipelineResources);
m_SkyManager.skySettings = skySettingsToUse;
m_PostProcessContext = new PostProcessRenderContext();
m_DebugDisplaySettings.RegisterDebug();
m_DebugFullScreenTempRT = HDShaderIDs._DebugFullScreenTexture;

base.Render(renderContext, cameras);
#if UNITY_EDITOR
SupportedRenderingFeatures.active = s_NeededFeatures;
SupportedRenderingFeatures.active = s_NeededFeatures;
// HD use specific GraphicsSettings. This is init here.
// TODO: This should not be set at each Frame but is there another place for these config setup ?
GraphicsSettings.lightsUseLinearIntensity = true;
GraphicsSettings.lightsUseColorTemperature = true;
// HD use specific GraphicsSettings. This is init here.
// TODO: This should not be set at each Frame but is there another place for these config setup ?
GraphicsSettings.lightsUseLinearIntensity = true;
GraphicsSettings.lightsUseColorTemperature = true;
if (m_FrameCount != Time.frameCount)
{
HDCamera.CleanUnused();
m_FrameCount = Time.frameCount;
}
if (m_FrameCount != Time.frameCount)
{
HDCamera.CleanUnused();
m_FrameCount = Time.frameCount;
}
foreach (var material in m_MaterialList)
material.RenderInit(cmd);
foreach (var material in m_MaterialList)
material.RenderInit(cmd);
// Do anything we need to do upon a new frame.
m_LightLoop.NewFrame();
// Do anything we need to do upon a new frame.
m_LightLoop.NewFrame();
// we only want to render one camera for now
// select the most main camera!
Camera camera = null;
foreach (var cam in cameras)
{
if (cam == Camera.main)
// we only want to render one camera for now
// select the most main camera!
Camera camera = null;
foreach (var cam in cameras)
camera = cam;
break;
if (cam == Camera.main)
{
camera = cam;
break;
}
}
if (camera == null && cameras.Length > 0)
camera = cameras[0];
if (camera == null && cameras.Length > 0)
camera = cameras[0];
if (camera == null)
{
renderContext.Submit();
return;
}
if (camera == null)
{
renderContext.Submit();
return;
}
// If we render a reflection view or a preview we should not display any debug information
// This need to be call before ApplyDebugDisplaySettings()
if (camera.cameraType == CameraType.Reflection || camera.cameraType == CameraType.Preview)
{
// Neutral allow to disable all debug settings
m_CurrentDebugDisplaySettings = s_NeutralDebugDisplaySettings;
}
else
{
m_CurrentDebugDisplaySettings = m_DebugDisplaySettings;
}
// If we render a reflection view or a preview we should not display any debug information
// This need to be call before ApplyDebugDisplaySettings()
if (camera.cameraType == CameraType.Reflection || camera.cameraType == CameraType.Preview)
{
// Neutral allow to disable all debug settings
m_CurrentDebugDisplaySettings = s_NeutralDebugDisplaySettings;
}
else
{
m_CurrentDebugDisplaySettings = m_DebugDisplaySettings;
}
ApplyDebugDisplaySettings(cmd);
UpdateCommonSettings();
ApplyDebugDisplaySettings(cmd);
UpdateCommonSettings();
ScriptableCullingParameters cullingParams;
if (!CullResults.GetCullingParameters(camera, out cullingParams))
{
renderContext.Submit();
return;
}
ScriptableCullingParameters cullingParams;
if (!CullResults.GetCullingParameters(camera, out cullingParams))
{
renderContext.Submit();
return;
}
m_LightLoop.UpdateCullingParameters( ref cullingParams );
m_LightLoop.UpdateCullingParameters( ref cullingParams );
// emit scene view UI
if (camera.cameraType == CameraType.SceneView)
{
ScriptableRenderContext.EmitWorldGeometryForSceneView(camera);
}
// emit scene view UI
if (camera.cameraType == CameraType.SceneView)
{
ScriptableRenderContext.EmitWorldGeometryForSceneView(camera);
}
using (new ProfilingSample(cmd, "CullResults.Cull", GetSampler(CustomSamplerId.CullResultsCull)))
{
CullResults.Cull(ref cullingParams, renderContext,ref m_CullResults);
}
using (new ProfilingSample(cmd, "CullResults.Cull", GetSampler(CustomSamplerId.CullResultsCull)))
{
CullResults.Cull(ref cullingParams, renderContext,ref m_CullResults);
}
Resize(camera);
Resize(camera);
renderContext.SetupCameraProperties(camera);
renderContext.SetupCameraProperties(camera);
var postProcessLayer = camera.GetComponent<PostProcessLayer>();
var hdCamera = HDCamera.Get(camera, postProcessLayer);
PushGlobalParams(hdCamera, cmd, sssSettings);
var postProcessLayer = camera.GetComponent<PostProcessLayer>();
var hdCamera = HDCamera.Get(camera, postProcessLayer);
PushGlobalParams(hdCamera, cmd, sssSettings);
// TODO: Find a correct place to bind these material textures
// We have to bind the material specific global parameters in this mode
m_MaterialList.ForEach(material => material.Bind());
// TODO: Find a correct place to bind these material textures
// We have to bind the material specific global parameters in this mode
m_MaterialList.ForEach(material => material.Bind());
var additionalCameraData = camera.GetComponent<HDAdditionalCameraData>();
if (additionalCameraData && additionalCameraData.renderingPath == RenderingPathHDRP.Unlit)
{
// TODO: Add another path dedicated to planar reflection / real time cubemap that implement simpler lighting
// It is up to the users to only send unlit object for this camera path
var additionalCameraData = camera.GetComponent<HDAdditionalCameraData>();
if (additionalCameraData && additionalCameraData.renderingPath == RenderingPathHDRP.Unlit)
{
// TODO: Add another path dedicated to planar reflection / real time cubemap that implement simpler lighting
// It is up to the users to only send unlit object for this camera path
using (new ProfilingSample(cmd, "Forward", GetSampler(CustomSamplerId.Forward)))
{
CoreUtils.SetRenderTarget(cmd, m_CameraColorBufferRT, m_CameraDepthStencilBufferRT, ClearFlag.Color | ClearFlag.Depth);
RenderOpaqueRenderList(m_CullResults, camera, renderContext, cmd, HDShaderPassNames.s_ForwardName);
RenderTransparentRenderList(m_CullResults, camera, renderContext, cmd, HDShaderPassNames.s_ForwardName, false);
}
renderContext.ExecuteCommandBuffer(cmd);
CommandBufferPool.Release(cmd);
renderContext.Submit();
return;
}
// Note: Legacy Unity behaves like this for ShadowMask:
// When you select ShadowMask in the Lighting panel it recompiles shaders on the fly with the SHADOW_MASK keyword.
// However there is no C# function we can query to know which mode has been selected in the Lighting panel, and it would be wrong anyway: the Lighting panel only sets what the next bake mode will be, so until the light is baked the value is wrong.
// Currently, to know if you need shadow mask, you need to go through all visible lights (from CullResults), check the LightBakingOutput struct and look at lightmapBakeType/mixedLightingMode. If one light has shadow mask bake mode, then you need the shadow mask feature (i.e. an extra GBuffer).
// It means that when we build a standalone player, if we detect a light with a baked shadow mask, we generate all shader variants (with and without shadow mask) and at runtime, when a baked shadow mask light is visible, we dynamically allocate an extra GBuffer and switch the shader.
// So the first thing to do is to go through all the lights: PrepareLightsForGPU
bool enableBakeShadowMask;
using (new ProfilingSample(cmd, "TP_PrepareLightsForGPU", GetSampler(CustomSamplerId.TPPrepareLightsForGPU)))
{
enableBakeShadowMask = m_LightLoop.PrepareLightsForGPU(m_ShadowSettings, m_CullResults, camera);
}
ConfigureForShadowMask(enableBakeShadowMask, cmd);
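As a rough illustration of the visible-light scan described in the note above (not the actual LightLoop implementation), a shadow-mask check over the culling results could be sketched as follows; the method name is hypothetical, only the LightBakingOutput fields are Unity API:

// Sketch only: returns true if any visible light was baked in Shadowmask mode,
// i.e. the extra GBuffer and the SHADOW_MASK shader variants are needed.
static bool AnyVisibleLightUsesShadowMask(CullResults cullResults)
{
    foreach (VisibleLight visibleLight in cullResults.visibleLights)
    {
        LightBakingOutput bakingOutput = visibleLight.light.bakingOutput;
        if (bakingOutput.lightmapBakeType == LightmapBakeType.Mixed &&
            bakingOutput.mixedLightingMode == MixedLightingMode.Shadowmask)
            return true;
    }
    return false;
}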
InitAndClearBuffer(hdCamera, enableBakeShadowMask, cmd);
RenderDepthPrepass(m_CullResults, camera, renderContext, cmd);
RenderGBuffer(m_CullResults, camera, renderContext, cmd);
// In both forward and deferred, everything opaque should have been rendered at this point so we can safely copy the depth buffer for later processing.
CopyDepthBufferIfNeeded(cmd);
RenderPyramidDepth(camera, cmd, renderContext, FullScreenDebugMode.DepthPyramid);
// Required for the SSS and the shader feature classification pass.
PrepareAndBindStencilTexture(cmd);
if (m_CurrentDebugDisplaySettings.IsDebugMaterialDisplayEnabled())
{
RenderDebugViewMaterial(m_CullResults, hdCamera, renderContext, cmd);
}
else
{
using (new ProfilingSample(cmd, "Render SSAO", GetSampler(CustomSamplerId.RenderSSAO)))
{
// TODO: Everything here (SSAO, shadows, build light list, deferred shadows, material and light classification) can be parallelized with async compute
RenderSSAO(cmd, camera, renderContext, postProcessLayer);
}
using (new ProfilingSample(cmd, "Render shadows", GetSampler(CustomSamplerId.RenderShadows)))
{
m_LightLoop.RenderShadows(renderContext, cmd, m_CullResults);
// TODO: check if the statement below still applies
renderContext.SetupCameraProperties(camera); // Need to call SetupCameraProperties again after RenderShadows as it modifies our view/proj matrices
}
using (new ProfilingSample(cmd, "Deferred directional shadows", GetSampler(CustomSamplerId.RenderDeferredDirectionalShadow)))
{
cmd.GetTemporaryRT(m_DeferredShadowBuffer, camera.pixelWidth, camera.pixelHeight, 0, FilterMode.Point, RenderTextureFormat.ARGB32, RenderTextureReadWrite.Linear, 1, true);
m_LightLoop.RenderDeferredDirectionalShadow(hdCamera, m_DeferredShadowBufferRT, GetDepthTexture(), cmd);
PushFullScreenDebugTexture(cmd, m_DeferredShadowBuffer, hdCamera.camera, renderContext, FullScreenDebugMode.DeferredShadows);
}
using (new ProfilingSample(cmd, "Build Light list", GetSampler(CustomSamplerId.BuildLightList)))
{
m_LightLoop.BuildGPULightLists(camera, cmd, m_CameraDepthStencilBufferRT, GetStencilTexture());
}
// Don't update the sky environment if we are rendering a cubemap (it should have been updated already)
if (camera.cameraType != CameraType.Reflection)
{
// Caution: we require the sun light here as some skies use the sun light to render, which means UpdateSkyEnvironment
// must be called after BuildGPULightLists.
// TODO: Try to arrange the code so we can trigger this call earlier and use async compute to run the sky convolution during other passes (once we move the convolution shader to compute).
UpdateSkyEnvironment(hdCamera, cmd);
}
RenderDeferredLighting(hdCamera, cmd);
// We compute subsurface scattering here. Therefore, no objects rendered afterwards will exhibit SSS.
// Currently, there is no efficient way to switch between SRT and MRT for the forward pass;
// therefore, forward-rendered objects do not output split lighting required for the SSS pass.
SubsurfaceScatteringPass(hdCamera, cmd, sssSettings);
RenderForward(m_CullResults, camera, renderContext, cmd, ForwardPass.Opaque);
RenderForwardError(m_CullResults, camera, renderContext, cmd, ForwardPass.Opaque);
RenderSky(hdCamera, cmd);
// Render pre refraction objects
RenderForward(m_CullResults, camera, renderContext, cmd, ForwardPass.PreRefraction);
RenderForwardError(m_CullResults, camera, renderContext, cmd, ForwardPass.PreRefraction);
RenderGaussianPyramidColor(camera, cmd, renderContext, FullScreenDebugMode.PreRefractionColorPyramid);
// Render all types of transparent forward (unlit, lit, complex (hair...)) to keep the sorting between transparent objects.
RenderForward(m_CullResults, camera, renderContext, cmd, ForwardPass.Transparent);
RenderForwardError(m_CullResults, camera, renderContext, cmd, ForwardPass.Transparent);
PushFullScreenDebugTexture(cmd, m_CameraColorBuffer, camera, renderContext, FullScreenDebugMode.NanTracker);
// Planar and real-time cubemap captures don't need post-processing and render in FP16
if (camera.cameraType == CameraType.Reflection)
{
using (new ProfilingSample(cmd, "Blit to final RT", GetSampler(CustomSamplerId.BlitToFinalRT)))
{
// Simple blit
cmd.Blit(m_CameraColorBufferRT, BuiltinRenderTextureType.CameraTarget);
}
}
else
{
RenderVelocity(m_CullResults, hdCamera, renderContext, cmd); // Note: we may have to render velocity earlier if we do temporal AO, temporal volumetrics, etc. That would mean not taking forward opaques into account in the case of deferred rendering?
RenderGaussianPyramidColor(camera, cmd, renderContext, FullScreenDebugMode.FinalColorPyramid);
// TODO: Check with VFX team.
// Rendering distortion here of course produces a lot of artifacts.
// But resolving at each object that writes distortion is not possible (we would need to sort transparents, render those that do not distort, then resolve, and so on).
// Instead we chose to apply distortion at the end, after we accumulate the distortion vectors and the desired blurriness.
AccumulateDistortion(m_CullResults, camera, renderContext, cmd);
RenderDistortion(cmd, m_Asset.renderPipelineResources);
RenderPostProcesses(camera, cmd, postProcessLayer);
RenderPostProcesses(hdCamera, cmd, postProcessLayer);
}
}
RenderDebug(hdCamera, cmd);
// bind depth surface for editor grid/gizmo/selection rendering
if (camera.cameraType == CameraType.SceneView)
cmd.SetRenderTarget(BuiltinRenderTextureType.CameraTarget, m_CameraDepthStencilBufferRT);
renderContext.ExecuteCommandBuffer(cmd);
}
}
}
void RenderOpaqueRenderList(CullResults cull,

ScriptableRenderContext renderContext,
CommandBuffer cmd,
ShaderPassName passName,
bool preRefractionQueue,
Material overrideMaterial = null,
bool preRefractionQueue = false)
Material overrideMaterial = null)
RenderTransparentRenderList(cull, camera, renderContext, cmd, m_SinglePassName,
rendererConfiguration, stateBlock, overrideMaterial, preRefractionQueue);
RenderTransparentRenderList(cull, camera, renderContext, cmd, m_SinglePassName, preRefractionQueue,
rendererConfiguration, stateBlock, overrideMaterial);
}
void RenderTransparentRenderList(CullResults cull,

ShaderPassName[] passNames,
bool preRefractionQueue,
Material overrideMaterial = null,
bool preRefractionQueue = false)
Material overrideMaterial = null
)
{
if (!m_CurrentDebugDisplaySettings.renderingDebugSettings.displayTransparentObjects)
return;

cmd.ClearRenderTarget(false, true, Color.clear);
// Only transparent object can render distortion vectors
RenderTransparentRenderList(cullResults, camera, renderContext, cmd, HDShaderPassNames.s_DistortionVectorsName, preRefractionQueue:true);
RenderTransparentRenderList(cullResults, camera, renderContext, cmd, HDShaderPassNames.s_DistortionVectorsName);
RenderTransparentRenderList(cullResults, camera, renderContext, cmd, HDShaderPassNames.s_DistortionVectorsName, true);
RenderTransparentRenderList(cullResults, camera, renderContext, cmd, HDShaderPassNames.s_DistortionVectorsName, false);
}
}

{
var size = new Vector4(m_CurrentWidth, m_CurrentHeight, 1f / m_CurrentWidth, 1f / m_CurrentHeight);
uint x, y, z;
resources.applyDistortionCS.GetKernelThreadGroupSizes(resources.applyDistortionKernel, out x, out y, out z);
cmd.SetComputeTextureParam(resources.applyDistortionCS, resources.applyDistortionKernel, HDShaderIDs._DistortionTexture, m_DistortionBufferRT);
cmd.SetComputeTextureParam(resources.applyDistortionCS, resources.applyDistortionKernel, HDShaderIDs._GaussianPyramidColorTexture, m_GaussianPyramidColorBufferRT);
cmd.SetComputeTextureParam(resources.applyDistortionCS, resources.applyDistortionKernel, HDShaderIDs._CameraColorTexture, m_CameraColorBufferRT);
cmd.SetComputeTextureParam(resources.applyDistortionCS, resources.applyDistortionKernel, HDShaderIDs._DepthTexture, GetDepthTexture());
cmd.SetComputeVectorParam(resources.applyDistortionCS, HDShaderIDs._Size, size);
cmd.SetComputeVectorParam(resources.applyDistortionCS, HDShaderIDs._ZBufferParams, Shader.GetGlobalVector(HDShaderIDs._ZBufferParams));
cmd.SetComputeVectorParam(resources.applyDistortionCS, HDShaderIDs._GaussianPyramidColorMipSize, Shader.GetGlobalVector(HDShaderIDs._GaussianPyramidColorMipSize));
m_applyDistortionCS.GetKernelThreadGroupSizes(m_applyDistortionKernel, out x, out y, out z);
cmd.SetComputeTextureParam(m_applyDistortionCS, m_applyDistortionKernel, HDShaderIDs._DistortionTexture, m_DistortionBufferRT);
cmd.SetComputeTextureParam(m_applyDistortionCS, m_applyDistortionKernel, HDShaderIDs._GaussianPyramidColorTexture, m_GaussianPyramidColorBufferRT);
cmd.SetComputeTextureParam(m_applyDistortionCS, m_applyDistortionKernel, HDShaderIDs._CameraColorTexture, m_CameraColorBufferRT);
cmd.SetComputeTextureParam(m_applyDistortionCS, m_applyDistortionKernel, HDShaderIDs._DepthTexture, GetDepthTexture());
cmd.SetComputeVectorParam(m_applyDistortionCS, HDShaderIDs._Size, size);
cmd.SetComputeVectorParam(m_applyDistortionCS, HDShaderIDs._ZBufferParams, Shader.GetGlobalVector(HDShaderIDs._ZBufferParams));
cmd.SetComputeVectorParam(m_applyDistortionCS, HDShaderIDs._GaussianPyramidColorMipSize, Shader.GetGlobalVector(HDShaderIDs._GaussianPyramidColorMipSize));
cmd.DispatchCompute(resources.applyDistortionCS, resources.applyDistortionKernel, Mathf.CeilToInt(size.x / x), Mathf.CeilToInt(size.y / y), 1);
cmd.DispatchCompute(m_applyDistortionCS, m_applyDistortionKernel, Mathf.CeilToInt(size.x / x), Mathf.CeilToInt(size.y / y), 1);
}
}
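For readability, the dispatch arithmetic above (one thread group per x-by-y block of pixels, rounded up) can be isolated in a small helper; this is only a sketch, the name and return type are illustrative:

// Sketch: number of compute thread groups needed to cover a width-by-height target,
// given the kernel group sizes queried with GetKernelThreadGroupSizes.
static Vector2Int DistortionGroupCount(int width, int height, uint sizeX, uint sizeY)
{
    return new Vector2Int(
        Mathf.CeilToInt(width  / (float)sizeX),  // groups along X
        Mathf.CeilToInt(height / (float)sizeY)); // groups along Y
}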

RenderOpaqueRenderList(cull, camera, renderContext, cmd, m_DepthOnlyPassNames, 0, renderQueueRange);
}
}
// Render transparent depth prepass after opaque one
RenderTransparentRenderList(cull, camera, renderContext, cmd, m_TransparentDepthOnlyPassNames, true);
RenderTransparentRenderList(cull, camera, renderContext, cmd, m_TransparentDepthOnlyPassNames, false);
}
}

RenderOpaqueRenderList(cull, hdCamera.camera, renderContext, cmd, m_AllForwardDebugDisplayPassNames, m_currentRendererConfigurationBakedLighting);
// Render forward transparent
RenderTransparentRenderList(cull, hdCamera.camera, renderContext, cmd, m_AllForwardDebugDisplayPassNames, m_currentRendererConfigurationBakedLighting);
RenderTransparentRenderList(cull, hdCamera.camera, renderContext, cmd, m_AllForwardDebugDisplayPassNames, true, m_currentRendererConfigurationBakedLighting);
RenderTransparentRenderList(cull, hdCamera.camera, renderContext, cmd, m_AllForwardDebugDisplayPassNames, false, m_currentRendererConfigurationBakedLighting);
}
}

else
{
var passNames = m_CurrentDebugDisplaySettings.IsDebugDisplayEnabled() ? m_AllTransparentDebugDisplayPassNames : m_AllTransparentPassNames;
RenderTransparentRenderList(cullResults, camera, renderContext, cmd, passNames, m_currentRendererConfigurationBakedLighting, preRefractionQueue: pass == ForwardPass.PreRefraction);
RenderTransparentRenderList(cullResults, camera, renderContext, cmd, passNames, pass == ForwardPass.PreRefraction, m_currentRendererConfigurationBakedLighting);
}
}
}

}
else
{
RenderTransparentRenderList(cullResults, camera, renderContext, cmd, m_ForwardErrorPassNames, 0,
null, m_ErrorMaterial, pass == ForwardPass.PreRefraction);
RenderTransparentRenderList(cullResults, camera, renderContext, cmd, m_ForwardErrorPassNames, pass == ForwardPass.PreRefraction, 0, null, m_ErrorMaterial);
}
}
}

}
}
void RenderPostProcesses(Camera camera, CommandBuffer cmd, PostProcessLayer layer)
void RenderPostProcesses(HDCamera hdcamera, CommandBuffer cmd, PostProcessLayer layer)
{
using (new ProfilingSample(cmd, "Post-processing", GetSampler(CustomSamplerId.PostProcessing)))
{

cmd.SetGlobalTexture(HDShaderIDs._CameraDepthTexture, m_CameraDepthStencilBuffer);
cmd.SetGlobalTexture(HDShaderIDs._CameraMotionVectorsTexture, m_VelocityBufferRT);
var context = m_PostProcessContext;
var context = hdcamera.postprocessRenderContext;
context.camera = camera;
context.camera = hdcamera.camera;
context.sourceFormat = RenderTextureFormat.ARGBHalf;
context.flip = true;

4
ScriptableRenderPipeline/HDRenderPipeline/HDRenderPipelineAsset.cs


public Material defaultDiffuseMaterial
{
get { return m_DefaultDiffuseMaterial; }
private set { m_DefaultDiffuseMaterial = value; }
set { m_DefaultDiffuseMaterial = value; }
private set { m_DefaultShader = value; }
set { m_DefaultShader = value; }
}
public override Shader GetDefaultShader()

2
ScriptableRenderPipeline/HDRenderPipeline/Lighting/Deferred.shader


// Include
//-------------------------------------------------------------------------------------
#include "../../Core/ShaderLibrary/Common.hlsl"
#include "ShaderLibrary/Common.hlsl"
#include "../Debug/DebugDisplay.hlsl"
// Note: We have fix as guidelines that we have only one deferred material (with control of GBuffer enabled). Mean a users that add a new

15
ScriptableRenderPipeline/HDRenderPipeline/Lighting/LightDefinition.cs


public Vector3 forward;
public int cookieIndex; // -1 if unused
public Vector3 right; // Rescaled by (2 / shapeLenght)
public Vector3 right; // Rescaled by (2 / shapeLength)
public bool dynamicShadowCasterOnly; // Use with ShadowMask feature
public bool dynamicShadowCasterOnly; // Use with ShadowMask feature
public Vector4 shadowMaskSelector; // Use with ShadowMask feature
};

public Vector3 forward;
public int cookieIndex; // -1 if unused
public Vector3 right; // If spot: rescaled by cot(outerHalfAngle); if projector: rescaled by (2 / shapeLenght)
public Vector3 right; // If spot: rescaled by cot(outerHalfAngle); if projector: rescaled by (2 / shapeLength)
public Vector3 up; // If spot: rescaled by cot(outerHalfAngle); if projector: rescaled by * (2 / shapeWidth)
public Vector3 up; // If spot: rescaled by cot(outerHalfAngle); if projector: rescaled by (2 / shapeWidth)
public float diffuseScale;
public float angleScale; // Spot light

public Vector2 size; // Used by area, frustum projector and spot lights (x = cot(outerHalfAngle))
public Vector4 shadowMaskSelector; // Use with ShadowMask feature
public Vector2 size; // Used by area and pyramid projector lights
public Vector4 shadowMaskSelector; // Use with ShadowMask feature
[GenerateHLSL]

20
ScriptableRenderPipeline/HDRenderPipeline/Lighting/LightDefinition.cs.hlsl


float specularScale;
float3 up;
float diffuseScale;
bool dynamicShadowCasterOnly;
bool dynamicShadowCasterOnly;
float4 shadowMaskSelector;
};

float angleOffset;
float shadowDimmer;
bool dynamicShadowCasterOnly;
float4 shadowMaskSelector;
float4 shadowMaskSelector;
};
// Generated from UnityEngine.Experimental.Rendering.HDPipeline.EnvLightData

{
return value.diffuseScale;
}
bool GetDynamicShadowCasterOnly(DirectionalLightData value)
{
return value.dynamicShadowCasterOnly;
}
float2 GetFadeDistanceScaleAndBias(DirectionalLightData value)
{
return value.fadeDistanceScaleAndBias;

return value.unused0;
}
bool GetDynamicShadowCasterOnly(DirectionalLightData value)
{
return value.dynamicShadowCasterOnly;
}
float4 GetShadowMaskSelector(DirectionalLightData value)
{
return value.shadowMaskSelector;

{
return value.dynamicShadowCasterOnly;
}
float4 GetShadowMaskSelector(LightData value)
{
return value.shadowMaskSelector;
}
float2 GetSize(LightData value)
{
return value.size;

float GetMinRoughness(LightData value)
{
return value.minRoughness;
}
float4 GetShadowMaskSelector(LightData value)
{
return value.shadowMaskSelector;
}
//

14
ScriptableRenderPipeline/HDRenderPipeline/Lighting/Lighting.hlsl


#ifndef UNITY_LIGHTING_INCLUDED
#define UNITY_LIGHTING_INCLUDED
#include "../../Core/ShaderLibrary/CommonLighting.hlsl"
#include "../../Core/ShaderLibrary/CommonShadow.hlsl"
#include "../../Core/ShaderLibrary/Sampling.hlsl"
#include "../../Core/ShaderLibrary/AreaLighting.hlsl"
#include "../../Core/ShaderLibrary/ImageBasedLighting.hlsl"
#include "ShaderLibrary/CommonLighting.hlsl"
#include "ShaderLibrary/CommonShadow.hlsl"
#include "ShaderLibrary/Sampling.hlsl"
#include "ShaderLibrary/AreaLighting.hlsl"
#include "ShaderLibrary/ImageBasedLighting.hlsl"
// The light loop (or lighting architecture) is in charge to:
// - Define light list

#include "../Lighting/LightDefinition.cs.hlsl"
#include "../Lighting/LightUtilities.hlsl"
#define SHADOW_TILEPASS
#include "../../Core/ShaderLibrary/Shadow/Shadow.hlsl"
#undef SHADOW_TILEPASS
#include "TilePass/Shadow.hlsl"
#if defined(LIGHTLOOP_SINGLE_PASS) || defined(LIGHTLOOP_TILE_PASS)
#include "../Lighting/TilePass/TilePass.hlsl"

4
ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/ClusteredUtils.hlsl


float SuggestLogBase50(float tileFarPlane)
{
const float C = (float)(1 << g_iLog2NumClusters);
float rangeFittedDistance = clamp((tileFarPlane - g_fNearPlane) / (g_fFarPlane - g_fNearPlane), FLT_EPSILON, 1.0);
float rangeFittedDistance = clamp((tileFarPlane - g_fNearPlane) / (g_fFarPlane - g_fNearPlane), FLT_EPS, 1.0);
float suggested_base = pow((1.0 + sqrt(max(0.0, 1.0 - 4.0 * rangeFittedDistance * (1.0 - rangeFittedDistance)))) / (2.0 * rangeFittedDistance), 2.0 / C); //
return max(g_fClustBase, suggested_base);
}

{
const float C = (float)(1 << g_iLog2NumClusters);
float rangeFittedDistance = clamp((tileFarPlane - g_fNearPlane) / (g_fFarPlane - g_fNearPlane), FLT_EPSILON, 1.0);
float rangeFittedDistance = clamp((tileFarPlane - g_fNearPlane) / (g_fFarPlane - g_fNearPlane), FLT_EPS, 1.0);
float suggested_base = pow((1 / 2.3) * max(0.0, (0.8 / rangeFittedDistance) - 1), 4.0 / (C * 2)); // approximate inverse of d*x^4 + (-x) + (1-d) = 0 - d is normalized distance
return max(g_fClustBase, suggested_base);
}
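To make the cluster base selection above easier to follow, here is the same mapping written out in C#; this is a sketch with illustrative parameter names, not part of the pipeline:

// Sketch: remap the tile's far plane into [eps, 1] and derive a per-tile logarithmic
// base so that 2^log2NumClusters exponential slices cover the depth range.
static float SuggestLogBase50(float tileFarPlane, float nearPlane, float farPlane, int log2NumClusters, float clustBase)
{
    float C = 1 << log2NumClusters;
    float d = Mathf.Clamp((tileFarPlane - nearPlane) / (farPlane - nearPlane), 1e-7f, 1.0f);
    float suggested = Mathf.Pow((1.0f + Mathf.Sqrt(Mathf.Max(0.0f, 1.0f - 4.0f * d * (1.0f - d)))) / (2.0f * d), 2.0f / C);
    return Mathf.Max(clustBase, suggested);
}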

2
ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/Deferred.compute


// Include
//-------------------------------------------------------------------------------------
#include "../../../Core/ShaderLibrary/Common.hlsl"
#include "ShaderLibrary/Common.hlsl"
#include "../../Debug/DebugDisplay.hlsl"
// Note: We have fix as guidelines that we have only one deferred material (with control of GBuffer enabled). Mean a users that add a new

2
ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/DeferredDirectionalShadow.compute


// Each #kernel tells which function to compile; you can have many kernels
#pragma kernel DeferredDirectionalShadow
#include "../../../Core/ShaderLibrary/Common.hlsl"
#include "ShaderLibrary/Common.hlsl"
#include "../../ShaderVariables.hlsl"
#include "../../Lighting/Lighting.hlsl"

9
ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/ShadowContext.hlsl


// This can be custom for each project and needs to be in sync with the ShadowMgr
#ifndef TILEPASS_SHADOW_CONTEXT_HLSL
#define TILEPASS_SHADOW_CONTEXT_HLSL
#undef SHADOW_OPTIMIZE_REGISTER_USAGE
SHADOWCONTEXT_DECLARE( SHADOWCONTEXT_MAX_TEX2DARRAY, SHADOWCONTEXT_MAX_TEXCUBEARRAY, SHADOWCONTEXT_MAX_COMPSAMPLER, SHADOWCONTEXT_MAX_SAMPLER );
#include "ShaderLibrary/Shadow/Shadow.hlsl"
TEXTURE2D_ARRAY(_ShadowmapExp_VSM_0);
SAMPLER2D(sampler_ShadowmapExp_VSM_0);

return sc;
}
#endif // TILEPASS_SHADOW_CONTEXT_HLSL

4
ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/ShadowContext.hlsl.meta


fileFormatVersion: 2
guid: b0e81431fe3a7604fb9f9dd2a96bd7e0
timeCreated: 1491321445
licenseType: Pro
externalObjects: {}
nonModifiableTextures: []
userData:
assetBundleName:
assetBundleVariant:

4
ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/SortingComputeUtils.hlsl


// have to make this sort routine a macro unfortunately because hlsl doesn't take
// groupshared memory of unspecified length as an input parameter to a function.
// maxcapacity_in must be a power of two.
// all data from length_in and up to closest power of two will be filled with 0xffffffff
// all data from length_in and up to closest power of two will be filled with UINT_MAX
for(int t=length+localThreadID; t<N; t+=nrthreads) { data[t]=0xffffffff; } \
for(int t=length+localThreadID; t<N; t+=nrthreads) { data[t]=UINT_MAX; } \
GroupMemoryBarrierWithGroupSync(); \
\
for(int k=2; k<=N; k=2*k) \

2
ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/TilePass.cs


scInit.resourceBinder = binder;
m_ShadowMgr = new ShadowManager(shadowSettings, ref scInit, m_Shadowmaps);
// set global overrides - these need to match the override specified in ShadowDispatch.hlsl
// set global overrides - these need to match the override specified in TilePass/Shadow.hlsl
bool useGlobalOverrides = true;
m_ShadowMgr.SetGlobalShadowOverride( GPUShadowType.Point , ShadowAlgorithm.PCF, ShadowVariant.V4, ShadowPrecision.High, useGlobalOverrides );
m_ShadowMgr.SetGlobalShadowOverride( GPUShadowType.Spot , ShadowAlgorithm.PCF, ShadowVariant.V4, ShadowPrecision.High, useGlobalOverrides );

6
ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/TilePassLoop.hlsl


logBase = g_logBaseBuffer[tileIndex.y * _NumTileClusteredX + tileIndex.x];
}
int clustIdx = SnapToClusterIdxFlex(posInput.depthVS, logBase, g_isLogBaseBufferEnabled != 0);
int clustIdx = SnapToClusterIdxFlex(posInput.linearDepth, logBase, g_isLogBaseBufferEnabled != 0);
int nrClusters = (1 << g_iLog2NumClusters);
const int idx = ((lightCategory * nrClusters + clustIdx) * _NumTileClusteredY + tileIndex.y) * _NumTileClusteredX + tileIndex.x;
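The flattened index above packs (light category, cluster slice, tile y, tile x) into one linear offset; a hedged C# sketch with illustrative names:

// Sketch: row-major flattening of the 4D cluster coordinate used to address the light list.
static int ClusterListOffset(int lightCategory, int clustIdx, int nrClusters,
                             int tileX, int tileY, int numTilesX, int numTilesY)
{
    return ((lightCategory * nrClusters + clustIdx) * numTilesY + tileY) * numTilesX + tileX;
}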

for (i = 0; i < punctualLightCount; ++i)
{
int punctualIndex = FetchIndex(punctualLightStart, i);
DirectLighting lighting = EvaluateBSDF_Punctual(context, V, posInput, preLightData, _LightDatas[punctualIndex], bsdfData, bakeLightingData, _LightDatas[punctualIndex].lightType);
DirectLighting lighting = EvaluateBSDF_Punctual(context, V, posInput, preLightData, _LightDatas[punctualIndex], bsdfData, bakeLightingData);
AccumulateDirectLighting(lighting, aggregateLighting);
}

{
DirectLighting lighting = EvaluateBSDF_Punctual(context, V, posInput, preLightData, _LightDatas[i], bsdfData, bakeLightingData, _LightDatas[i].lightType);
DirectLighting lighting = EvaluateBSDF_Punctual(context, V, posInput, preLightData, _LightDatas[i], bsdfData, bakeLightingData);
AccumulateDirectLighting(lighting, aggregateLighting);
}

6
ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/lightlistbuild-bigtile.compute


#pragma kernel BigTileLightListGen
#include "../../../Core/ShaderLibrary/common.hlsl"
#include "ShaderLibrary/common.hlsl"
#include "TilePass.cs.hlsl"
#include "LightingConvexHullUtils.hlsl"
#include "SortingComputeUtils.hlsl"

SFiniteLightBound lgtDat = g_data[lightsListLDS[l]];
if( !DoesSphereOverlapTile(V, halfTileSizeAtZDistOne, lgtDat.center.xyz, lgtDat.radius, g_isOrthographic!=0) )
lightsListLDS[l]=0xffffffff;
lightsListLDS[l]=UINT_MAX;
}
#if !defined(SHADER_API_XBOXONE) && !defined(SHADER_API_PSSL)

int resf = (positive>0 && negative>0) ? 0 : (positive>0 ? 1 : (negative>0 ? (-1) : 0));
bool bFoundSepPlane = (resh*resf)<0;
if(bFoundSepPlane) lightsListLDS[l]=0xffffffff;
if(bFoundSepPlane) lightsListLDS[l]=UINT_MAX;
}
}
}

6
ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/lightlistbuild-clustered.compute


#pragma kernel TileLightListGen_DepthRT_MSAA_SrcBigTile LIGHTLISTGEN=TileLightListGen_DepthRT_MSAA_SrcBigTile ENABLE_DEPTH_TEXTURE_BACKPLANE MSAA_ENABLED USE_TWO_PASS_TILED_LIGHTING
#pragma kernel ClearAtomic
#include "../../../Core/ShaderLibrary/common.hlsl"
#include "ShaderLibrary/common.hlsl"
#include "ShaderBase.hlsl"
#include "TilePass.cs.hlsl"
#include "LightingConvexHullUtils.hlsl"

SFiniteLightBound lgtDat = g_data[coarseList[l]];
if( !DoesSphereOverlapTile(V, halfTileSizeAtZDistOne, lgtDat.center.xyz, lgtDat.radius, g_isOrthographic!=0) )
coarseList[l]=0xffffffff;
coarseList[l]=UINT_MAX;
}
#if !defined(SHADER_API_XBOXONE) && !defined(SHADER_API_PSSL)

int offs = 0;
for(int l=0; l<iNrCoarseLights; l++)
{
if(coarseList[l]!=0xffffffff)
if(coarseList[l]!=UINT_MAX)
coarseList[offs++] = coarseList[l];
}
lightOffsSph = offs;
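The UINT_MAX sentinel pattern in the hunks above (tag lights that fail the overlap tests, then compact the list) can be sketched in C# as follows; names are illustrative:

// Sketch: remove tagged entries in place, preserving the order of surviving lights,
// and return the new count (the compute shader does the equivalent per tile).
static int CompactLightList(uint[] coarseList, int count)
{
    int offs = 0;
    for (int l = 0; l < count; l++)
    {
        if (coarseList[l] != uint.MaxValue)
            coarseList[offs++] = coarseList[l];
    }
    return offs;
}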

2
ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/lightlistbuild.compute


//#pragma #pragma enable_d3d11_debug_symbols
#include "../../../Core/ShaderLibrary/common.hlsl"
#include "ShaderLibrary/common.hlsl"
#include "ShaderBase.hlsl"
#include "TilePass.cs.hlsl"
#include "LightingConvexHullUtils.hlsl"

2
ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/materialflags.compute


// #pragma enable_d3d11_debug_symbols
#include "../../../Core/ShaderLibrary/common.hlsl"
#include "ShaderLibrary/common.hlsl"
#include "ShaderBase.hlsl"
#include "TilePass.cs.hlsl"

4
ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/scrbound.compute


#pragma kernel ScreenBoundsAABB
#include "../../../Core/ShaderLibrary/common.hlsl"
#include "ShaderLibrary/common.hlsl"
#include "TilePass.cs.hlsl"
uniform int g_isOrthographic;

float fW = vPnts[k].w;
float fS = fW<0 ? -1 : 1;
float fWabs = fW<0 ? (-fW) : fW;
fW = fS * (fWabs<FLT_EPSILON ? FLT_EPSILON : fWabs);
fW = fS * (fWabs<FLT_EPS ? FLT_EPS : fWabs);
float3 vP = float3(vPnts[k].x/fW, vPnts[k].y/fW, vPnts[k].z/fW);
if(k==0) { vMin=vP; vMax=vP; }
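The clamp above keeps the homogeneous w away from zero without flipping its sign before the divide; a minimal C# sketch of the same idea:

// Sketch: sign-preserving clamp of |w| to at least eps, so the later division stays finite.
static float SafeHomogeneousW(float w, float eps)
{
    float sign = w < 0.0f ? -1.0f : 1.0f;
    return sign * Mathf.Max(Mathf.Abs(w), eps);
}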

17
ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/Shadow.hlsl


// This file is empty by default.
// Project specific file to override the default shadow sampling routines.
// We need to define which dispatchers we're overriding, otherwise the compiler will pick default implementations which will lead to compilation errors.
// Check Shadow.hlsl right below where this header is included for the individual defines.
#ifndef TILEPASS_SHADOW_HLSL
#define TILEPASS_SHADOW_HLSL
#define SHADOW_DISPATCH_USE_CUSTOM_DIRECTIONAL
#define SHADOW_DISPATCH_USE_CUSTOM_PUNCTUAL
#include "ShadowContext.hlsl"
// This is an example of how to override the default dynamic resource dispatcher
// by hardcoding the resources used and calling the shadow sampling routines that take an explicit texture and sampler.

#define SHADOW_DISPATCH_USE_CUSTOM_DIRECTIONAL // enables hardcoded resources and algorithm for directional lights
#define SHADOW_DISPATCH_USE_CUSTOM_PUNCTUAL // enables hardcoded resources and algorithm for punctual lights
//#define SHADOW_DISPATCH_USE_SEPARATE_CASCADE_ALGOS // enables separate cascade sampling variants for each cascade
//#define SHADOW_DISPATCH_USE_SEPARATE_PUNC_ALGOS // enables separate resources and algorithms for spot and point lights

return EvalShadow_SpotDepth( shadowContext, algo, tex, compSamp, positionWS, normalWS, shadowDataIndex, L );
}
#else
// example for choosing the same algo
// example for choosing the same algo
Texture2DArray tex = shadowContext.tex2DArray[SHADOW_DISPATCH_PUNC_TEX];
SamplerComparisonState compSamp = shadowContext.compSamplers[SHADOW_DISPATCH_PUNC_SMP];
uint algo = SHADOW_DISPATCH_PUNC_ALG;

#ifdef SHADOW_DISPATCH_USE_SEPARATE_PUNC_ALGOS
#undef SHADOW_DISPATCH_USE_SEPARATE_PUNC_ALGOS
#endif
#endif // TILEPASS_SHADOW_HLSL

6
ScriptableRenderPipeline/HDRenderPipeline/Material/LayeredLit/LayeredLit.shader


// Include
//-------------------------------------------------------------------------------------
#include "../../../Core/ShaderLibrary/common.hlsl"
#include "../../../Core/ShaderLibrary/Wind.hlsl"
#include "ShaderLibrary/common.hlsl"
#include "ShaderLibrary/Wind.hlsl"
#include "../../ShaderPass/FragInputs.hlsl"
#include "../../ShaderPass/ShaderPass.cs.hlsl"

ZWrite On
ZTest LEqual
// When alpha test is enabled, we should not write into the color buffer
ColorMask 0
HLSLPROGRAM

ZWrite On
// When alpha test is enabled, we should not write into the color buffer
ColorMask 0
HLSLPROGRAM

2
ScriptableRenderPipeline/HDRenderPipeline/Material/LayeredLit/LayeredLitDataDisplacement.hlsl


return BlendLayeredScalar(height0, height1, height2, height3, weights);
}
#include "../../../Core/ShaderLibrary/PerPixelDisplacement.hlsl"
#include "ShaderLibrary/PerPixelDisplacement.hlsl"
#endif // defined(_PIXEL_DISPLACEMENT) && LAYERS_HEIGHTMAP_ENABLE

10
ScriptableRenderPipeline/HDRenderPipeline/Material/LayeredLit/LayeredLitTessellation.shader


// Include
//-------------------------------------------------------------------------------------
#include "../../../Core/ShaderLibrary/common.hlsl"
#include "../../../Core/ShaderLibrary/Wind.hlsl"
#include "../../../Core/ShaderLibrary/GeometricTools.hlsl"
#include "../../../Core/ShaderLibrary/tessellation.hlsl"
#include "ShaderLibrary/common.hlsl"
#include "ShaderLibrary/Wind.hlsl"
#include "ShaderLibrary/GeometricTools.hlsl"
#include "ShaderLibrary/tessellation.hlsl"
#include "../../ShaderPass/FragInputs.hlsl"
#include "../../ShaderPass/ShaderPass.cs.hlsl"

ZWrite On
ZTest LEqual
// When alpha test is enabled, we should not write into the color buffer
ColorMask 0
HLSLPROGRAM

ZWrite On
// When alpha test is enabled, we should not write into the color buffer
ColorMask 0
HLSLPROGRAM

232
ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/Lit.hlsl


// Define refraction keyword helpers
#define HAS_REFRACTION (defined(_REFRACTION_PLANE) || defined(_REFRACTION_SPHERE))
#if HAS_REFRACTION
# include "../../../Core/ShaderLibrary/Refraction.hlsl"
# include "ShaderLibrary/Refraction.hlsl"
# if defined(_REFRACTION_PLANE)
# define REFRACTION_MODEL(V, posInputs, bsdfData) RefractionModelPlane(V, posInputs.positionWS, bsdfData.normalWS, bsdfData.ior, bsdfData.thickness)

float depth = LinearEyeDepth(pyramidDepth, _ZBufferParams);
// Distance from point to the back plane
float depthFromPositionInput = depth - posInputs.depthVS;
float depthFromPositionInput = depth - posInputs.linearDepth;
float offset = dot(-V, positionWS - posInputs.positionWS);
float depthFromPosition = depthFromPositionInput - offset;

// If a user do a lighting architecture without material classification, this can be remove
#include "../../Lighting/TilePass/TilePass.cs.hlsl"
static int g_FeatureFlags = 0xFFFFFFFF;
static uint g_FeatureFlags = UINT_MAX;
bool HasMaterialFeatureFlag(int flag)
bool HasMaterialFeatureFlag(uint flag)
{
return ((g_FeatureFlags & flag) != 0);
}

// The material features system for material classification must allow compile time optimization (i.e everything should be static)
// Note that as we store materialId for Aniso based on content of RT2 we need to add few extra condition.
// The code is also call from MaterialFeatureFlagsFromGBuffer, so must work fully dynamic if featureFlags is 0xFFFFFFFF
// The code is also call from MaterialFeatureFlagsFromGBuffer, so must work fully dynamic if featureFlags is UINT_MAX
int supportsStandard = HasMaterialFeatureFlag(MATERIALFEATUREFLAGS_LIT_STANDARD);
int supportsSSS = HasMaterialFeatureFlag(MATERIALFEATUREFLAGS_LIT_SSS);
int supportsAniso = HasMaterialFeatureFlag(MATERIALFEATUREFLAGS_LIT_ANISO);

DecodeFromGBuffer(
unPositionSS,
0xFFFFFFFF,
UINT_MAX,
bsdfData,
unused
);

float diffuseFGD;
// Area lights (17 VGPRs)
// TODO: 'orthoBasisViewNormal' is just a rotation around the normal and should thus be just 1x VGPR.
float3x3 orthoBasisViewNormal; // Right-handed view-dependent orthogonal basis around the normal (6x VGPRs)
float3x3 ltcTransformDiffuse; // Inverse transformation for Lambertian or Disney Diffuse (4x VGPRs)
float3x3 ltcTransformSpecular; // Inverse transformation for GGX (4x VGPRs)

float NdotL = saturate(dot(bsdfData.coatNormalWS, L));
float NdotV = preLightData.coatNdotV;
float LdotV = dot(L, V);
float invLenLV = rsqrt(max(2 * LdotV + 2, FLT_EPSILON));
float invLenLV = rsqrt(max(2 * LdotV + 2, FLT_EPS));
float NdotH = saturate((NdotL + NdotV) * invLenLV);
float LdotH = saturate(invLenLV * LdotV + invLenLV);

float NdotL = saturate(dot(bsdfData.normalWS, L)); // Must have the same value without the clamp
float NdotV = preLightData.NdotV; // Get the unaltered (geometric) version
float LdotV = dot(L, V);
float invLenLV = rsqrt(max(2 * LdotV + 2, FLT_EPSILON)); // invLenLV = rcp(length(L + V)) - caution about the case where V and L are opposite, it can happen, use max to avoid this
float invLenLV = rsqrt(max(2 * LdotV + 2, FLT_EPS)); // invLenLV = rcp(length(L + V)) - caution about the case where V and L are opposite, it can happen, use max to avoid this
float NdotH = saturate((NdotL + NdotV) * invLenLV);
float LdotH = saturate(invLenLV * LdotV + invLenLV);
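The invLenLV trick above avoids normalizing the half vector explicitly: since |L| = |V| = 1, |L + V|^2 = 2 + 2 L.V, so N.H = (N.L + N.V) / |L + V|. A small C# sketch of the same computation, purely for illustration:

// Sketch: compute N.H and L.H from the dot products alone, clamping 2*LdotV + 2
// away from zero for the case where L and V are nearly opposite.
static void HalfVectorDots(float NdotL, float NdotV, float LdotV, out float NdotH, out float LdotH)
{
    float invLenLV = 1.0f / Mathf.Sqrt(Mathf.Max(2.0f * LdotV + 2.0f, 1e-7f));
    NdotH = Mathf.Clamp01((NdotL + NdotV) * invLenLV);
    LdotH = Mathf.Clamp01(invLenLV * LdotV + invLenLV);
}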

#elif LIT_DIFFUSE_GGX_BRDF
float3 diffuseTerm = DiffuseGGX(bsdfData.diffuseColor, NdotV, NdotL, NdotH, LdotV, bsdfData.roughness);
#else
// A note on subsurface scattering.
// A note on subsurface scattering: [SSS-NOTE-TRSM]
// The correct way to handle SSS is to transmit light inside the surface, perform SSS,
// and then transmit it outside towards the viewer.
// Transmit(X) = F_Transm_Schlick(F0, F90, NdotX), where F0 = 0, F90 = 1.

float3 EvaluateTransmission(BSDFData bsdfData, float intensity, float shadow)
{
// For low thickness, we can reuse the shadowing status for the back of the object.
shadow = bsdfData.useThinObjectMode ? shadow : 1;
shadow = bsdfData.useThinObjectMode ? shadow : 1.0;
float backLight = intensity * shadow;

//-----------------------------------------------------------------------------
// EvaluateBSDF_Directional (supports directional and box projector lights)
// EvaluateBSDF_Directional
// Compute the NDC position (in [-1, 1]^2) by projecting 'positionWS' onto the near plane.
// Compute the CS position (in [-1, 1]^2) by projecting 'positionWS' onto the near plane.
float2 positionNDC = positionLS.xy;
float2 positionCS = positionLS.xy;
float2 coord = positionNDC * 0.5 + 0.5;
float2 positionNDC = positionCS * 0.5 + 0.5;
coord = frac(coord);
isInBounds = true;
positionNDC = frac(positionNDC);
isInBounds = true;
isInBounds = Max3(abs(positionNDC.x), abs(positionNDC.y), 1.0 - positionLS.z) <= 1.0;
isInBounds = Max3(abs(positionCS.x), abs(positionCS.y), 1.0 - positionLS.z) <= 1.0;
float4 cookie = SampleCookie2D(lightLoopContext, coord, lightData.cookieIndex);
float4 cookie = SampleCookie2D(lightLoopContext, positionNDC, lightData.cookieIndex);
cookie.a = isInBounds ? cookie.a : 0.0;
cookie.a = isInBounds ? cookie.a : 0;
DirectLighting EvaluateBSDF_Directional( LightLoopContext lightLoopContext,
float3 V, PositionInputs posInput, PreLightData preLightData,
DirectionalLightData lightData, BSDFData bsdfData, BakeLightingData bakeLightingData)
// None of the outputs are premultiplied.
void EvaluateLight_Directional(LightLoopContext lightLoopContext, PositionInputs posInput,
DirectionalLightData lightData, BakeLightingData bakeLightingData,
float3 N, float3 L,
out float3 color, out float attenuation, out float shadow)
DirectLighting lighting;
ZERO_INITIALIZE(DirectLighting, lighting);
float shadowMask = 1.0;
float3 L = -lightData.forward; // Lights are pointing backward in Unity
float NdotL = dot(bsdfData.normalWS, L);
float illuminance = saturate(NdotL);
color = lightData.color;
attenuation = 1.0;
shadow = 1.0;
float shadow = 1.0;
float shadowMask = 1.0;
#ifdef SHADOWS_SHADOWMASK
// shadowMaskSelector.x is -1 if there is no shadow mask
// Note that we override shadow value (in case we don't have any dynamic shadow)

[branch] if (lightData.shadowIndex >= 0)
{
#ifdef _SURFACE_TYPE_TRANSPARENT
shadow = GetDirectionalShadowAttenuation(lightLoopContext.shadowContext, positionWS, bsdfData.normalWS, lightData.shadowIndex, L, posInput.unPositionSS);
shadow = GetDirectionalShadowAttenuation(lightLoopContext.shadowContext, positionWS, N, lightData.shadowIndex, L, posInput.unPositionSS);
float fade = saturate(posInput.depthVS * lightData.fadeDistanceScaleAndBias.x + lightData.fadeDistanceScaleAndBias.y);
float fade = saturate(posInput.linearDepth * lightData.fadeDistanceScaleAndBias.x + lightData.fadeDistanceScaleAndBias.y);
// See comment in EvaluateBSDF_Punctual
shadow = lightData.dynamicShadowCasterOnly ? min(shadowMask, shadow) : shadow;

#endif
}
illuminance *= shadow;
float3 lightToSurface = positionWS - lightData.positionWS;
float4 cookie = EvaluateCookie_Directional(lightLoopContext, lightData, lightToSurface);
float3 lightToSample = positionWS - lightData.positionWS;
float4 cookie = EvaluateCookie_Directional(lightLoopContext, lightData, lightToSample);
// Premultiply.
lightData.color *= cookie.rgb;
lightData.diffuseScale *= cookie.a;
lightData.specularScale *= cookie.a;
color *= cookie.rgb;
attenuation *= cookie.a;
}
[branch] if (illuminance > 0.0)
DirectLighting EvaluateBSDF_Directional(LightLoopContext lightLoopContext,
float3 V, PositionInputs posInput, PreLightData preLightData,
DirectionalLightData lightData, BSDFData bsdfData,
BakeLightingData bakeLightingData)
{
DirectLighting lighting;
ZERO_INITIALIZE(DirectLighting, lighting);
float3 positionWS = posInput.positionWS;
float3 N = bsdfData.normalWS;
float3 L = -lightData.forward; // Lights point backward in Unity
float NdotL = dot(N, L);
float3 color; float attenuation, shadow;
EvaluateLight_Directional(lightLoopContext, posInput, lightData, bakeLightingData, N, L,
color, attenuation, shadow);
float intensity = shadow * attenuation * saturate(NdotL);
[branch] if (intensity > 0.0)
lighting.diffuse *= illuminance * lightData.diffuseScale;
lighting.specular *= illuminance * lightData.specularScale;
lighting.diffuse *= intensity * lightData.diffuseScale;
lighting.specular *= intensity * lightData.specularScale;
}
[branch] if (bsdfData.enableTransmission)

// Apply the BSDF to attenuation. See also: [SSS-NOTE-TRSM]
illuminance = Lambert() * wrappedNdotL;
attenuation *= Lambert();
illuminance = INV_PI * F_Transm_Schlick(0, 0.5, NdotV) * F_Transm_Schlick(0, 0.5, tNdotL) * wrappedNdotL;
attenuation *= INV_PI * F_Transm_Schlick(0, 0.5, NdotV) * F_Transm_Schlick(0, 0.5, tNdotL);
// Shadowing is applied inside EvaluateTransmission().
intensity = attenuation * wrappedNdotL;
lighting.diffuse += EvaluateTransmission(bsdfData, illuminance * lightData.diffuseScale, shadow);
lighting.diffuse += EvaluateTransmission(bsdfData, intensity * lightData.diffuseScale, shadow);
// Save ALU by applying 'lightData.color' only once.
lighting.diffuse *= lightData.color;
lighting.specular *= lightData.color;
// Save ALU by applying light and cookie colors only once.
lighting.diffuse *= color;
lighting.specular *= color;
return lighting;
}
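The refactor above separates light evaluation (color, attenuation, shadow) from the BSDF and applies the light color only once; a hedged C# sketch of the final combine step, with illustrative names:

// Sketch: scale the BSDF's diffuse/specular terms by a single intensity factor and the
// (cookie-premultiplied) light color, mirroring the end of EvaluateBSDF_Directional.
static void CombineDirectLighting(ref Vector3 diffuse, ref Vector3 specular,
                                  Vector3 lightColor, float shadow, float attenuation,
                                  float NdotL, float diffuseScale, float specularScale)
{
    float intensity = shadow * attenuation * Mathf.Clamp01(NdotL);
    diffuse  = Vector3.Scale(diffuse,  lightColor) * (intensity * diffuseScale);
    specular = Vector3.Scale(specular, lightColor) * (intensity * specularScale);
}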

{
// Compute the NDC position (in [-1, 1]^2) by projecting 'positionWS' onto the plane at 1m distance.
// Box projector lights require no perspective division.
float perspectiveZ = (lightType != GPULIGHTTYPE_PROJECTOR_BOX) ? positionLS.z : 1;
float2 positionNDC = positionLS.xy / perspectiveZ;
bool isInBounds = Max3(abs(positionNDC.x), abs(positionNDC.y), 1 - positionLS.z) <= 1;
float perspectiveZ = (lightType != GPULIGHTTYPE_PROJECTOR_BOX) ? positionLS.z : 1.0;
float2 positionCS = positionLS.xy / perspectiveZ;
bool isInBounds = Max3(abs(positionCS.x), abs(positionCS.y), 1.0 - positionLS.z) <= 1.0;
float2 coord = positionNDC * 0.5 + 0.5;
float2 positionNDC = positionCS * 0.5 + 0.5;
cookie = SampleCookie2D(lightLoopContext, coord, lightData.cookieIndex);
cookie = SampleCookie2D(lightLoopContext, positionNDC, lightData.cookieIndex);
cookie.a = isInBounds ? cookie.a : 0;
}
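The cookie lookup above maps the light-space position to a [0, 1] UV, skipping the perspective divide for box projectors; a C# sketch of that mapping (names are illustrative, not the HDRP functions):

// Sketch: project a light-space position onto the cookie plane, test the bounds,
// and return UVs in [0, 1] as the shader does before SampleCookie2D.
static Vector2 ProjectorCookieUV(Vector3 positionLS, bool isBoxProjector, out bool isInBounds)
{
    float perspectiveZ = isBoxProjector ? 1.0f : positionLS.z;
    Vector2 positionCS = new Vector2(positionLS.x / perspectiveZ, positionLS.y / perspectiveZ);
    isInBounds = Mathf.Max(Mathf.Abs(positionCS.x), Mathf.Max(Mathf.Abs(positionCS.y), 1.0f - positionLS.z)) <= 1.0f;
    return positionCS * 0.5f + new Vector2(0.5f, 0.5f); // [-1, 1] -> [0, 1]
}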

return attenuation * GetAngleAttenuation(L, -lightData.forward, lightData.angleScale, lightData.angleOffset);
}
DirectLighting EvaluateBSDF_Punctual( LightLoopContext lightLoopContext,
float3 V, PositionInputs posInput,
PreLightData preLightData, LightData lightData, BSDFData bsdfData, BakeLightingData bakeLightingData, int GPULightType)
// None of the outputs are premultiplied.
void EvaluateLight_Punctual(LightLoopContext lightLoopContext, PositionInputs posInput,
LightData lightData, BakeLightingData bakeLightingData,
float3 N, float3 L, float distSq,
out float3 color, out float attenuation, out float shadow)
DirectLighting lighting;
ZERO_INITIALIZE(DirectLighting, lighting);
int lightType = GPULightType;
// All punctual light types use the same formula; the attenuation terms are neutral depending on the light type.
// light.positionWS is the normalized light direction in the case of a directional light and invSqrAttenuationRadius is 0,
// which means dot(unL, unL) = 1 and GetDistanceAttenuation() will return 1.
// For point and directional lights GetAngleAttenuation() returns 1.
float shadowMask = 1.0;
float3 lightToSurface = positionWS - lightData.positionWS;
float3 unL = -lightToSurface;
float distSq = dot(unL, unL);
float dist = sqrt(distSq);
float3 L = (lightType != GPULIGHTTYPE_PROJECTOR_BOX) ? unL * rsqrt(distSq) : -lightData.forward;
float NdotL = dot(bsdfData.normalWS, L);
float illuminance = saturate(NdotL);
color = lightData.color;
attenuation = GetPunctualShapeAttenuation(lightData, L, distSq);
shadow = 1.0;
float attenuation = GetPunctualShapeAttenuation(lightData, L, distSq);
// Premultiply.
lightData.diffuseScale *= attenuation;
lightData.specularScale *= attenuation;
float shadow = 1.0;
float shadowMask = 1.0;
#ifdef SHADOWS_SHADOWMASK
// shadowMaskSelector.x is -1 if there is no shadow mask
// Note that we override shadow value (in case we don't have any dynamic shadow)

{
// TODO: make projector lights cast shadows.
float3 offset = float3(0.0, 0.0, 0.0); // GetShadowPosOffset(nDotL, normal);
float4 L_dist = { L, dist };
shadow = GetPunctualShadowAttenuation(lightLoopContext.shadowContext, positionWS + offset, bsdfData.normalWS, lightData.shadowIndex, L_dist, posInput.unPositionSS);
float4 L_dist = float4(L, sqrt(distSq));
shadow = GetPunctualShadowAttenuation(lightLoopContext.shadowContext, positionWS + offset, N, lightData.shadowIndex, L_dist, posInput.unPositionSS);
#ifdef SHADOWS_SHADOWMASK
// Note: Legacy Unity has two shadow mask modes. ShadowMask (the ShadowMask contains static objects' shadows and the ShadowMap contains only dynamic objects' shadows; the final result is the minimum of both values)
// and ShadowMask_Distance (the ShadowMask contains static objects' shadows and the ShadowMap contains everything and is blended with the ShadowMask based on distance (global distance set up in QualitySettings)).

#endif
}
illuminance *= shadow;
float4 cookie = EvaluateCookie_Punctual(lightLoopContext, lightData, lightToSurface);
float3 lightToSample = positionWS - lightData.positionWS;
float4 cookie = EvaluateCookie_Punctual(lightLoopContext, lightData, lightToSample);
// Premultiply.
lightData.color *= cookie.rgb;
lightData.diffuseScale *= cookie.a;
lightData.specularScale *= cookie.a;
color *= cookie.rgb;
attenuation *= cookie.a;
}
DirectLighting EvaluateBSDF_Punctual(LightLoopContext lightLoopContext,
float3 V, PositionInputs posInput,
PreLightData preLightData, LightData lightData, BSDFData bsdfData, BakeLightingData bakeLightingData)
{
DirectLighting lighting;
ZERO_INITIALIZE(DirectLighting, lighting);
[branch] if (illuminance > 0.0)
float3 positionWS = posInput.positionWS;
float3 lightToSample = positionWS - lightData.positionWS;
int lightType = lightData.lightType;
float3 unL = (lightType != GPULIGHTTYPE_PROJECTOR_BOX) ? -lightToSample : -lightData.forward;
float distSq = dot(unL, unL);
float3 N = bsdfData.normalWS;
float3 L = unL * rsqrt(distSq);
float NdotL = dot(N, L);
float3 color; float attenuation, shadow;
EvaluateLight_Punctual(lightLoopContext, posInput, lightData, bakeLightingData, N, L, distSq,
color, attenuation, shadow);
float intensity = shadow * attenuation * saturate(NdotL);
[branch] if (intensity > 0.0)
lighting.diffuse *= illuminance * lightData.diffuseScale;
lighting.specular *= illuminance * lightData.specularScale;
lighting.diffuse *= intensity * lightData.diffuseScale;
lighting.specular *= intensity * lightData.specularScale;
}
[branch] if (bsdfData.enableTransmission)

// Apply the BSDF to attenuation. See also: [SSS-NOTE-TRSM]
illuminance = Lambert() * wrappedNdotL;
attenuation *= Lambert();
illuminance = INV_PI * F_Transm_Schlick(0, 0.5, NdotV) * F_Transm_Schlick(0, 0.5, tNdotL) * wrappedNdotL;
attenuation *= INV_PI * F_Transm_Schlick(0, 0.5, NdotV) * F_Transm_Schlick(0, 0.5, tNdotL);
// Shadowing is applied inside EvaluateTransmission().
intensity = attenuation * wrappedNdotL;
lighting.diffuse += EvaluateTransmission(bsdfData, illuminance * lightData.diffuseScale, shadow);
lighting.diffuse += EvaluateTransmission(bsdfData, intensity * lightData.diffuseScale, shadow);
// Save ALU by applying 'lightData.color' only once.
lighting.diffuse *= lightData.color;
lighting.specular *= lightData.color;
// Save ALU by applying light and cookie colors only once.
lighting.diffuse *= color;
lighting.specular *= color;
return lighting;
}

// Exit if texel is out of color buffer
// Or if the texel is from an object in front of the object
if (refractedBackPointDepth < posInput.depthVS
if (refractedBackPointDepth < posInput.linearDepth
|| any(refractedBackPointSS < 0.0)
|| any(refractedBackPointSS > 1.0))
{

// We store inverse AO so neutral is black; whether we sample inside or outside the texture, it returns 0 in the neutral case.
// Ambient occlusion is used for indirect lighting (reflection probes, baked diffuse lighting).
#ifndef _SURFACE_TYPE_TRANSPARENT
#else
float indirectAmbientOcclusion = 1.0;
float directAmbientOcclusion = 1.0;
#endif
// Add indirect diffuse + emissive (if any) - Ambient occlusion is multiply by emissive which is wrong but not a big deal
#if GTAO_MULTIBOUNCE_APPROX

6
ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/Lit.shader


// Include
//-------------------------------------------------------------------------------------
#include "../../../Core/ShaderLibrary/Common.hlsl"
#include "../../../Core/ShaderLibrary/Wind.hlsl"
#include "ShaderLibrary/Common.hlsl"
#include "ShaderLibrary/Wind.hlsl"
#include "../../ShaderPass/FragInputs.hlsl"
#include "../../ShaderPass/ShaderPass.cs.hlsl"

ZWrite On
ZTest LEqual
// When alpha test is enabled, we should not write into the color buffer
ColorMask 0
HLSLPROGRAM

ZWrite On
// When alpha test is enabled, we should not write into the color buffer
ColorMask 0
HLSLPROGRAM

2
ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/LitData.hlsl


#include "../../../Core/ShaderLibrary/SampleUVMapping.hlsl"
#include "ShaderLibrary/SampleUVMapping.hlsl"
#include "../MaterialUtilities.hlsl"
//-------------------------------------------------------------------------------------

2
ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/LitDataDisplacement.hlsl


return SAMPLE_TEXTURE2D_LOD(_HeightMap, sampler_HeightMap, param.uv + texOffsetCurrent, lod).r;
}
#include "../../../Core/ShaderLibrary/PerPixelDisplacement.hlsl"
#include "ShaderLibrary/PerPixelDisplacement.hlsl"
void ApplyDisplacementTileScale(inout float height)
{

10
ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/LitTessellation.shader


// Include
//-------------------------------------------------------------------------------------
#include "../../../Core/ShaderLibrary/common.hlsl"
#include "../../../Core/ShaderLibrary/Wind.hlsl"
#include "../../../Core/ShaderLibrary/GeometricTools.hlsl"
#include "../../../Core/ShaderLibrary/tessellation.hlsl"
#include "ShaderLibrary/common.hlsl"
#include "ShaderLibrary/Wind.hlsl"
#include "ShaderLibrary/GeometricTools.hlsl"
#include "ShaderLibrary/tessellation.hlsl"
#include "../../ShaderPass/FragInputs.hlsl"
#include "../../ShaderPass/ShaderPass.cs.hlsl"

ZWrite On
ZTest LEqual
// When alpha test is enabled, we should not write into the color buffer
ColorMask 0
HLSLPROGRAM

ZWrite On
// When alpha test is enabled, we should not write into the color buffer
ColorMask 0
HLSLPROGRAM

2
ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/Resources/CombineLighting.shader


#pragma vertex Vert
#pragma fragment Frag
#include "../../../../Core/ShaderLibrary/Common.hlsl"
#include "ShaderLibrary/Common.hlsl"
TEXTURE2D(_IrradianceSource);

2
ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/Resources/CopyStencilBuffer.shader


#pragma vertex Vert
#pragma fragment Frag
#include "../../../../Core/ShaderLibrary/Packing.hlsl"
#include "ShaderLibrary/Packing.hlsl"
#include "../../../ShaderVariables.hlsl"
#include "../../../Lighting/LightDefinition.cs.hlsl"

4
ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/Resources/PreIntegratedFGD.shader


#pragma target 4.5
#pragma only_renderers d3d11 ps4 vulkan metal // TEMP: until we go further in dev
#include "../../../../Core/ShaderLibrary/Common.hlsl"
#include "../../../../Core/ShaderLibrary/ImageBasedLighting.hlsl"
#include "ShaderLibrary/Common.hlsl"
#include "ShaderLibrary/ImageBasedLighting.hlsl"
#include "../../../ShaderVariables.hlsl"
struct Attributes

99
ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/Resources/SubsurfaceScattering.compute


// Included headers
//--------------------------------------------------------------------------------------------------
#include "../../../../Core/ShaderLibrary/Packing.hlsl"
#include "../../../../Core/ShaderLibrary/SpaceFillingCurves.hlsl"
#include "ShaderLibrary/Packing.hlsl"
#include "ShaderLibrary/SpaceFillingCurves.hlsl"
#include "../../../ShaderVariables.hlsl"
#define UNITY_MATERIAL_LIT
#include "../../../Material/Material.hlsl"

#endif
groupshared bool processGroup;
bool StencilTest(int2 pixelCoord, float stencilRef)
{
bool passedStencilTest;
#if SSS_SAMPLE_TEST_HTILE
int2 tileCoord = pixelCoord / 8;
// Perform the stencil test (reject at the tile rate).
passedStencilTest = stencilRef == LOAD_TEXTURE2D(_HTile, tileCoord).r;
[branch] if (passedStencilTest)
#else
// It is extremely uncommon for individual samples to fail the HTile test.
// Unfortunately, our copy of HTile does not allow accepting at the tile rate.
// Therefore, we choose not to perform the HiS test here.
#endif
{
// Unfortunately, our copy of HTile does not allow accepting at the tile rate.
// Therefore, we have to additionally perform the stencil test at the pixel rate.
// We check the tagged irradiance buffer to avoid an extra stencil texture fetch.
passedStencilTest = TestLightingForSSS(LOAD_TEXTURE2D(_IrradianceSource, pixelCoord).rgb);
}
return passedStencilTest;
}
#if SSS_USE_LDS_CACHE
float4 LoadSampleFromCacheMemory(int2 cacheCoord)
{

// Returns {irradiance, linearDepth}.
float4 LoadSample(int2 pixelCoord, int2 cacheAnchor)
{
#if SSS_USE_LDS_CACHE
#if SSS_USE_LDS_CACHE
[branch] if (isInCache)
{
return LoadSampleFromCacheMemory(cacheCoord);

{
float stencilRef = STENCILLIGHTINGUSAGE_SPLIT_LIGHTING;
[branch] if (StencilTest(pixelCoord, stencilRef))
{
return LoadSampleFromVideoMemory(pixelCoord);
}
else
{
return float4(0, 0, 0, 0);
}
// Always load both irradiance and depth.
// Avoid dependent texture reads at the cost of extra bandwidth.
return LoadSampleFromVideoMemory(pixelCoord);
}
}

if (TestLightingForSSS(irradiance))
{
// Apply bilateral weighting.
float linearDepth = textureSample.a;
float z = linearDepth - centerPosVS.z;
float p = _FilterKernels[profileID][i][iP];
float3 w = ComputeBilateralWeight(xy2, z, mmPerUnit, shapeParam, p);
float viewZ = textureSample.a;
float relZ = viewZ - centerPosVS.z;
float rcpPdf = _FilterKernels[profileID][i][iP];
float3 weight = ComputeBilateralWeight(xy2, relZ, mmPerUnit, shapeParam, rcpPdf);
totalIrradiance += w * irradiance;
totalWeight += w;
totalIrradiance += weight * irradiance;
totalWeight += weight;
}
else
{

[branch] if (!processGroup) { return; }
float3 centerIrradiance = 0;
float centerDepth = 0;
float4 cachedValue = 0;
bool passedStencilTest = StencilTest((int2)pixelCoord, stencilRef);
float3 centerIrradiance = LOAD_TEXTURE2D(_IrradianceSource, pixelCoord).rgb;
float centerDepth = 0;
float centerViewZ = 0;
bool passedStencilTest = TestLightingForSSS(centerIrradiance);
// Save some bandwidth by only loading depth values for SSS pixels.
centerIrradiance = LOAD_TEXTURE2D(_IrradianceSource, pixelCoord).rgb;
centerDepth = LOAD_TEXTURE2D(_DepthTexture, pixelCoord).r;
cachedValue = float4(centerIrradiance, LinearEyeDepth(centerDepth, _ZBufferParams));
centerDepth = LOAD_TEXTURE2D(_DepthTexture, pixelCoord).r;
centerViewZ = LinearEyeDepth(centerDepth, _ZBufferParams);
textureCache[Mad24(TEXTURE_CACHE_SIZE_1D, cacheCoord.y, cacheCoord.x)] = cachedValue;
textureCache[Mad24(TEXTURE_CACHE_SIZE_1D, cacheCoord.y, cacheCoord.x)] = float4(centerIrradiance, centerViewZ);
uint numBorderQuadsPerWave = TEXTURE_CACHE_SIZE_1D / 2 - 1;
uint halfCacheWidthInQuads = TEXTURE_CACHE_SIZE_1D / 4;

uint2 quadCoord;
// The traversal order is such that the quad's X coordinate is monotonically increasing.
// The corner is always near the block of the corresponding wavefront.
case 0:
case 0: // Bottom left
case 1:
case 1: // Bottom right
case 2:
case 2: // Top left
default: // 3
default: // Top right
uint2 cacheCoord2 = 2 * (startQuad + quadCoord) + uint2(laneIndex & 1, (laneIndex >> 1) & 1);
int2 pixelCoord2 = (int2)(tileAnchor + cacheCoord2) - TEXTURE_CACHE_BORDER;
float4 cachedValue2 = 0;
uint2 cacheCoord2 = 2 * (startQuad + quadCoord) + uint2(laneIndex & 1, (laneIndex >> 1) & 1);
int2 pixelCoord2 = (int2)(tileAnchor + cacheCoord2) - TEXTURE_CACHE_BORDER;
float3 irradiance2 = LOAD_TEXTURE2D(_IrradianceSource, pixelCoord2).rgb;
float viewZ2 = 0;
[branch] if (StencilTest(pixelCoord2, stencilRef))
// Save some bandwidth by only loading depth values for SSS pixels.
[branch] if (TestLightingForSSS(irradiance2))
cachedValue2 = LoadSampleFromVideoMemory(pixelCoord2);
viewZ2 = LinearEyeDepth(LOAD_TEXTURE2D(_DepthTexture, pixelCoord2).r, _ZBufferParams);
textureCache[Mad24(TEXTURE_CACHE_SIZE_1D, cacheCoord2.y, cacheCoord2.x)] = cachedValue2;
textureCache[Mad24(TEXTURE_CACHE_SIZE_1D, cacheCoord2.y, cacheCoord2.x)] = float4(irradiance2, viewZ2);
}
// Wait for the LDS.

bool useNearFieldKernel = SSS_ENABLE_NEAR_FIELD && maxDistInPixels > SSS_LOD_THRESHOLD;
#if SSS_DEBUG_LOD
StoreResult(pixelCoord, useNearFieldKernel ? float3(1, 0, 0) : float3(0.5, 0.5, 0);
StoreResult(pixelCoord, useNearFieldKernel ? float3(1, 0, 0) : float3(0.5, 0.5, 0));
return;
#endif
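
A side note on the bilateral pass touched above: the renamed variables (relZ, rcpPdf, weight) all feed a single accumulation step. Below is a minimal sketch of that step, assuming a hypothetical kernel/tap layout and reusing the ComputeBilateralWeight helper that appears in the hunk; it is illustrative, not the compute shader's actual loop.

// Hedged sketch: accumulate diffusion-profile-weighted, depth-aware taps around the
// center pixel. kernel[i].xy is the tap offset in millimeters, kernel[i].z its
// reciprocal sampling PDF; taps[i] packs (irradiance.rgb, linear view depth).
float3 FilterIrradianceSketch(float3 kernel[16], float4 taps[16], uint sampleCount,
                              float centerViewZ, float mmPerUnit, float3 shapeParam)
{
    float3 totalIrradiance = 0;
    float3 totalWeight     = 0;

    for (uint i = 0; i < sampleCount; i++)
    {
        float2 xy2        = kernel[i].xy;        // offset from the center, in mm
        float  rcpPdf     = kernel[i].z;         // importance-sampling weight
        float3 irradiance = taps[i].rgb;         // irradiance of the tap
        float  viewZ      = taps[i].a;           // linear view depth of the tap
        float  relZ       = viewZ - centerViewZ; // depth delta for the bilateral term

        float3 weight = ComputeBilateralWeight(xy2, relZ, mmPerUnit, shapeParam, rcpPdf);

        totalIrradiance += weight * irradiance;
        totalWeight     += weight;
    }

    // Guard against a zero weight sum (sketch-level safety, not taken from the shader).
    return totalIrradiance / max(totalWeight, 0.0001);
}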

2
ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/Resources/SubsurfaceScattering.shader


// Include
//-------------------------------------------------------------------------------------
#include "../../../../Core/ShaderLibrary/Common.hlsl"
#include "ShaderLibrary/Common.hlsl"
#include "../../../ShaderVariables.hlsl"
#define UNITY_MATERIAL_LIT // Needs to be defined before including Material.hlsl
#include "../../../Material/Material.hlsl"

16
ScriptableRenderPipeline/HDRenderPipeline/Material/Material.hlsl


#ifndef UNITY_MATERIAL_INCLUDED
#define UNITY_MATERIAL_INCLUDED
#include "../../Core/ShaderLibrary/Color.hlsl"
#include "../../Core/ShaderLibrary/Packing.hlsl"
#include "../../Core/ShaderLibrary/BSDF.hlsl"
#include "../../Core/ShaderLibrary/Debug.hlsl"
#include "../../Core/ShaderLibrary/GeometricTools.hlsl"
#include "../../Core/ShaderLibrary/CommonMaterial.hlsl"
#include "../../Core/ShaderLibrary/EntityLighting.hlsl"
#include "../../Core/ShaderLibrary/ImageBasedLighting.hlsl"
#include "ShaderLibrary/Color.hlsl"
#include "ShaderLibrary/Packing.hlsl"
#include "ShaderLibrary/BSDF.hlsl"
#include "ShaderLibrary/Debug.hlsl"
#include "ShaderLibrary/GeometricTools.hlsl"
#include "ShaderLibrary/CommonMaterial.hlsl"
#include "ShaderLibrary/EntityLighting.hlsl"
#include "ShaderLibrary/ImageBasedLighting.hlsl"
#include "../Sky/AtmosphericScattering/AtmosphericScattering.hlsl"
// Guidelines for Material Keyword.

15
ScriptableRenderPipeline/HDRenderPipeline/Material/Unlit/Editor/BaseUnlitUI.cs


Transparent
}
// Enum values are hardcoded for retrocompatibility. Don't change them.
// Enum values are hardcoded for retro-compatibility. Don't change them.
public enum BlendMode
{
Alpha = 0,

// This function must finish with double sided option (see LitUI.cs)
if (doubleSidedEnable != null)
{
// Grey out the option if backface rendering is enabled
bool disabledScope = transparentBackfaceEnable != null && transparentBackfaceEnable.floatValue > 0.0f && ((SurfaceType)surfaceType.floatValue == SurfaceType.Transparent);
using (new EditorGUI.DisabledScope(disabledScope))
{
m_MaterialEditor.ShaderProperty(doubleSidedEnable, StylesBaseUnlit.doubleSidedEnableText);
}
m_MaterialEditor.ShaderProperty(doubleSidedEnable, StylesBaseUnlit.doubleSidedEnableText);
}
EditorGUI.indentLevel--;

}
}
// Can't enable double sided and backface rendering at the same time, give priority to backface rendering
bool doubleSidedEnable = material.HasProperty(kDoubleSidedEnable) && material.GetFloat(kDoubleSidedEnable) > 0.0f && !isBackFaceEnable;
bool doubleSidedEnable = material.HasProperty(kDoubleSidedEnable) && material.GetFloat(kDoubleSidedEnable) > 0.0f;
if (doubleSidedEnable)
if (doubleSidedEnable && !isBackFaceEnable) // When backface rendering is enabled there is no need to disable the cull mode, as we render both sides.
{
material.SetInt("_CullMode", (int)UnityEngine.Rendering.CullMode.Off);
}

2
ScriptableRenderPipeline/HDRenderPipeline/Material/Unlit/Unlit.shader


// Include
//-------------------------------------------------------------------------------------
#include "../../../Core/ShaderLibrary/common.hlsl"
#include "ShaderLibrary/common.hlsl"
#include "../../ShaderVariables.hlsl"
#include "../../ShaderPass/FragInputs.hlsl"
#include "../../ShaderPass/ShaderPass.cs.hlsl"

2
ScriptableRenderPipeline/HDRenderPipeline/RenderPipelineResources/ApplyDistorsion.compute


#include "../../Core/ShaderLibrary/Common.hlsl"
#include "ShaderLibrary/Common.hlsl"
#include "../Material/Builtin/BuiltinData.hlsl"
TEXTURE2D(_DistortionTexture);

2
ScriptableRenderPipeline/HDRenderPipeline/RenderPipelineResources/CameraMotionVectors.shader


#pragma target 4.5
#include "../../Core/ShaderLibrary/Common.hlsl"
#include "ShaderLibrary/Common.hlsl"
#include "../ShaderVariables.hlsl"
#include "../ShaderPass/FragInputs.hlsl"
#include "../ShaderPass/VaryingMesh.hlsl"

2
ScriptableRenderPipeline/HDRenderPipeline/RenderPipelineResources/DepthDownsample.compute


#include "../../Core/ShaderLibrary/Common.hlsl"
#include "ShaderLibrary/Common.hlsl"
Texture2D<float> _Source;
RWTexture2D<float> _Result;

14
ScriptableRenderPipeline/HDRenderPipeline/RenderPipelineResources/RenderPipelineResources.cs


namespace UnityEngine.Experimental.Rendering.HDPipeline
namespace UnityEngine.Experimental.Rendering.HDPipeline
{
public class RenderPipelineResources : ScriptableObject
{

// Lighting resources
public Shader deferredShader;
public Shader combineLightingPass;
public ComputeShader volumetricLightingCS;
public ComputeShader gaussianPyramidCS;
public ComputeShader depthPyramidCS;
public ComputeShader copyChannelCS;

public Shader opaqueAtmosphericScattering;
public Shader skyboxCubemap;
public int applyDistortionKernel { get; private set; }
void OnEnable()
{
applyDistortionKernel = -1;
if (applyDistortionCS != null)
applyDistortionKernel = applyDistortionCS.FindKernel("KMain");
}
}
}

2
ScriptableRenderPipeline/HDRenderPipeline/SceneSettings/Resources/DrawSssProfile.shader


// Include
//-------------------------------------------------------------------------------------
#include "../../../Core/ShaderLibrary/Common.hlsl"
#include "ShaderLibrary/Common.hlsl"
#define USE_LEGACY_UNITY_MATRIX_VARIABLES
#include "../../ShaderVariables.hlsl"
#ifdef SSS_MODEL_BASIC

4
ScriptableRenderPipeline/HDRenderPipeline/SceneSettings/Resources/DrawTransmittanceGraph.shader


// Include
//-------------------------------------------------------------------------------------
#include "../../../Core/ShaderLibrary/Common.hlsl"
#include "../../../Core/ShaderLibrary/CommonMaterial.hlsl"
#include "ShaderLibrary/Common.hlsl"
#include "ShaderLibrary/CommonMaterial.hlsl"
#define USE_LEGACY_UNITY_MATRIX_VARIABLES
#include "../../ShaderVariables.hlsl"

2
ScriptableRenderPipeline/HDRenderPipeline/ShaderPass/ShaderPassDepthOnly.hlsl


outColor = float4(0.0, 0.0, 0.0, 0.0);
#ifdef _DEPTHOFFSET_ON
outputDepth = posInput.depthRaw;
outputDepth = posInput.deviceDepth;
#endif
}

2
ScriptableRenderPipeline/HDRenderPipeline/ShaderPass/ShaderPassForward.hlsl


}
#ifdef _DEPTHOFFSET_ON
outputDepth = posInput.depthRaw;
outputDepth = posInput.deviceDepth;
#endif
#ifdef DEBUG_DISPLAY

2
ScriptableRenderPipeline/HDRenderPipeline/ShaderPass/ShaderPassGBuffer.hlsl


ENCODE_VELOCITY_INTO_GBUFFER(builtinData.velocity, outVelocityBuffer);
#ifdef _DEPTHOFFSET_ON
outputDepth = posInput.depthRaw;
outputDepth = posInput.deviceDepth;
#endif
}

2
ScriptableRenderPipeline/HDRenderPipeline/ShaderPass/ShaderPassLightTransport.hlsl


#error SHADERPASS_is_not_correctly_define
#endif
#include "../../Core/ShaderLibrary/Color.hlsl"
#include "ShaderLibrary/Color.hlsl"
CBUFFER_START(UnityMetaPass)
// x = use uv1 as raster position

8
ScriptableRenderPipeline/HDRenderPipeline/Sky/AtmosphericScattering/AtmosphericScattering.hlsl


#ifndef UNITY_ATMOSPHERIC_SCATTERING_INCLUDED
#define UNITY_ATMOSPHERIC_SCATTERING_INCLUDED
#include "../../../Core/ShaderLibrary/VolumeRendering.hlsl"
#include "ShaderLibrary/VolumeRendering.hlsl"
#include "AtmosphericScattering.cs.hlsl"
#include "../SkyVariables.hlsl"

else if (_FogColorMode == FOGCOLORMODE_SKY_COLOR)
{
// Based on Uncharted 4 "Mip Sky Fog" trick: http://advances.realtimerendering.com/other/2016/naughty_dog/NaughtyDog_TechArt_Final.pdf
float mipLevel = (1.0 - _MipFogMaxMip * saturate((posInput.depthVS - _MipFogNear) / (_MipFogFar - _MipFogNear))) * _SkyTextureMipCount;
float mipLevel = (1.0 - _MipFogMaxMip * saturate((posInput.linearDepth - _MipFogNear) / (_MipFogFar - _MipFogNear))) * _SkyTextureMipCount;
float3 dir = normalize(posInput.positionWS - GetPrimaryCameraPosition());
return SampleSkyTexture(dir, mipLevel).rgb;
}
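
The depthVS -> linearDepth rename above does not change the Mip Sky Fog math. Read in isolation, the expression picks a sky-cubemap mip from the pixel's linear depth; here is a self-contained restatement, with generic parameter names standing in for the _MipFog* uniforms.

// Hedged sketch of the mip selection from the hunk above: as linear depth moves from
// mipFogNear to mipFogFar the saturate() term goes 0 -> 1, so the selected mip goes
// from skyTextureMipCount down to (1 - mipFogMaxMip) * skyTextureMipCount.
float ComputeMipFogLevelSketch(float linearDepth, float mipFogNear, float mipFogFar,
                               float mipFogMaxMip, float skyTextureMipCount)
{
    float t = saturate((linearDepth - mipFogNear) / (mipFogFar - mipFogNear));
    return (1.0 - mipFogMaxMip * t) * skyTextureMipCount;
}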

if (_AtmosphericScatteringType == FOGTYPE_EXPONENTIAL)
{
float3 fogColor = GetFogColor(posInput);
float fogFactor = _ExpFogDensity * (1.0f - Transmittance(OpticalDepthHomogeneous(1.0f / _ExpFogDistance, posInput.depthVS)));
float fogFactor = _ExpFogDensity * (1.0f - Transmittance(OpticalDepthHomogeneous(1.0f / _ExpFogDistance, posInput.linearDepth)));
float fogFactor = _LinearFogDensity * saturate((posInput.depthVS - _LinearFogStart) * _LinearFogOneOverRange);
float fogFactor = _LinearFogDensity * saturate((posInput.linearDepth - _LinearFogStart) * _LinearFogOneOverRange);
return float4(fogColor, fogFactor);
}
else // NONE
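
The two fog factors touched above follow the usual Beer-Lambert and linear-ramp formulations. The sketch below restates them in isolation; the Transmittance / OpticalDepthHomogeneous helpers are redefined locally under the homogeneous-medium assumption, so this is illustrative rather than a quote from VolumeRendering.hlsl.

// Hedged sketch: exponential fog opacity from Beer-Lambert transmittance,
// linear fog opacity from a simple depth ramp.
float OpticalDepthHomogeneousSketch(float extinction, float intervalLength)
{
    return extinction * intervalLength;
}

float TransmittanceSketch(float opticalDepth)
{
    return exp(-opticalDepth);
}

float ExpFogFactorSketch(float linearDepth, float density, float fogDistance)
{
    // Opacity = density * (1 - transmittance along the view ray).
    return density * (1.0 - TransmittanceSketch(OpticalDepthHomogeneousSketch(1.0 / fogDistance, linearDepth)));
}

float LinearFogFactorSketch(float linearDepth, float density, float fogStart, float oneOverRange)
{
    // Opacity ramps from 0 at fogStart up to density over the fog range.
    return density * saturate((linearDepth - fogStart) * oneOverRange);
}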

16
ScriptableRenderPipeline/HDRenderPipeline/Sky/BlacksmithlSky/Resources/SkyBlacksmith.shader


#pragma multi_compile _ ATMOSPHERICS_DEBUG
#pragma multi_compile _ PERFORM_SKY_OCCLUSION_TEST
#include "../../../../Core/ShaderLibrary/Color.hlsl"
#include "../../../../Core/ShaderLibrary/Common.hlsl"
#include "../../../../Core/ShaderLibrary/CommonLighting.hlsl"
#include "ShaderLibrary/Color.hlsl"
#include "ShaderLibrary/Common.hlsl"
#include "ShaderLibrary/CommonLighting.hlsl"
#include "../../../ShaderVariables.hlsl"
TEXTURECUBE(_Cubemap);

#ifdef PERFORM_SKY_OCCLUSION_TEST
// Determine whether the sky is occluded by the scene geometry.
// Do not perform blending with the environment map if the sky is occluded.
float depthRaw = max(_SkyDepth, LOAD_TEXTURE2D(_MainDepthTexture, posInput.unPositionSS).r);
float skyTexWeight = (depthRaw > _SkyDepth) ? 0.0 : 1.0;
float deviceDepth = max(_SkyDepth, LOAD_TEXTURE2D(_MainDepthTexture, posInput.unPositionSS).r);
float skyTexWeight = (deviceDepth > _SkyDepth) ? 0.0 : 1.0;
float depthRaw = _SkyDepth;
float deviceDepth = _SkyDepth;
depthRaw = _SkyDepth;
deviceDepth = _SkyDepth;
UpdatePositionInput(depthRaw, UNITY_MATRIX_I_VP, k_identity4x4, posInput);
UpdatePositionInput(deviceDepth, UNITY_MATRIX_I_VP, k_identity4x4, posInput);
float4 c1, c2, c3;
VolundTransferScatter(GetAbsolutePositionWS(posInput.positionWS), c1, c2, c3);
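
The occlusion test in this file only renames depthRaw to deviceDepth; its intent can be restated in isolation as below, assuming a reversed-Z depth buffer where larger device-depth values are closer to the camera than the sky.

// Hedged sketch: if any scene geometry was written in front of the sky's depth value,
// the sky is occluded and the environment-map blend weight drops to zero.
float SkyTexWeightSketch(float sceneDeviceDepth, float skyDepth)
{
    float deviceDepth = max(skyDepth, sceneDeviceDepth);
    return (deviceDepth > skyDepth) ? 0.0 : 1.0;
}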

2
ScriptableRenderPipeline/HDRenderPipeline/Sky/BlitCubemap.shader


#pragma fragment frag
#pragma target 4.5
#include "../../Core/ShaderLibrary/Common.hlsl"
#include "ShaderLibrary/Common.hlsl"
TEXTURECUBE(_MainTex);
SAMPLERCUBE(sampler_MainTex);

4
ScriptableRenderPipeline/HDRenderPipeline/Sky/BuildProbabilityTables.compute


// Ref: PBRT v3, 13.6.7 "Piecewise-Constant 2D Distributions".
// Note that we use the equiareal sphere-to-square mapping instead of the latitude-longitude one.
#include "../../Core/ShaderLibrary/Common.hlsl"
#include "../../Core/ShaderLibrary/ImageBasedLighting.hlsl"
#include "ShaderLibrary/Common.hlsl"
#include "ShaderLibrary/ImageBasedLighting.hlsl"
/* --- Input --- */
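
The comment above points at PBRT's piecewise-constant 2D distributions. As background for that reference, here is a minimal sketch of the 1D building block such probability tables enable, namely sampling a bucket from a normalized CDF; it is illustrative only, since the compute shader's actual table layout is not shown in this hunk.

// Hedged sketch: invert a piecewise-constant 1D distribution. cdf has n + 1 entries
// with cdf[0] = 0 and cdf[n] = 1 (n <= 16 assumed for the fixed-size array);
// u is a uniform random number in [0, 1).
float SamplePiecewiseConstant1DSketch(float cdf[17], uint n, float u, out float pdf)
{
    uint lo = n - 1;
    for (uint i = 0; i < n; i++)
    {
        if (u < cdf[i + 1]) { lo = i; break; } // first bucket whose upper bound exceeds u
    }

    float bucketMass = cdf[lo + 1] - cdf[lo];        // probability mass of the bucket
    pdf = bucketMass * n;                            // density relative to the uniform one
    float t = (u - cdf[lo]) / max(bucketMass, 1e-6); // position within the bucket
    return (lo + t) / n;                             // continuous sample in [0, 1)
}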

4
ScriptableRenderPipeline/HDRenderPipeline/Sky/ComputeGgxIblSampleData.compute


// Precomputes data for IntegrateLD(). See that function for a detailed description.
#include "../../Core/ShaderLibrary/Common.hlsl"
#include "../../Core/ShaderLibrary/ImageBasedLighting.hlsl"
#include "ShaderLibrary/Common.hlsl"
#include "ShaderLibrary/ImageBasedLighting.hlsl"
#ifdef SHADER_API_MOBILE
#define MAX_IBL_SAMPLE_CNT 34

4
ScriptableRenderPipeline/HDRenderPipeline/Sky/GGXConvolve.shader


#pragma vertex Vert
#pragma fragment Frag
#include "../../Core/ShaderLibrary/Common.hlsl"
#include "../../Core/ShaderLibrary/ImageBasedLighting.hlsl"
#include "ShaderLibrary/Common.hlsl"
#include "ShaderLibrary/ImageBasedLighting.hlsl"
#include "SkyManager.cs.hlsl"
TEXTURECUBE(_MainTex);

6
ScriptableRenderPipeline/HDRenderPipeline/Sky/HDRISky/Resources/SkyHDRI.shader


#pragma target 4.5
#pragma only_renderers d3d11 ps4 vulkan metal // TEMP: until we go further in dev
#include "../../../../Core/ShaderLibrary/Color.hlsl"
#include "../../../../Core/ShaderLibrary/Common.hlsl"
#include "../../../../Core/ShaderLibrary/CommonLighting.hlsl"
#include "ShaderLibrary/Color.hlsl"
#include "ShaderLibrary/Common.hlsl"
#include "ShaderLibrary/CommonLighting.hlsl"
TEXTURECUBE(_Cubemap);
SAMPLERCUBE(sampler_Cubemap);

2
ScriptableRenderPipeline/HDRenderPipeline/Sky/OpaqueAtmosphericScattering.shader


#pragma enable_d3d11_debug_symbols
#include "../../Core/ShaderLibrary/Common.hlsl"
#include "ShaderLibrary/Common.hlsl"
#include "../ShaderVariables.hlsl"
#include "AtmosphericScattering/AtmosphericScattering.hlsl"

6
ScriptableRenderPipeline/HDRenderPipeline/Sky/ProceduralSky/Resources/ProceduralSky.shader


#pragma multi_compile _ _ENABLE_SUN_DISK
#include "../../../../Core/ShaderLibrary/Color.hlsl"
#include "../../../../Core/ShaderLibrary/Common.hlsl"
#include "../../../../Core/ShaderLibrary/CommonLighting.hlsl"
#include "ShaderLibrary/Color.hlsl"
#include "ShaderLibrary/Common.hlsl"
#include "ShaderLibrary/CommonLighting.hlsl"
float4 _SkyParam; // x exposure, y multiplier, z rotation
float4x4 _PixelCoordToViewDirWS; // Actually just 3x3, but Unity can only set 4x4

7
ScriptableRenderPipeline/HDRenderPipeline/Sky/SkyManager.cs


using (new ProfilingSample(cmd, "DynamicGI.UpdateEnvironment"))
{
// TODO: Properly send the cubemap to Enlighten. The current workaround is to set the cubemap in a Skybox/cubemap material
float intensity = IsSkyValid() ? 1.0f : 0.0f; // Eliminate all diffuse if we don't have a skybox (meaning for now the background is black in HDRP)
RenderSettings.skybox = IsSkyValid() ? m_StandardSkyboxMaterial : null; // Set up this material as the default to be used in RenderSettings
RenderSettings.ambientIntensity = 1.0f; // fix this to 1, this parameter should not exist!
RenderSettings.skybox = m_StandardSkyboxMaterial; // Set up this material as the default to be used in RenderSettings
RenderSettings.ambientIntensity = intensity;
RenderSettings.reflectionIntensity = 1.0f;
RenderSettings.reflectionIntensity = intensity;
RenderSettings.customReflection = null;
DynamicGI.UpdateEnvironment();

64
ScriptableRenderPipeline/LightweightPipeline/LightweightPipeline.cs


using System;
using System.Collections.Generic;
using System.Diagnostics;
using UnityEngine.Rendering;
using UnityEngine.Rendering.PostProcessing;
using UnityEngine.XR;

private ShadowSettings m_ShadowSettings = ShadowSettings.Default;
private ShadowSliceData[] m_ShadowSlices = new ShadowSliceData[kMaxCascades];
// Pipeline pass names
private static readonly ShaderPassName m_UnlitPassName = new ShaderPassName("SRPDefaultUnlit");
private static readonly ShaderPassName m_UnlitPassName = new ShaderPassName("SRPDefaultUnlit"); // Renders all shaders without a lightmode tag
// Legacy pass names
public static readonly ShaderPassName s_AlwaysName = new ShaderPassName("Always");
public static readonly ShaderPassName s_ForwardBaseName = new ShaderPassName("ForwardBase");
public static readonly ShaderPassName s_PrepassBaseName = new ShaderPassName("PrepassBase");
public static readonly ShaderPassName s_VertexName = new ShaderPassName("Vertex");
public static readonly ShaderPassName s_VertexLMRGBMName = new ShaderPassName("VertexLMRGBM");
public static readonly ShaderPassName s_VertexLMName = new ShaderPassName("VertexLM");
public static readonly ShaderPassName[] s_LegacyPassNames =
{
s_AlwaysName, s_ForwardBaseName, s_PrepassBaseName, s_VertexName, s_VertexLMRGBMName, s_VertexLMName
};
private RenderTextureFormat m_ColorFormat;
private PostProcessRenderContext m_PostProcessRenderContext;

private Mesh m_BlitQuad;
private Material m_BlitMaterial;
private Material m_CopyDepthMaterial;
private Material m_ErrorMaterial;
private int m_BlitTexID = Shader.PropertyToID("_BlitTex");
private CopyTextureSupport m_CopyTextureSupport;

m_BlitQuad = LightweightUtils.CreateQuadMesh(false);
m_BlitMaterial = CoreUtils.CreateEngineMaterial(m_Asset.BlitShader);
m_CopyDepthMaterial = CoreUtils.CreateEngineMaterial(m_Asset.CopyDepthShader);
m_ErrorMaterial = CoreUtils.CreateEngineMaterial("Hidden/InternalErrorShader");
}
public override void Dispose()

CoreUtils.Destroy(m_ErrorMaterial);
CoreUtils.Destroy(m_CopyDepthMaterial);
CoreUtils.Destroy(m_BlitMaterial);
}
CullResults m_CullResults;

context.DrawRenderers(m_CullResults.visibleRenderers, ref opaqueDrawSettings, opaqueFilterSettings);
// Render objects that did not match any shader pass with the error shader
RenderObjectsWithError(ref context, opaqueFilterSettings, SortFlags.None);
if (m_CurrCamera.clearFlags == CameraClearFlags.Skybox)
context.DrawSkybox(m_CurrCamera);
}

CommandBufferPool.Release(cmd);
}
private void RenderTransparents(ref ScriptableRenderContext context, RendererConfiguration config)
{
var transparentSettings = new DrawRendererSettings(m_CurrCamera, m_LitPassName);
transparentSettings.SetShaderPassName(1, m_UnlitPassName);
transparentSettings.sorting.flags = SortFlags.CommonTransparent;
transparentSettings.rendererConfiguration = config;
var transparentFilterSettings = new FilterRenderersSettings(true)
{
renderQueueRange = RenderQueueRange.transparent
};
context.DrawRenderers(m_CullResults.visibleRenderers, ref transparentSettings, transparentFilterSettings);
// Render objects that did not match any shader pass with the error shader
RenderObjectsWithError(ref context, transparentFilterSettings, SortFlags.None);
}
private void AfterTransparent(ref ScriptableRenderContext context, FrameRenderingConfiguration config)
{
if (!LightweightUtils.HasFlag(config, FrameRenderingConfiguration.PostProcess))

CommandBufferPool.Release(cmd);
}
private void RenderTransparents(ref ScriptableRenderContext context, RendererConfiguration config)
[Conditional("DEVELOPMENT_BUILD"), Conditional("UNITY_EDITOR")]
private void RenderObjectsWithError(ref ScriptableRenderContext context, FilterRenderersSettings filterSettings, SortFlags sortFlags)
var transparentSettings = new DrawRendererSettings(m_CurrCamera, m_LitPassName);
transparentSettings.SetShaderPassName(1, m_UnlitPassName);
transparentSettings.sorting.flags = SortFlags.CommonTransparent;
transparentSettings.rendererConfiguration = config;
var transparentFilterSettings = new FilterRenderersSettings(true)
if (m_ErrorMaterial != null)
renderQueueRange = RenderQueueRange.transparent
};
DrawRendererSettings errorSettings = new DrawRendererSettings(m_CurrCamera, s_LegacyPassNames[0]);
for (int i = 1; i < s_LegacyPassNames.Length; ++i)
errorSettings.SetShaderPassName(i, s_LegacyPassNames[i]);
context.DrawRenderers(m_CullResults.visibleRenderers, ref transparentSettings, transparentFilterSettings);
errorSettings.sorting.flags = sortFlags;
errorSettings.rendererConfiguration = RendererConfiguration.None;
errorSettings.SetOverrideMaterial(m_ErrorMaterial, 0);
context.DrawRenderers(m_CullResults.visibleRenderers, ref errorSettings, filterSettings);
}
}
private void BuildShadowSettings()

10
ScriptableRenderPipeline/LightweightPipeline/Resources/LightweightPipelineAsset.cs


{
public static readonly string m_SimpleLightShaderPath = "LightweightPipeline/Standard (Simple Lighting)";
public static readonly string m_StandardShaderPath = "LightweightPipeline/Standard (Physically Based)";
public static readonly string[] m_SearchPaths = {"Assets", "Packages/com.unity.render-pipelines"};
// Default values set when a new LightweightPipeline asset is created
[SerializeField] private int m_MaxPixelLights = 4;

{
var instance = CreateInstance<LightweightPipelineAsset>();
string[] guids = UnityEditor.AssetDatabase.FindAssets("LightweightPipelineResource t:scriptableobject");
string[] guids = UnityEditor.AssetDatabase.FindAssets("LightweightPipelineResource t:scriptableobject", m_SearchPaths);
LightweightPipelineResource resourceAsset = null;
foreach (string guid in guids)
{

break;
}
// There's currently an issue that prevents FindAssets from finding resources within the package folder.
if (resourceAsset == null)
{
string path = "Packages/com.unity.render-pipelines.lightweight/Resources/LightweightPipelineResource.asset";
resourceAsset = UnityEditor.AssetDatabase.LoadAssetAtPath<LightweightPipelineResource>(path);
}
if (resourceAsset != null)

8
ScriptableRenderPipeline/LightweightPipeline/Resources/LightweightPipelineResource.asset


m_Script: {fileID: 11500000, guid: eb91b173ce266e040aa972ca9a561308, type: 3}
m_Name: LightweightPipelineResource
m_EditorClassIdentifier:
DefaultMaterial: {fileID: 0}
DefaultParticleMaterial: {fileID: 0}
DefaultTerrainMaterial: {fileID: 0}
DefaultMaterial: {fileID: 2100000, guid: 31321ba15b8f8eb4c954353edc038b1d, type: 2}
DefaultParticleMaterial: {fileID: 2100000, guid: e823cd5b5d27c0f4b8256e7c12ee3e6d,
type: 2}
DefaultTerrainMaterial: {fileID: 2100000, guid: 594ea882c5a793440b60ff72d896021e,
type: 2}

13
ScriptableRenderPipeline/LightweightPipeline/Shaders/LightweightInput.cginc


#define MAX_VISIBLE_LIGHTS 16
// Must match the Lightweight ShaderGraph master node
struct SurfaceData
{
half3 albedo;
half3 specular;
half metallic;
half smoothness;
half3 normal;
half3 emission;
half occlusion;
half alpha;
};
struct LightInput
{
float4 pos;

151
ScriptableRenderPipeline/LightweightPipeline/Shaders/LightweightPassLit.cginc


#ifndef LIGHTWEIGHT_PASS_LIT_INCLUDED
#define LIGHTWEIGHT_PASS_LIT_INCLUDED
#include "LightweightSurfaceInput.cginc"
#ifdef _SPECULAR_SETUP
#define SAMPLE_METALLICSPECULAR(uv) tex2D(_SpecGlossMap, uv)
#else
#define SAMPLE_METALLICSPECULAR(uv) tex2D(_MetallicGlossMap, uv)
#endif
CBUFFER_START(MaterialProperties)
half4 _MainTex_ST;
half4 _Color;
half _Cutoff;
half _Glossiness;
half _GlossMapScale;
half _SmoothnessTextureChannel;
half _Metallic;
half4 _SpecColor;
half _BumpScale;
half _OcclusionStrength;
half4 _EmissionColor;
half _Shininess;
CBUFFER_END
sampler2D _MainTex;
sampler2D _MetallicGlossMap;
sampler2D _SpecGlossMap;
sampler2D _BumpMap;
sampler2D _OcclusionMap;
sampler2D _EmissionMap;
struct LightweightVertexInput
{
float4 vertex : POSITION;

};
///////////////////////////////////////////////////////////////////////////////
// Material Property Helpers //
///////////////////////////////////////////////////////////////////////////////
inline half Alpha(half albedoAlpha)
{
#if defined(_SMOOTHNESS_TEXTURE_ALBEDO_CHANNEL_A)
half alpha = _Color.a;
#else
half alpha = albedoAlpha * _Color.a;
#endif
#if defined(_ALPHATEST_ON)
clip(alpha - _Cutoff);
#endif
return alpha;
}
half3 Normal(float2 uv)
{
#if _NORMALMAP
return UnpackNormalScale(tex2D(_BumpMap, uv), _BumpScale);
#else
return half3(0.0h, 0.0h, 1.0h);
#endif
}
half4 SpecularGloss(half2 uv, half alpha)
{
half4 specularGloss = half4(0, 0, 0, 1);
#ifdef _SPECGLOSSMAP
specularGloss = tex2D(_SpecGlossMap, uv);
specularGloss.rgb = LIGHTWEIGHT_GAMMA_TO_LINEAR(specularGloss.rgb);
#elif defined(_SPECULAR_COLOR)
specularGloss = _SpecColor;
#endif
#ifdef _GLOSSINESS_FROM_BASE_ALPHA
specularGloss.a = alpha;
#endif
return specularGloss;
}
half4 MetallicSpecGloss(float2 uv, half albedoAlpha)
{
half4 specGloss;
#ifdef _METALLICSPECGLOSSMAP
specGloss = SAMPLE_METALLICSPECULAR(uv);
#ifdef _SMOOTHNESS_TEXTURE_ALBEDO_CHANNEL_A
specGloss.a = albedoAlpha * _GlossMapScale;
#else
specGloss.a *= _GlossMapScale;
#endif
#else // _METALLICSPECGLOSSMAP
#if _SPECULAR_SETUP
specGloss.rgb = _SpecColor.rgb;
#else
specGloss.rgb = _Metallic.rrr;
#endif
#ifdef _SMOOTHNESS_TEXTURE_ALBEDO_CHANNEL_A
specGloss.a = albedoAlpha * _GlossMapScale;
#else
specGloss.a = _Glossiness;
#endif
#endif
return specGloss;
}
half Occlusion(float2 uv)
{
#ifdef _OCCLUSIONMAP
#if (SHADER_TARGET < 30)
// SM20: instruction count limitation
// SM20: simpler occlusion
return tex2D(_OcclusionMap, uv).g;
#else
half occ = tex2D(_OcclusionMap, uv).g;
return _LerpOneTo(occ, _OcclusionStrength);
#endif
#else
return 1.0;
#endif
}
half3 Emission(float2 uv)
{
#ifndef _EMISSION
return 0;
#else
return LIGHTWEIGHT_GAMMA_TO_LINEAR(tex2D(_EmissionMap, uv).rgb) * _EmissionColor.rgb;
#endif
}
inline void InitializeStandardLitSurfaceData(LightweightVertexOutput IN, out SurfaceData outSurfaceData)
{
float2 uv = IN.uv01.xy;
half4 albedoAlpha = tex2D(_MainTex, uv);
half4 specGloss = MetallicSpecGloss(uv, albedoAlpha);
outSurfaceData.albedo = LIGHTWEIGHT_GAMMA_TO_LINEAR(albedoAlpha.rgb) * _Color.rgb;
#if _SPECULAR_SETUP
outSurfaceData.metallic = 1.0h;
outSurfaceData.specular = specGloss.rgb;
#else
outSurfaceData.metallic = specGloss.r;
outSurfaceData.specular = half3(0.0h, 0.0h, 0.0h);
#endif
outSurfaceData.smoothness = specGloss.a;
outSurfaceData.normal = Normal(uv);
outSurfaceData.occlusion = Occlusion(uv);
outSurfaceData.emission = Emission(uv);
outSurfaceData.alpha = Alpha(albedoAlpha.a);
}
///////////////////////////////////////////////////////////////////////////////
// Vertex and Fragment functions //
///////////////////////////////////////////////////////////////////////////////

half4 LitPassFragment(LightweightVertexOutput IN) : SV_Target
{
SurfaceData surfaceData;
InitializeStandardLitSurfaceData(IN, surfaceData);
InitializeStandardLitSurfaceData(IN.uv01.xy, surfaceData);
#if _NORMALMAP
half3 normalWS = TangentToWorldNormal(surfaceData.normal, IN.tangent, IN.binormal, IN.normal);
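
TangentToWorldNormal is called here but not defined in this hunk. For orientation, the conventional form of such a helper looks like the sketch below; it is an assumed implementation, not necessarily the one in the Lightweight includes.

// Hedged sketch: build the world-space tangent frame as matrix rows and transform the
// tangent-space normal by it (row-vector mul), then renormalize.
half3 TangentToWorldNormalSketch(half3 normalTS, half3 tangentWS, half3 binormalWS, half3 normalWS)
{
    half3x3 tangentToWorld = half3x3(tangentWS, binormalWS, normalWS);
    return normalize(mul(normalTS, tangentToWorld));
}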

25
ScriptableRenderPipeline/LightweightPipeline/Shaders/LightweightStandard.shader


}
ENDCG
}
// This pass is not used during regular rendering, only for lightmap baking.
Pass
{
Tags{"LightMode" = "Meta"}
Cull Off
CGPROGRAM
#pragma vertex LightweightVertexMeta
#pragma fragment LightweightFragmentMeta
#pragma shader_feature _EMISSION
#pragma shader_feature _METALLICSPECGLOSSMAP
#pragma shader_feature _ _SMOOTHNESS_TEXTURE_ALBEDO_CHANNEL_A
#pragma shader_feature EDITOR_VISUALIZATION
#pragma shader_feature _EMISSION
#pragma shader_feature _SPECGLOSSMAP
#include "LightweightPassMeta.cginc"
ENDCG
}
FallBack "Standard"
FallBack "Hidden/InternalErrorShader"
CustomEditor "LightweightStandardGUI"
}

54
ScriptableRenderPipeline/LightweightPipeline/Shaders/LightweightStandardSimpleLighting.shader


CGPROGRAM
#define UNITY_SETUP_BRDF_INPUT SpecularSetup
#pragma vertex LightweightVertexMeta
#pragma fragment LightweightFragmentMeta
#pragma fragment LightweightFragmentMetaSimple
#pragma shader_feature _ _SMOOTHNESS_TEXTURE_ALBEDO_CHANNEL_A
#pragma shader_feature ___ _DETAIL_MULX2
#pragma shader_feature EDITOR_VISUALIZATION
#include "LightweightPassLit.cginc"
#include "UnityMetaPass.cginc"
struct MetaVertexInput
{
float4 vertex : POSITION;
half3 normal : NORMAL;
float2 uv0 : TEXCOORD0;
float2 uv1 : TEXCOORD1;
float2 uv2 : TEXCOORD2;
#ifdef _TANGENT_TO_WORLD
half4 tangent : TANGENT;
#endif
UNITY_VERTEX_INPUT_INSTANCE_ID
};
struct MetaVertexOuput
{
float4 pos : SV_POSITION;
float2 uv : TEXCOORD0;
};
MetaVertexOuput LightweightVertexMeta(MetaVertexInput v)
{
MetaVertexOuput o;
o.pos = UnityMetaVertexPosition(v.vertex, v.uv1.xy, v.uv2.xy, unity_LightmapST, unity_DynamicLightmapST);
o.uv = TRANSFORM_TEX(v.uv0, _MainTex);
return o;
}
fixed4 LightweightFragmentMeta(MetaVertexOuput i) : SV_Target
{
UnityMetaInput o;
UNITY_INITIALIZE_OUTPUT(UnityMetaInput, o);
o.Albedo = _Color.rgb * tex2D(_MainTex, i.uv).rgb;
o.SpecularColor = SpecularGloss(i.uv.xy, 1.0);
#ifdef _EMISSION
o.Emission += LIGHTWEIGHT_GAMMA_TO_LINEAR(tex2D(_EmissionMap, i.uv).rgb) * _EmissionColor;
#else
o.Emission += _EmissionColor;
#endif
return UnityMetaFragment(o);
}
#include "LightweightPassMeta.cginc"
Fallback "Standard (Specular setup)"
Fallback "Hidden/InternalErrorShader"
CustomEditor "LightweightStandardSimpleLightingGUI"
}

2
ScriptableRenderPipeline/LightweightPipeline/Shaders/LightweightStandardTerrain.shader


ENDCG
}
}
Fallback "Hidden/InternalErrorShader"
}

2
ScriptableRenderPipeline/LightweightPipeline/Shaders/LightweightUnlit.shader


}
SubShader
{
Tags { "RenderType" = "Opaque" "IgnoreProjectors" = "True" "RenderPipeline" = "LightweightPipe" "Lightmode" = "LightweightForward" }
Tags { "RenderType" = "Opaque" "IgnoreProjectors" = "True" "RenderPipeline" = "LightweightPipe" }
LOD 100
Blend [_SrcBlend][_DstBlend]

4
ScriptableRenderPipeline/master-package.json


{
"version": "0.1.10",
"version": "0.1.11",
"com.unity.postprocessing": "0.1.3"
"com.unity.postprocessing": "0.1.4"
},
"subPackages": [
"Core",

2
TestbedPipelines/Fptl/FptlLighting.cs


scInit.resourceBinder = binder;
m_ShadowMgr = new ShadowManager(shadowSettings, ref scInit, m_Shadowmaps);
// set global overrides - these need to match the override specified in ShadowDispatch.hlsl
// set global overrides - these need to match the override specified in Fptl/Shadow.hlsl
m_ShadowMgr.SetGlobalShadowOverride( GPUShadowType.Point , ShadowAlgorithm.PCF, ShadowVariant.V1, ShadowPrecision.High, true );
m_ShadowMgr.SetGlobalShadowOverride( GPUShadowType.Spot , ShadowAlgorithm.PCF, ShadowVariant.V1, ShadowPrecision.High, true );
m_ShadowMgr.SetGlobalShadowOverride( GPUShadowType.Directional , ShadowAlgorithm.PCF, ShadowVariant.V1, ShadowPrecision.High, true );

31
TestbedPipelines/Fptl/LightingTemplate.hlsl


#define CUBEMAPFACE_POSITIVE_Z 4
#define CUBEMAPFACE_NEGATIVE_Z 5
#define SHADOW_FPTL
# if defined(SHADER_API_D3D11)
# include "../../ScriptableRenderPipeline/Core/ShaderLibrary/API/D3D11.hlsl"
# elif defined(SHADER_API_PSSL)
# include "../../ScriptableRenderPipeline/Core/ShaderLibrary/API/PSSL.hlsl"
# elif defined(SHADER_API_XBOXONE)
# include "../../ScriptableRenderPipeline/Core/ShaderLibrary/API/D3D11.hlsl"
# include "../../ScriptableRenderPipeline/Core/ShaderLibrary/API/D3D11_1.hlsl"
# elif defined(SHADER_API_METAL)
# include "../../ScriptableRenderPipeline/Core/ShaderLibrary/API/Metal.hlsl"
# else
# error unsupported shader api
# endif
# include "../../ScriptableRenderPipeline/Core/ShaderLibrary/API/Validate.hlsl"
# include "../../ScriptableRenderPipeline/Core/ShaderLibrary/Shadow/Shadow.hlsl"
#undef SHADOW_FPTL
#if defined(SHADER_API_D3D11)
# include "ShaderLibrary/API/D3D11.hlsl"
#elif defined(SHADER_API_PSSL)
# include "ShaderLibrary/API/PSSL.hlsl"
#elif defined(SHADER_API_XBOXONE)
# include "ShaderLibrary/API/D3D11.hlsl"
# include "ShaderLibrary/API/D3D11_1.hlsl"
#elif defined(SHADER_API_METAL)
# include "ShaderLibrary/API/Metal.hlsl"
#else
# error unsupported shader api
#endif
#include "ShaderLibrary/API/Validate.hlsl"
#include "Shadow.hlsl"
CBUFFER_START(ShadowLightData)

7
TestbedPipelines/Fptl/ShadowContext.hlsl


// This can be custom for each project and needs to be in sync with the ShadowMgr
#ifndef FPTL_SHADOW_CONTEXT_HLSL
#define FPTL_SHADOW_CONTEXT_HLSL
#define SHADOWCONTEXT_MAX_TEX2DARRAY 1
#define SHADOWCONTEXT_MAX_TEXCUBEARRAY 0

SHADOWCONTEXT_DECLARE( SHADOWCONTEXT_MAX_TEX2DARRAY, SHADOWCONTEXT_MAX_TEXCUBEARRAY, SHADOWCONTEXT_MAX_COMPSAMPLER, SHADOWCONTEXT_MAX_SAMPLER );
#include "ShaderLibrary/Shadow/Shadow.hlsl"
TEXTURE2D_ARRAY(_ShadowmapExp_PCF);
SAMPLER2D_SHADOW(sampler_ShadowmapExp_PCF);

return sc;
}
#endif // FPTL_SHADOW_CONTEXT_HLSL

13
TestbedPipelines/Fptl/ShadowContext.hlsl.meta


fileFormatVersion: 2
guid: 30016f0dcc663b14483b62ccf2e8a7ce
timeCreated: 1503477804
licenseType: Pro
ShaderImporter:
externalObjects: {}
defaultTextures: []
userData:
assetBundleName:
assetBundleVariant:
fileFormatVersion: 2
guid: 7c02f6444b51404e99b1158f7f152678
timeCreated: 1511273566

2
TestbedPipelines/OnTileDeferredPipeline/OnTileDeferredRenderPipeline.cs


scInit.resourceBinder = binder;
m_ShadowMgr = new ShadowManager(shadowSettings, ref scInit, m_Shadowmaps);
// set global overrides - these need to match the override specified in ShadowDispatch.hlsl
// set global overrides - these need to match the override specified in Fptl/Shadow.hlsl
m_ShadowMgr.SetGlobalShadowOverride( GPUShadowType.Point , ShadowAlgorithm.PCF, ShadowVariant.V1, ShadowPrecision.High, true );
m_ShadowMgr.SetGlobalShadowOverride( GPUShadowType.Spot , ShadowAlgorithm.PCF, ShadowVariant.V1, ShadowPrecision.High, true );
m_ShadowMgr.SetGlobalShadowOverride( GPUShadowType.Directional , ShadowAlgorithm.PCF, ShadowVariant.V1, ShadowPrecision.High, true );

30
TestbedPipelines/OnTileDeferredPipeline/Shaders/LightingTemplate.hlsl


#define BOX_LIGHT (2)
#define DIRECTIONAL_LIGHT (3)
#define SHADOW_FPTL
# if defined(SHADER_API_D3D11)
# include "../../../ScriptableRenderPipeline/Core/ShaderLibrary/API/D3D11.hlsl"
# elif defined(SHADER_API_PSSL)
# include "../../../ScriptableRenderPipeline/Core/ShaderLibrary/API/PSSL.hlsl"
# elif defined(SHADER_API_XBOXONE)
# include "../../../ScriptableRenderPipeline/Core/ShaderLibrary/API/D3D11.hlsl"
# include "../../../ScriptableRenderPipeline/Core/ShaderLibrary/API/D3D11_1.hlsl"
# elif defined(SHADER_API_METAL)
# include "../../../ScriptableRenderPipeline/Core/ShaderLibrary/API/Metal.hlsl"
# else
# error unsupported shader api
# endif
# include "../../../ScriptableRenderPipeline/Core/ShaderLibrary/API/Validate.hlsl"
# include "../../../ScriptableRenderPipeline/Core/ShaderLibrary/Shadow/Shadow.hlsl"
#undef SHADOW_FPTL
#if defined(SHADER_API_D3D11)
# include "ShaderLibrary/API/D3D11.hlsl"
#elif defined(SHADER_API_PSSL)
# include "ShaderLibrary/API/PSSL.hlsl"
#elif defined(SHADER_API_XBOXONE)
# include "ShaderLibrary/API/D3D11.hlsl"
# include "ShaderLibrary/API/D3D11_1.hlsl"
#elif defined(SHADER_API_METAL)
# include "ShaderLibrary/API/Metal.hlsl"
#else
# error unsupported shader api
#endif
#include "ShaderLibrary/API/Validate.hlsl"
#include "../../Fptl/Shadow.hlsl"
UNITY_DECLARE_DEPTH_TEXTURE(_CameraGBufferZ);

30
TestbedPipelines/OnTileDeferredPipeline/Shaders/UnityStandardForwardMobile.cginc


#define CUBEMAPFACE_POSITIVE_Z 4
#define CUBEMAPFACE_NEGATIVE_Z 5
#define SHADOW_FPTL
# if defined(SHADER_API_D3D11)
# include "../../../ScriptableRenderPipeline/Core/ShaderLibrary/API/D3D11.hlsl"
# elif defined(SHADER_API_PSSL)
# include "../../../ScriptableRenderPipeline/Core/ShaderLibrary/API/PSSL.hlsl"
# elif defined(SHADER_API_XBOXONE)
# include "../../../ScriptableRenderPipeline/Core/ShaderLibrary/API/D3D11.hlsl"
# include "../../../ScriptableRenderPipeline/Core/ShaderLibrary/API/D3D11_1.hlsl"
# elif defined(SHADER_API_METAL)
# include "../../../ScriptableRenderPipeline/Core/ShaderLibrary/API/Metal.hlsl"
# else
# error unsupported shader api
# endif
# include "../../../ScriptableRenderPipeline/Core/ShaderLibrary/API/Validate.hlsl"
# include "../../../ScriptableRenderPipeline/Core/ShaderLibrary/Shadow/Shadow.hlsl"
#undef SHADOW_FPTL
#if defined(SHADER_API_D3D11)
# include "ShaderLibrary/API/D3D11.hlsl"
#elif defined(SHADER_API_PSSL)
# include "ShaderLibrary/API/PSSL.hlsl"
#elif defined(SHADER_API_XBOXONE)
# include "ShaderLibrary/API/D3D11.hlsl"
# include "ShaderLibrary/API/D3D11_1.hlsl"
#elif defined(SHADER_API_METAL)
# include "ShaderLibrary/API/Metal.hlsl"
#else
# error unsupported shader api
#endif
#include "ShaderLibrary/API/Validate.hlsl"
#include "../../Fptl/Shadow.hlsl"
struct VertexOutputForwardNew
{

2
ScriptableRenderPipeline/Core/Editor/CoreShaderIncludePaths.cs.meta


fileFormatVersion: 2
guid: e303d56aac48c1f4280b30146e9d0a63
guid: b40d30371f67ce54e89386dc076a895a
MonoImporter:
externalObjects: {}
serializedVersion: 2

8
SampleScenes/Common/Materials.meta


fileFormatVersion: 2
guid: d6989f167ae4d07419315e04a2920061
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

Some files were not shown because too many files changed in this diff
