
Merge remote-tracking branch 'refs/remotes/origin/master' into HDRP_GraphicTests

Sebastien Lagarde, 7 years ago
Current commit: b32a2d17
60 files changed, 3413 insertions(+), 3389 deletions(-)
  1. ScriptableRenderPipeline/Core/CoreRP/MousePositionDebug.cs (2 changes)
  2. ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/API/GLCore.hlsl (10 changes)
  3. ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/API/GLES3.hlsl (10 changes)
  4. ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/Color.hlsl (2 changes)
  5. ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/Debug.hlsl (14 changes)
  6. ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/Packing.hlsl (39 changes)
  7. ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/Shadow/ShadowSampling.hlsl (245 changes)
  8. ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/UnityInstancing.hlsl (13 changes)
  9. ScriptableRenderPipeline/Core/package.json (2 changes)
  10. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Camera/HDAdditionalCameraData.cs (66 changes)
  11. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/DebugDisplay.hlsl (9 changes)
  12. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/DebugViewTiles.shader (4 changes)
  13. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/MaterialDebug.cs (1 change)
  14. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/MaterialDebug.cs.hlsl (3 changes)
  15. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Camera/HDCameraEditor.Handlers.cs (2 changes)
  16. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Camera/HDCameraEditor.cs (3 changes)
  17. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/RenderLoopSettings/RenderPipelineSettingsUI.cs (3 changes)
  18. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/RenderLoopSettings/SerializedRenderPipelineSettings.cs (2 changes)
  19. ScriptableRenderPipeline/HDRenderPipeline/HDRP/HDRenderPipeline.cs (30 changes)
  20. ScriptableRenderPipeline/HDRenderPipeline/HDRP/HDRenderPipelineAsset.cs (26 changes)
  21. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/LayeredLit/LayeredLitData.hlsl (2 changes)
  22. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Lit/Lit.cs (2 changes)
  23. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Lit/Lit.cs.hlsl (2 changes)
  24. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Lit/Lit.hlsl (297 changes)
  25. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/SubsurfaceScattering/SubsurfaceScattering.hlsl (6 changes)
  26. ScriptableRenderPipeline/HDRenderPipeline/HDRP/RenderPipeline/FrameSettings.cs (4 changes)
  27. ScriptableRenderPipeline/HDRenderPipeline/HDRP/RenderPipeline/RenderPipelineSettings.cs (1 change)
  28. ScriptableRenderPipeline/HDRenderPipeline/HDRP/ShaderPass/ShaderPassForward.hlsl (2 changes)
  29. ScriptableRenderPipeline/HDRenderPipeline/HDRP/ShaderPass/ShaderPassForwardUnlit.hlsl (3 changes)
  30. ScriptableRenderPipeline/HDRenderPipeline/HDRP/ShaderPass/VaryingMesh.hlsl (4 changes)
  31. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Sky/ProceduralSky/Resources/ProceduralSky.shader (7 changes)
  32. ScriptableRenderPipeline/HDRenderPipeline/package.json (4 changes)
  33. ScriptableRenderPipeline/LightweightPipeline/LWRP/Data/LightweightPipelineAsset.cs (110 changes)
  34. ScriptableRenderPipeline/LightweightPipeline/LWRP/LightweightConstantBuffer.cs (13 changes)
  35. ScriptableRenderPipeline/LightweightPipeline/LWRP/LightweightPipeline.cs (124 changes)
  36. ScriptableRenderPipeline/LightweightPipeline/LWRP/ShaderLibrary/Input.hlsl (11 changes)
  37. ScriptableRenderPipeline/LightweightPipeline/LWRP/ShaderLibrary/InputSurface.hlsl (4 changes)
  38. ScriptableRenderPipeline/LightweightPipeline/LWRP/ShaderLibrary/Lighting.hlsl (118 changes)
  39. ScriptableRenderPipeline/LightweightPipeline/LWRP/ShaderLibrary/LightweightPassLit.hlsl (82 changes)
  40. ScriptableRenderPipeline/LightweightPipeline/LWRP/ShaderLibrary/Particles.hlsl (22 changes)
  41. ScriptableRenderPipeline/LightweightPipeline/LWRP/ShaderLibrary/Shadows.hlsl (80 changes)
  42. ScriptableRenderPipeline/LightweightPipeline/LWRP/Shaders/LightweightStandard.shader (1 change)
  43. ScriptableRenderPipeline/LightweightPipeline/LWRP/Shaders/LightweightStandardParticles.shader (18 changes)
  44. ScriptableRenderPipeline/LightweightPipeline/LWRP/Shaders/LightweightStandardTerrain.shader (60 changes)
  45. ScriptableRenderPipeline/LightweightPipeline/LWRP/Shaders/LightweightStandardUnlit.shader (5 changes)
  46. ScriptableRenderPipeline/LightweightPipeline/package.json (4 changes)
  47. ScriptableRenderPipeline/master-package.json (2 changes)
  48. Tests/GraphicsTests/RenderPipeline/LightweightPipeline/LightweightPipelineAsset.asset (4 changes)
  49. Tests/GraphicsTests/RenderPipeline/LightweightPipeline/LightweightPipelineAsset.asset.meta (3 changes)
  50. Tests/GraphicsTests/RenderPipeline/LightweightPipeline/Scenes/040_UpgradeScene/LightingData.asset (65 changes)
  51. Tests/GraphicsTests/RenderPipeline/LightweightPipeline/Scenes/040_UpgradeScene/LightingData.asset.meta (2 changes)
  52. Tests/GraphicsTests/RenderPipeline/LightweightPipeline/Scenes/040_UpgradeScene/Lightmap-0_comp_dir.png (999 changes)
  53. Tests/GraphicsTests/RenderPipeline/LightweightPipeline/Scenes/040_UpgradeScene/Lightmap-0_comp_light.exr (1001 changes)
  54. Tests/GraphicsTests/RenderPipeline/LightweightPipeline/Scenes/040_UpgradeScene/Lightmap-1_comp_dir.png (999 changes)
  55. Tests/GraphicsTests/RenderPipeline/LightweightPipeline/Scenes/040_UpgradeScene/Lightmap-1_comp_light.exr (1001 changes)
  56. Tests/GraphicsTests/RenderPipeline/LightweightPipeline/Scenes/040_UpgradeScene/ReflectionProbe-0.exr (1001 changes)
  57. ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/Shadow/ShadowSamplingTent.hlsl (244 changes)
  58. ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/Shadow/ShadowSamplingTent.hlsl.meta (9 changes)
  59. /ScriptableRenderPipeline/LightweightPipeline/LWRP/Shaders/LightweightStandardUnlit.shader.meta (0 changes)
  60. /ScriptableRenderPipeline/LightweightPipeline/LWRP/Shaders/LightweightStandardUnlit.shader (0 changes)

ScriptableRenderPipeline/Core/CoreRP/MousePositionDebug.cs (2 changes)


}
}
#if UNITY_EDITOR
#if UNITY_EDITOR
private void OnSceneGUI(UnityEditor.SceneView sceneview)
{
m_mousePosition = Event.current.mousePosition;

ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/API/GLCore.hlsl (10 changes)


#define SAMPLE_TEXTURECUBE(textureName, samplerName, coord3) textureName.Sample(samplerName, coord3)
#define SAMPLE_TEXTURECUBE_LOD(textureName, samplerName, coord3, lod) textureName.SampleLevel(samplerName, coord3, lod)
#define SAMPLE_TEXTURECUBE_BIAS(textureName, samplerName, coord3, bias) textureName.SampleBias(samplerName, coord3, bias)
#if OPENGL4_1_SM5
#define SAMPLE_TEXTURECUBE_ARRAY(textureName, samplerName, coord3, index) textureName.Sample(samplerName, float4(coord3, index))
#define SAMPLE_TEXTURECUBE_ARRAY_LOD(textureName, samplerName, coord3, index, lod) textureName.SampleLevel(samplerName, float4(coord3, index), lod)
#define SAMPLE_TEXTURECUBE_ARRAY_BIAS(textureName, samplerName, coord3, index, bias) textureName.SampleBias(samplerName, float4(coord3, index), bias)
#else
#ifdef UNITY_NO_CUBEMAP_ARRAY
#else
#define SAMPLE_TEXTURECUBE_ARRAY(textureName, samplerName, coord3, index) textureName.Sample(samplerName, float4(coord3, index))
#define SAMPLE_TEXTURECUBE_ARRAY_LOD(textureName, samplerName, coord3, index, lod) textureName.SampleLevel(samplerName, float4(coord3, index), lod)
#define SAMPLE_TEXTURECUBE_ARRAY_BIAS(textureName, samplerName, coord3, index, bias) textureName.SampleBias(samplerName, float4(coord3, index), bias)
#endif
#define SAMPLE_TEXTURE3D(textureName, samplerName, coord3) textureName.Sample(samplerName, coord3)

ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/API/GLES3.hlsl (10 changes)


#define SAMPLE_TEXTURECUBE_LOD(textureName, samplerName, coord3, lod) textureName.SampleLevel(samplerName, coord3, lod)
#define SAMPLE_TEXTURECUBE_BIAS(textureName, samplerName, coord3, bias) textureName.SampleBias(samplerName, coord3, bias)
#if GLES3_1_AEP
#define SAMPLE_TEXTURECUBE_ARRAY(textureName, samplerName, coord3, index) textureName.Sample(samplerName, float4(coord3, index))
#define SAMPLE_TEXTURECUBE_ARRAY_LOD(textureName, samplerName, coord3, index, lod) textureName.SampleLevel(samplerName, float4(coord3, index), lod)
#define SAMPLE_TEXTURECUBE_ARRAY_BIAS(textureName, samplerName, coord3, index, bias) textureName.SampleBias(samplerName, float4(coord3, index), bias)
#else
#ifdef UNITY_NO_CUBEMAP_ARRAY
#else
#define SAMPLE_TEXTURECUBE_ARRAY(textureName, samplerName, coord3, index) textureName.Sample(samplerName, float4(coord3, index))
#define SAMPLE_TEXTURECUBE_ARRAY_LOD(textureName, samplerName, coord3, index, lod) textureName.SampleLevel(samplerName, float4(coord3, index), lod)
#define SAMPLE_TEXTURECUBE_ARRAY_BIAS(textureName, samplerName, coord3, index, bias) textureName.SampleBias(samplerName, float4(coord3, index), bias)
#endif
#define SAMPLE_TEXTURE3D(textureName, samplerName, coord3) textureName.Sample(samplerName, coord3)

ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/Color.hlsl (2 changes)


real3 FastLinearToSRGB(real3 c)
{
return max(1.055 * pow(c, 0.416666667) - 0.055, 0.0);
return saturate(1.055 * pow(abs(c), 0.416666667) - 0.055);
}
real4 FastLinearToSRGB(real4 c)

ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/Debug.hlsl (14 changes)


else if (index == 8)
outColor = real3(0.75, 1.0, 0.25);
else if (index == 9)
outColor = real3(0.75, 0.25, 1.0);
outColor = real3(0.75, 0.25, 1.0);
else if (index == 10)
outColor = real3(0.25, 1.0, 0.75);
else if (index == 11)
outColor = real3(0.75, 0.75, 0.25);
else if (index == 12)
outColor = real3(0.75, 0.25, 0.75);
else if (index == 13)
outColor = real3(0.25, 0.75, 0.75);
else if (index == 14)
outColor = real3(0.25, 0.25, 0.75);
else if (index == 15)
outColor = real3(0.75, 0.25, 0.25);
return outColor;
}

ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/Packing.hlsl (39 changes)


{
// Perform planar projection.
real3 p = n * rcp(dot(abs(n), 1.0));
real x = p.x, y = p.y, z = p.z;
real r = 1 - p.x + p.y;
real g = p.x + p.y;
// Left side of the 2:1 rectangle for the negative hemisphere, right otherwise.
// We also correct the aspect ratio from 2:1 to 1:1.
real s = CopySign(0.5, p.z);
// Also correct the aspect ratio from 2:1 to 1:1.
real r = saturate(0.5 - 0.5 * x + 0.5 * y);
real g = x + y;
return real2(s * r, g);
// Negative hemisphere on the left, positive on the right.
return real2(CopySign(r, z), g);
real r = f.r;
real g = f.g;
real s = FastSign(r);
real r = f.r, g = f.g;
real x = 0.5 * g + 0.5 - s * r;
real x = 0.5 + 0.5 * g - abs(r);
real z = s * max(1.0 - abs(x) - abs(y), FLT_EPS); // Clamping is absolutely crucial for numerical stability
real z = max(1.0 - abs(x) - abs(y), FLT_EPS); // EPS is absolutely crucial for anisotropy
real3 p = real3(x, y, z);
real3 p = real3(x, y, CopySign(z, r));
return normalize(p);
}
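A side note on the oct-rect normal packing changed above: the hunk only shows the modified lines, so the following standalone C# roundtrip is a hedged sketch of the scheme the comments describe (planar projection onto |x|+|y|+|z| = 1, aspect-ratio correction of the 2:1 rectangle, hemisphere sign folded into the first channel). The line y = g - x is inferred from g = x + y and is not quoted from the file.

using System;

// Hedged sketch of the oct-rect normal encode/decode (standalone, not the Unity code).
static class OctRectNormalSketch
{
    static float CopySign(float x, float s) => s >= 0.0f ? Math.Abs(x) : -Math.Abs(x);

    // Planar projection onto |x|+|y|+|z| = 1, fold the 2:1 rectangle to 1:1,
    // and put the hemisphere sign into the first channel.
    public static (float r, float g) Encode(float nx, float ny, float nz)
    {
        float invL1 = 1.0f / (Math.Abs(nx) + Math.Abs(ny) + Math.Abs(nz));
        float x = nx * invL1, y = ny * invL1, z = nz * invL1;
        float r = Math.Min(Math.Max(0.5f - 0.5f * x + 0.5f * y, 0.0f), 1.0f);
        float g = x + y;
        return (CopySign(r, z), g); // negative hemisphere on the left, positive on the right
    }

    public static (float x, float y, float z) Decode(float r, float g)
    {
        float x = 0.5f + 0.5f * g - Math.Abs(r);
        float y = g - x;                        // assumption: inverted from g = x + y
        float z = CopySign(Math.Max(1.0f - Math.Abs(x) - Math.Abs(y), 1e-6f), r);
        float len = (float)Math.Sqrt(x * x + y * y + z * z);
        return (x / len, y / len, z / len);
    }

    static void Main()
    {
        var (r, g) = Encode(0.3f, -0.5f, 0.8f);   // the projection is scale-invariant, no need to normalize first
        var n = Decode(r, g);
        Console.WriteLine($"{n.x:F3} {n.y:F3} {n.z:F3}"); // ~ (0.303, -0.505, 0.808) = normalize(0.3, -0.5, 0.8)
    }
}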

// ...
// Example: precision is 1024.0, maxi is 8, i is [0..7] encode on 3 bit. f is [0..1] encode on 7 bit.
//...
real PackFloatInt(real f, int i, real maxi, real precision)
real PackFloatInt(real f, uint i, real maxi, real precision)
{
// Constant
real precisionMinusOne = precision - 1.0;

return t1 * f + t2 * real(i);
}
void UnpackFloatInt(real val, real maxi, real precision, out real f, out int i)
void UnpackFloatInt(real val, real maxi, real precision, out real f, out uint i)
{
// Constant
real precisionMinusOne = precision - 1.0;

}
// Define various variants for ease of reading
real PackFloatInt8bit(real f, int i, real maxi)
real PackFloatUInt8bit(real f, uint i, real maxi)
void UnpackFloatInt8bit(real val, real maxi, out real f, out int i)
void UnpackFloatUInt8bit(real val, real maxi, out real f, out uint i)
real PackFloatInt10bit(real f, int i, real maxi)
real PackFloatUInt10bit(real f, uint i, real maxi)
void UnpackFloatInt10bit(real val, real maxi, out real f, out int i)
void UnpackFloatUInt10bit(real val, real maxi, out real f, out uint i)
real PackFloatInt16bit(real f, int i, real maxi)
real PackFloatUInt16bit(real f, uint i, real maxi)
void UnpackFloatInt16bit(real val, real maxi, out real f, out int i)
void UnpackFloatUInt16bit(real val, real maxi, out real f, out uint i)
{
UnpackFloatInt(val, maxi, 65536.0, f, i);
}
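The comment above gives the example "precision is 1024.0, maxi is 8"; the constants actually used by the HLSL functions are elided from this hunk, so the following C# sketch only illustrates the same idea with explicit bit arithmetic (my own helper, not the Unity implementation): the integer takes the top 3 bits of a 10-bit payload and the [0..1] float is quantized to the remaining 7 bits, all normalized back to [0..1].

using System;

// Hedged illustration of packing a float and a small uint into one normalized value.
static class PackFloatUIntSketch
{
    const float Precision = 1024.0f; // 10-bit payload
    const uint  MaxI      = 8;       // 3 bits for the integer, 7 bits left for the float

    public static float Pack(float f, uint i)
    {
        uint fBits   = (uint)Math.Round(f * (Precision / MaxI - 1.0f)); // 0..127
        uint payload = i * (uint)(Precision / MaxI) + fBits;            // 10-bit payload
        return payload / (Precision - 1.0f);                            // normalize to [0..1]
    }

    public static void Unpack(float packed, out float f, out uint i)
    {
        uint payload = (uint)Math.Round(packed * (Precision - 1.0f));
        i = payload / (uint)(Precision / MaxI);                         // top 3 bits
        f = (payload % (uint)(Precision / MaxI)) / (Precision / MaxI - 1.0f);
    }

    static void Main()
    {
        float packed = Pack(0.37f, 5);
        Unpack(packed, out float f, out uint i);
        Console.WriteLine($"packed={packed:F5} -> f={f:F3}, i={i}"); // f ~= 0.370 (7-bit), i = 5
    }
}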

ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/Shadow/ShadowSampling.hlsl (245 changes)


// Various shadow sampling logic.
// Again two versions, one for dynamic resource indexing, one for static resource access.
// ------------------------------------------------------------------
// PCF Filtering helpers
// ------------------------------------------------------------------
// Assuming an isosceles right-angled triangle of height "triangleHeight" (as drawn below),
// this function returns the area of the triangle above the first texel.
//
// |\ <-- 45 degree slope isosceles right-angled triangle
// | \
// ---- <-- length of this side is "triangleHeight"
// _ _ _ _ <-- texels
real SampleShadow_GetTriangleTexelArea(real triangleHeight)
{
return triangleHeight - 0.5;
}
// Assuming an isosceles triangle of 1.5 texels height and 3 texels wide lying on 4 texels,
// this function returns the area of the triangle above each of those texels.
// | <-- offset from -0.5 to 0.5, 0 meaning the triangle is exactly centered
// / \ <-- 45 degree slope isosceles triangle (i.e. tent projected in 2D)
// / \
// _ _ _ _ <-- texels
// X Y Z W <-- result indices (in computedArea.xyzw and computedAreaUncut.xyzw)
void SampleShadow_GetTexelAreas_Tent_3x3(real offset, out real4 computedArea, out real4 computedAreaUncut)
{
// Compute the exterior areas
real offset01SquaredHalved = (offset + 0.5) * (offset + 0.5) * 0.5;
computedAreaUncut.x = computedArea.x = offset01SquaredHalved - offset;
computedAreaUncut.w = computedArea.w = offset01SquaredHalved;
// Compute the middle areas
// For Y: we find the area in Y as if the left section of the isosceles triangle would
// intersect the axis between Y and Z (i.e. where offset = 0).
computedAreaUncut.y = SampleShadow_GetTriangleTexelArea(1.5 - offset);
// This area is larger than the one we are looking for if (offset < 0), thus we need to
// subtract the area of the triangle defined by (0,1.5-offset), (0,1.5+offset), (-offset,1.5).
real clampedOffsetLeft = min(offset,0);
real areaOfSmallLeftTriangle = clampedOffsetLeft * clampedOffsetLeft;
computedArea.y = computedAreaUncut.y - areaOfSmallLeftTriangle;
// We do the same for Z but with the right part of the isosceles triangle
computedAreaUncut.z = SampleShadow_GetTriangleTexelArea(1.5 + offset);
real clampedOffsetRight = max(offset,0);
real areaOfSmallRightTriangle = clampedOffsetRight * clampedOffsetRight;
computedArea.z = computedAreaUncut.z - areaOfSmallRightTriangle;
}
// Assuming an isosceles triangle of 1.5 texels height and 3 texels wide lying on 4 texels,
// this function returns the weight of each texel's area relative to the full triangle area.
void SampleShadow_GetTexelWeights_Tent_3x3(real offset, out real4 computedWeight)
{
real4 dummy;
SampleShadow_GetTexelAreas_Tent_3x3(offset, computedWeight, dummy);
computedWeight *= 0.44444;//0.44 == 1/(the triangle area)
}
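A quick way to check the two functions above is to port them to C# and verify that, for any offset in [-0.5, 0.5], the four per-texel areas sum to the tent area 3 * 1.5 / 2 = 2.25, so the four weights sum to 1 after the 1/2.25 = 0.44444 normalization:

using System;

// Direct C# port of SampleShadow_GetTexelAreas_Tent_3x3 (the "cut" areas) used to
// sanity-check that the resulting 3x3 tent weights always sum to ~1.
static class Tent3x3Sketch
{
    static float TriangleTexelArea(float triangleHeight) => triangleHeight - 0.5f;

    public static float[] TexelAreas(float offset)
    {
        float half = (offset + 0.5f) * (offset + 0.5f) * 0.5f;
        float x = half - offset;                                    // exterior texel, left
        float w = half;                                             // exterior texel, right
        float left  = Math.Min(offset, 0.0f);
        float right = Math.Max(offset, 0.0f);
        float y = TriangleTexelArea(1.5f - offset) - left * left;   // interior texel, left
        float z = TriangleTexelArea(1.5f + offset) - right * right; // interior texel, right
        return new[] { x, y, z, w };
    }

    static void Main()
    {
        foreach (float offset in new[] { -0.5f, -0.25f, 0.0f, 0.3f, 0.5f })
        {
            float sumW = 0.0f;
            foreach (float a in TexelAreas(offset)) sumW += a * 0.44444f; // weight = area / 2.25
            Console.WriteLine($"offset {offset,5:F2}: weight sum = {sumW:F4}"); // ~1.0 each time
        }
    }
}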
// Assuming an isosceles triangle of 2.5 texels height and 5 texels wide lying on 6 texels,
// this function returns the weight of each texel's area relative to the full triangle area.
// / \
// _ _ _ _ _ _ <-- texels
// 0 1 2 3 4 5 <-- computed area indices (in texelsWeights[])
void SampleShadow_GetTexelWeights_Tent_5x5(real offset, out real3 texelsWeightsA, out real3 texelsWeightsB)
{
// See _UnityInternalGetAreaPerTexel_3TexelTriangleFilter for details.
real4 computedArea_From3texelTriangle;
real4 computedAreaUncut_From3texelTriangle;
SampleShadow_GetTexelAreas_Tent_3x3(offset, computedArea_From3texelTriangle, computedAreaUncut_From3texelTriangle);
// Triangle slope is 45 degree thus we can almost reuse the result of the 3 texel wide computation.
// the 5 texel wide triangle can be seen as the 3 texel wide one but shifted up by one unit/texel.
// 0.16 is 1/(the triangle area)
texelsWeightsA.x = 0.16 * (computedArea_From3texelTriangle.x);
texelsWeightsA.y = 0.16 * (computedAreaUncut_From3texelTriangle.y);
texelsWeightsA.z = 0.16 * (computedArea_From3texelTriangle.y + 1);
texelsWeightsB.x = 0.16 * (computedArea_From3texelTriangle.z + 1);
texelsWeightsB.y = 0.16 * (computedAreaUncut_From3texelTriangle.z);
texelsWeightsB.z = 0.16 * (computedArea_From3texelTriangle.w);
}
// Assuming an isosceles triangle of 3.5 texels height and 7 texels wide lying on 8 texels,
// this function returns the weight of each texel's area relative to the full triangle area.
// / \
// _ _ _ _ _ _ _ _ <-- texels
// 0 1 2 3 4 5 6 7 <-- computed area indices (in texelsWeights[])
void SampleShadow_GetTexelWeights_Tent_7x7(real offset, out real4 texelsWeightsA, out real4 texelsWeightsB)
{
// See _UnityInternalGetAreaPerTexel_3TexelTriangleFilter for details.
real4 computedArea_From3texelTriangle;
real4 computedAreaUncut_From3texelTriangle;
SampleShadow_GetTexelAreas_Tent_3x3(offset, computedArea_From3texelTriangle, computedAreaUncut_From3texelTriangle);
// Triangle slope is 45 degree thus we can almost reuse the result of the 3 texel wide computation.
// the 7 texel wide triangle can be seen as the 3 texel wide one but shifted up by two unit/texel.
// 0.081632 is 1/(the triangle area)
texelsWeightsA.x = 0.081632 * (computedArea_From3texelTriangle.x);
texelsWeightsA.y = 0.081632 * (computedAreaUncut_From3texelTriangle.y);
texelsWeightsA.z = 0.081632 * (computedAreaUncut_From3texelTriangle.y + 1);
texelsWeightsA.w = 0.081632 * (computedArea_From3texelTriangle.y + 2);
texelsWeightsB.x = 0.081632 * (computedArea_From3texelTriangle.z + 2);
texelsWeightsB.y = 0.081632 * (computedAreaUncut_From3texelTriangle.z + 1);
texelsWeightsB.z = 0.081632 * (computedAreaUncut_From3texelTriangle.z);
texelsWeightsB.w = 0.081632 * (computedArea_From3texelTriangle.w);
}
// 3x3 Tent filter (45 degree sloped triangles in U and V)
void SampleShadow_ComputeSamples_Tent_3x3(real4 shadowMapTexture_TexelSize, real2 coord, out real fetchesWeights[4], out real2 fetchesUV[4])
{
// tent base is 3x3 base thus covering from 9 to 12 texels, thus we need 4 bilinear PCF fetches
real2 tentCenterInTexelSpace = coord.xy * shadowMapTexture_TexelSize.zw;
real2 centerOfFetchesInTexelSpace = floor(tentCenterInTexelSpace + 0.5);
real2 offsetFromTentCenterToCenterOfFetches = tentCenterInTexelSpace - centerOfFetchesInTexelSpace;
// find the weight of each texel based on the area of the 45 degree slope tent above each of them.
real4 texelsWeightsU, texelsWeightsV;
SampleShadow_GetTexelWeights_Tent_3x3(offsetFromTentCenterToCenterOfFetches.x, texelsWeightsU);
SampleShadow_GetTexelWeights_Tent_3x3(offsetFromTentCenterToCenterOfFetches.y, texelsWeightsV);
// each fetch will cover a group of 2x2 texels, the weight of each group is the sum of the weights of the texels
real2 fetchesWeightsU = texelsWeightsU.xz + texelsWeightsU.yw;
real2 fetchesWeightsV = texelsWeightsV.xz + texelsWeightsV.yw;
// move the PCF bilinear fetches to respect texels weights
real2 fetchesOffsetsU = texelsWeightsU.yw / fetchesWeightsU.xy + real2(-1.5,0.5);
real2 fetchesOffsetsV = texelsWeightsV.yw / fetchesWeightsV.xy + real2(-1.5,0.5);
fetchesOffsetsU *= shadowMapTexture_TexelSize.xx;
fetchesOffsetsV *= shadowMapTexture_TexelSize.yy;
real2 bilinearFetchOrigin = centerOfFetchesInTexelSpace * shadowMapTexture_TexelSize.xy;
fetchesUV[0] = bilinearFetchOrigin + real2(fetchesOffsetsU.x, fetchesOffsetsV.x);
fetchesUV[1] = bilinearFetchOrigin + real2(fetchesOffsetsU.y, fetchesOffsetsV.x);
fetchesUV[2] = bilinearFetchOrigin + real2(fetchesOffsetsU.x, fetchesOffsetsV.y);
fetchesUV[3] = bilinearFetchOrigin + real2(fetchesOffsetsU.y, fetchesOffsetsV.y);
fetchesWeights[0] = fetchesWeightsU.x * fetchesWeightsV.x;
fetchesWeights[1] = fetchesWeightsU.y * fetchesWeightsV.x;
fetchesWeights[2] = fetchesWeightsU.x * fetchesWeightsV.y;
fetchesWeights[3] = fetchesWeightsU.y * fetchesWeightsV.y;
}
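The "move the PCF bilinear fetches to respect texels weights" step relies on a standard identity: a single bilinear fetch placed at fractional offset w1 / (w0 + w1) between two texels, scaled by (w0 + w1), equals the two weighted point taps w0*t0 + w1*t1. That is why the 4 texel weights per axis of the 3x3 tent collapse to 2 bilinear fetches per axis. A minimal C# check of that identity (1D, with hypothetical values):

using System;

static class BilinearMergeSketch
{
    // Value a bilinear sampler would return at fractional position 'frac' in [0,1]
    // between two neighbouring texels t0 and t1 (1D for simplicity).
    static float BilinearFetch(float t0, float t1, float frac) => t0 * (1.0f - frac) + t1 * frac;

    static void Main()
    {
        float t0 = 0.2f, t1 = 0.9f;   // arbitrary shadow-map texel values
        float w0 = 0.35f, w1 = 0.10f; // per-texel tent weights

        float direct = w0 * t0 + w1 * t1;                                 // two point taps
        float merged = (w0 + w1) * BilinearFetch(t0, t1, w1 / (w0 + w1)); // one bilinear tap

        Console.WriteLine($"direct = {direct:F5}, merged = {merged:F5}"); // identical (0.16)
    }
}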
// 5x5 Tent filter (45 degree sloped triangles in U and V)
void SampleShadow_ComputeSamples_Tent_5x5(real4 shadowMapTexture_TexelSize, real2 coord, out real fetchesWeights[9], out real2 fetchesUV[9])
{
// tent base is 5x5 base thus covering from 25 to 36 texels, thus we need 9 bilinear PCF fetches
real2 tentCenterInTexelSpace = coord.xy * shadowMapTexture_TexelSize.zw;
real2 centerOfFetchesInTexelSpace = floor(tentCenterInTexelSpace + 0.5);
real2 offsetFromTentCenterToCenterOfFetches = tentCenterInTexelSpace - centerOfFetchesInTexelSpace;
// find the weight of each texel based on the area of a 45 degree slope tent above each of them.
real3 texelsWeightsU_A, texelsWeightsU_B;
real3 texelsWeightsV_A, texelsWeightsV_B;
SampleShadow_GetTexelWeights_Tent_5x5(offsetFromTentCenterToCenterOfFetches.x, texelsWeightsU_A, texelsWeightsU_B);
SampleShadow_GetTexelWeights_Tent_5x5(offsetFromTentCenterToCenterOfFetches.y, texelsWeightsV_A, texelsWeightsV_B);
// each fetch will cover a group of 2x2 texels, the weight of each group is the sum of the weights of the texels
real3 fetchesWeightsU = real3(texelsWeightsU_A.xz, texelsWeightsU_B.y) + real3(texelsWeightsU_A.y, texelsWeightsU_B.xz);
real3 fetchesWeightsV = real3(texelsWeightsV_A.xz, texelsWeightsV_B.y) + real3(texelsWeightsV_A.y, texelsWeightsV_B.xz);
// move the PCF bilinear fetches to respect texels weights
real3 fetchesOffsetsU = real3(texelsWeightsU_A.y, texelsWeightsU_B.xz) / fetchesWeightsU.xyz + real3(-2.5,-0.5,1.5);
real3 fetchesOffsetsV = real3(texelsWeightsV_A.y, texelsWeightsV_B.xz) / fetchesWeightsV.xyz + real3(-2.5,-0.5,1.5);
fetchesOffsetsU *= shadowMapTexture_TexelSize.xxx;
fetchesOffsetsV *= shadowMapTexture_TexelSize.yyy;
real2 bilinearFetchOrigin = centerOfFetchesInTexelSpace * shadowMapTexture_TexelSize.xy;
fetchesUV[0] = bilinearFetchOrigin + real2(fetchesOffsetsU.x, fetchesOffsetsV.x);
fetchesUV[1] = bilinearFetchOrigin + real2(fetchesOffsetsU.y, fetchesOffsetsV.x);
fetchesUV[2] = bilinearFetchOrigin + real2(fetchesOffsetsU.z, fetchesOffsetsV.x);
fetchesUV[3] = bilinearFetchOrigin + real2(fetchesOffsetsU.x, fetchesOffsetsV.y);
fetchesUV[4] = bilinearFetchOrigin + real2(fetchesOffsetsU.y, fetchesOffsetsV.y);
fetchesUV[5] = bilinearFetchOrigin + real2(fetchesOffsetsU.z, fetchesOffsetsV.y);
fetchesUV[6] = bilinearFetchOrigin + real2(fetchesOffsetsU.x, fetchesOffsetsV.z);
fetchesUV[7] = bilinearFetchOrigin + real2(fetchesOffsetsU.y, fetchesOffsetsV.z);
fetchesUV[8] = bilinearFetchOrigin + real2(fetchesOffsetsU.z, fetchesOffsetsV.z);
fetchesWeights[0] = fetchesWeightsU.x * fetchesWeightsV.x;
fetchesWeights[1] = fetchesWeightsU.y * fetchesWeightsV.x;
fetchesWeights[2] = fetchesWeightsU.z * fetchesWeightsV.x;
fetchesWeights[3] = fetchesWeightsU.x * fetchesWeightsV.y;
fetchesWeights[4] = fetchesWeightsU.y * fetchesWeightsV.y;
fetchesWeights[5] = fetchesWeightsU.z * fetchesWeightsV.y;
fetchesWeights[6] = fetchesWeightsU.x * fetchesWeightsV.z;
fetchesWeights[7] = fetchesWeightsU.y * fetchesWeightsV.z;
fetchesWeights[8] = fetchesWeightsU.z * fetchesWeightsV.z;
}
// 7x7 Tent filter (45 degree sloped triangles in U and V)
void SampleShadow_ComputeSamples_Tent_7x7(real4 shadowMapTexture_TexelSize, real2 coord, out real fetchesWeights[16], out real2 fetchesUV[16])
{
// tent base is 7x7 base thus covering from 49 to 64 texels, thus we need 16 bilinear PCF fetches
real2 tentCenterInTexelSpace = coord.xy * shadowMapTexture_TexelSize.zw;
real2 centerOfFetchesInTexelSpace = floor(tentCenterInTexelSpace + 0.5);
real2 offsetFromTentCenterToCenterOfFetches = tentCenterInTexelSpace - centerOfFetchesInTexelSpace;
// find the weight of each texel based on the area of a 45 degree slope tent above each of them.
real4 texelsWeightsU_A, texelsWeightsU_B;
real4 texelsWeightsV_A, texelsWeightsV_B;
SampleShadow_GetTexelWeights_Tent_7x7(offsetFromTentCenterToCenterOfFetches.x, texelsWeightsU_A, texelsWeightsU_B);
SampleShadow_GetTexelWeights_Tent_7x7(offsetFromTentCenterToCenterOfFetches.y, texelsWeightsV_A, texelsWeightsV_B);
// each fetch will cover a group of 2x2 texels, the weight of each group is the sum of the weights of the texels
real4 fetchesWeightsU = real4(texelsWeightsU_A.xz, texelsWeightsU_B.xz) + real4(texelsWeightsU_A.yw, texelsWeightsU_B.yw);
real4 fetchesWeightsV = real4(texelsWeightsV_A.xz, texelsWeightsV_B.xz) + real4(texelsWeightsV_A.yw, texelsWeightsV_B.yw);
// move the PCF bilinear fetches to respect texels weights
real4 fetchesOffsetsU = real4(texelsWeightsU_A.yw, texelsWeightsU_B.yw) / fetchesWeightsU.xyzw + real4(-3.5,-1.5,0.5,2.5);
real4 fetchesOffsetsV = real4(texelsWeightsV_A.yw, texelsWeightsV_B.yw) / fetchesWeightsV.xyzw + real4(-3.5,-1.5,0.5,2.5);
fetchesOffsetsU *= shadowMapTexture_TexelSize.xxxx;
fetchesOffsetsV *= shadowMapTexture_TexelSize.yyyy;
real2 bilinearFetchOrigin = centerOfFetchesInTexelSpace * shadowMapTexture_TexelSize.xy;
fetchesUV[0] = bilinearFetchOrigin + real2(fetchesOffsetsU.x, fetchesOffsetsV.x);
fetchesUV[1] = bilinearFetchOrigin + real2(fetchesOffsetsU.y, fetchesOffsetsV.x);
fetchesUV[2] = bilinearFetchOrigin + real2(fetchesOffsetsU.z, fetchesOffsetsV.x);
fetchesUV[3] = bilinearFetchOrigin + real2(fetchesOffsetsU.w, fetchesOffsetsV.x);
fetchesUV[4] = bilinearFetchOrigin + real2(fetchesOffsetsU.x, fetchesOffsetsV.y);
fetchesUV[5] = bilinearFetchOrigin + real2(fetchesOffsetsU.y, fetchesOffsetsV.y);
fetchesUV[6] = bilinearFetchOrigin + real2(fetchesOffsetsU.z, fetchesOffsetsV.y);
fetchesUV[7] = bilinearFetchOrigin + real2(fetchesOffsetsU.w, fetchesOffsetsV.y);
fetchesUV[8] = bilinearFetchOrigin + real2(fetchesOffsetsU.x, fetchesOffsetsV.z);
fetchesUV[9] = bilinearFetchOrigin + real2(fetchesOffsetsU.y, fetchesOffsetsV.z);
fetchesUV[10] = bilinearFetchOrigin + real2(fetchesOffsetsU.z, fetchesOffsetsV.z);
fetchesUV[11] = bilinearFetchOrigin + real2(fetchesOffsetsU.w, fetchesOffsetsV.z);
fetchesUV[12] = bilinearFetchOrigin + real2(fetchesOffsetsU.x, fetchesOffsetsV.w);
fetchesUV[13] = bilinearFetchOrigin + real2(fetchesOffsetsU.y, fetchesOffsetsV.w);
fetchesUV[14] = bilinearFetchOrigin + real2(fetchesOffsetsU.z, fetchesOffsetsV.w);
fetchesUV[15] = bilinearFetchOrigin + real2(fetchesOffsetsU.w, fetchesOffsetsV.w);
fetchesWeights[0] = fetchesWeightsU.x * fetchesWeightsV.x;
fetchesWeights[1] = fetchesWeightsU.y * fetchesWeightsV.x;
fetchesWeights[2] = fetchesWeightsU.z * fetchesWeightsV.x;
fetchesWeights[3] = fetchesWeightsU.w * fetchesWeightsV.x;
fetchesWeights[4] = fetchesWeightsU.x * fetchesWeightsV.y;
fetchesWeights[5] = fetchesWeightsU.y * fetchesWeightsV.y;
fetchesWeights[6] = fetchesWeightsU.z * fetchesWeightsV.y;
fetchesWeights[7] = fetchesWeightsU.w * fetchesWeightsV.y;
fetchesWeights[8] = fetchesWeightsU.x * fetchesWeightsV.z;
fetchesWeights[9] = fetchesWeightsU.y * fetchesWeightsV.z;
fetchesWeights[10] = fetchesWeightsU.z * fetchesWeightsV.z;
fetchesWeights[11] = fetchesWeightsU.w * fetchesWeightsV.z;
fetchesWeights[12] = fetchesWeightsU.x * fetchesWeightsV.w;
fetchesWeights[13] = fetchesWeightsU.y * fetchesWeightsV.w;
fetchesWeights[14] = fetchesWeightsU.z * fetchesWeightsV.w;
fetchesWeights[15] = fetchesWeightsU.w * fetchesWeightsV.w;
}
#include "CoreRP/ShaderLibrary/Shadow/ShadowSamplingTent.hlsl"
// ------------------------------------------------------------------
// PCF Filtering methods

ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/UnityInstancing.hlsl (13 changes)


#endif
#if defined(UNITY_INSTANCED_SH) && !defined(LIGHTMAP_ON)
// In HDRenderPipeline we only decide to look at probe data based on lightmap flags.
#if !defined(DYNAMICLIGHTMAP_ON)
#if !defined(DYNAMICLIGHTMAP_ON)
#define UNITY_USE_SHCOEFFS_ARRAYS
#endif
#if defined(SHADOWS_SHADOWMASK)

#undef UNITY_MATRIX_I_M
#define UNITY_MATRIX_I_M UNITY_ACCESS_INSTANCED_PROP(MERGE_UNITY_BUILTINS_INDEX(UNITY_WORLDTOOBJECTARRAY_CB), unity_WorldToObjectArray)
inline float4 UnityObjectToClipPosInstanced(in float3 pos)
{
return mul(UNITY_MATRIX_VP, mul(UNITY_MATRIX_M, float4(pos, 1.0)));
}
inline float4 UnityObjectToClipPosInstanced(float4 pos)
{
return UnityObjectToClipPosInstanced(pos.xyz);
}
#define UnityObjectToClipPos UnityObjectToClipPosInstanced
#else // UNITY_INSTANCING_ENABLED

ScriptableRenderPipeline/Core/package.json (2 changes)


{
"name": "com.unity.render-pipelines.core",
"description": "Core library for Unity render pipelines.",
"version": "0.1.24",
"version": "0.1.25",
"unity": "2018.1",
"dependencies": {
"com.unity.postprocessing": "0.1.7"

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Camera/HDAdditionalCameraData.cs (66 changes)


float m_Version = 1.0f;
#pragma warning restore 414
Camera m_camera;
// This struct allows adding a specialized path in HDRenderPipeline (it can be used to render a mini map or planar reflection, etc.)
// A rendering path is the list of rendering passes that will be executed at runtime and depends on the associated FrameSettings.
// Default is the default rendering path defined by the HDRenderPipelineAsset FrameSettings.

[FormerlySerializedAs("serializedFrameSettings")]
FrameSettings m_FrameSettings = new FrameSettings(); // Serialize frameSettings
// Not serialized, not visible
// Not serialized, visible only in the debug windows
bool m_frameSettingsIsDirty = true;
// Used for debug windows.
// When the camera name changes we need to update the name in the debug windows.
// This is the purpose of this class.
bool m_IsDebugRegistered = false;
string m_CameraRegisterName;
// This is the function used outside to access FrameSettings. It returns the current state of FrameSettings for the camera,
// taking into account the customization via the debug menu.
bool m_IsDebugRegistered = false;
Camera m_camera;
string m_CameraRegisterName;
// This function is called at the beginning of the camera loop in HDRenderPipeline.Render().
// It allows correctly initializing the m_FrameSettingsRuntime to use.
// If the camera uses defaultFrameSettings they must be copied into m_FrameSettingsRuntime,
// otherwise it is the serialized m_FrameSettings that are used.
// This is required so each camera has its own debug settings even if they all use the RenderingPath.Default path,
// and it is important at runtime as the Default Camera from the scene preview doesn't exist.
// assetFrameSettingsIsDirty is the current dirty frame settings state of the HDRenderPipelineAsset;
// if it is dirty and the camera uses RenderingPath.Default, we need to update it.
// defaultFrameSettings are the settings stored in the HDRenderPipelineAsset.
public void UpdateDirtyFrameSettings(bool assetFrameSettingsIsDirty, FrameSettings defaultFrameSettings)
{
if (m_frameSettingsIsDirty || assetFrameSettingsIsDirty)
{
// We do a copy of the settings to those effectively used
if (renderingPath == RenderingPath.Default)
{
defaultFrameSettings.CopyTo(m_FrameSettingsRuntime);
}
else
{
m_FrameSettings.CopyTo(m_FrameSettingsRuntime);
}
m_frameSettingsIsDirty = false;
}
}
FrameSettings.RegisterDebug(m_camera.name, GetFrameSettings());
// Note that we register m_FrameSettingsRuntime, so manipulating it in the Debug windows
// doesn't affect the serialized version
if (m_camera.cameraType != CameraType.Preview)
{
FrameSettings.RegisterDebug(m_camera.name, GetFrameSettings());
}
m_CameraRegisterName = m_camera.name;
m_IsDebugRegistered = true;
}

{
if (m_IsDebugRegistered)
{
FrameSettings.UnRegisterDebug(m_CameraRegisterName);
if (m_camera.cameraType != CameraType.Preview)
{
FrameSettings.UnRegisterDebug(m_CameraRegisterName);
}
m_IsDebugRegistered = false;
}
}

m_camera = GetComponent<Camera>();
m_camera.allowHDR = false;
m_FrameSettings.CopyTo(m_FrameSettingsRuntime);
// Tag as dirty so frameSettings are correctly initialized at the next HDRenderPipeline.Render() call
m_frameSettingsIsDirty = true;
RegisterDebug();
}

// We need to detect name change in the editor and update debug windows accordingly
#if UNITY_EDITOR
if (m_camera.name != m_CameraRegisterName)
{

public void OnAfterDeserialize()
{
// We do a copy of the settings to those effectively used
m_FrameSettings.CopyTo(m_FrameSettingsRuntime);
// This is called on load or when these settings are changed.
// When FrameSettings are manipulated or the RenderPath changes, we reset them to reflect the change, discarding all the debug window changes.
// Tag as dirty so frameSettings are correctly initialized at the next HDRenderPipeline.Render() call
m_frameSettingsIsDirty = true;
}
}
}
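The comments in this file describe a dirty-copy pattern: the serialized FrameSettings are never consumed directly at render time; a runtime copy is refreshed whenever either the camera's own settings or the asset's default settings have been flagged dirty. A minimal C# sketch of that pattern (simplified stand-in types, not the HDRP classes):

using System;

class CameraSettingsSketch
{
    public bool useDefaultPath = true;       // stands in for RenderingPath.Default
    public string serialized = "per-camera"; // stands in for the serialized FrameSettings
    public string runtime = "";              // effective settings, editable from a debug window
    public bool dirty = true;                // set on OnValidate / OnAfterDeserialize

    public void UpdateDirty(bool assetDirty, string assetDefault)
    {
        if (dirty || assetDirty)
        {
            // Copy either the asset defaults or this camera's own serialized settings;
            // debug-window edits made to 'runtime' afterwards survive until the next change.
            runtime = useDefaultPath ? assetDefault : serialized;
            dirty = false;
        }
    }

    static void Main()
    {
        var cam = new CameraSettingsSketch();
        cam.UpdateDirty(assetDirty: false, assetDefault: "asset-default");
        Console.WriteLine(cam.runtime); // "asset-default": this camera follows the default path
    }
}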

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/DebugDisplay.hlsl (9 changes)


#include "MipMapDebug.cs.hlsl"
#include "ColorPickerDebug.cs.hlsl"
CBUFFER_START(UnityDebugDisplay)
// Set of parameters available when switching to debug shader mode
int _DebugLightingMode; // Match enum DebugLightingMode
int _DebugViewMaterial; // Contain the id (define in various materialXXX.cs.hlsl) of the property to display

float4 _MousePixelCoord; // xy unorm, zw norm
CBUFFER_END
TEXTURE2D(_DebugFont); // Debug font to write string in shader

#endif
break;
case DEBUGVIEWPROPERTIES_INSTANCING:
#if defined(UNITY_INSTANCING_ENABLED)
result = float3(1.0, 0.0, 0.0);
#else
result = float3(0.0, 0.0, 0.0);
#endif
break;
}
}

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/DebugViewTiles.shader (4 changes)


int2 pixelCoord = posInput.positionSS.xy;
int2 tileCoord = (float2)pixelCoord / GetTileSize();
int2 mouseTileCoord = _MousePixelCoord / GetTileSize();
int2 mouseTileCoord = _MousePixelCoord.xy / GetTileSize();
int2 offsetInTile = pixelCoord - tileCoord * GetTileSize();
int n = 0;

int maxLights = 32;
if (tileCoord.y < LIGHTCATEGORY_COUNT && tileCoord.x < maxLights + 3)
{
PositionInputs mousePosInput = GetPositionInput(_MousePixelCoord, _ScreenSize.zw, mouseTileCoord);
PositionInputs mousePosInput = GetPositionInput(_MousePixelCoord.xy, _ScreenSize.zw, mouseTileCoord);
float depthMouse = LOAD_TEXTURE2D(_MainDepthTexture, mousePosInput.positionSS).x;
UpdatePositionInput(depthMouse, UNITY_MATRIX_I_VP, UNITY_MATRIX_VP, mousePosInput);

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/MaterialDebug.cs (1 change)


TessellationDisplacement,
DepthOffset,
Lightmap,
Instancing,
Last,
}
}

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/MaterialDebug.cs.hlsl (3 changes)


#define DEBUGVIEWPROPERTIES_TESSELLATION_DISPLACEMENT (19)
#define DEBUGVIEWPROPERTIES_DEPTH_OFFSET (20)
#define DEBUGVIEWPROPERTIES_LIGHTMAP (21)
#define DEBUGVIEWPROPERTIES_LAST (22)
#define DEBUGVIEWPROPERTIES_INSTANCING (22)
#define DEBUGVIEWPROPERTIES_LAST (23)
#endif

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Camera/HDCameraEditor.Handlers.cs (2 changes)


EditorUtility.CopySerialized(c.GetComponent<HDAdditionalCameraData>(), m_PreviewAdditionalCameraData);
var layer = c.GetComponent<PostProcessLayer>() ?? ComponentSingleton<PostProcessLayer>.instance;
EditorUtility.CopySerialized(layer, m_PreviewPostProcessLayer);
m_PreviewCamera.cameraType = CameraType.SceneView;
m_PreviewCamera.cameraType = CameraType.SceneView; // This is required, otherwise if we use Preview the image is flipped... (Unity...)
m_PreviewHDCamera.Update(m_PreviewPostProcessLayer, m_PreviewAdditionalCameraData.GetFrameSettings());
var previewTexture = GetPreviewTextureWithSize((int)previewSize.x, (int)previewSize.y);

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Camera/HDCameraEditor.cs (3 changes)


m_UIState.Reset(m_SerializedCamera, Repaint);
m_PreviewCamera = EditorUtility.CreateGameObjectWithHideFlags("Preview Camera", HideFlags.HideAndDontSave, typeof(Camera)).GetComponent<Camera>();
m_PreviewCamera.enabled = false;
m_PreviewCamera.cameraType = CameraType.Preview; // Must be init before adding HDAdditionalCameraData
m_PreviewCamera.enabled = false;
m_PreviewHDCamera = new HDCamera(m_PreviewCamera);
m_PreviewHDCamera.Update(m_PreviewPostProcessLayer, m_PreviewAdditionalCameraData.GetFrameSettings());
}

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/RenderLoopSettings/RenderPipelineSettingsUI.cs (3 changes)


public RenderPipelineSettingsUI()
: base(0)
{
}
public override void Reset(SerializedRenderPipelineSettings data, UnityAction repaint)

EditorGUILayout.PropertyField(d.supportDBuffer, _.GetContent("Support Decal Buffer"));
EditorGUILayout.PropertyField(d.supportMSAA, _.GetContent("Support MSAA"));
EditorGUILayout.PropertyField(d.supportSubsurfaceScattering, _.GetContent("Support Subsurface Scattering"));
EditorGUILayout.PropertyField(d.supportAsyncCompute, _.GetContent("Support AsyncCompute"));
--EditorGUI.indentLevel;
}
}

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/RenderLoopSettings/SerializedRenderPipelineSettings.cs (2 changes)


public SerializedProperty supportDBuffer;
public SerializedProperty supportMSAA;
public SerializedProperty supportSubsurfaceScattering;
public SerializedProperty supportAsyncCompute;
public SerializedGlobalLightLoopSettings lightLoopSettings;
public SerializedShadowInitParameters shadowInitParams;

supportDBuffer = root.Find((RenderPipelineSettings s) => s.supportDBuffer);
supportMSAA = root.Find((RenderPipelineSettings s) => s.supportMSAA);
supportSubsurfaceScattering = root.Find((RenderPipelineSettings s) => s.supportSubsurfaceScattering);
supportAsyncCompute = root.Find((RenderPipelineSettings s) => s.supportAsyncCompute);
lightLoopSettings = new SerializedGlobalLightLoopSettings(root.Find((RenderPipelineSettings s) => s.lightLoopSettings));
shadowInitParams = new SerializedShadowInitParameters(root.Find((RenderPipelineSettings s) => s.shadowInitParams));

ScriptableRenderPipeline/HDRenderPipeline/HDRP/HDRenderPipeline.cs (30 changes)


m_FrameCount = Time.frameCount;
}
// We first update the state of the asset frame settings as they can be used by various cameras,
// but we keep the dirty state to correctly reset other cameras that use RenderingPath.Default.
bool assetFrameSettingsIsDirty = m_Asset.frameSettingsIsDirty;
m_Asset.UpdateDirtyFrameSettings();
foreach (var camera in cameras)
{
if (camera == null)

// Note: the SceneView camera will never have additionalCameraData
// Note: the scene view camera will never have additionalCameraData
var srcFrameSettings = (additionalCameraData && additionalCameraData.renderingPath != HDAdditionalCameraData.RenderingPath.Default)
? additionalCameraData.GetFrameSettings()
: m_Asset.GetFrameSettings();
// Init the effective frame settings of each camera.
// Each camera has its own debug frame settings controlled from the debug windows.
// Debug frame settings can't be aggregated with frame settings (i.e. we can't aggregate the forward-only control, for example),
// so debug settings (when used) are the effective frame settings.
// To get this behavior we init the effective frame settings with the serialized frame settings and copy
// debug setting changes on top of them. Each time frame settings are changed in the editor, we reset all debug settings
// to stay in sync. The loop below allows updating all frame settings correctly and is required because
// a camera can rely on the default frame settings from the HDRenderPipelineAsset.
FrameSettings srcFrameSettings;
if (additionalCameraData)
{
additionalCameraData.UpdateDirtyFrameSettings(assetFrameSettingsIsDirty, m_Asset.GetFrameSettings());
srcFrameSettings = additionalCameraData.GetFrameSettings();
}
else
{
srcFrameSettings = m_Asset.GetFrameSettings();
}
// Get the effective frame settings for this camera taking into account the global setting and camera type
FrameSettings.InitializeFrameSettings(camera, m_Asset.GetRenderPipelineSettings(), srcFrameSettings, ref m_FrameSettings);
// This is the main command buffer used for the frame.

ScriptableRenderPipeline/HDRenderPipeline/HDRP/HDRenderPipelineAsset.cs (26 changes)


FrameSettings m_FrameSettings = new FrameSettings(); // These are the default FrameSettings for all cameras and apply to the scene view; public to be visible in the inspector
// Not serialized, not visible, the settings effectively used
FrameSettings m_FrameSettingsRuntime = new FrameSettings();
bool m_frameSettingsIsDirty = true;
public bool frameSettingsIsDirty
{
get { return m_frameSettingsIsDirty; }
}
}
// See comment in FrameSettings.UpdateDirtyFrameSettings()
// for detail about this function
public void UpdateDirtyFrameSettings()
{
if (m_frameSettingsIsDirty)
{
m_FrameSettings.CopyTo(m_FrameSettingsRuntime);
m_frameSettingsIsDirty = false;
}
}
// Store the various RenderPipelineSettings for each platform (for now only one)

void ISerializationCallbackReceiver.OnAfterDeserialize()
{
// Modification of defaultFrameSettings in the inspector will call OnValidate().
// We do a copy of the settings to those effectively used
m_FrameSettings.CopyTo(m_FrameSettingsRuntime);
// This is called on load or when these settings are changed.
// When FrameSettings are manipulated we reset them to reflect the change, discarding all the debug window changes.
// Tag as dirty so frameSettings are correctly initialized at the next HDRenderPipeline.Render() call
m_frameSettingsIsDirty = true;
}
}
}

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/LayeredLit/LayeredLitData.hlsl (2 changes)


// Layered shader support SSS and Transmission features
surfaceData.materialFeatures = 0;
#ifdef _MATERIALFEATURE_SUBSURFACE_SCATTERING
#ifdef _MATERIAL_FEATURE_SUBSURFACE_SCATTERING
surfaceData.materialFeatures |= MATERIALFEATUREFLAGS_LIT_SUBSURFACE_SCATTERING;
#endif
#ifdef _MATERIAL_FEATURE_TRANSMISSION

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Lit/Lit.cs (2 changes)


// SSS
[SurfaceDataAttributes("Diffusion Profile")]
public int diffusionProfile;
public uint diffusionProfile;
[SurfaceDataAttributes("Subsurface Mask")]
public float subsurfaceMask;

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Lit/Lit.cs.hlsl (2 changes)


float3 normalWS;
float perceptualRoughness;
float coatMask;
int diffusionProfile;
uint diffusionProfile;
float subsurfaceMask;
float thickness;
bool useThickObjectMode;

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Lit/Lit.hlsl (297 changes)


#define DEFAULT_SPECULAR_VALUE 0.04
#define GBUFFER_LIT_SPECULAR_COLOR 15
#define GBUFFER_LIT_SSS_OR_TRANSMISSION 14
#define GBUFFER_LIT_IRIDESCENCE 13
#define GBUFFER_LIT_ANISOTROPIC_UPPER_BOUND 12
// Enum for materialFeatureId (only used for GBuffer encode/decode)
#define GBUFFER_LIT_STANDARD 0
#define GBUFFER_LIT_TRANSMISSION 1 // TODO
#define GBUFFER_LIT_TRANSMISSION_SSS 2
#define GBUFFER_LIT_ANISOTROPIC 3
#define GBUFFER_LIT_IRIDESCENCE 4 // TODO
#define CLEAR_COAT_IOR 1.5
#define CLEAR_COAT_IETA (1.0 / CLEAR_COAT_IOR) // IETA is the inverse eta which is the ratio of IOR of two interface

return ((featureFlags & flag) != 0);
}
float3 ComputeDiffuseColor(float3 baseColor, float metallic)
{
return baseColor * (1.0 - metallic);

}
// Assume that bsdfData.diffusionProfile is init
void FillMaterialSSS(float subsurfaceMask, inout BSDFData bsdfData)
void FillMaterialSSS(uint diffusionProfile, float subsurfaceMask, inout BSDFData bsdfData)
bsdfData.fresnel0 = _TransmissionTintsAndFresnel0[bsdfData.diffusionProfile].a;
bsdfData.diffusionProfile = diffusionProfile;
bsdfData.fresnel0 = _TransmissionTintsAndFresnel0[diffusionProfile].a;
void FillMaterialTransmission(float thickness, inout BSDFData bsdfData)
void FillMaterialTransmission(uint diffusionProfile, float thickness, inout BSDFData bsdfData)
int diffusionProfile = bsdfData.diffusionProfile;
bsdfData.diffusionProfile = diffusionProfile;
bsdfData.fresnel0 = _TransmissionTintsAndFresnel0[diffusionProfile].a;
bsdfData.thickness = _ThicknessRemaps[diffusionProfile].x + _ThicknessRemaps[diffusionProfile].y * thickness;
uint transmissionMode = BitFieldExtract(asuint(_TransmissionFlags), 2u * diffusionProfile, 2u);

bsdfData.normalWS = surfaceData.normalWS;
bsdfData.perceptualRoughness = PerceptualSmoothnessToPerceptualRoughness(surfaceData.perceptualSmoothness);
// There is no mettalic with SSS and specular color mode
// There is no metallic with SSS and specular color mode
// Always assign even if not used, DIFFUSION_PROFILE_NEUTRAL_ID is 0
bsdfData.diffusionProfile = surfaceData.diffusionProfile;
// Note: DIFFUSION_PROFILE_NEUTRAL_ID is 0
// However in practice we keep parity between deferred and forward, so we should contrain the various features.
// The UI is in charge of setuping the constrain not the code, so if users is forward only and want full power, it is easy to unleash by some UI change
// However in practice we keep parity between deferred and forward, so we should constrain the various features.
// The UI is in charge of setting up the constraint, not the code. So if a user is forward-only and wants full power, it is easy to unleash via some UI change.
// Modify fresnel0
FillMaterialSSS(surfaceData.subsurfaceMask, bsdfData);
// Assign profile id and overwrite fresnel0
FillMaterialSSS(surfaceData.diffusionProfile, surfaceData.subsurfaceMask, bsdfData);
FillMaterialTransmission(surfaceData.thickness, bsdfData);
// Assign profile id and overwrite fresnel0
FillMaterialTransmission(surfaceData.diffusionProfile, surfaceData.thickness, bsdfData);
}
if (HasFeatureFlag(surfaceData.materialFeatures, MATERIALFEATUREFLAGS_LIT_ANISOTROPY))

// conversion function for deferred
//-----------------------------------------------------------------------------
// GBuffer layout.
// GBuffer2 and GBuffer0.a interpretation depends on the material feature enabled.
//GBuffer0 RGBA8 sRGB GBuffer0 encodes baseColor and so is sRGB to save precision. Alpha is not affected.
//GBuffer1 R10B10G10A2
//GBuffer2 RGBA8
//GBuffer3 RGBA8
//FeatureName Standard
//GBuffer0 baseColor.r, baseColor.g, baseColor.b, specularOcclusion
//GBuffer1 perceptualRoughness, normal.x, normal.y, normal.sign
//GBuffer2 f0.r, f0.g, f0.b, featureID(3) / coatMask(5)
//GBuffer3 bakedDiffuseLighting.rgb
//FeatureName Subsurface Scattering + Transmission
//GBuffer0 baseColor.r, baseColor.g, baseColor.b, diffusionProfile(4) / subsurfaceMask(4)
//GBuffer1 perceptualRoughness, normal.x, normal.y, normal.sign
//GBuffer2 specularOcclusion, thickness, diffusionProfile(4) / subsurfaceMask(4), featureID(3) / coatMask(5)
//GBuffer3 bakedDiffuseLighting.rgb
//FeatureName Anisotropic
//GBuffer0 baseColor.r, baseColor.g, baseColor.b, specularOcclusion
//GBuffer1 perceptualRoughness, normal.x, normal.y, normal.sign
//GBuffer2 anisotropy, tangent.x, tangent.y(3) / metallic(5), featureID(3) / coatMask(5)
//GBuffer3 bakedDiffuseLighting.rgb
//FeatureName Irridescence
//GBuffer0 baseColor.r, baseColor.g, baseColor.b, specularOcclusion
//GBuffer1 perceptualRoughness, normal.x, normal.y, normal.sign
//GBuffer2 IOR, thickness, unused(3bit) / metallic(5), featureID(3) / coatMask(5)
//GBuffer3 bakedDiffuseLighting.rgb
// Note:
// For standard we have chosen to always encode fresnel0, even when we use the metal/baseColor parametrization. This avoids
// a compiler optimization problem that was using VGPRs to deal with the various combinations of metal / non-metal.
// For SSS, we move diffusionProfile(4) / subsurfaceMask(4) into GBuffer0.a so the forward SSS code only needs to write into one RT
// and the SSS postprocess only needs to read one RT.
// We duplicate diffusionProfile / subsurfaceMask in GBuffer2.b so the compiler doesn't need to read GBuffer0 before PostEvaluateBSDF.
// The lighting code has been adapted to only apply diffuseColor at the end.
// This saves VGPRs as we don't need to keep the GBuffer0 value in registers.
// The layout is also designed to only require one RT for the material classification. All the material feature flags are deduced from GBuffer2.
// Encode SurfaceData (BSDF parameters) into GBuffer
// Must be in sync with RT declared in HDRenderPipeline.cs ::Rebuild
void EncodeIntoGBuffer( SurfaceData surfaceData,

ApplyDebugToSurfaceData(surfaceData);
// RT0 - 8:8:8:8 sRGB
// Warning: the contents are later overwritten for Standard and SSS!
// We store perceptualRoughness instead of roughness because it saves a sqrt ALU when decoding
// (as we want both perceptualRoughness and roughness for the lighting due to Disney Diffuse model)
// Encode normal on 20bit with oct compression + 2bit of sign
// To have more precision encode the sign of xy in a separate uint
// To have better precision encode the sign of XY separately.
// We store perceptualRoughness instead of roughness because it is perceptually linear.
// mettalic will be store on 4 bit and store special value when not used
int metallic15 = int(surfaceData.metallic * (GBUFFER_LIT_ANISOTROPIC_UPPER_BOUND + 0.5)); // Remap to [0..12] range. 13, 14, 15 are special value
uint materialFeatureId;
// IMPORTANT: In case of a forward or gbuffer pass, materialFeatures is statically known at compile time, so the compiler can do compile-time optimization
// Currently the material features SpecularColor, Iridescence, SubsurfaceScattering/Transmission and Anisotropy are mutually exclusive due to GBuffer constraints
// The priority of features is handled in the code here and reflected in the UI (see LitUI.cs)
// Process SSS and Transmission together as they encode almost the same data, negligible cost
// TODO: split SSS and transmission.
metallic15 = GBUFFER_LIT_SSS_OR_TRANSMISSION;
// Special case: For SSS we will store the profile id and the subsurface radius at the location of the specular occlusion (in the alpha channel of GBuffer0)
// and we will move the specular occlusion into GBuffer2. This is an optimization for SSSSS and has no other side effect, as specular occlusion is always used
// during the lighting pass when the other buffers (GBuffer0, 1, 2) are read anyway.
materialFeatureId = GBUFFER_LIT_TRANSMISSION_SSS;
// For the SSS feature, the alpha channel is overwritten with (diffusionProfile | subsurfaceMask).
// It is done so that the SSS pass only has to read a single G-Buffer 0.
// We move specular occlusion to the red channel of the G-Buffer 2.
outGBuffer2.rgb = float3(surfaceData.specularOcclusion, surfaceData.thickness, HasFeatureFlag(surfaceData.materialFeatures, MATERIALFEATUREFLAGS_LIT_SUBSURFACE_SCATTERING) ? 1.0 : 0.0); // thickness for Transmission
// We duplicate the alpha channel of the G-Buffer 0 (for diffusion profile).
// It allows us to delay reading the G-Buffer 0 until the end of the deferred lighting shader.
outGBuffer2.rgb = float3(surfaceData.specularOcclusion, surfaceData.thickness, outGBuffer0.a);
else
else if (HasFeatureFlag(surfaceData.materialFeatures, MATERIALFEATUREFLAGS_LIT_ANISOTROPY))
{
materialFeatureId = GBUFFER_LIT_ANISOTROPIC;
// Reconstruct the default tangent frame.
float3x3 frame = GetLocalFrame(surfaceData.normalWS);
// Compute the rotation angle of the actual tangent frame with respect to the default one.
float sinFrame = dot(surfaceData.tangentWS, frame[1]);
float cosFrame = dot(surfaceData.tangentWS, frame[0]);
uint storeSin = abs(sinFrame) < abs(cosFrame) ? 4 : 0;
uint quadrant = ((sinFrame < 0) ? 1 : 0) | ((cosFrame < 0) ? 2 : 0);
// sin [and cos] are approximately linear up to [after] 45 degrees.
float sinOrCos = min(abs(sinFrame), abs(cosFrame)) * sqrt(2);
outGBuffer2.rgb = float3(surfaceData.anisotropy * 0.5 + 0.5,
sinOrCos,
PackFloatUInt8bit(surfaceData.metallic, storeSin | quadrant, 8));
}
else if (HasFeatureFlag(surfaceData.materialFeatures, MATERIALFEATUREFLAGS_LIT_IRIDESCENCE))
if (HasFeatureFlag(surfaceData.materialFeatures, MATERIALFEATUREFLAGS_LIT_SPECULAR_COLOR))
{
metallic15 = GBUFFER_LIT_SPECULAR_COLOR;
outGBuffer2.rgb = LinearToGamma20(surfaceData.specularColor);
}
else if (HasFeatureFlag(surfaceData.materialFeatures, MATERIALFEATUREFLAGS_LIT_ANISOTROPY))
{
// Reconstruct the default tangent frame.
float3x3 frame = GetLocalFrame(surfaceData.normalWS);
materialFeatureId = GBUFFER_LIT_IRIDESCENCE;
outGBuffer2.rgb = float3(0.0 /* TODO: IOR */, surfaceData.thicknessIrid,
PackFloatUInt8bit(surfaceData.metallic, 0, 8));
}
else // Standard
{
// In the case of standard or specular color we always uncompress before encoding, so decoding is more efficient (it allows better optimization for the compiler and saves VGPRs).
// This means that on the decode side, MATERIALFEATUREFLAGS_LIT_SPECULAR_COLOR doesn't exist anymore.
materialFeatureId = GBUFFER_LIT_STANDARD;
// Compute the rotation angle of the actual tangent frame with respect to the default one.
float sinFrame = dot(surfaceData.tangentWS, frame[1]);
float cosFrame = dot(surfaceData.tangentWS, frame[0]);
uint storeSin = abs(sinFrame) < abs(cosFrame) ? 4 : 0;
uint quadrant = ((sinFrame < 0) ? 1 : 0) | ((cosFrame < 0) ? 2 : 0);
float3 diffuseColor = surfaceData.baseColor;
float3 fresnel0 = surfaceData.specularColor;
outGBuffer2.rgb = float3(min(abs(sinFrame), abs(cosFrame)) * sqrt(2), PackByte(storeSin | quadrant), surfaceData.anisotropy * 0.5 + 0.5);
}
else if (HasFeatureFlag(surfaceData.materialFeatures, MATERIALFEATUREFLAGS_LIT_IRIDESCENCE))
if (!HasFeatureFlag(surfaceData.materialFeatures, MATERIALFEATUREFLAGS_LIT_SPECULAR_COLOR))
metallic15 = GBUFFER_LIT_IRIDESCENCE;
outGBuffer2.rgb = float3(0.0, surfaceData.thicknessIrid, 0.0);
// Convert from the metallic parametrization.
diffuseColor = ComputeDiffuseColor(surfaceData.baseColor, surfaceData.metallic);
fresnel0 = ComputeFresnel0(surfaceData.baseColor, surfaceData.metallic, DEFAULT_SPECULAR_VALUE);
else
{
// Caution: Neutral value for anisotropy is 0.5 not 0
outGBuffer2.rgb = float3(0.0, 0.0, 0.5);
}
outGBuffer0.rgb = diffuseColor; // sRGB RT
// outGBuffer2 is not sRGB, so use a fast encode/decode sRGB to keep precision
outGBuffer2.rgb = FastLinearToSRGB(fresnel0); // TODO: optimize
// Encode coatMask (4bit) / mettalic (4bit)
outGBuffer2.a = PackFloatInt8bit(HasFeatureFlag(surfaceData.materialFeatures, MATERIALFEATUREFLAGS_LIT_CLEAR_COAT) ? surfaceData.coatMask : 0.0, metallic15, 16.0);
// Ensure that surfaceData.coatMask is 0 if the feature is not enabled
float coatMask = HasFeatureFlag(surfaceData.materialFeatures, MATERIALFEATUREFLAGS_LIT_CLEAR_COAT) ? surfaceData.coatMask : 0.0;
outGBuffer2.a = PackFloatUInt8bit(coatMask, materialFeatureId, 8);
// Lighting: 11:11:10f
// RT3 - 11f:11f:10f
// Note that the return type is not part of the macro DECODE_FROM_GBUFFER, so it is safe to use the return value for our needs
// If you're not using the feature classification system, pass 0.
// If you're not using the feature classification system, pass UINT_MAX.
// Also, see the comment in TileVariantToFeatureFlags. When we are in the worst case (i.e. the last variant), we read the featureflags
// from the structured buffer used to generate the indirect draw call. It allows not going through all branches, and the branching is scalar (not VGPR)
// Note: we have ZERO_INITIALIZE the struct, so bsdfData.diffusionProfile == DIFFUSION_PROFILE_NEUTRAL_ID,
// bsdfData.anisotropy == 0, bsdfData.subsurfaceMask == 0, etc...
ZERO_INITIALIZE(BSDFData, bsdfData);
// Isolate material features.

GBufferType2 inGBuffer2 = LOAD_TEXTURE2D(_GBufferTexture2, positionSS);
GBufferType3 inGBuffer3 = LOAD_TEXTURE2D(_GBufferTexture3, positionSS);
// Init all material flags from Gbuffer2
// Material classification only uses the G-Buffer 2.
int metallic15;
UnpackFloatInt8bit(inGBuffer2.a, 16.0, coatMask, metallic15);
uint materialFeatureId;
UnpackFloatUInt8bit(inGBuffer2.a, 8, coatMask, materialFeatureId);
uint pixelFeatureFlags = MATERIALFEATUREFLAGS_LIT_STANDARD; // Only sky/background do not have the Standard material flag
bool pixelHasSpecularColor = (metallic15 == GBUFFER_LIT_SPECULAR_COLOR); // This is always a dynamic test as it is very cheap
bool pixelHasTransmission = (metallic15 == GBUFFER_LIT_SSS_OR_TRANSMISSION && inGBuffer2.g > 0); // Thickness > 0
bool pixelHasSubsurface = (metallic15 == GBUFFER_LIT_SSS_OR_TRANSMISSION && inGBuffer2.b > 0); // TagSSS > 0
bool pixelHasAnisotropy = (metallic15 <= GBUFFER_LIT_ANISOTROPIC_UPPER_BOUND && abs(inGBuffer2.b - 0.5) >= 1.0/255.0); // Anisotropy > 0
bool pixelHasIridescence = (metallic15 == GBUFFER_LIT_IRIDESCENCE);
bool pixelHasClearCoat = (coatMask > 0);
uint pixelFeatureFlags = MATERIALFEATUREFLAGS_LIT_STANDARD; // Only sky/background do not have the Standard flag.
bool pixelHasSubsurface = materialFeatureId == GBUFFER_LIT_TRANSMISSION_SSS;
bool pixelHasTransmission = materialFeatureId == GBUFFER_LIT_TRANSMISSION || pixelHasSubsurface;
bool pixelHasAnisotropy = materialFeatureId == GBUFFER_LIT_ANISOTROPIC;
bool pixelHasIridescence = materialFeatureId == GBUFFER_LIT_IRIDESCENCE;
bool pixelHasClearCoat = coatMask > 0;
pixelFeatureFlags |= tileFeatureFlags & (pixelHasSpecularColor ? MATERIALFEATUREFLAGS_LIT_SPECULAR_COLOR : 0);
pixelFeatureFlags |= tileFeatureFlags & (pixelHasTransmission ? MATERIALFEATUREFLAGS_LIT_TRANSMISSION : 0);
pixelFeatureFlags |= tileFeatureFlags & (pixelHasSubsurface ? MATERIALFEATUREFLAGS_LIT_SUBSURFACE_SCATTERING : 0);
pixelFeatureFlags |= tileFeatureFlags & (pixelHasAnisotropy ? MATERIALFEATUREFLAGS_LIT_ANISOTROPY : 0);
pixelFeatureFlags |= tileFeatureFlags & (pixelHasIridescence ? MATERIALFEATUREFLAGS_LIT_IRIDESCENCE : 0);
pixelFeatureFlags |= tileFeatureFlags & (pixelHasClearCoat ? MATERIALFEATUREFLAGS_LIT_CLEAR_COAT : 0);
pixelFeatureFlags |= tileFeatureFlags & (pixelHasSubsurface ? MATERIALFEATUREFLAGS_LIT_SUBSURFACE_SCATTERING : 0);
pixelFeatureFlags |= tileFeatureFlags & (pixelHasTransmission ? MATERIALFEATUREFLAGS_LIT_TRANSMISSION : 0);
pixelFeatureFlags |= tileFeatureFlags & (pixelHasAnisotropy ? MATERIALFEATUREFLAGS_LIT_ANISOTROPY : 0);
pixelFeatureFlags |= tileFeatureFlags & (pixelHasIridescence ? MATERIALFEATUREFLAGS_LIT_IRIDESCENCE : 0);
pixelFeatureFlags |= tileFeatureFlags & (pixelHasClearCoat ? MATERIALFEATUREFLAGS_LIT_CLEAR_COAT : 0);
// Start decompressing GBuffer
// Decompress feature-agnostic data from the G-Buffer.
bsdfData.specularOcclusion = inGBuffer0.a;
bsdfData.specularOcclusion = inGBuffer0.a; // Later possibly overwritten by SSS
bsdfData.perceptualRoughness = inGBuffer1.r;
float2 octNormalWS = inGBuffer1.gb;

bsdfData.normalWS = UnpackNormalOctRectEncode(octNormalWS);
// metallic15 is in the range [0..12] when metallic data is needed
bool pixelHasNoMetallic = HasFeatureFlag(pixelFeatureFlags, MATERIALFEATUREFLAGS_LIT_SPECULAR_COLOR | MATERIALFEATUREFLAGS_LIT_SUBSURFACE_SCATTERING | MATERIALFEATUREFLAGS_LIT_TRANSMISSION);
float metallic = pixelHasNoMetallic ? 0 : metallic15 * (1.0 / GBUFFER_LIT_ANISOTROPIC_UPPER_BOUND);
bsdfData.diffuseColor = ComputeDiffuseColor(baseColor, metallic);
bsdfData.fresnel0 = HasFeatureFlag(pixelFeatureFlags, MATERIALFEATUREFLAGS_LIT_SPECULAR_COLOR) ? Gamma20ToLinear(inGBuffer2.rgb) : ComputeFresnel0(baseColor, metallic, DEFAULT_SPECULAR_VALUE);
bakeDiffuseLighting = inGBuffer3.rgb;
// Decompress feature-specific data from the G-Buffer.
bool pixelHasMetallic = HasFeatureFlag(pixelFeatureFlags, MATERIALFEATUREFLAGS_LIT_ANISOTROPY | MATERIALFEATUREFLAGS_LIT_IRIDESCENCE);
if (pixelHasMetallic)
{
float metallic;
uint unused;
UnpackFloatUInt8bit(inGBuffer2.b, 8, metallic, unused);
// Always assign even if not used; DIFFUSION_PROFILE_NEUTRAL_ID is 0
// Note: we ZERO_INITIALIZE the struct, so bsdfData.diffusionProfile == DIFFUSION_PROFILE_NEUTRAL_ID, bsdfData.anisotropy == 0, bsdfData.subsurfaceMask == 0, etc.
bsdfData.diffuseColor = ComputeDiffuseColor(baseColor, metallic);
bsdfData.fresnel0 = ComputeFresnel0(baseColor, metallic, DEFAULT_SPECULAR_VALUE);
}
else
{
bsdfData.diffuseColor = baseColor;
bsdfData.fresnel0 = FastSRGBToLinear(inGBuffer2.rgb); // Later possibly overwritten by SSS
}
// Process SSS and Transmission together as they encode almost the same data
// First we must extract the diffusion profile
SSSData sssData;
// We don't need to do this call, see comment below
// DecodeFromSSSBuffer(inGBuffer0, positionSS, sssData);
// Overwrite the diffusion profile/subsurfaceMask extracted by DecodeFromSSSBuffer().
// We must do this so the compiler can push the read of G-Buffer 0 all the way to the end (in PostEvaluateBSDF).
// Note that we don't use sssData.subsurfaceMask here, but it is still assigned so the information is available in the
// material debug view, and in case we require it in the future.
UnpackFloatUInt8bit(inGBuffer2.b, 16, sssData.subsurfaceMask, sssData.diffusionProfile);
SSSData sssData;
DecodeFromSSSBuffer(inGBuffer0, positionSS, sssData);
bsdfData.diffusionProfile = sssData.diffusionProfile;
// Note: both functions assign the profile and overwrite fresnel0 (for both SSS and Transmission)
// in case one feature is enabled and not the other.
// Modify fresnel0
FillMaterialSSS(sssData.subsurfaceMask, bsdfData);
FillMaterialSSS(sssData.diffusionProfile, sssData.subsurfaceMask, bsdfData);
FillMaterialTransmission(inGBuffer2.g, bsdfData);
FillMaterialTransmission(sssData.diffusionProfile, inGBuffer2.g, bsdfData);
}
}

if (HasFeatureFlag(pixelFeatureFlags, MATERIALFEATUREFLAGS_LIT_ANISOTROPY))
{
anisotropy = inGBuffer2.b * 2.0 - 1.0;
anisotropy = inGBuffer2.r * 2.0 - 1.0;
float unused;
uint tangentFlags;
UnpackFloatUInt8bit(inGBuffer2.b, 8, unused, tangentFlags);
uint quadrant = UnpackByte(inGBuffer2.g);
uint storeSin = UnpackByte(inGBuffer2.g) & 4;
float absVal0 = inGBuffer2.r * rsqrt(2);
float absVal1 = sqrt(1 - absVal0 * absVal0);
float sinFrame = storeSin ? absVal0 : absVal1;
float cosFrame = storeSin ? absVal1 : absVal0;
uint quadrant = tangentFlags;
uint storeSin = tangentFlags & 4;
float sinOrCos = inGBuffer2.g * rsqrt(2);
float cosOrSin = sqrt(1 - sinOrCos * sinOrCos);
float sinFrame = storeSin ? sinOrCos : cosOrSin;
float cosFrame = storeSin ? cosOrSin : sinOrCos;
sinFrame = (quadrant & 1) ? -sinFrame : sinFrame;
cosFrame = (quadrant & 2) ? -cosFrame : cosFrame;

// perceptualRoughness is not clamped, and is meant to be used for IBL.
// perceptualRoughness can be modified by FillMaterialClearCoatData, so ConvertAnisotropyToClampRoughness must be called after it
ConvertAnisotropyToClampRoughness(bsdfData.perceptualRoughness, bsdfData.anisotropy, bsdfData.roughnessT, bsdfData.roughnessB);
bakeDiffuseLighting = inGBuffer3.rgb;
return pixelFeatureFlags;
}
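A minimal C# sketch (illustrative names, not the actual HDRP helpers) of the tangent-frame trick decoded in the anisotropy branch above: the encoder stores the smaller of |sin| and |cos| rescaled by sqrt(2), plus a flag saying which one was stored and two sign bits; the decoder rebuilds the other value with sqrt(1 - x*x), mirroring what the sinOrCos/cosOrSin lines do.

using System;

static class TangentFramePacking
{
    // Hypothetical encode: returns (stored value in [0,1], flags).
    // flags bit0 = sin sign, bit1 = cos sign, bit2 = "stored value is |sin|".
    public static (float stored, int flags) Encode(float angleRadians)
    {
        float s = MathF.Sin(angleRadians), c = MathF.Cos(angleRadians);
        bool storeSin = MathF.Abs(s) < MathF.Abs(c);                              // keep the smaller magnitude
        float stored = MathF.Min(MathF.Abs(s), MathF.Abs(c)) * MathF.Sqrt(2.0f);  // rescale [0, 1/sqrt2] -> [0, 1]
        int flags = (s < 0 ? 1 : 0) | (c < 0 ? 2 : 0) | (storeSin ? 4 : 0);
        return (stored, flags);
    }

    // Decode mirrors the G-Buffer path: rebuild the other trig value from sqrt(1 - x*x),
    // then restore the signs from the quadrant bits.
    public static (float sin, float cos) Decode(float stored, int flags)
    {
        float small = stored / MathF.Sqrt(2.0f);
        float large = MathF.Sqrt(1.0f - small * small);
        bool storeSin = (flags & 4) != 0;
        float s = storeSin ? small : large;
        float c = storeSin ? large : small;
        if ((flags & 1) != 0) s = -s;
        if ((flags & 2) != 0) c = -c;
        return (s, c);
    }
}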

bool PixelHasSubsurfaceScattering(BSDFData bsdfData)
{
return bsdfData.subsurfaceMask != 0 && HasFeatureFlag(bsdfData.materialFeatures, MATERIALFEATUREFLAGS_LIT_SUBSURFACE_SCATTERING);
return bsdfData.diffusionProfile != DIFFUSION_PROFILE_NEUTRAL_ID && bsdfData.subsurfaceMask != 0 && HasFeatureFlag(bsdfData.materialFeatures, MATERIALFEATUREFLAGS_LIT_SUBSURFACE_SCATTERING);
}
//-----------------------------------------------------------------------------

6
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/SubsurfaceScattering/SubsurfaceScattering.hlsl


{
float3 diffuseColor;
float subsurfaceMask;
int diffusionProfile;
uint diffusionProfile;
};
#define SSSBufferType0 float4

// Note: The SSS buffer used here is sRGB
void EncodeIntoSSSBuffer(SSSData sssData, uint2 positionSS, out SSSBufferType0 outSSSBuffer0)
{
outSSSBuffer0 = float4(sssData.diffuseColor, PackFloatInt8bit(sssData.subsurfaceMask, sssData.diffusionProfile, 16.0));
outSSSBuffer0 = float4(sssData.diffuseColor, PackFloatUInt8bit(sssData.subsurfaceMask, sssData.diffusionProfile, 16));
}
// Note: The SSS buffer used here is sRGB

UnpackFloatInt8bit(sssBuffer.a, 16.0, sssData.subsurfaceMask, sssData.diffusionProfile);
UnpackFloatUInt8bit(sssBuffer.a, 16, sssData.subsurfaceMask, sssData.diffusionProfile);
}
void DecodeFromSSSBuffer(uint2 positionSS, out SSSData sssData)
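PackFloatUInt8bit/UnpackFloatUInt8bit above share a single 8-bit channel between a [0,1] float and a small integer. A rough C# sketch of that kind of split, assuming maxi is a power of two between 2 and 128 and quantizing the float to the remaining levels (this is the idea only, not the exact CoreRP packing math):

using System;

static class EightBitPacking
{
    // Packs a [0,1] float and an integer in [0, maxi-1] into one byte:
    // the low bits hold the integer, the high bits hold the quantized float.
    public static byte PackFloatUInt8bit(float f, uint i, uint maxi)
    {
        uint floatLevels = 256 / maxi;                                        // e.g. maxi = 16 -> 16 float levels
        uint q = (uint)MathF.Round(Math.Clamp(f, 0f, 1f) * (floatLevels - 1));
        return (byte)(q * maxi + i);
    }

    public static (float f, uint i) UnpackFloatUInt8bit(byte packed, uint maxi)
    {
        uint floatLevels = 256 / maxi;
        uint i = (uint)(packed % maxi);                                       // integer part (e.g. diffusion profile)
        float f = (packed / maxi) / (float)(floatLevels - 1);                 // float part (e.g. subsurface mask)
        return (f, i);
    }
}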

4
ScriptableRenderPipeline/HDRenderPipeline/HDRP/RenderPipeline/FrameSettings.cs


public bool enablePostprocess = true;
public bool enableStereo = true;
public bool enableAsyncCompute = false;
public bool enableAsyncCompute = true;
public bool enableOpaqueObjects = true;
public bool enableTransparentObjects = true;

// Force forward if we request stereo. TODO: We should not enforce that; users should be able to choose deferred
aggregate.enableForwardRenderingOnly = aggregate.enableForwardRenderingOnly || aggregate.enableStereo;
aggregate.enableAsyncCompute = srcFrameSettings.enableAsyncCompute && renderPipelineSettings.supportAsyncCompute;
aggregate.enableAsyncCompute = srcFrameSettings.enableAsyncCompute && SystemInfo.supportsAsyncCompute;
aggregate.enableOpaqueObjects = srcFrameSettings.enableOpaqueObjects;
aggregate.enableTransparentObjects = srcFrameSettings.enableTransparentObjects;

1
ScriptableRenderPipeline/HDRenderPipeline/HDRP/RenderPipeline/RenderPipelineSettings.cs


// Engine
public bool supportDBuffer = false;
public bool supportMSAA = false;
public bool supportAsyncCompute = false;
public GlobalLightLoopSettings lightLoopSettings = new GlobalLightLoopSettings();
public ShadowInitParameters shadowInitParams = new ShadowInitParameters();

2
ScriptableRenderPipeline/HDRenderPipeline/HDRP/ShaderPass/ShaderPassForward.hlsl


GetVaryingsDataDebug(_DebugViewMaterial, input, result, needLinearToSRGB);
GetBuiltinDataDebug(_DebugViewMaterial, builtinData, result, needLinearToSRGB);
GetSurfaceDataDebug(_DebugViewMaterial, surfaceData, result, needLinearToSRGB);
GetBSDFDataDebug(_DebugViewMaterial, bsdfData, result, needLinearToSRGB); // TODO: This requires initializing all fields of BSDFData...
GetBSDFDataDebug(_DebugViewMaterial, bsdfData, result, needLinearToSRGB);
// TEMP!
// For now, the final blit in the backbuffer performs an sRGB write

3
ScriptableRenderPipeline/HDRenderPipeline/HDRP/ShaderPass/ShaderPassForwardUnlit.hlsl


GetPropertiesDataDebug(_DebugViewMaterial, result, needLinearToSRGB);
GetVaryingsDataDebug(_DebugViewMaterial, input, result, needLinearToSRGB);
GetBuiltinDataDebug(_DebugViewMaterial, builtinData, result, needLinearToSRGB);
GetBSDFDataDebug(_DebugViewMaterial, bsdfData, result, needLinearToSRGB); // TODO: This requires initializing all fields of BSDFData...
GetSurfaceDataDebug(_DebugViewMaterial, surfaceData, result, needLinearToSRGB);
GetBSDFDataDebug(_DebugViewMaterial, bsdfData, result, needLinearToSRGB);
// TEMP!
// For now, the final blit in the backbuffer performs an sRGB write

4
ScriptableRenderPipeline/HDRenderPipeline/HDRP/ShaderPass/VaryingMesh.hlsl


float4 interpolators5 : TEXCOORD5;
#endif
UNITY_VERTEX_INPUT_INSTANCE_ID // Must be declared before FRONT_FACE_SEMANTIC
UNITY_VERTEX_INPUT_INSTANCE_ID
};
// Functions to pack data so as to use as few interpolators as possible; the ShaderGraph should generate these functions

7
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Sky/ProceduralSky/Resources/ProceduralSky.shader


float getMiePhase(float eyeCos, float eyeCos2)
{
float temp = 1.0 + MIE_G2 - 2.0 * MIE_G * eyeCos;
temp = pow(max(temp, 0), pow(_SunSize,0.65) * 10);
temp = PositivePow(temp, PositivePow(_SunSize, 0.65) * 10);
#if defined(UNITY_COLORSPACE_GAMMA) && SKYBOX_COLOR_IN_TARGET_COLOR_SPACE
temp = pow(temp, .454545);
#endif
return temp;
}

float3 cameraPos = float3(0,kInnerRadius + kCameraHeight,0); // The camera's current position
// Get the ray from the camera to the vertex and its length (which is the far point of the ray passing through the atmosphere)
float3 eyeRay = dir; // normalize(mul((float3x3)unity_ObjectToWorld, v.vertex.xyz));
float3 eyeRay = dir; // normalize(mul((float3x3)UNITY_MATRIX_M, v.vertex.xyz));
float far = 0.0;
float3 cIn = float3(0.0, 0.0, 0.0);

4
ScriptableRenderPipeline/HDRenderPipeline/package.json


{
"name": "com.unity.render-pipelines.high-definition",
"description": "HD Render Pipeline for Unity.",
"version": "0.1.24",
"version": "0.1.25",
"com.unity.render-pipelines.core": "0.1.24"
"com.unity.render-pipelines.core": "0.1.25"
}
}

110
ScriptableRenderPipeline/LightweightPipeline/LWRP/Data/LightweightPipelineAsset.cs


_8x = 8
}
public enum DefaultMaterialType
{
Standard = 0,
Particle,
Terrain,
UnityBuiltinDefault
}
private Shader m_DefaultShader;
[SerializeField] private float kAssetVersion = 1.0f;
[SerializeField] private int kAssetVersion = 2;
[SerializeField] private int m_MaxPixelLights = 4;
[SerializeField] private bool m_SupportsVertexLight = false;
[SerializeField] private bool m_RequireDepthTexture = false;

[SerializeField] private Vector3 m_Cascade4Split = new Vector3(0.067f, 0.2f, 0.467f);
// Resources
[SerializeField] private Shader m_DefaultShader;
[SerializeField] private LightweightPipelineResource m_ResourceAsset;
private LightweightPipelineResource m_ResourceAsset;
[MenuItem("Assets/Create/Render Pipeline/Lightweight/Pipeline Asset", priority = CoreUtils.assetCreateMenuPriority1)]
static void CreateLightweightPipeline()

public override void Action(int instanceId, string pathName, string resourceFile)
{
var instance = CreateInstance<LightweightPipelineAsset>();
string[] guids = AssetDatabase.FindAssets("LightweightPipelineResource t:scriptableobject", m_SearchPaths);
LightweightPipelineResource resourceAsset = null;
foreach (string guid in guids)
{
string path = AssetDatabase.GUIDToAssetPath(guid);
resourceAsset = AssetDatabase.LoadAssetAtPath<LightweightPipelineResource>(path);
if (resourceAsset != null)
break;
}
// There's currently an issue that prevents FindAssets from finding resources within the package folder.
if (resourceAsset == null)
{
string path = m_SearchPaths[PACKAGE_MANAGER_PATH_INDEX] + "/LWRP/Data/LightweightPipelineResource.asset";
resourceAsset = AssetDatabase.LoadAssetAtPath<LightweightPipelineResource>(path);
}
instance.m_ResourceAsset = resourceAsset;
instance.m_DefaultShader = Shader.Find(LightweightShaderUtils.GetShaderPath(ShaderPathID.STANDARD_PBS));
instance.m_BlitShader = Shader.Find(LightweightShaderUtils.GetShaderPath(ShaderPathID.HIDDEN_BLIT));
instance.m_CopyDepthShader = Shader.Find(LightweightShaderUtils.GetShaderPath(ShaderPathID.HIDDEN_DEPTH_COPY));

private void LoadResourceFile()
{
string[] guids = AssetDatabase.FindAssets("LightweightPipelineResource t:scriptableobject", m_SearchPaths);
LightweightPipelineResource resourceAsset = null;
foreach (string guid in guids)
{
string path = AssetDatabase.GUIDToAssetPath(guid);
m_ResourceAsset = AssetDatabase.LoadAssetAtPath<LightweightPipelineResource>(path);
if (m_ResourceAsset != null)
break;
}
// There's currently an issue that prevents FindAssets from finding resources within the package folder.
if (m_ResourceAsset == null)
{
string path = m_SearchPaths[PACKAGE_MANAGER_PATH_INDEX] + "/LWRP/Data/LightweightPipelineResource.asset";
m_ResourceAsset = AssetDatabase.LoadAssetAtPath<LightweightPipelineResource>(path);
}
}
#endif
protected override IRenderPipeline InternalCreatePipeline()

DestroyCreatedInstances();
}
private Material GetMaterial(DefaultMaterialType materialType)
{
#if UNITY_EDITOR
if (m_ResourceAsset == null)
LoadResourceFile();
switch (materialType)
{
case DefaultMaterialType.Standard:
return m_ResourceAsset.DefaultMaterial;
case DefaultMaterialType.Particle:
return m_ResourceAsset.DefaultParticleMaterial;
case DefaultMaterialType.Terrain:
return m_ResourceAsset.DefaultTerrainMaterial;
// Unity Builtin Default
default:
return null;
}
#else
return null;
#endif
}
public bool AreShadowsEnabled()
{
return ShadowSetting != ShadowType.NO_SHADOW;

public override Material GetDefaultMaterial()
{
#if UNITY_EDITOR
if (m_ResourceAsset != null)
return m_ResourceAsset.DefaultMaterial;
#endif
return null;
return GetMaterial(DefaultMaterialType.Standard);
#if UNITY_EDITOR
if (m_ResourceAsset != null)
return m_ResourceAsset.DefaultParticleMaterial;
#endif
return null;
return GetMaterial(DefaultMaterialType.Particle);
return null;
return GetMaterial(DefaultMaterialType.UnityBuiltinDefault);
#if UNITY_EDITOR
if (m_ResourceAsset != null)
return m_ResourceAsset.DefaultTerrainMaterial;
#endif
return null;
return GetMaterial(DefaultMaterialType.Terrain);
return null;
return GetMaterial(DefaultMaterialType.UnityBuiltinDefault);
return null;
return GetMaterial(DefaultMaterialType.UnityBuiltinDefault);
return null;
return GetMaterial(DefaultMaterialType.UnityBuiltinDefault);
return null;
return GetMaterial(DefaultMaterialType.UnityBuiltinDefault);
if (m_DefaultShader == null)
m_DefaultShader = Shader.Find(LightweightShaderUtils.GetShaderPath(ShaderPathID.STANDARD_PBS));
return m_DefaultShader;
}

13
ScriptableRenderPipeline/LightweightPipeline/LWRP/LightweightConstantBuffer.cs


public static int _AdditionalLightSpotDir;
public static int _AdditionalLightSpotAttenuation;
}
public static class ShadowConstantBuffer
{
public static int _WorldToShadow;
public static int _ShadowData;
public static int _DirShadowSplitSpheres;
public static int _DirShadowSplitSphereRadii;
public static int _ShadowOffset0;
public static int _ShadowOffset1;
public static int _ShadowOffset2;
public static int _ShadowOffset3;
public static int _ShadowmapSize;
}
}

124
ScriptableRenderPipeline/LightweightPipeline/LWRP/LightweightPipeline.cs


private bool m_IsOffscreenCamera;
private Vector4 kDefaultLightPosition = new Vector4(0.0f, 0.0f, 1.0f, 0.0f);
private Vector4 kDefaultLightColor = Color.black;
private Vector4 kDefaultLightAttenuation = new Vector4(0.0f, 1.0f, 0.0f, 1.0f);
private Vector4 kDefaultLightSpotDirection = new Vector4(0.0f, 0.0f, 1.0f, 0.0f);
private Vector4 kDefaultLightSpotAttenuation = new Vector4(0.0f, 1.0f, 0.0f, 0.0f);
private Vector4[] m_LightPositions = new Vector4[kMaxVisibleLights];
private Vector4[] m_LightColors = new Vector4[kMaxVisibleLights];
private Vector4[] m_LightDistanceAttenuations = new Vector4[kMaxVisibleLights];

PerCameraBuffer._AdditionalLightDistanceAttenuation = Shader.PropertyToID("_AdditionalLightDistanceAttenuation");
PerCameraBuffer._AdditionalLightSpotDir = Shader.PropertyToID("_AdditionalLightSpotDir");
PerCameraBuffer._AdditionalLightSpotAttenuation = Shader.PropertyToID("_AdditionalLightSpotAttenuation");
ShadowConstantBuffer._WorldToShadow = Shader.PropertyToID("_WorldToShadow");
ShadowConstantBuffer._ShadowData = Shader.PropertyToID("_ShadowData");
ShadowConstantBuffer._DirShadowSplitSpheres = Shader.PropertyToID("_DirShadowSplitSpheres");
ShadowConstantBuffer._DirShadowSplitSphereRadii = Shader.PropertyToID("_DirShadowSplitSphereRadii");
ShadowConstantBuffer._ShadowOffset0 = Shader.PropertyToID("_ShadowOffset0");
ShadowConstantBuffer._ShadowOffset1 = Shader.PropertyToID("_ShadowOffset1");
ShadowConstantBuffer._ShadowOffset2 = Shader.PropertyToID("_ShadowOffset2");
ShadowConstantBuffer._ShadowOffset3 = Shader.PropertyToID("_ShadowOffset3");
ShadowConstantBuffer._ShadowmapSize = Shader.PropertyToID("_ShadowmapSize");
m_ShadowMapRTID = Shader.PropertyToID("_ShadowMap");

// If we have a main light we don't shade it in the per-object light loop. We also remove it from the per-object cull list
int mainLightPresent = (lightData.mainLightIndex >= 0) ? 1 : 0;
int additionalPixelLightsCount = visibleLightsCount - mainLightPresent;
int vertexLightCount = (m_Asset.SupportsVertexLight) ? Math.Min(visibleLights.Count, kMaxPerObjectLights) - additionalPixelLightsCount : 0;
int vertexLightCount = (m_Asset.SupportsVertexLight) ? Math.Min(visibleLights.Count, kMaxPerObjectLights) - additionalPixelLightsCount - mainLightPresent : 0;
vertexLightCount = Math.Min(vertexLightCount, kMaxVertexLights);
lightData.pixelAdditionalLightsCount = additionalPixelLightsCount;
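A hedged walk-through of the light-count bookkeeping above with made-up numbers (the caps used here are illustrative, not the asset's actual limits):

static void CountLightsExample()
{
    int totalVisibleLights = 6;     // lights returned by culling
    int visibleLightsCount = 4;     // per-pixel lights selected (already clamped by the asset)
    int mainLightPresent   = 1;     // the main directional light is shaded outside the per-object loop
    const int kMaxPerObjectLights = 8, kMaxVertexLights = 4;

    int additionalPixelLightsCount = visibleLightsCount - mainLightPresent;             // 3 per-pixel additional lights
    int vertexLightCount = System.Math.Min(totalVisibleLights, kMaxPerObjectLights)
                           - additionalPixelLightsCount - mainLightPresent;             // 6 - 3 - 1 = 2 vertex lights
    vertexLightCount = System.Math.Min(vertexLightCount, kMaxVertexLights);             // still 2
}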

private void InitializeLightConstants(List<VisibleLight> lights, int lightIndex, out Vector4 lightPos, out Vector4 lightColor, out Vector4 lightDistanceAttenuation, out Vector4 lightSpotDir,
out Vector4 lightSpotAttenuation)
{
float directContributionNotBaked = 1.0f;
lightPos = new Vector4(0.0f, 0.0f, 1.0f, 0.0f);
lightColor = Color.black;
lightDistanceAttenuation = new Vector4(0.0f, 1.0f, 0.0f, directContributionNotBaked);
lightSpotDir = new Vector4(0.0f, 0.0f, 1.0f, 0.0f);
lightSpotAttenuation = new Vector4(0.0f, 1.0f, 0.0f, 0.0f);
lightPos = kDefaultLightPosition;
lightColor = kDefaultLightColor;
lightDistanceAttenuation = kDefaultLightSpotAttenuation;
lightSpotDir = kDefaultLightSpotDirection;
lightSpotAttenuation = kDefaultLightAttenuation;
// When no lights are visible, the main light index will be set to -1.
// In this case we initialize it to default values and return.

float oneOverFadeRangeSqr = 1.0f / fadeRangeSqr;
float lightRangeSqrOverFadeRangeSqr = -lightRangeSqr / fadeRangeSqr;
float quadAtten = 25.0f / lightRangeSqr;
lightDistanceAttenuation = new Vector4(quadAtten, oneOverFadeRangeSqr, lightRangeSqrOverFadeRangeSqr, directContributionNotBaked);
lightDistanceAttenuation = new Vector4(quadAtten, oneOverFadeRangeSqr, lightRangeSqrOverFadeRangeSqr, 1.0f);
}
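A short C# sketch of why the 25.0f constant above works (illustrative only; the smooth-fade terms packed in y and z are not shown):

// With atten(d) = 1 / (1 + quadAtten * d^2) and quadAtten = 25 / range^2, the light falls
// to 1/26 (~0.04) exactly at d = range, so it is effectively off at the range boundary
// without a hard cutoff.
static float PunctualAttenuation(float distance, float range)
{
    float quadAtten = 25.0f / (range * range);
    return 1.0f / (1.0f + quadAtten * distance * distance);
}
// PunctualAttenuation(0f, 10f)  == 1.0
// PunctualAttenuation(5f, 10f)  ~= 0.14
// PunctualAttenuation(10f, 10f) ~= 0.038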
if (lightData.lightType == LightType.Spot)

if (lightData.shadowMapSampleType != LightShadows.None)
SetupShadowReceiverConstants(cmd, lights[lightData.mainLightIndex]);
if (lightData.totalAdditionalLightsCount > 0)
SetupAdditionalListConstants(cmd, lights, ref lightData);
SetupAdditionalListConstants(cmd, lights, ref lightData);
}
private void SetupMainLightConstants(CommandBuffer cmd, List<VisibleLight> lights, int lightIndex)

{
int additionalLightIndex = 0;
// We need to update the per-object light list with the proper mapping into our global additional light buffer
// First we initialize all lights in the map to -1 to tell the system to discard the main light index and any
// remaining lights in the scene that don't fit in the max additional light buffer (kMaxVisibileAdditionalLights)
int[] perObjectLightIndexMap = m_CullResults.GetLightIndexMap();
for (int i = 0; i < lights.Count; ++i)
perObjectLightIndexMap[i] = -1;
for (int i = 0; i < lights.Count && additionalLightIndex < kMaxVisibleLights; ++i)
if (lightData.totalAdditionalLightsCount > 0)
if (i != lightData.mainLightIndex)
// We need to update the per-object light list with the proper mapping into our global additional light buffer
// First we initialize all lights in the map to -1 to tell the system to discard the main light index and any
// remaining lights in the scene that don't fit in the max additional light buffer (kMaxVisibileAdditionalLights)
int[] perObjectLightIndexMap = m_CullResults.GetLightIndexMap();
for (int i = 0; i < lights.Count; ++i)
perObjectLightIndexMap[i] = -1;
for (int i = 0; i < lights.Count && additionalLightIndex < kMaxVisibleLights; ++i)
// The engine performs per-object light culling and initializes 8 light indices into two vec4 constants, unity_4LightIndices0 and unity_4LightIndices1.
// In the shader we iterate over each visible light using the indices provided in these constants to index our global light buffer,
// e.g. the first light position would be m_LightPositions[unity_4LightIndices[0]];
if (i != lightData.mainLightIndex)
{
// The engine performs per-object light culling and initializes 8 light indices into two vec4 constants, unity_4LightIndices0 and unity_4LightIndices1.
// In the shader we iterate over each visible light using the indices provided in these constants to index our global light buffer,
// e.g. the first light position would be m_LightPositions[unity_4LightIndices[0]];
// However, since we sorted the lights, we need to tell the engine how to map the original/unsorted indices to our global buffer.
// We do it by setting the perObjectLightIndexMap to the appropriate additionalLightIndex.
perObjectLightIndexMap[GetLightUnsortedIndex(i)] = additionalLightIndex;
InitializeLightConstants(lights, i, out m_LightPositions[additionalLightIndex],
out m_LightColors[additionalLightIndex],
out m_LightDistanceAttenuations[additionalLightIndex],
out m_LightSpotDirections[additionalLightIndex],
out m_LightSpotAttenuations[additionalLightIndex]);
additionalLightIndex++;
// However, since we sorted the lights, we need to tell the engine how to map the original/unsorted indices to our global buffer.
// We do it by setting the perObjectLightIndexMap to the appropriate additionalLightIndex.
perObjectLightIndexMap[GetLightUnsortedIndex(i)] = additionalLightIndex;
InitializeLightConstants(lights, i, out m_LightPositions[additionalLightIndex],
out m_LightColors[additionalLightIndex],
out m_LightDistanceAttenuations[additionalLightIndex],
out m_LightSpotDirections[additionalLightIndex],
out m_LightSpotAttenuations[additionalLightIndex]);
additionalLightIndex++;
}
m_CullResults.SetLightIndexMap(perObjectLightIndexMap);
cmd.SetGlobalVector(PerCameraBuffer._AdditionalLightCount, new Vector4(lightData.pixelAdditionalLightsCount,
lightData.totalAdditionalLightsCount, 0.0f, 0.0f));
m_CullResults.SetLightIndexMap(perObjectLightIndexMap);
else
{
cmd.SetGlobalVector(PerCameraBuffer._AdditionalLightCount, Vector4.zero);
cmd.SetGlobalVector(PerCameraBuffer._AdditionalLightCount, new Vector4(lightData.pixelAdditionalLightsCount,
lightData.totalAdditionalLightsCount, 0.0f, 0.0f));
// Clear all light constant data to defaults
for (int i = 0; i < kMaxVisibleLights; ++i)
InitializeLightConstants(lights, -1, out m_LightPositions[additionalLightIndex],
out m_LightColors[additionalLightIndex],
out m_LightDistanceAttenuations[additionalLightIndex],
out m_LightSpotDirections[additionalLightIndex],
out m_LightSpotAttenuations[additionalLightIndex]);
}
cmd.SetGlobalVectorArray(PerCameraBuffer._AdditionalLightPosition, m_LightPositions);
cmd.SetGlobalVectorArray(PerCameraBuffer._AdditionalLightColor, m_LightColors);
cmd.SetGlobalVectorArray(PerCameraBuffer._AdditionalLightDistanceAttenuation, m_LightDistanceAttenuations);

// Scale bias by the cascade's world-space depth range.
// Directional shadow lights use an orthographic projection.
// proj.m22 = -2 / (far - near) since the projection's depth range is [-1.0, 1.0].
// Therefore we scale it by 0.5. We keep the negative sign and only flip it in case z is
// reversed.
// To be strictly correct we should multiply the bias by 0.5, but doing so makes aliasing along cascade borders more visible.
bias = light.shadowBias * proj.m22 * 0.5f * sign;
bias = light.shadowBias * proj.m22 * sign;
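To make the comment above concrete, a small illustrative C# helper (assumed numbers, mirroring the assignment above): for a cascade whose orthographic projection spans 100 units of depth, m22 = -0.02, so a shadowBias of 0.05 becomes -0.001 in clip-space depth.

// Illustrative only: how a world-space shadow bias maps into clip-space depth for a directional
// cascade (orthographic projection over depthRange units, clip depth in [-1, 1]).
static float ClipSpaceShadowBias(float shadowBias, float depthRange, bool reversedZ)
{
    float m22  = -2.0f / depthRange;        // projection's depth scale, see comment above
    float sign = reversedZ ? -1.0f : 1.0f;  // keep the negative sign, flip only for reversed Z
    return shadowBias * m22 * sign;         // e.g. 0.05 * (-0.02) = -0.001 for a 100 unit range
}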
// Currently only square POT cascades resolutions are used.
// We scale normalBias

private void SetupShadowReceiverConstants(CommandBuffer cmd, VisibleLight shadowLight)
{
Light light = shadowLight.light;
float shadowResolution = m_ShadowSlices[0].shadowResolution;
int cascadeCount = m_ShadowCasterCascadesCount;
for (int i = 0; i < kMaxCascades; ++i)

noOpShadowMatrix.m33 = (SystemInfo.usesReversedZBuffer) ? 1.0f : 0.0f;
m_ShadowMatrices[kMaxCascades] = noOpShadowMatrix;
float invShadowResolution = 0.5f / shadowResolution;
cmd.SetGlobalMatrixArray("_WorldToShadow", m_ShadowMatrices);
cmd.SetGlobalVector("_ShadowData", new Vector4(light.shadowStrength, 0.0f, 0.0f, 0.0f));
cmd.SetGlobalVectorArray("_DirShadowSplitSpheres", m_DirectionalShadowSplitDistances);
cmd.SetGlobalVector("_DirShadowSplitSphereRadii", m_DirectionalShadowSplitRadii);
cmd.SetGlobalVector("_ShadowOffset0", new Vector4(-invShadowResolution, -invShadowResolution, 0.0f, 0.0f));
cmd.SetGlobalVector("_ShadowOffset1", new Vector4(invShadowResolution, -invShadowResolution, 0.0f, 0.0f));
cmd.SetGlobalVector("_ShadowOffset2", new Vector4(-invShadowResolution, invShadowResolution, 0.0f, 0.0f));
cmd.SetGlobalVector("_ShadowOffset3", new Vector4(invShadowResolution, invShadowResolution, 0.0f, 0.0f));
float invShadowResolution = 1.0f / m_Asset.ShadowAtlasResolution;
float invHalfShadowResolution = 0.5f * invShadowResolution;
cmd.SetGlobalMatrixArray(ShadowConstantBuffer._WorldToShadow, m_ShadowMatrices);
cmd.SetGlobalVector(ShadowConstantBuffer._ShadowData, new Vector4(light.shadowStrength, 0.0f, 0.0f, 0.0f));
cmd.SetGlobalVectorArray(ShadowConstantBuffer._DirShadowSplitSpheres, m_DirectionalShadowSplitDistances);
cmd.SetGlobalVector(ShadowConstantBuffer._DirShadowSplitSphereRadii, m_DirectionalShadowSplitRadii);
cmd.SetGlobalVector(ShadowConstantBuffer._ShadowOffset0, new Vector4(-invHalfShadowResolution, -invHalfShadowResolution, 0.0f, 0.0f));
cmd.SetGlobalVector(ShadowConstantBuffer._ShadowOffset1, new Vector4( invHalfShadowResolution, -invHalfShadowResolution, 0.0f, 0.0f));
cmd.SetGlobalVector(ShadowConstantBuffer._ShadowOffset2, new Vector4(-invHalfShadowResolution, invHalfShadowResolution, 0.0f, 0.0f));
cmd.SetGlobalVector(ShadowConstantBuffer._ShadowOffset3, new Vector4( invHalfShadowResolution, invHalfShadowResolution, 0.0f, 0.0f));
cmd.SetGlobalVector(ShadowConstantBuffer._ShadowmapSize, new Vector4(invShadowResolution, invShadowResolution, m_Asset.ShadowAtlasResolution, m_Asset.ShadowAtlasResolution));
}
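For reference, the receiver constants above reduce to texel-size math; a small sketch assuming a 2048x2048 atlas (the actual resolution comes from the asset): _ShadowmapSize packs (1/width, 1/height, width, height) and the four offsets are half-texel diagonals used by the mobile 4-tap path.

// Assumed 2048x2048 shadow atlas; mirrors the constants set just above.
static void ShadowTexelConstantsExample()
{
    const float atlasResolution = 2048.0f;
    float invShadowResolution     = 1.0f / atlasResolution;       // 0.00048828125 (one texel in UV space)
    float invHalfShadowResolution = 0.5f * invShadowResolution;   // half a texel: the 4 taps straddle texel centers

    Vector4 shadowmapSize = new Vector4(invShadowResolution, invShadowResolution, atlasResolution, atlasResolution);
    Vector4 offset0 = new Vector4(-invHalfShadowResolution, -invHalfShadowResolution, 0f, 0f); // lower-left tap
    Vector4 offset3 = new Vector4( invHalfShadowResolution,  invHalfShadowResolution, 0f, 0f); // upper-right tap
}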
private void SetShaderKeywords(CommandBuffer cmd, ref LightData lightData, List<VisibleLight> visibleLights)

11
ScriptableRenderPipeline/LightweightPipeline/LWRP/ShaderLibrary/Input.hlsl


#define MAX_VISIBLE_LIGHTS 16
struct InputData
{
float3 positionWS;
half3 normalWS;
half3 viewDirectionWS;
float4 shadowCoord;
half fogCoord;
half3 vertexLighting;
half3 bakedGI;
};
///////////////////////////////////////////////////////////////////////////////
// Constant Buffers //
///////////////////////////////////////////////////////////////////////////////

4
ScriptableRenderPipeline/LightweightPipeline/LWRP/ShaderLibrary/InputSurface.hlsl


half3 specular;
half metallic;
half smoothness;
half3 normal;
half3 normalTS;
half3 emission;
half occlusion;
half alpha;

#endif
outSurfaceData.smoothness = specGloss.a;
outSurfaceData.normal = Normal(uv);
outSurfaceData.normalTS = Normal(uv);
outSurfaceData.occlusion = Occlusion(uv);
outSurfaceData.emission = Emission(uv);
outSurfaceData.alpha = Alpha(albedoAlpha.a);

118
ScriptableRenderPipeline/LightweightPipeline/LWRP/ShaderLibrary/Lighting.hlsl


half3 direction;
half3 color;
half attenuation;
half realtimeAttenuation;
half subtractiveModeAttenuation;
};
///////////////////////////////////////////////////////////////////////////////

return atten * atten;
}
half4 GetLightDirectionAndRealtimeAttenuation(LightInput lightInput, float3 positionWS)
half4 GetLightDirectionAndAttenuation(LightInput lightInput, float3 positionWS)
{
half4 directionAndAttenuation;
float3 posToLightVec = lightInput.position.xyz - positionWS * lightInput.position.w;

return directionAndAttenuation;
}
half4 GetMainLightDirectionAndRealtimeAttenuation(LightInput lightInput, float3 positionWS)
half4 GetMainLightDirectionAndAttenuation(LightInput lightInput, float3 positionWS)
{
half4 directionAndAttenuation;

directionAndAttenuation = GetLightDirectionAndRealtimeAttenuation(lightInput, positionWS);
directionAndAttenuation = GetLightDirectionAndAttenuation(lightInput, positionWS);
// Cookies and shadows are only computed for main light
// Cookies are only computed for main light
directionAndAttenuation.w *= RealtimeShadowAttenuation(positionWS);
return directionAndAttenuation;
}

lightInput.spotDirection = _MainLightSpotDir;
lightInput.spotAttenuation = _MainLightSpotAttenuation;
half4 directionAndRealtimeAttenuation = GetMainLightDirectionAndRealtimeAttenuation(lightInput, positionWS);
half4 directionAndRealtimeAttenuation = GetMainLightDirectionAndAttenuation(lightInput, positionWS);
light.realtimeAttenuation = directionAndRealtimeAttenuation.w;
light.attenuation = MixRealtimeAndBakedOcclusion(light.realtimeAttenuation, lightInput.distanceAttenuation.w);
light.attenuation = directionAndRealtimeAttenuation.w;
light.subtractiveModeAttenuation = lightInput.distanceAttenuation.w;
light.color = lightInput.color;
return light;

lightInput.spotDirection = _AdditionalLightSpotDir[lightIndex];
lightInput.spotAttenuation = _AdditionalLightSpotAttenuation[lightIndex];
half4 directionAndRealtimeAttenuation = GetLightDirectionAndRealtimeAttenuation(lightInput, positionWS);
half4 directionAndRealtimeAttenuation = GetLightDirectionAndAttenuation(lightInput, positionWS);
light.realtimeAttenuation = directionAndRealtimeAttenuation.w;
light.attenuation = MixRealtimeAndBakedOcclusion(light.realtimeAttenuation, lightInput.distanceAttenuation.w);
light.attenuation = directionAndRealtimeAttenuation.w;
light.subtractiveModeAttenuation = lightInput.distanceAttenuation.w;
light.color = lightInput.color;
return light;

half3 SubtractDirectMainLightFromLightmap(Light mainLight, half3 normalWS, half3 bakedGI)
{
#if defined(_MAIN_LIGHT_DIRECTIONAL) && defined(_MIXED_LIGHTING_SUBTRACTIVE) && defined(LIGHTMAP_ON) && defined(_SHADOWS_ENABLED)
// Let's try to make realtime shadows work on a surface, which already contains
// baked lighting and shadowing from the main sun light.
// Summary:

// 1) Gives good estimate of illumination as if light would've been shadowed during the bake.
// Preserves bounce and other baked lights
// No shadows on the geometry facing away from the light
half shadowStrength = _ShadowData.x;
half shadowStrength = GetShadowStrength();
half3 estimatedLightContributionMaskedByInverseOfShadow = lambert * (1.0 - mainLight.realtimeAttenuation);
half3 estimatedLightContributionMaskedByInverseOfShadow = lambert * (1.0 - mainLight.attenuation);
half3 subtractedLightmap = bakedGI - estimatedLightContributionMaskedByInverseOfShadow;
// 2) Allows the user to define the overall ambient of the scene and to control situations where the realtime shadow becomes too dark.

// 3) Pick darkest color
return min(bakedGI, realtimeShadow);
#endif
return bakedGI;
}
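A hedged C# sketch of the subtractive-mode steps described in the comments above (the exact shadow-strength and ambient handling in the real shader may differ): estimate the main light's direct term, mask it by the inverse of the realtime shadow, subtract it from the lightmap, clamp against a user ambient floor, and finally never brighten the lightmap.

// All inputs are linear-space colors / scalars; minimal sketch of the subtractive lightmap idea.
static Vector3 SubtractiveGI(Vector3 bakedGI, Vector3 lightColor, float NdotL,
                             float realtimeShadow, Vector3 subtractiveShadowColor)
{
    Vector3 lambert = lightColor * Mathf.Max(NdotL, 0f);                 // estimated direct contribution
    Vector3 maskedByInverseShadow = lambert * (1f - realtimeShadow);     // what the realtime shadow removes
    Vector3 subtracted = bakedGI - maskedByInverseShadow;                // take it out of the lightmap
    subtracted = Vector3.Max(subtracted, subtractiveShadowColor);        // user-controlled ambient floor
    return Vector3.Min(bakedGI, subtracted);                             // 3) pick the darkest color
}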
half3 GlobalIllumination(BRDFData brdfData, half3 bakedGI, half occlusion, half3 normalWS, half3 viewDirectionWS)

return EnvironmentBRDF(brdfData, indirectDiffuse, indirectSpecular, fresnelTerm);
}
void MixRealtimeAndBakedGI(inout Light light, half3 normalWS, inout half3 bakedGI, half4 shadowMask)
{
#if defined(_MAIN_LIGHT_DIRECTIONAL) && defined(_MIXED_LIGHTING_SUBTRACTIVE) && defined(LIGHTMAP_ON) && defined(_SHADOWS_ENABLED)
bakedGI = SubtractDirectMainLightFromLightmap(light, normalWS, bakedGI);
#endif
#if defined(LIGHTMAP_ON)
#if defined(_MIXED_LIGHTING_SHADOWMASK)
// TODO:
#elif defined(_MIXED_LIGHTING_SUBTRACTIVE)
// Subtractive light mode has the direct light contribution baked into the lightmap for mixed lights.
// We need to remove the direct realtime contribution from mixed lights.
// subtractiveModeBakedOcclusion is set to 0.0 if this light's occlusion was baked into the lightmap, 1.0 otherwise.
light.attenuation *= light.subtractiveModeAttenuation;
#endif
#endif
}
///////////////////////////////////////////////////////////////////////////////
// Lighting Functions //
///////////////////////////////////////////////////////////////////////////////

{
Light light = GetLight(lightIter, positionWS);
half3 lightColor = light.color * light.realtimeAttenuation;
half3 lightColor = light.color * light.attenuation;
vertexLightColor += LightingLambert(lightColor, light.direction, normalWS);
}
#endif

// Fragment Functions //
// Used by ShaderGraph and others builtin renderers //
///////////////////////////////////////////////////////////////////////////////
half4 LightweightFragmentPBR(float3 positionWS, half3 normalWS, half3 viewDirectionWS,
half3 bakedGI, half3 vertexLighting, half3 albedo, half metallic, half3 specular,
half4 LightweightFragmentPBR(InputData inputData, half3 albedo, half metallic, half3 specular,
Light mainLight = GetMainLight(positionWS);
bakedGI = SubtractDirectMainLightFromLightmap(mainLight, normalWS, bakedGI);
half3 color = GlobalIllumination(brdfData, bakedGI, occlusion, normalWS, viewDirectionWS);
color += LightingPhysicallyBased(brdfData, mainLight, normalWS, viewDirectionWS);
Light mainLight = GetMainLight(inputData.positionWS);
mainLight.attenuation *= RealtimeShadowAttenuation(inputData.positionWS, inputData.shadowCoord);
MixRealtimeAndBakedGI(mainLight, inputData.normalWS, inputData.bakedGI, half4(0, 0, 0, 0));
half3 color = GlobalIllumination(brdfData, inputData.bakedGI, occlusion, inputData.normalWS, inputData.viewDirectionWS);
color += LightingPhysicallyBased(brdfData, mainLight, inputData.normalWS, inputData.viewDirectionWS);
Light light = GetLight(i, positionWS);
color += LightingPhysicallyBased(brdfData, light, normalWS, viewDirectionWS);
Light light = GetLight(i, inputData.positionWS);
color += LightingPhysicallyBased(brdfData, light, inputData.normalWS, inputData.viewDirectionWS);
color += vertexLighting * brdfData.diffuse;
color += inputData.vertexLighting * brdfData.diffuse;
half4 LightweightFragmentLambert(float3 positionWS, half3 normalWS, half3 viewDirectionWS,
half fogFactor, half3 bakedGI, half3 diffuse, half3 emission, half alpha)
half4 LightweightFragmentBlinnPhong(InputData inputData, half3 diffuse, half4 specularGloss, half shininess, half3 emission, half alpha)
Light mainLight = GetMainLight(positionWS);
half3 indirectDiffuse = SubtractDirectMainLightFromLightmap(mainLight, normalWS, bakedGI);
half3 lambert = LightingLambert(mainLight.color, mainLight.direction, normalWS);
half3 diffuseColor = lambert * mainLight.attenuation + indirectDiffuse;
#ifdef _ADDITIONAL_LIGHTS
int pixelLightCount = GetPixelLightCount();
for (int i = 0; i < pixelLightCount; ++i)
{
Light light = GetLight(i, positionWS);
half3 attenuatedLightColor = light.color * light.attenuation;
diffuseColor += LightingLambert(attenuatedLightColor, light.direction, normalWS);
}
#endif
half3 finalColor = diffuseColor * diffuse + emission;
ApplyFog(finalColor, fogFactor);
return half4(finalColor, alpha);
}
half4 LightweightFragmentBlinnPhong(float3 positionWS, half3 normalWS, half3 viewDirectionWS,
half fogFactor, half3 bakedGI, half3 diffuse, half4 specularGloss, half shininess, half3 emission, half alpha)
{
Light mainLight = GetMainLight(positionWS);
half3 indirectDiffuse = SubtractDirectMainLightFromLightmap(mainLight, normalWS, bakedGI);
Light mainLight = GetMainLight(inputData.positionWS);
mainLight.attenuation *= RealtimeShadowAttenuation(inputData.positionWS, inputData.shadowCoord);
MixRealtimeAndBakedGI(mainLight, inputData.normalWS, inputData.bakedGI, half4(0, 0, 0, 0));
half3 diffuseColor = indirectDiffuse + LightingLambert(attenuatedLightColor, mainLight.direction, normalWS);
half3 specularColor = LightingSpecular(attenuatedLightColor, mainLight.direction, normalWS, viewDirectionWS, specularGloss, shininess);
half3 diffuseColor = inputData.bakedGI + LightingLambert(attenuatedLightColor, mainLight.direction, inputData.normalWS);
half3 specularColor = LightingSpecular(attenuatedLightColor, mainLight.direction, inputData.normalWS, inputData.viewDirectionWS, specularGloss, shininess);
Light light = GetLight(i, positionWS);
Light light = GetLight(i, inputData.positionWS);
diffuseColor += LightingLambert(attenuatedLightColor, light.direction, normalWS);
specularColor += LightingSpecular(attenuatedLightColor, light.direction, normalWS, viewDirectionWS, specularGloss, shininess);
diffuseColor += LightingLambert(attenuatedLightColor, light.direction, inputData.normalWS);
specularColor += LightingSpecular(attenuatedLightColor, light.direction, inputData.normalWS, inputData.viewDirectionWS, specularGloss, shininess);
finalColor += inputData.vertexLighting * diffuse;
#if defined(_SPECGLOSSMAP) || defined(_SPECULAR_COLOR)
#endif
ApplyFog(finalColor, fogFactor);
ApplyFog(finalColor, inputData.fogCoord);
return half4(finalColor, alpha);
}
#endif

82
ScriptableRenderPipeline/LightweightPipeline/LWRP/ShaderLibrary/LightweightPassLit.hlsl


float3 posWS : TEXCOORD2;
half3 normal : TEXCOORD3;
#if _NORMALMAP
#ifdef _NORMALMAP
half3 tangent : TEXCOORD4;
half3 binormal : TEXCOORD5;
#endif

#ifdef _SHADOWS_ENABLED
float4 shadowCoord : TEXCOORD8;
#endif
void InitializeInputData(LightweightVertexOutput IN, half3 normalTS, out InputData inputData)
{
inputData.positionWS = IN.posWS.xyz;
#ifdef _NORMALMAP
inputData.normalWS = TangentToWorldNormal(normalTS, IN.tangent, IN.binormal, IN.normal);
#else
inputData.normalWS = normalize(IN.normal);
#endif
#ifdef SHADER_API_MOBILE
// viewDirection should be normalized here, but we avoid doing it as it's close enough and we save some ALU.
inputData.viewDirectionWS = IN.viewDir;
#else
inputData.viewDirectionWS = normalize(IN.viewDir);
#endif
#ifdef _SHADOWS_ENABLED
inputData.shadowCoord = IN.shadowCoord;
#else
inputData.shadowCoord = float4(0, 0, 0, 0);
#endif
inputData.fogCoord = IN.fogFactorAndVertexLight.x;
inputData.vertexLighting = IN.fogFactorAndVertexLight.yzw;
inputData.bakedGI = SampleGI(IN.lightmapUVOrVertexSH, inputData.normalWS);
}
///////////////////////////////////////////////////////////////////////////////
// Vertex and Fragment functions //
///////////////////////////////////////////////////////////////////////////////

half fogFactor = ComputeFogFactor(o.clipPos.z);
o.fogFactorAndVertexLight = half4(fogFactor, vertexLight);
#if defined(_SHADOWS_ENABLED) && !defined(_SHADOWS_CASCADE)
o.shadowCoord = ComputeShadowCoord(o.posWS.xyz);
#endif
return o;
}

SurfaceData surfaceData;
InitializeStandardLitSurfaceData(IN.uv, surfaceData);
#if _NORMALMAP
half3 normalWS = TangentToWorldNormal(surfaceData.normal, IN.tangent, IN.binormal, IN.normal);
#else
half3 normalWS = normalize(IN.normal);
#endif
InputData inputData;
InitializeInputData(IN, surfaceData.normalTS, inputData);
half3 indirectDiffuse = SampleGI(IN.lightmapUVOrVertexSH, normalWS);
float fogFactor = IN.fogFactorAndVertexLight.x;
half4 color = LightweightFragmentPBR(inputData, surfaceData.albedo, surfaceData.metallic, surfaceData.specular, surfaceData.smoothness, surfaceData.occlusion, surfaceData.emission, surfaceData.alpha);
// viewDirection should be normalized here, but we avoid doing it as it's close enough and we save some ALU.
half4 color = LightweightFragmentPBR(IN.posWS.xyz, normalWS, IN.viewDir, indirectDiffuse, IN.fogFactorAndVertexLight.yzw, surfaceData.albedo, surfaceData.metallic, surfaceData.specular, surfaceData.smoothness, surfaceData.occlusion, surfaceData.emission, surfaceData.alpha);
ApplyFog(color.rgb, fogFactor);
ApplyFog(color.rgb, inputData.fogCoord);
return color;
}

UNITY_SETUP_INSTANCE_ID(IN);
float2 uv = IN.uv;
half4 diffuseAlpha = SAMPLE_TEXTURE2D(_MainTex, sampler_MainTex, uv);
half3 diffuse = diffuseAlpha.rgb * _Color.rgb;

AlphaDiscard(alpha, _Cutoff);
#if _NORMALMAP
half3 normalTangent = Normal(uv);
half3 normalWS = TangentToWorldNormal(normalTangent, IN.tangent, IN.binormal, IN.normal);
#ifdef _NORMALMAP
half3 normalTS = Normal(uv);
half3 normalWS = normalize(IN.normal);
half3 normalTS = half3(0, 0, 1);
half4 specularGloss = SpecularGloss(uv, diffuseAlpha.a);
half shininess = _Shininess * 128.0h;
half3 viewDirectionWS = SafeNormalize(IN.viewDir.xyz);
float3 positionWS = IN.posWS.xyz;
half3 diffuseGI = SampleGI(IN.lightmapUVOrVertexSH, normalWS);
#if _VERTEX_LIGHTS
diffuseGI += IN.fogFactorAndVertexLight.yzw;
#endif
half shininess = _Shininess * 128.0h;
half fogFactor = IN.fogFactorAndVertexLight.x;
InputData inputData;
InitializeInputData(IN, normalTS, inputData);
#if defined(_SPECGLOSSMAP) || defined(_SPECULAR_COLOR)
half4 specularGloss = SpecularGloss(uv, diffuseAlpha.a);
return LightweightFragmentBlinnPhong(positionWS, normalWS, viewDirectionWS, fogFactor, diffuseGI, diffuse, specularGloss, shininess, emission, alpha);
#else
return LightweightFragmentLambert(positionWS, normalWS, viewDirectionWS, fogFactor, diffuseGI, diffuse, emission, alpha);
#endif
return LightweightFragmentBlinnPhong(inputData, diffuse, specularGloss, shininess, emission, alpha);
};
#endif

22
ScriptableRenderPipeline/LightweightPipeline/LWRP/ShaderLibrary/Particles.hlsl


#endif
#if defined(_NORMALMAP)
float3 normal = normalize(UnpackNormalScale(readTexture(_BumpMap, sampler_BumpMap, IN), _BumpScale));
half3 normalTS = normalize(UnpackNormalScale(readTexture(_BumpMap, sampler_BumpMap, IN), _BumpScale));
float3 normal = float3(0, 0, 1);
half3 normalTS = float3(0, 0, 1);
#endif
#if defined(_EMISSION)

surfaceData.albedo = albedo.rbg;
surfaceData.specular = half3(0, 0, 0);
surfaceData.normal = normal;
surfaceData.normalTS = normalTS;
surfaceData.emission = emission * _EmissionColor.rgb;
surfaceData.metallic = metallicGloss.r;
surfaceData.smoothness = metallicGloss.g;

#if defined(_ALPHATEST_ON)
clip(surfaceData.alpha - _Cutoff + 0.0001);
#endif
}
void InitializeInputData(VertexOutputLit IN, half3 normalTS, out InputData input)
{
input.positionWS = IN.posWS.xyz;
#if _NORMALMAP
input.normalWS = TangentToWorldNormal(normalTS, IN.tangent, IN.binormal, IN.normal);
#else
input.normalWS = normalize(IN.normal);
#endif
input.viewDirectionWS = SafeNormalize(GetCameraPositionWS() - input.positionWS);
input.shadowCoord = float4(0, 0, 0, 0);
input.fogCoord = IN.posWS.w;
input.vertexLighting = half3(0, 0, 0);
input.bakedGI = half3(0, 0, 0);
}

80
ScriptableRenderPipeline/LightweightPipeline/LWRP/ShaderLibrary/Shadows.hlsl


#define LIGHTWEIGHT_SHADOWS_INCLUDED
#include "CoreRP/ShaderLibrary/Common.hlsl"
#include "CoreRP/ShaderLibrary/Shadow/ShadowSamplingTent.hlsl"
#define MAX_SHADOW_CASCADES 4

// Last cascade is initialized with a no-op matrix. It always transforms
// shadow coord to half(0, 0, NEAR_PLANE). We use this trick to avoid
// branching since ComputeCascadeIndex can return cascade index = MAX_SHADOW_CASCADES
float4x4 _WorldToShadow[MAX_SHADOW_CASCADES + 1];
float4 _DirShadowSplitSpheres[MAX_SHADOW_CASCADES];
float4 _DirShadowSplitSphereRadii;
half4 _ShadowOffset0;
half4 _ShadowOffset1;
half4 _ShadowOffset2;
half4 _ShadowOffset3;
half4 _ShadowData; // (x: shadowStrength)
float4x4 _WorldToShadow[MAX_SHADOW_CASCADES + 1];
float4 _DirShadowSplitSpheres[MAX_SHADOW_CASCADES];
float4 _DirShadowSplitSphereRadii;
half4 _ShadowOffset0;
half4 _ShadowOffset1;
half4 _ShadowOffset2;
half4 _ShadowOffset3;
half4 _ShadowData; // (x: shadowStrength)
float4 _ShadowmapSize; // (xy: 1/width and 1/height, zw: width and height)
CBUFFER_END
inline half SampleShadowmap(float4 shadowCoord)

#endif
half attenuation;
#ifdef SHADER_API_MOBILE
// 4-tap hardware comparison
half4 attenuation4;
attenuation4.x = SAMPLE_TEXTURE2D_SHADOW(_ShadowMap, sampler_ShadowMap, shadowCoord.xyz + _ShadowOffset0.xyz);

half attenuation = dot(attenuation4, 0.25);
attenuation = dot(attenuation4, 0.25);
#else
real fetchesWeights[9];
real2 fetchesUV[9];
SampleShadow_ComputeSamples_Tent_5x5(_ShadowmapSize, shadowCoord.xy, fetchesWeights, fetchesUV);
attenuation = fetchesWeights[0] * SAMPLE_TEXTURE2D_SHADOW(_ShadowMap, sampler_ShadowMap, real3(fetchesUV[0].xy, shadowCoord.z));
attenuation += fetchesWeights[1] * SAMPLE_TEXTURE2D_SHADOW(_ShadowMap, sampler_ShadowMap, real3(fetchesUV[1].xy, shadowCoord.z));
attenuation += fetchesWeights[2] * SAMPLE_TEXTURE2D_SHADOW(_ShadowMap, sampler_ShadowMap, real3(fetchesUV[2].xy, shadowCoord.z));
attenuation += fetchesWeights[3] * SAMPLE_TEXTURE2D_SHADOW(_ShadowMap, sampler_ShadowMap, real3(fetchesUV[3].xy, shadowCoord.z));
attenuation += fetchesWeights[4] * SAMPLE_TEXTURE2D_SHADOW(_ShadowMap, sampler_ShadowMap, real3(fetchesUV[4].xy, shadowCoord.z));
attenuation += fetchesWeights[5] * SAMPLE_TEXTURE2D_SHADOW(_ShadowMap, sampler_ShadowMap, real3(fetchesUV[5].xy, shadowCoord.z));
attenuation += fetchesWeights[6] * SAMPLE_TEXTURE2D_SHADOW(_ShadowMap, sampler_ShadowMap, real3(fetchesUV[6].xy, shadowCoord.z));
attenuation += fetchesWeights[7] * SAMPLE_TEXTURE2D_SHADOW(_ShadowMap, sampler_ShadowMap, real3(fetchesUV[7].xy, shadowCoord.z));
attenuation += fetchesWeights[8] * SAMPLE_TEXTURE2D_SHADOW(_ShadowMap, sampler_ShadowMap, real3(fetchesUV[8].xy, shadowCoord.z));
#endif
half attenuation = SAMPLE_TEXTURE2D_SHADOW(_ShadowMap, sampler_ShadowMap, shadowCoord.xyz);
attenuation = SAMPLE_TEXTURE2D_SHADOW(_ShadowMap, sampler_ShadowMap, shadowCoord.xyz);
#endif
// Apply shadow strength

return (shadowCoord.x <= 0 || shadowCoord.x >= 1 || shadowCoord.y <= 0 || shadowCoord.y >= 1 || shadowCoord.z >= 1) ? 1.0 : attenuation;
}
inline half ComputeCascadeIndex(float3 wpos)
inline half ComputeCascadeIndex(float3 positionWS)
float3 fromCenter0 = wpos.xyz - _DirShadowSplitSpheres[0].xyz;
float3 fromCenter1 = wpos.xyz - _DirShadowSplitSpheres[1].xyz;
float3 fromCenter2 = wpos.xyz - _DirShadowSplitSpheres[2].xyz;
float3 fromCenter3 = wpos.xyz - _DirShadowSplitSpheres[3].xyz;
float3 fromCenter0 = positionWS.xyz - _DirShadowSplitSpheres[0].xyz;
float3 fromCenter1 = positionWS.xyz - _DirShadowSplitSpheres[1].xyz;
float3 fromCenter2 = positionWS.xyz - _DirShadowSplitSpheres[2].xyz;
float3 fromCenter3 = positionWS.xyz - _DirShadowSplitSpheres[3].xyz;
float4 distances2 = float4(dot(fromCenter0, fromCenter0), dot(fromCenter1, fromCenter1), dot(fromCenter2, fromCenter2), dot(fromCenter3, fromCenter3));
half4 weights = half4(distances2 < _DirShadowSplitSphereRadii);

}
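Conceptually, the distance/weight test above answers "which is the first split sphere that still contains the point". A plain C# version of the same selection (a readable sketch rather than the branchless shader form; radii are assumed to be stored squared, matching the squared distances):

// positionWS: world-space position; sphereCenters: cascade split sphere centers; radiiSqr: squared radii.
static int ComputeCascadeIndex(Vector3 positionWS, Vector3[] sphereCenters, Vector4 radiiSqr)
{
    float[] r2 = { radiiSqr.x, radiiSqr.y, radiiSqr.z, radiiSqr.w };
    for (int i = 0; i < 4; ++i)
    {
        Vector3 fromCenter = positionWS - sphereCenters[i];
        if (Vector3.Dot(fromCenter, fromCenter) < r2[i])
            return i;                 // inside this cascade's bounding sphere
    }
    return 4;                         // outside all cascades -> the extra no-op matrix slot
}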
inline float4 ComputeShadowCoord(float3 positionWS, half cascadeIndex = 0)
float4 ComputeShadowCoord(float3 positionWS)
half cascadeIndex = ComputeCascadeIndex(positionWS);
return mul(_WorldToShadow[cascadeIndex], float4(positionWS, 1.0));
#endif

inline half RealtimeShadowAttenuation(float3 positionWS)
half GetShadowStrength()
{
return _ShadowData.x;
}
half RealtimeShadowAttenuation(float3 positionWS)
half cascadeIndex = ComputeCascadeIndex(positionWS);
float4 shadowCoord = ComputeShadowCoord(positionWS, cascadeIndex);
float4 shadowCoord = ComputeShadowCoord(positionWS);
half MixRealtimeAndBakedOcclusion(half realtimeAttenuation, half subtractiveModeBakedOcclusion, half4 shadowMaskModeBakedOcclusion = half4(0, 0, 0, 0))
half RealtimeShadowAttenuation(float3 positionWS, float4 shadowCoord)
#if defined(LIGHTMAP_ON)
#if defined(_MIXED_LIGHTING_SHADOWMASK)
// TODO:
#elif defined(_MIXED_LIGHTING_SUBTRACTIVE)
// Subtractive light mode has the direct light contribution baked into the lightmap for mixed lights.
// We need to remove the direct realtime contribution from mixed lights.
// subtractiveModeBakedOcclusion is set to 0.0 if this light's occlusion was baked into the lightmap, 1.0 otherwise.
return realtimeAttenuation * subtractiveModeBakedOcclusion;
#if !defined(_SHADOWS_ENABLED)
return 1.0;
#ifdef _SHADOWS_CASCADE
shadowCoord = ComputeShadowCoord(positionWS);
return realtimeAttenuation;
return SampleShadowmap(shadowCoord);
}
#endif

1
ScriptableRenderPipeline/LightweightPipeline/LWRP/Shaders/LightweightStandard.shader


#pragma vertex LightweightVertexMeta
#pragma fragment LightweightFragmentMeta
#pragma shader_feature _SPECULAR_SETUP
#pragma shader_feature _EMISSION
#pragma shader_feature _METALLICSPECGLOSSMAP
#pragma shader_feature _ _SMOOTHNESS_TEXTURE_ALBEDO_CHANNEL_A

18
ScriptableRenderPipeline/LightweightPipeline/LWRP/Shaders/LightweightStandardParticles.shader


SurfaceData surfaceData;
InitializeSurfaceData(IN, surfaceData);
float3 positionWS = IN.posWS.xyz;
half3 viewDirWS = SafeNormalize(GetCameraPositionWS() - positionWS);
half fogFactor = IN.posWS.w;
#if _NORMALMAP
half3 normalWS = TangentToWorldNormal(surfaceData.normal, IN.tangent, IN.binormal, IN.normal);
#else
half3 normalWS = normalize(IN.normal);
#endif
InputData inputData;
InitializeInputData(IN, surfaceData.normalTS, inputData);
half3 zero = half3(0.0, 0.0, 0.0);
half4 color = LightweightFragmentPBR(positionWS, normalWS, viewDirWS, /*indirectDiffuse*/ zero, /*vertex lighting*/ zero, surfaceData.albedo,
surfaceData.metallic, /* specularColor */ zero, surfaceData.smoothness, surfaceData.occlusion, surfaceData.emission, surfaceData.alpha);
ApplyFog(color.rgb, fogFactor);
half4 color = LightweightFragmentPBR(inputData, surfaceData.albedo,
surfaceData.metallic, half3(0, 0, 0), surfaceData.smoothness, surfaceData.occlusion, surfaceData.emission, surfaceData.alpha);
ApplyFog(color.rgb, inputData.fogCoord);
return color;
}

60
ScriptableRenderPipeline/LightweightPipeline/LWRP/Shaders/LightweightStandardTerrain.shader


#endif
half4 fogFactorAndVertexLight : TEXCOORD6; // x: fogFactor, yzw: vertex light
float3 positionWS : TEXCOORD7;
#ifdef _SHADOWS_ENABLED
float4 shadowCoord : TEXCOORD8;
#endif
void InitializeInputData(VertexOutput IN, half3 normalTS, out InputData input)
{
input = (InputData)0;
input.positionWS = IN.positionWS;
#ifdef _TERRAIN_NORMAL_MAP
input.normalWS = TangentToWorldNormal(normalTS, IN.tangent, IN.binormal, IN.normal);
#else
input.normalWS = normalize(IN.normal);
#endif
input.viewDirectionWS = SafeNormalize(GetCameraPositionWS() - IN.positionWS);
#ifdef _SHADOWS_ENABLED
input.shadowCoord = IN.shadowCoord;
#endif
input.fogCoord = IN.fogFactorAndVertexLight.x;
#ifdef LIGHTMAP_ON
input.bakedGI = SampleLightmap(IN.uvControlAndLM.zw, input.normalWS);
#endif
}
void SplatmapMix(VertexOutput IN, half4 defaultAlpha, out half4 splat_control, out half weight, out half4 mixedDiffuse, inout half3 mixedNormal)
{

VertexOutput SplatmapVert(VertexInput v)
{
VertexOutput o;
VertexOutput o = (VertexOutput)0;
float3 positionWS = TransformObjectToWorld(v.vertex.xyz);
float4 clipPos = TransformWorldToHClip(positionWS);

o.fogFactorAndVertexLight.yzw = VertexLighting(positionWS, o.normal);
o.positionWS = positionWS;
o.clipPos = clipPos;
#if defined(_SHADOWS_ENABLED) && !defined(_SHADOWS_CASCADE)
o.shadowCoord = ComputeShadowCoord(o.positionWS.xyz);
#endif
return o;
}

half weight;
half4 mixedDiffuse;
half4 defaultSmoothness = half4(_Smoothness0, _Smoothness1, _Smoothness2, _Smoothness3);
half3 normalTangent;
SplatmapMix(IN, defaultSmoothness, splat_control, weight, mixedDiffuse, normalTangent);
half3 normalTS;
SplatmapMix(IN, defaultSmoothness, splat_control, weight, mixedDiffuse, normalTS);
half3 albedo = mixedDiffuse.rgb;
half smoothness = mixedDiffuse.a;

#ifdef _TERRAIN_NORMAL_MAP
half3 normalWS = TangentToWorldNormal(normalTangent, IN.tangent, IN.binormal, IN.normal);
#else
half3 normalWS = normalize(IN.normal);
#endif
InputData inputData;
InitializeInputData(IN, normalTS, inputData);
half4 color = LightweightFragmentPBR(inputData, albedo, metallic, specular, smoothness, /* occlusion */ 1.0, /* emission */ half3(0, 0, 0), alpha);
half3 indirectDiffuse = half3(0, 0, 0);
#ifdef LIGHTMAP_ON
indirectDiffuse = SampleLightmap(IN.uvControlAndLM.zw, normalWS);
#endif
half3 viewDirectionWS = SafeNormalize(GetCameraPositionWS() - IN.positionWS);
half fogFactor = IN.fogFactorAndVertexLight.x;
half4 color = LightweightFragmentPBR(IN.positionWS, normalWS, viewDirectionWS, indirectDiffuse,
IN.fogFactorAndVertexLight.yzw, albedo, metallic, specular, smoothness, /* occlusion */ 1.0, /* emission */ half3(0, 0, 0), alpha);
ApplyFog(color.rgb, fogFactor);
ApplyFog(color.rgb, inputData.fogCoord);
return color;
}
ENDHLSL

5
ScriptableRenderPipeline/LightweightPipeline/LWRP/Shaders/LightweightStandardUnlit.shader


}
SubShader
{
Tags { "RenderType" = "Opaque" "IgnoreProjectors" = "True" "RenderPipeline" = "LightweightPipe" }
Tags { "RenderType" = "Opaque" "IgnoreProjectors" = "True" "RenderPipeline" = "LightweightPipeline" }
LOD 100
Blend [_SrcBlend][_DstBlend]

#if _SAMPLE_GI
#if _NORMALMAP
half3 normalWS = TangentToWorldNormal(surfaceData.normal, IN.tangent, IN.binormal, IN.normal);
half3 normalWS = TangentToWorldNormal(surfaceData.normalTS, IN.tangent, IN.binormal, IN.normal);
#else
half3 normalWS = normalize(IN.normal);
#endif

ENDHLSL
}
}
FallBack "Hidden/InternalErrorShader"
CustomEditor "LightweightUnlitGUI"
}

4
ScriptableRenderPipeline/LightweightPipeline/package.json


{
"name": "com.unity.render-pipelines.lightweight",
"description": "Lightweight Render Pipeline for Unity.",
"version": "0.1.24",
"version": "0.1.25",
"com.unity.render-pipelines.core": "0.1.24"
"com.unity.render-pipelines.core": "0.1.25"
}
}

2
ScriptableRenderPipeline/master-package.json


{
"version": "0.1.24",
"version": "0.1.25",
"unity": "2018.1",
"dependencies": {
"com.unity.postprocessing": "0.1.7"

4
Tests/GraphicsTests/RenderPipeline/LightweightPipeline/LightweightPipelineAsset.asset


m_Script: {fileID: 11500000, guid: bf2edee5c58d82540a51f03df9d42094, type: 3}
m_Name: LightweightPipelineAsset
m_EditorClassIdentifier:
kAssetVersion: 1
kAssetVersion: 2
m_MaxPixelLights: 4
m_SupportsVertexLight: 0
m_RequireDepthTexture: 0

m_ShadowCascades: 2
m_Cascade2Split: 0.25
m_Cascade4Split: {x: 0.067, y: 0.2, z: 0.467}
m_DefaultShader: {fileID: 4800000, guid: 933532a4fcc9baf4fa0491de14d08ed7, type: 3}
m_ResourceAsset: {fileID: 11400000, guid: c8afc0a27fb8c0b4da18151c689a1082, type: 2}

3
Tests/GraphicsTests/RenderPipeline/LightweightPipeline/LightweightPipelineAsset.asset.meta


fileFormatVersion: 2
guid: e6987eea1dd29074597d54ed91a54a26
timeCreated: 1489068733
licenseType: Pro
externalObjects: {}
mainObjectFileID: 11400000
userData:
assetBundleName:

65
Tests/GraphicsTests/RenderPipeline/LightweightPipeline/Scenes/040_UpgradeScene/LightingData.asset
File diff is too large to display

2
Tests/GraphicsTests/RenderPipeline/LightweightPipeline/Scenes/040_UpgradeScene/LightingData.asset.meta


fileFormatVersion: 2
guid: c1e3921e553ae4bdea5bd3521abb96e2
timeCreated: 1509632247
licenseType: Pro
NativeFormatImporter:
externalObjects: {}
mainObjectFileID: 25800000

999
Tests/GraphicsTests/RenderPipeline/LightweightPipeline/Scenes/040_UpgradeScene/Lightmap-0_comp_dir.png
File diff is too large to display

1001
Tests/GraphicsTests/RenderPipeline/LightweightPipeline/Scenes/040_UpgradeScene/Lightmap-0_comp_light.exr
File diff is too large to display

999
Tests/GraphicsTests/RenderPipeline/LightweightPipeline/Scenes/040_UpgradeScene/Lightmap-1_comp_dir.png
File diff is too large to display

1001
Tests/GraphicsTests/RenderPipeline/LightweightPipeline/Scenes/040_UpgradeScene/Lightmap-1_comp_light.exr
File diff is too large to display

1001
Tests/GraphicsTests/RenderPipeline/LightweightPipeline/Scenes/040_UpgradeScene/ReflectionProbe-0.exr
File diff is too large to display

244
ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/Shadow/ShadowSamplingTent.hlsl


// ------------------------------------------------------------------
// PCF Filtering Tent Functions
// ------------------------------------------------------------------
// Assuming an isosceles right-angled triangle of height "triangleHeight" (as drawn below),
// this function returns the area of the triangle above the first texel.
//
// |\ <-- 45 degree slope isosceles right-angled triangle
// | \
// ---- <-- length of this side is "triangleHeight"
// _ _ _ _ <-- texels
real SampleShadow_GetTriangleTexelArea(real triangleHeight)
{
return triangleHeight - 0.5;
}
// Assuming an isosceles triangle 1.5 texels high and 3 texels wide, lying on 4 texels,
// this function returns the area of the triangle above each of those texels.
// | <-- offset from -0.5 to 0.5, 0 meaning the triangle is exactly centered
// / \ <-- 45 degree slope isosceles triangle (i.e. the tent projected in 2D)
// / \
// _ _ _ _ <-- texels
// X Y Z W <-- result indices (in computedArea.xyzw and computedAreaUncut.xyzw)
void SampleShadow_GetTexelAreas_Tent_3x3(real offset, out real4 computedArea, out real4 computedAreaUncut)
{
// Compute the exterior areas
real offset01SquaredHalved = (offset + 0.5) * (offset + 0.5) * 0.5;
computedAreaUncut.x = computedArea.x = offset01SquaredHalved - offset;
computedAreaUncut.w = computedArea.w = offset01SquaredHalved;
// Compute the middle areas
// For Y: we find the area in Y as if the left section of the isosceles triangle
// intersected the axis between Y and Z (i.e. where offset = 0).
computedAreaUncut.y = SampleShadow_GetTriangleTexelArea(1.5 - offset);
// This area is larger than the one we are looking for if (offset < 0), thus we need to
// subtract the area of the triangle defined by (0,1.5-offset), (0,1.5+offset), (-offset,1.5).
real clampedOffsetLeft = min(offset,0);
real areaOfSmallLeftTriangle = clampedOffsetLeft * clampedOffsetLeft;
computedArea.y = computedAreaUncut.y - areaOfSmallLeftTriangle;
// We do the same for Z, but with the right part of the isosceles triangle
computedAreaUncut.z = SampleShadow_GetTriangleTexelArea(1.5 + offset);
real clampedOffsetRight = max(offset,0);
real areaOfSmallRightTriangle = clampedOffsetRight * clampedOffsetRight;
computedArea.z = computedAreaUncut.z - areaOfSmallRightTriangle;
}
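// Quick sanity check: with offset = 0 (tent centered), offset01SquaredHalved = 0.125 and
// computedArea = (0.125, 1.0, 1.0, 0.125), which sums to 2.25 = 0.5 * 3 * 1.5,
// i.e. the full area of the 3-texel-wide, 1.5-texel-high tent.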
// Assuming an isosceles triangle 1.5 texels high and 3 texels wide lying on 4 texels,
// this function returns the weight of each texel's area relative to the full triangle area.
void SampleShadow_GetTexelWeights_Tent_3x3(real offset, out real4 computedWeight)
{
real4 dummy;
SampleShadow_GetTexelAreas_Tent_3x3(offset, computedWeight, dummy);
computedWeight *= 0.44444; // 0.44444 = 1 / 2.25, the full triangle area (0.5 * 3 * 1.5)
}
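// As a consequence the four weights always sum to ~1.0; e.g. for offset = 0 they are
// (0.0556, 0.4444, 0.4444, 0.0556).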
// Assuming an isosceles triangle 2.5 texels high and 5 texels wide lying on 6 texels,
// this function returns the weight of each texel's area relative to the full triangle area.
// / \
// _ _ _ _ _ _ <-- texels
// 0 1 2 3 4 5 <-- computed area indices (in texelsWeights[])
void SampleShadow_GetTexelWeights_Tent_5x5(real offset, out real3 texelsWeightsA, out real3 texelsWeightsB)
{
// See SampleShadow_GetTexelAreas_Tent_3x3 for details.
real4 computedArea_From3texelTriangle;
real4 computedAreaUncut_From3texelTriangle;
SampleShadow_GetTexelAreas_Tent_3x3(offset, computedArea_From3texelTriangle, computedAreaUncut_From3texelTriangle);
// The triangle slope is 45 degrees, so we can almost reuse the result of the 3-texel-wide computation:
// the 5-texel-wide triangle can be seen as the 3-texel-wide one shifted up by one unit/texel.
// 0.16 = 1 / 6.25, the full triangle area (0.5 * 5 * 2.5)
texelsWeightsA.x = 0.16 * (computedArea_From3texelTriangle.x);
texelsWeightsA.y = 0.16 * (computedAreaUncut_From3texelTriangle.y);
texelsWeightsA.z = 0.16 * (computedArea_From3texelTriangle.y + 1);
texelsWeightsB.x = 0.16 * (computedArea_From3texelTriangle.z + 1);
texelsWeightsB.y = 0.16 * (computedAreaUncut_From3texelTriangle.z);
texelsWeightsB.z = 0.16 * (computedArea_From3texelTriangle.w);
}
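// Quick sanity check: with offset = 0, texelsWeightsA = 0.16 * (0.125, 1.0, 2.0) and
// texelsWeightsB = 0.16 * (2.0, 1.0, 0.125); the six weights sum to 0.16 * 6.25 = 1.0,
// where 6.25 = 0.5 * 5 * 2.5 is the full area of the 5-texel-wide tent.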
// Assuming an isosceles triangle 3.5 texels high and 7 texels wide lying on 8 texels,
// this function returns the weight of each texel's area relative to the full triangle area.
// / \
// _ _ _ _ _ _ _ _ <-- texels
// 0 1 2 3 4 5 6 7 <-- computed area indices (in texelsWeights[])
void SampleShadow_GetTexelWeights_Tent_7x7(real offset, out real4 texelsWeightsA, out real4 texelsWeightsB)
{
// See SampleShadow_GetTexelAreas_Tent_3x3 for details.
real4 computedArea_From3texelTriangle;
real4 computedAreaUncut_From3texelTriangle;
SampleShadow_GetTexelAreas_Tent_3x3(offset, computedArea_From3texelTriangle, computedAreaUncut_From3texelTriangle);
// The triangle slope is 45 degrees, so we can almost reuse the result of the 3-texel-wide computation:
// the 7-texel-wide triangle can be seen as the 3-texel-wide one shifted up by two units/texels.
// 0.081632 ~= 1 / 12.25, the full triangle area (0.5 * 7 * 3.5)
texelsWeightsA.x = 0.081632 * (computedArea_From3texelTriangle.x);
texelsWeightsA.y = 0.081632 * (computedAreaUncut_From3texelTriangle.y);
texelsWeightsA.z = 0.081632 * (computedAreaUncut_From3texelTriangle.y + 1);
texelsWeightsA.w = 0.081632 * (computedArea_From3texelTriangle.y + 2);
texelsWeightsB.x = 0.081632 * (computedArea_From3texelTriangle.z + 2);
texelsWeightsB.y = 0.081632 * (computedAreaUncut_From3texelTriangle.z + 1);
texelsWeightsB.z = 0.081632 * (computedAreaUncut_From3texelTriangle.z);
texelsWeightsB.w = 0.081632 * (computedArea_From3texelTriangle.w);
}
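// Quick sanity check: with offset = 0 the eight weights are
// 0.081632 * (0.125, 1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 0.125), which sum to 0.081632 * 12.25 ~= 1.0,
// where 12.25 = 0.5 * 7 * 3.5 is the full area of the 7-texel-wide tent.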
// 3x3 Tent filter (45 degree sloped triangles in U and V)
void SampleShadow_ComputeSamples_Tent_3x3(real4 shadowMapTexture_TexelSize, real2 coord, out real fetchesWeights[4], out real2 fetchesUV[4])
{
// The tent base is 3x3, covering from 9 to 16 texels, so we need 4 bilinear PCF fetches
real2 tentCenterInTexelSpace = coord.xy * shadowMapTexture_TexelSize.zw;
real2 centerOfFetchesInTexelSpace = floor(tentCenterInTexelSpace + 0.5);
real2 offsetFromTentCenterToCenterOfFetches = tentCenterInTexelSpace - centerOfFetchesInTexelSpace;
// find the weight of each texel based on the area of a 45 degree slope tent above each of them.
real4 texelsWeightsU, texelsWeightsV;
SampleShadow_GetTexelWeights_Tent_3x3(offsetFromTentCenterToCenterOfFetches.x, texelsWeightsU);
SampleShadow_GetTexelWeights_Tent_3x3(offsetFromTentCenterToCenterOfFetches.y, texelsWeightsV);
// each fetch will cover a group of 2x2 texels; the weight of each group is the sum of the weights of its texels
real2 fetchesWeightsU = texelsWeightsU.xz + texelsWeightsU.yw;
real2 fetchesWeightsV = texelsWeightsV.xz + texelsWeightsV.yw;
// move the PCF bilinear fetches to respect texel weights
real2 fetchesOffsetsU = texelsWeightsU.yw / fetchesWeightsU.xy + real2(-1.5,0.5);
real2 fetchesOffsetsV = texelsWeightsV.yw / fetchesWeightsV.xy + real2(-1.5,0.5);
fetchesOffsetsU *= shadowMapTexture_TexelSize.xx;
fetchesOffsetsV *= shadowMapTexture_TexelSize.yy;
real2 bilinearFetchOrigin = centerOfFetchesInTexelSpace * shadowMapTexture_TexelSize.xy;
fetchesUV[0] = bilinearFetchOrigin + real2(fetchesOffsetsU.x, fetchesOffsetsV.x);
fetchesUV[1] = bilinearFetchOrigin + real2(fetchesOffsetsU.y, fetchesOffsetsV.x);
fetchesUV[2] = bilinearFetchOrigin + real2(fetchesOffsetsU.x, fetchesOffsetsV.y);
fetchesUV[3] = bilinearFetchOrigin + real2(fetchesOffsetsU.y, fetchesOffsetsV.y);
fetchesWeights[0] = fetchesWeightsU.x * fetchesWeightsV.x;
fetchesWeights[1] = fetchesWeightsU.y * fetchesWeightsV.x;
fetchesWeights[2] = fetchesWeightsU.x * fetchesWeightsV.y;
fetchesWeights[3] = fetchesWeightsU.y * fetchesWeightsV.y;
}
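// A minimal usage sketch (the names below are placeholder assumptions, not declared in this file):
// assuming a shadow map _ShadowMap with texel-size vector _ShadowMap_TexelSize, a comparison
// sampler sampler_ShadowMap, and a SAMPLE_TEXTURE2D_SHADOW(tex, smp, float3(uv, depth)) style
// macro, the weighted PCF fetches could be accumulated like this:
//
//     real fetchesWeights[4];
//     real2 fetchesUV[4];
//     SampleShadow_ComputeSamples_Tent_3x3(_ShadowMap_TexelSize, shadowCoord.xy, fetchesWeights, fetchesUV);
//     real shadow = 0.0;
//     for (int i = 0; i < 4; i++)
//         shadow += fetchesWeights[i] * SAMPLE_TEXTURE2D_SHADOW(_ShadowMap, sampler_ShadowMap, real3(fetchesUV[i], shadowCoord.z));
//
// The 5x5 and 7x7 variants below are used the same way, with 9 and 16 fetches respectively.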
// 5x5 Tent filter (45 degree sloped triangles in U and V)
void SampleShadow_ComputeSamples_Tent_5x5(real4 shadowMapTexture_TexelSize, real2 coord, out real fetchesWeights[9], out real2 fetchesUV[9])
{
// The tent base is 5x5, covering from 25 to 36 texels, so we need 9 bilinear PCF fetches
real2 tentCenterInTexelSpace = coord.xy * shadowMapTexture_TexelSize.zw;
real2 centerOfFetchesInTexelSpace = floor(tentCenterInTexelSpace + 0.5);
real2 offsetFromTentCenterToCenterOfFetches = tentCenterInTexelSpace - centerOfFetchesInTexelSpace;
// find the weight of each texel based on the area of a 45 degree slope tent above each of them.
real3 texelsWeightsU_A, texelsWeightsU_B;
real3 texelsWeightsV_A, texelsWeightsV_B;
SampleShadow_GetTexelWeights_Tent_5x5(offsetFromTentCenterToCenterOfFetches.x, texelsWeightsU_A, texelsWeightsU_B);
SampleShadow_GetTexelWeights_Tent_5x5(offsetFromTentCenterToCenterOfFetches.y, texelsWeightsV_A, texelsWeightsV_B);
// each fetch will cover a group of 2x2 texels; the weight of each group is the sum of the weights of its texels
real3 fetchesWeightsU = real3(texelsWeightsU_A.xz, texelsWeightsU_B.y) + real3(texelsWeightsU_A.y, texelsWeightsU_B.xz);
real3 fetchesWeightsV = real3(texelsWeightsV_A.xz, texelsWeightsV_B.y) + real3(texelsWeightsV_A.y, texelsWeightsV_B.xz);
// move the PCF bilinear fetches to respect texel weights
real3 fetchesOffsetsU = real3(texelsWeightsU_A.y, texelsWeightsU_B.xz) / fetchesWeightsU.xyz + real3(-2.5,-0.5,1.5);
real3 fetchesOffsetsV = real3(texelsWeightsV_A.y, texelsWeightsV_B.xz) / fetchesWeightsV.xyz + real3(-2.5,-0.5,1.5);
fetchesOffsetsU *= shadowMapTexture_TexelSize.xxx;
fetchesOffsetsV *= shadowMapTexture_TexelSize.yyy;
real2 bilinearFetchOrigin = centerOfFetchesInTexelSpace * shadowMapTexture_TexelSize.xy;
fetchesUV[0] = bilinearFetchOrigin + real2(fetchesOffsetsU.x, fetchesOffsetsV.x);
fetchesUV[1] = bilinearFetchOrigin + real2(fetchesOffsetsU.y, fetchesOffsetsV.x);
fetchesUV[2] = bilinearFetchOrigin + real2(fetchesOffsetsU.z, fetchesOffsetsV.x);
fetchesUV[3] = bilinearFetchOrigin + real2(fetchesOffsetsU.x, fetchesOffsetsV.y);
fetchesUV[4] = bilinearFetchOrigin + real2(fetchesOffsetsU.y, fetchesOffsetsV.y);
fetchesUV[5] = bilinearFetchOrigin + real2(fetchesOffsetsU.z, fetchesOffsetsV.y);
fetchesUV[6] = bilinearFetchOrigin + real2(fetchesOffsetsU.x, fetchesOffsetsV.z);
fetchesUV[7] = bilinearFetchOrigin + real2(fetchesOffsetsU.y, fetchesOffsetsV.z);
fetchesUV[8] = bilinearFetchOrigin + real2(fetchesOffsetsU.z, fetchesOffsetsV.z);
fetchesWeights[0] = fetchesWeightsU.x * fetchesWeightsV.x;
fetchesWeights[1] = fetchesWeightsU.y * fetchesWeightsV.x;
fetchesWeights[2] = fetchesWeightsU.z * fetchesWeightsV.x;
fetchesWeights[3] = fetchesWeightsU.x * fetchesWeightsV.y;
fetchesWeights[4] = fetchesWeightsU.y * fetchesWeightsV.y;
fetchesWeights[5] = fetchesWeightsU.z * fetchesWeightsV.y;
fetchesWeights[6] = fetchesWeightsU.x * fetchesWeightsV.z;
fetchesWeights[7] = fetchesWeightsU.y * fetchesWeightsV.z;
fetchesWeights[8] = fetchesWeightsU.z * fetchesWeightsV.z;
}
// 7x7 Tent filter (45 degree sloped triangles in U and V)
void SampleShadow_ComputeSamples_Tent_7x7(real4 shadowMapTexture_TexelSize, real2 coord, out real fetchesWeights[16], out real2 fetchesUV[16])
{
// The tent base is 7x7, covering from 49 to 64 texels, so we need 16 bilinear PCF fetches
real2 tentCenterInTexelSpace = coord.xy * shadowMapTexture_TexelSize.zw;
real2 centerOfFetchesInTexelSpace = floor(tentCenterInTexelSpace + 0.5);
real2 offsetFromTentCenterToCenterOfFetches = tentCenterInTexelSpace - centerOfFetchesInTexelSpace;
// find the weight of each texel based on the area of a 45 degree slope tent above each of them.
real4 texelsWeightsU_A, texelsWeightsU_B;
real4 texelsWeightsV_A, texelsWeightsV_B;
SampleShadow_GetTexelWeights_Tent_7x7(offsetFromTentCenterToCenterOfFetches.x, texelsWeightsU_A, texelsWeightsU_B);
SampleShadow_GetTexelWeights_Tent_7x7(offsetFromTentCenterToCenterOfFetches.y, texelsWeightsV_A, texelsWeightsV_B);
// each fetch will cover a group of 2x2 texels; the weight of each group is the sum of the weights of its texels
real4 fetchesWeightsU = real4(texelsWeightsU_A.xz, texelsWeightsU_B.xz) + real4(texelsWeightsU_A.yw, texelsWeightsU_B.yw);
real4 fetchesWeightsV = real4(texelsWeightsV_A.xz, texelsWeightsV_B.xz) + real4(texelsWeightsV_A.yw, texelsWeightsV_B.yw);
// move the PCF bilinear fetches to respect texel weights
real4 fetchesOffsetsU = real4(texelsWeightsU_A.yw, texelsWeightsU_B.yw) / fetchesWeightsU.xyzw + real4(-3.5,-1.5,0.5,2.5);
real4 fetchesOffsetsV = real4(texelsWeightsV_A.yw, texelsWeightsV_B.yw) / fetchesWeightsV.xyzw + real4(-3.5,-1.5,0.5,2.5);
fetchesOffsetsU *= shadowMapTexture_TexelSize.xxxx;
fetchesOffsetsV *= shadowMapTexture_TexelSize.yyyy;
real2 bilinearFetchOrigin = centerOfFetchesInTexelSpace * shadowMapTexture_TexelSize.xy;
fetchesUV[0] = bilinearFetchOrigin + real2(fetchesOffsetsU.x, fetchesOffsetsV.x);
fetchesUV[1] = bilinearFetchOrigin + real2(fetchesOffsetsU.y, fetchesOffsetsV.x);
fetchesUV[2] = bilinearFetchOrigin + real2(fetchesOffsetsU.z, fetchesOffsetsV.x);
fetchesUV[3] = bilinearFetchOrigin + real2(fetchesOffsetsU.w, fetchesOffsetsV.x);
fetchesUV[4] = bilinearFetchOrigin + real2(fetchesOffsetsU.x, fetchesOffsetsV.y);
fetchesUV[5] = bilinearFetchOrigin + real2(fetchesOffsetsU.y, fetchesOffsetsV.y);
fetchesUV[6] = bilinearFetchOrigin + real2(fetchesOffsetsU.z, fetchesOffsetsV.y);
fetchesUV[7] = bilinearFetchOrigin + real2(fetchesOffsetsU.w, fetchesOffsetsV.y);
fetchesUV[8] = bilinearFetchOrigin + real2(fetchesOffsetsU.x, fetchesOffsetsV.z);
fetchesUV[9] = bilinearFetchOrigin + real2(fetchesOffsetsU.y, fetchesOffsetsV.z);
fetchesUV[10] = bilinearFetchOrigin + real2(fetchesOffsetsU.z, fetchesOffsetsV.z);
fetchesUV[11] = bilinearFetchOrigin + real2(fetchesOffsetsU.w, fetchesOffsetsV.z);
fetchesUV[12] = bilinearFetchOrigin + real2(fetchesOffsetsU.x, fetchesOffsetsV.w);
fetchesUV[13] = bilinearFetchOrigin + real2(fetchesOffsetsU.y, fetchesOffsetsV.w);
fetchesUV[14] = bilinearFetchOrigin + real2(fetchesOffsetsU.z, fetchesOffsetsV.w);
fetchesUV[15] = bilinearFetchOrigin + real2(fetchesOffsetsU.w, fetchesOffsetsV.w);
fetchesWeights[0] = fetchesWeightsU.x * fetchesWeightsV.x;
fetchesWeights[1] = fetchesWeightsU.y * fetchesWeightsV.x;
fetchesWeights[2] = fetchesWeightsU.z * fetchesWeightsV.x;
fetchesWeights[3] = fetchesWeightsU.w * fetchesWeightsV.x;
fetchesWeights[4] = fetchesWeightsU.x * fetchesWeightsV.y;
fetchesWeights[5] = fetchesWeightsU.y * fetchesWeightsV.y;
fetchesWeights[6] = fetchesWeightsU.z * fetchesWeightsV.y;
fetchesWeights[7] = fetchesWeightsU.w * fetchesWeightsV.y;
fetchesWeights[8] = fetchesWeightsU.x * fetchesWeightsV.z;
fetchesWeights[9] = fetchesWeightsU.y * fetchesWeightsV.z;
fetchesWeights[10] = fetchesWeightsU.z * fetchesWeightsV.z;
fetchesWeights[11] = fetchesWeightsU.w * fetchesWeightsV.z;
fetchesWeights[12] = fetchesWeightsU.x * fetchesWeightsV.w;
fetchesWeights[13] = fetchesWeightsU.y * fetchesWeightsV.w;
fetchesWeights[14] = fetchesWeightsU.z * fetchesWeightsV.w;
fetchesWeights[15] = fetchesWeightsU.w * fetchesWeightsV.w;
}

9
ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/Shadow/ShadowSamplingTent.hlsl.meta


fileFormatVersion: 2
guid: 6ff912bb23fcd5b468dd2e5c37addac3
timeCreated: 1491321444
licenseType: Pro
ShaderImporter:
defaultTextures: []
userData:
assetBundleName:
assetBundleVariant:

/ScriptableRenderPipeline/LightweightPipeline/LWRP/Shaders/LightweightUnlit.shader.meta → /ScriptableRenderPipeline/LightweightPipeline/LWRP/Shaders/LightweightStandardUnlit.shader.meta

/ScriptableRenderPipeline/LightweightPipeline/LWRP/Shaders/LightweightUnlit.shader → /ScriptableRenderPipeline/LightweightPipeline/LWRP/Shaders/LightweightStandardUnlit.shader
