
Merge branch 'master'

Evgenii Golubev, 7 years ago
Current commit
dd8b51a8
375 files changed, with 1673 insertions and 1414 deletions
1. .gitignore (3)
2. README.md (6)
3. ScriptableRenderPipeline/Core/CoreRP/CoreUtils.cs (12)
4. ScriptableRenderPipeline/Core/CoreRP/Debugging/DebugManager.cs (8)
5. ScriptableRenderPipeline/Core/CoreRP/Debugging/Prefabs/Scripts/DebugUIHandlerEnumField.cs (5)
6. ScriptableRenderPipeline/Core/CoreRP/Debugging/Prefabs/Scripts/UIFoldout.cs (15)
7. ScriptableRenderPipeline/Core/CoreRP/Editor/CoreEditorUtils.cs (259)
8. ScriptableRenderPipeline/Core/CoreRP/Editor/Debugging/DebugState.cs (6)
9. ScriptableRenderPipeline/Core/CoreRP/Editor/Debugging/DebugUIDrawer.Builtins.cs (36)
10. ScriptableRenderPipeline/Core/CoreRP/Editor/Debugging/DebugUIDrawer.cs (1)
11. ScriptableRenderPipeline/Core/CoreRP/Editor/Debugging/DebugWindow.cs (35)
12. ScriptableRenderPipeline/Core/CoreRP/Editor/TextureCombiner/TextureCombiner.cs (9)
13. ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/API/GLCore.hlsl (7)
14. ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/API/GLES3.hlsl (7)
15. ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/API/Metal.hlsl (7)
16. ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/API/Vulkan.hlsl (7)
17. ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/BSDF.hlsl (2)
18. ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/Common.hlsl (6)
19. ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/EntityLighting.hlsl (68)
20. ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/Filtering.hlsl (27)
21. ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/Macros.hlsl (76)
22. ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/Packing.hlsl (5)
23. ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/Shadow/ShadowAlgorithms.hlsl (55)
24. ScriptableRenderPipeline/Core/CoreRP/Shadow/Shadow.cs (37)
25. ScriptableRenderPipeline/Core/CoreRP/Shadow/ShadowBase.cs (8)
26. ScriptableRenderPipeline/Core/CoreRP/TextureCache.cs (23)
27. ScriptableRenderPipeline/Core/package.json (2)
28. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Camera/HDAdditionalCameraData.cs (8)
29. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/DebugColorPicker.shader (1)
30. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/DebugDisplay.cs (2)
31. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/DebugDisplay.hlsl (7)
32. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/DebugDisplayLatlong.shader (1)
33. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/DebugFullScreen.shader (1)
34. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/DebugViewMaterialGBuffer.shader (1)
35. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/DebugViewTiles.shader (2)
36. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/MaterialDebug.cs (9)
37. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Decal/DecalProjectorComponent.cs (32)
38. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Decal/DecalSystem.cs (146)
39. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Camera/HDCameraEditor.cs (15)
40. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Camera/HDCameraUI.cs (3)
41. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/EditorRenderPipelineResources/ReflectionProbesPreview.shader (5)
42. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/HDAssetFactory.cs (2)
43. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/HDRenderPipelineMenuItems.cs (2)
44. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Lighting/HDLightEditor.Styles.cs (6)
45. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Lighting/HDLightEditor.cs (29)
46. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Lighting/Reflection/HDReflectionProbeEditor.ProbeUtility.cs (11)
47. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Lighting/Reflection/HDReflectionProbeEditor.cs (17)
48. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Lighting/Reflection/HDReflectionProbeUI.Drawers.cs (17)
49. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Lighting/Reflection/HDReflectionProbeUI.cs (4)
50. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Lighting/Reflection/PlanarReflectionProbeUI.Drawers.cs (8)
51. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Lighting/Reflection/SerializedHDReflectionProbe.cs (6)
52. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Lighting/Reflection/SerializedPlanarReflectionProbe.cs (6)
53. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Material/DiffusionProfile/DrawDiffusionProfile.shader (1)
54. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Material/DiffusionProfile/DrawTransmittanceGraph.shader (1)
55. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Material/Lit/LitUI.cs (4)
56. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Material/Lit/StandardsToHDLitMaterialUpgrader.cs (44)
57. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Material/Unlit/BaseUnlitUI.cs (12)
58. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Material/Unlit/UnlitsToHDUnlitUpgrader.cs (4)
59. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/RenderLoopSettings/FrameSettingsUI.cs (28)
60. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/RenderLoopSettings/LightLoopSettingsUI.cs (9)
61. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/RenderLoopSettings/RenderPipelineSettingsUI.cs (2)
62. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/RenderLoopSettings/SerializedFrameSettings.cs (2)
63. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Sky/HDRISky/HDRISkyEditor.cs (6)
64. ScriptableRenderPipeline/HDRenderPipeline/HDRP/HDRenderPipeline.cs (101)
65. ScriptableRenderPipeline/HDRenderPipeline/HDRP/HDRenderPipelineAsset.asset (2)
66. ScriptableRenderPipeline/HDRenderPipeline/HDRP/HDStringConstants.cs (4)
67. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/Deferred.shader (1)
68. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/Light/HDAdditionalLightData.cs (55)
69. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/LightDefinition.cs (263)
70. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/LightDefinition.cs.hlsl (301)
71. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/LightLoop/ClusteredUtils.hlsl (16)
72. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/LightLoop/LightLoop.cs (157)
73. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/LightLoop/LightLoopDef.hlsl (14)
74. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/LightLoop/LightLoopSettings.cs (40)
75. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/LightLoop/Shadow.hlsl (25)
76. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/LightLoop/ShadowContext.hlsl (22)
77. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/LightLoop/lightlistbuild-bigtile.compute (71)
78. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/LightLoop/lightlistbuild-clustered.compute (155)
79. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/LightLoop/scrbound.compute (12)
80. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/LightUtilities.hlsl (24)
81. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/LightUtils.cs (33)
82. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/Reflection/HDAdditionalReflectionData.cs (9)
83. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/Reflection/PlanarReflectionProbe.cs (13)
84. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/Reflection/PlanarReflectionProbeCache.cs (23)
85. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/Reflection/ProbeWrapper.cs (9)
86. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/Reflection/ReflectionProbeCache.cs (30)
87. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/Reflection/VolumeProjection.hlsl (48)
88. ScriptableRenderPipeline/HDRenderPipeline/HDRP/MRTBufferManager.cs (2)
89. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Decal/Decal.cs.hlsl (6)
90. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Decal/Decal.shader (12)
91. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Decal/DecalUtilities.hlsl (20)
92. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/GBufferManager.cs (21)
93. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/GGXConvolution/GGXConvolve.shader (1)
94. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/GGXConvolution/RuntimeFilterIBL.cs (7)
95. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/LayeredLit/LayeredLit.shader (49)
96. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/LayeredLit/LayeredLitTessellation.shader (52)
97. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Lit/Lit.cs (5)
98. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Lit/Lit.hlsl (191)
99. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Lit/Lit.shader (52)
100. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Lit/LitTessellation.shader (55)

.gitignore (3)


Library/*
obj/*
Temp/*
.DS_Store
*.aspx
*.browser
*.csproj

*.resS
*.sdf
*.sln
*.sublime-project
*.sublime-workspace
*.suo
*.userprefs

README.md (6)


__Note: The Master branch is our current development branch and may not work on the latest publicly available version of Unity. You should always use the latest release tag and latest Unity beta version for testing purposes.__
To use the latest version of the SRP, follow the instructions below:
This repository consists of a folder that needs to be placed in the Assets\ folder of your Unity project. We recommend creating a new project to test SRP. Do not clone this repo into an existing project unless you want to break it, or unless you are updating to a newer version of the SRP repo. Make sure your project's `packages.json` file (in either `UnityPackageManager/` or `Packages/`) does not have a dependency on any of the packages in this repository or its submodules.
You can use the GitHub Desktop app to clone the latest version of the SRP repo, or you can use Git console commands.

### To download the repo using the GitHub Desktop app:

```
> git checkout Unity-2018.1.0b2 (or the latest tag)
> git submodule update --init --recursive --remote (This command fetches the Postprocessing module, which is needed to use SRP)
> git submodule update --init (This command fetches the Postprocessing module, which is needed to use SRP)
```
### To download the repo using console commands:

```
> git clone https://github.com/Unity-Technologies/ScriptableRenderPipeline
> cd ScriptableRenderPipeline
> git checkout Unity-2018.1.0b2 (or the latest tag)
> git submodule update --init --recursive --remote (This command fetches the Postprocessing module, which is needed to use SRP)
> git submodule update --init (This command fetches the Postprocessing module, which is needed to use SRP)
```
## Scriptable Render Pipeline Assets

ScriptableRenderPipeline/Core/CoreRP/CoreUtils.cs (12)


return temp;
}
public static string GetTextureAutoName(int width, int height, TextureFormat format, TextureDimension dim = TextureDimension.None, string name = "", bool mips = false, int depth = 0)
{
string temp;
if(depth == 0)
temp = string.Format("{0}x{1}_{2}{3}", width, height, format, mips ? "_Mips" : "");
else
temp = string.Format("{0}x{1}x{2}_{3}{4}", width, height, depth, format, mips ? "_Mips" : "");
temp = String.Format("{0}_{1}_{2}", name == "" ? "Texture" : name, (dim == TextureDimension.None) ? "" : dim.ToString(), temp);
return temp;
}
public static void ClearCubemap(CommandBuffer cmd, RenderTexture renderTexture, Color clearColor, bool clearMips = false)
{
int mipCount = 1;
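As a reference for the new GetTextureAutoName helper above, here is a minimal usage sketch. The sizes, format, and cache name are illustrative, and the UnityEngine.Experimental.Rendering namespace for CoreUtils is an assumption about where the Core package places it:

```
using UnityEngine;
using UnityEngine.Rendering;                 // TextureDimension
using UnityEngine.Experimental.Rendering;    // CoreUtils (namespace assumed)

public static class TextureNamingExample
{
    public static void Run()
    {
        // 2D texture, no depth, no mips -> "Cache_Tex2D_512x512_RGBA32"
        string a = CoreUtils.GetTextureAutoName(512, 512, TextureFormat.RGBA32,
                                                TextureDimension.Tex2D, "Cache");

        // 2D array with 8 slices and mips -> "Cache_Tex2DArray_512x512x8_RGBA32_Mips"
        string b = CoreUtils.GetTextureAutoName(512, 512, TextureFormat.RGBA32,
                                                TextureDimension.Tex2DArray, "Cache",
                                                mips: true, depth: 8);

        Debug.Log(a + "\n" + b);
    }
}
```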

ScriptableRenderPipeline/Core/CoreRP/Debugging/DebugManager.cs (8)


{
get
{
var uiManager = UnityObject.FindObjectOfType<DebugUIHandlerCanvas>();
// Might be needed to update the reference after domain reload
if (uiManager != null)
{
m_Root = uiManager.gameObject;
}
return m_Root != null && m_Root.activeInHierarchy;
}
set

ScriptableRenderPipeline/Core/CoreRP/Debugging/Prefabs/Scripts/DebugUIHandlerEnumField.cs (5)


void UpdateValueLabel()
{
int index = Array.IndexOf(m_Field.enumValues, m_Field.GetValue());
// Fallback just in case, we may be handling sub/sectionned enums here
if (index < 0)
index = 0;
valueLabel.text = "< " + m_Field.enumNames[index].text + " >";
}
}
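The same fallback pattern (map the current value to a popup index and default to the first entry when the value is not in the list) also appears in the editor drawer change further down. A small standalone sketch; the helper name and sample values here are illustrative:

```
using System;
using UnityEngine;

public static class EnumPopupFallbackExample
{
    // Returns the popup index for 'currentValue', or 0 when the value is not
    // part of 'enumValues' (e.g. when only a subset of the enum is exposed).
    public static int GetSafeIndex(int[] enumValues, int currentValue)
    {
        int index = Array.IndexOf(enumValues, currentValue);
        return index < 0 ? 0 : index;
    }

    public static void Run()
    {
        int[] exposedValues = { 0, 2, 4 };          // illustrative subset of an enum
        Debug.Log(GetSafeIndex(exposedValues, 4));  // 2
        Debug.Log(GetSafeIndex(exposedValues, 3));  // 0 (fallback)
    }
}
```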

ScriptableRenderPipeline/Core/CoreRP/Debugging/Prefabs/Scripts/UIFoldout.cs (15)


public GameObject arrowOpened;
public GameObject arrowClosed;
protected override void Awake()
protected override void Start()
base.Awake();
base.Start();
onValueChanged.AddListener(SetState);
SetState(isOn);
}

if (arrowOpened == null || arrowClosed == null || content == null)
return;
arrowOpened.SetActive(state);
arrowClosed.SetActive(!state);
content.SetActive(state);
if (arrowOpened.activeSelf != state)
arrowOpened.SetActive(state);
if (arrowClosed.activeSelf == state)
arrowClosed.SetActive(!state);
if (content.activeSelf != state)
content.SetActive(state);
if (rebuildLayout)
LayoutRebuilder.ForceRebuildLayoutImmediate(transform.parent as RectTransform);

ScriptableRenderPipeline/Core/CoreRP/Editor/CoreEditorUtils.cs (259)


return value;
}
public static void DrawPopup(GUIContent label, SerializedProperty property, string[] options)
{
var mode = property.intValue;
EditorGUI.BeginChangeCheck();
if (mode >= options.Length)
Debug.LogError(string.Format("Invalid option while trying to set {0}", label.text));
mode = EditorGUILayout.Popup(label, mode, options);
if (EditorGUI.EndChangeCheck())
{
Undo.RecordObject(property.objectReferenceValue, property.name);
property.intValue = mode;
}
}
public static void DrawCascadeSplitGUI<T>(ref SerializedProperty shadowCascadeSplit)
{
float[] cascadePartitionSizes = null;
System.Type type = typeof(T);
if (type == typeof(float))
{
cascadePartitionSizes = new float[] { shadowCascadeSplit.floatValue };
}
else if (type == typeof(Vector3))
{
Vector3 splits = shadowCascadeSplit.vector3Value;
cascadePartitionSizes = new float[]
{
Mathf.Clamp(splits[0], 0.0f, 1.0f),
Mathf.Clamp(splits[1] - splits[0], 0.0f, 1.0f),
Mathf.Clamp(splits[2] - splits[1], 0.0f, 1.0f)
};
}
if (cascadePartitionSizes != null)
{
EditorGUI.BeginChangeCheck();
ShadowCascadeSplitGUI.HandleCascadeSliderGUI(ref cascadePartitionSizes);
if (EditorGUI.EndChangeCheck())
{
if (type == typeof(float))
shadowCascadeSplit.floatValue = cascadePartitionSizes[0];
else
{
Vector3 updatedValue = new Vector3();
updatedValue[0] = cascadePartitionSizes[0];
updatedValue[1] = updatedValue[0] + cascadePartitionSizes[1];
updatedValue[2] = updatedValue[1] + cascadePartitionSizes[2];
shadowCascadeSplit.vector3Value = updatedValue;
}
}
}
}
public static void RemoveMaterialKeywords(Material material)
{
material.shaderKeywords = null;

}
return data;
}
}
static class ShadowCascadeSplitGUI
{
private const int kSliderbarTopMargin = 2;
private const int kSliderbarHeight = 24;
private const int kSliderbarBottomMargin = 2;
private const int kPartitionHandleWidth = 2;
private const int kPartitionHandleExtraHitAreaWidth = 2;
private static readonly Color[] kCascadeColors =
{
new Color(0.5f, 0.5f, 0.6f, 1.0f),
new Color(0.5f, 0.6f, 0.5f, 1.0f),
new Color(0.6f, 0.6f, 0.5f, 1.0f),
new Color(0.6f, 0.5f, 0.5f, 1.0f),
};
// using a LODGroup skin
private static readonly GUIStyle s_CascadeSliderBG = "LODSliderRange";
private static readonly GUIStyle s_TextCenteredStyle = new GUIStyle(EditorStyles.whiteMiniLabel)
{
alignment = TextAnchor.MiddleCenter
};
// Internal struct to bundle drag information
private class DragCache
{
public int m_ActivePartition; // the cascade partition that we are currently dragging/resizing
public float m_NormalizedPartitionSize; // the normalized size of the partition (0.0f < size < 1.0f)
public Vector2 m_LastCachedMousePosition; // mouse position the last time we registered a drag or mouse down.
public DragCache(int activePartition, float normalizedPartitionSize, Vector2 currentMousePos)
{
m_ActivePartition = activePartition;
m_NormalizedPartitionSize = normalizedPartitionSize;
m_LastCachedMousePosition = currentMousePos;
}
};
private static DragCache s_DragCache;
private static readonly int s_CascadeSliderId = "s_CascadeSliderId".GetHashCode();
private static SceneView s_RestoreSceneView;
private static SceneView.CameraMode s_OldSceneDrawMode;
private static bool s_OldSceneLightingMode;
/**
* Static function to handle the GUI and User input related to the cascade slider.
*
* @param normalizedCascadePartition The array of partition sizes in the range 0.0f - 1.0f; expects ONE entry if cascades = 2, and THREE if cascades=4
* The last entry will be automatically determined by summing up the array, and doing 1.0f - sum
*/
public static void HandleCascadeSliderGUI(ref float[] normalizedCascadePartitions)
{
EditorGUILayout.LabelField("Cascade splits");
// get the inspector width since we need it while drawing the partition rects.
// Only way currently is to reserve the block in the layout using GetRect(), and then immediately drawing the empty box
// to match the call to GetRect.
// From this point on, we move to non-layout based code.
var sliderRect = GUILayoutUtility.GetRect(GUIContent.none
, s_CascadeSliderBG
, GUILayout.Height(kSliderbarTopMargin + kSliderbarHeight + kSliderbarBottomMargin)
, GUILayout.ExpandWidth(true));
GUI.Box(sliderRect, GUIContent.none);
float currentX = sliderRect.x;
float cascadeBoxStartY = sliderRect.y + kSliderbarTopMargin;
float cascadeSliderWidth = sliderRect.width - (normalizedCascadePartitions.Length * kPartitionHandleWidth);
Color origTextColor = GUI.color;
Color origBackgroundColor = GUI.backgroundColor;
int colorIndex = -1;
// setup the array locally with the last partition
float[] adjustedCascadePartitions = new float[normalizedCascadePartitions.Length + 1];
System.Array.Copy(normalizedCascadePartitions, adjustedCascadePartitions, normalizedCascadePartitions.Length);
adjustedCascadePartitions[adjustedCascadePartitions.Length - 1] = 1.0f - normalizedCascadePartitions.Sum();
// check for user input on any of the partition handles
// this mechanism gets the current event in the queue... make sure that the mouse is over our control before consuming the event
int sliderControlId = GUIUtility.GetControlID(s_CascadeSliderId, FocusType.Passive);
Event currentEvent = Event.current;
int hotPartitionHandleIndex = -1; // the index of any partition handle that we are hovering over or dragging
// draw each cascade partition
for (int i = 0; i < adjustedCascadePartitions.Length; ++i)
{
float currentPartition = adjustedCascadePartitions[i];
colorIndex = (colorIndex + 1) % kCascadeColors.Length;
GUI.backgroundColor = kCascadeColors[colorIndex];
float boxLength = (cascadeSliderWidth * currentPartition);
// main cascade box
Rect partitionRect = new Rect(currentX, cascadeBoxStartY, boxLength, kSliderbarHeight);
GUI.Box(partitionRect, GUIContent.none, s_CascadeSliderBG);
currentX += boxLength;
// cascade box percentage text
GUI.color = Color.white;
Rect textRect = partitionRect;
var cascadeText = string.Format("{0}\n{1:F1}%", i, currentPartition * 100.0f);
GUI.Label(textRect, cascadeText, s_TextCenteredStyle);
// no need to draw the partition handle for last box
if (i == adjustedCascadePartitions.Length - 1)
break;
// partition handle
GUI.backgroundColor = Color.black;
Rect handleRect = partitionRect;
handleRect.x = currentX;
handleRect.width = kPartitionHandleWidth;
GUI.Box(handleRect, GUIContent.none, s_CascadeSliderBG);
// we want a thin handle visually (since wide black bar looks bad), but a slightly larger
// hit area for easier manipulation
Rect handleHitRect = handleRect;
handleHitRect.xMin -= kPartitionHandleExtraHitAreaWidth;
handleHitRect.xMax += kPartitionHandleExtraHitAreaWidth;
if (handleHitRect.Contains(currentEvent.mousePosition))
hotPartitionHandleIndex = i;
// add regions to slider where the cursor changes to Resize-Horizontal
if (s_DragCache == null)
{
EditorGUIUtility.AddCursorRect(handleHitRect, MouseCursor.ResizeHorizontal, sliderControlId);
}
currentX += kPartitionHandleWidth;
}
GUI.color = origTextColor;
GUI.backgroundColor = origBackgroundColor;
EventType eventType = currentEvent.GetTypeForControl(sliderControlId);
switch (eventType)
{
case EventType.MouseDown:
if (hotPartitionHandleIndex >= 0)
{
s_DragCache = new DragCache(hotPartitionHandleIndex, normalizedCascadePartitions[hotPartitionHandleIndex], currentEvent.mousePosition);
if (GUIUtility.hotControl == 0)
GUIUtility.hotControl = sliderControlId;
currentEvent.Use();
// Switch active scene view into shadow cascades visualization mode, once we start
// tweaking cascade splits.
if (s_RestoreSceneView == null)
{
s_RestoreSceneView = SceneView.lastActiveSceneView;
if (s_RestoreSceneView != null)
{
s_OldSceneDrawMode = s_RestoreSceneView.cameraMode;
s_OldSceneLightingMode = s_RestoreSceneView.m_SceneLighting;
s_RestoreSceneView.cameraMode = SceneView.GetBuiltinCameraMode(DrawCameraMode.ShadowCascades);
}
}
}
break;
case EventType.MouseUp:
// mouseUp event anywhere should release the hotcontrol (if it belongs to us), drags (if any)
if (GUIUtility.hotControl == sliderControlId)
{
GUIUtility.hotControl = 0;
currentEvent.Use();
}
s_DragCache = null;
// Restore previous scene view drawing mode once we stop tweaking cascade splits.
if (s_RestoreSceneView != null)
{
s_RestoreSceneView.cameraMode = s_OldSceneDrawMode;
s_RestoreSceneView.m_SceneLighting = s_OldSceneLightingMode;
s_RestoreSceneView = null;
}
break;
case EventType.MouseDrag:
if (GUIUtility.hotControl != sliderControlId)
break;
// convert the mouse movement to normalized cascade width. Make sure that we are safe to apply the delta before using it.
float delta = (currentEvent.mousePosition - s_DragCache.m_LastCachedMousePosition).x / cascadeSliderWidth;
bool isLeftPartitionHappy = ((adjustedCascadePartitions[s_DragCache.m_ActivePartition] + delta) > 0.0f);
bool isRightPartitionHappy = ((adjustedCascadePartitions[s_DragCache.m_ActivePartition + 1] - delta) > 0.0f);
if (isLeftPartitionHappy && isRightPartitionHappy)
{
s_DragCache.m_NormalizedPartitionSize += delta;
normalizedCascadePartitions[s_DragCache.m_ActivePartition] = s_DragCache.m_NormalizedPartitionSize;
if (s_DragCache.m_ActivePartition < normalizedCascadePartitions.Length - 1)
normalizedCascadePartitions[s_DragCache.m_ActivePartition + 1] -= delta;
GUI.changed = true;
}
s_DragCache.m_LastCachedMousePosition = currentEvent.mousePosition;
currentEvent.Use();
break;
}
}
}
}
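As a usage sketch, a custom inspector could drive the cascade-split slider through the public entry point shown above. The CoreEditorUtils namespace, the serialized property name, and the Vector3 split layout are assumptions for illustration, not taken from the commit:

```
using UnityEditor;
using UnityEngine;
using UnityEditor.Experimental.Rendering; // CoreEditorUtils (namespace assumed)

// Sketch of an inspector that draws the cascade-split slider shown above.
// The [CustomEditor] binding to a concrete component is deliberately omitted.
public class CascadeSplitInspectorExample : Editor
{
    SerializedProperty m_CascadeSplits; // assumed: Vector3 property with cumulative split positions

    void OnEnable()
    {
        m_CascadeSplits = serializedObject.FindProperty("cascadeSplits"); // hypothetical property name
    }

    public override void OnInspectorGUI()
    {
        serializedObject.Update();
        // Draws three draggable partitions; the last partition is derived as 1 - sum of the others.
        CoreEditorUtils.DrawCascadeSplitGUI<Vector3>(ref m_CascadeSplits);
        serializedObject.ApplyModifiedProperties();
    }
}
```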

ScriptableRenderPipeline/Core/CoreRP/Editor/Debugging/DebugState.cs (6)


[SerializeField]
protected string m_QueryPath;
// We need this to keep track of the state modified in the current frame.
// This helps reduces the cost of re-applying states to original widgets and is also needed
// when two states point to the same value (e.g. when using split enums like HDRP does for
// the `fullscreenDebugMode`.
internal static DebugState m_CurrentDirtyState;
public string queryPath
{
get { return m_QueryPath; }

ScriptableRenderPipeline/Core/CoreRP/Editor/Debugging/DebugUIDrawer.Builtins.cs (36)


using System;
using UnityEngine;
using UnityEngine.Experimental.Rendering;

EditorGUI.BeginChangeCheck();
var rect = PrepareControlRect();
bool value = EditorGUI.Toggle(rect, CoreEditorUtils.GetContent(w.displayName), s.value);
bool value = EditorGUI.Toggle(rect, CoreEditorUtils.GetContent(w.displayName), w.GetValue());
if (EditorGUI.EndChangeCheck())
Apply(w, s, value);

var rect = PrepareControlRect();
int value = w.min != null && w.max != null
? EditorGUI.IntSlider(rect, CoreEditorUtils.GetContent(w.displayName), s.value, w.min(), w.max())
: EditorGUI.IntField(rect, CoreEditorUtils.GetContent(w.displayName), s.value);
? EditorGUI.IntSlider(rect, CoreEditorUtils.GetContent(w.displayName), w.GetValue(), w.min(), w.max())
: EditorGUI.IntField(rect, CoreEditorUtils.GetContent(w.displayName), w.GetValue());
if (EditorGUI.EndChangeCheck())
Apply(w, s, value);

// No UIntField so we need to max to 0 ourselves or the value will wrap around
var rect = PrepareControlRect();
int tmp = w.min != null && w.max != null
? EditorGUI.IntSlider(rect, CoreEditorUtils.GetContent(w.displayName), Mathf.Max(0, (int)s.value), Mathf.Max(0, (int)w.min()), Mathf.Max(0, (int)w.max()))
: EditorGUI.IntField(rect, CoreEditorUtils.GetContent(w.displayName), Mathf.Max(0, (int)s.value));
? EditorGUI.IntSlider(rect, CoreEditorUtils.GetContent(w.displayName), Mathf.Max(0, (int)w.GetValue()), Mathf.Max(0, (int)w.min()), Mathf.Max(0, (int)w.max()))
: EditorGUI.IntField(rect, CoreEditorUtils.GetContent(w.displayName), Mathf.Max(0, (int)w.GetValue()));
uint value = (uint)Mathf.Max(0, tmp);

var rect = PrepareControlRect();
float value = w.min != null && w.max != null
? EditorGUI.Slider(rect, CoreEditorUtils.GetContent(w.displayName), s.value, w.min(), w.max())
: EditorGUI.FloatField(rect, CoreEditorUtils.GetContent(w.displayName), s.value);
? EditorGUI.Slider(rect, CoreEditorUtils.GetContent(w.displayName), w.GetValue(), w.min(), w.max())
: EditorGUI.FloatField(rect, CoreEditorUtils.GetContent(w.displayName), w.GetValue());
if (EditorGUI.EndChangeCheck())
Apply(w, s, value);

EditorGUI.BeginChangeCheck();
int value = s.value;
int value = w.GetValue();
if (w.enumNames == null || w.enumValues == null)
{
EditorGUILayout.LabelField("Can't draw an empty enumeration.");

var rect = PrepareControlRect();
value = EditorGUI.IntPopup(rect, CoreEditorUtils.GetContent(w.displayName), s.value, w.enumNames, w.enumValues);
int index = Array.IndexOf(w.enumValues, w.GetValue());
// Fallback just in case, we may be handling sub/sectionned enums here
if (index < 0)
value = w.enumValues[0];
value = EditorGUI.IntPopup(rect, CoreEditorUtils.GetContent(w.displayName), value, w.enumNames, w.enumValues);
}
if (EditorGUI.EndChangeCheck())

EditorGUI.BeginChangeCheck();
bool value = EditorGUILayout.Foldout(s.value, CoreEditorUtils.GetContent(w.displayName), true);
bool value = EditorGUILayout.Foldout(w.GetValue(), CoreEditorUtils.GetContent(w.displayName), true);
if (EditorGUI.EndChangeCheck())
Apply(w, s, value);

EditorGUI.BeginChangeCheck();
var rect = PrepareControlRect();
var value = EditorGUI.ColorField(rect, CoreEditorUtils.GetContent(w.displayName), s.value, w.showPicker, w.showAlpha, w.hdr);
var value = EditorGUI.ColorField(rect, CoreEditorUtils.GetContent(w.displayName), w.GetValue(), w.showPicker, w.showAlpha, w.hdr);
if (EditorGUI.EndChangeCheck())
Apply(w, s, value);

EditorGUI.BeginChangeCheck();
var value = EditorGUILayout.Vector2Field(w.displayName, s.value);
var value = EditorGUILayout.Vector2Field(w.displayName, w.GetValue());
if (EditorGUI.EndChangeCheck())
Apply(w, s, value);

EditorGUI.BeginChangeCheck();
var value = EditorGUILayout.Vector3Field(w.displayName, s.value);
var value = EditorGUILayout.Vector3Field(w.displayName, w.GetValue());
if (EditorGUI.EndChangeCheck())
Apply(w, s, value);

EditorGUI.BeginChangeCheck();
var value = EditorGUILayout.Vector4Field(w.displayName, s.value);
var value = EditorGUILayout.Vector4Field(w.displayName, w.GetValue());
if (EditorGUI.EndChangeCheck())
Apply(w, s, value);

ScriptableRenderPipeline/Core/CoreRP/Editor/Debugging/DebugUIDrawer.cs (1)


state.SetValue(value, widget);
widget.SetValue(value);
EditorUtility.SetDirty(state);
DebugState.m_CurrentDirtyState = state;
UnityEditorInternal.InternalEditorUtility.RepaintAllViews();
}

ScriptableRenderPipeline/Core/CoreRP/Editor/Debugging/DebugWindow.cs (35)


void OnDestroy()
{
DebugManager.instance.onSetDirty -= MarkDirty;
Undo.ClearUndo(m_Settings);
if (m_WidgetStates != null)
{

}
}
public void ApplyStates()
public void ApplyStates(bool forceApplyAll = false)
if (!forceApplyAll && DebugState.m_CurrentDirtyState != null)
{
ApplyState(DebugState.m_CurrentDirtyState.queryPath, DebugState.m_CurrentDirtyState);
DebugState.m_CurrentDirtyState = null;
return;
}
{
var widget = DebugManager.instance.GetItem(state.Key) as DebugUI.IValueField;
ApplyState(state.Key, state.Value);
DebugState.m_CurrentDirtyState = null;
}
void ApplyState(string queryPath, DebugState state)
{
var widget = DebugManager.instance.GetItem(queryPath) as DebugUI.IValueField;
if (widget == null)
continue;
if (widget == null)
return;
widget.SetValue(state.Value.GetValue());
}
widget.SetValue(state.GetValue());
}
void OnUndoRedoPerformed()

// Something has been undone / redone, re-apply states to the debug tree
if (stateHash != m_Settings.currentStateHash)
{
ApplyStates();
ApplyStates(true);
m_Settings.currentStateHash = stateHash;
}

if (m_Settings.selectedPanel == i && Event.current.type == EventType.Repaint)
s_Styles.selected.Draw(elementRect, false, false, false, false);
if (GUI.Toggle(elementRect, m_Settings.selectedPanel == i, panel.displayName, s_Styles.sectionElement))
EditorGUI.BeginChangeCheck();
GUI.Toggle(elementRect, m_Settings.selectedPanel == i, panel.displayName, s_Styles.sectionElement);
if (EditorGUI.EndChangeCheck())
Undo.RecordObject(m_Settings, "Debug Panel Selection");
Undo.RegisterCompleteObjectUndo(m_Settings, "Debug Panel Selection");
m_Settings.selectedPanel = i;
}
}

ScriptableRenderPipeline/Core/CoreRP/Editor/TextureCombiner/TextureCombiner.cs (9)


return combined;
}
private Texture GetRawTexture (Texture original, bool sRGB = false)
private Texture GetRawTexture (Texture original, bool sRGBFallback = false)
{
if (m_RawTextures == null) m_RawTextures = new Dictionary<Texture, Texture>();
if (!m_RawTextures.ContainsKey(original))

AssetDatabase.ImportAsset(rawPath);
TextureImporter rawImporter = (TextureImporter) TextureImporter.GetAtPath(rawPath);
TextureImporter rawImporter = (TextureImporter) AssetImporter.GetAtPath(rawPath);
rawImporter.textureType = TextureImporterType.Default;
rawImporter.mipmapEnabled = false;
rawImporter.isReadable = true;

rawImporter.sRGBTexture = sRGB;
Texture2D originalTex2D = original as Texture2D;
rawImporter.sRGBTexture = (originalTex2D == null)? sRGBFallback : ( AssetImporter.GetAtPath(AssetDatabase.GetAssetPath(original)) as TextureImporter).sRGBTexture;
rawImporter.maxTextureSize = 8192;
rawImporter.textureCompression = TextureImporterCompression.Uncompressed;
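The importer change above copies the sRGB flag from the source asset's own importer instead of trusting a caller-supplied bool. A minimal sketch of that lookup; the helper is hypothetical and adds a null check that the commit itself does not have:

```
using UnityEditor;
using UnityEngine;

public static class SrgbLookupExample
{
    // Returns the sRGB import setting of 'original' if it is a Texture2D asset,
    // otherwise falls back to the supplied default (mirrors the GetRawTexture change).
    public static bool GetSourceSrgb(Texture original, bool sRGBFallback = false)
    {
        var tex2D = original as Texture2D;
        if (tex2D == null)
            return sRGBFallback;

        string path = AssetDatabase.GetAssetPath(original);
        var importer = AssetImporter.GetAtPath(path) as TextureImporter;
        return importer != null ? importer.sRGBTexture : sRGBFallback;
    }
}
```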

ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/API/GLCore.hlsl (7)


#define CBUFFER_START(name)
#define CBUFFER_END
// flow control attributes
#define UNITY_BRANCH [branch]
#define UNITY_FLATTEN [flatten]
#define UNITY_UNROLL [unroll]
#define UNITY_UNROLLX(_x) [unroll(_x)]
#define UNITY_LOOP [loop]
// OpenGL 4.1 SM 5.0 https://docs.unity3d.com/Manual/SL-ShaderCompileTargets.html
#if (SHADER_TARGET >= 46)
#define OPENGL4_1_SM5 1

ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/API/GLES3.hlsl (7)


#define CBUFFER_START(name)
#define CBUFFER_END
// flow control attributes
#define UNITY_BRANCH [branch]
#define UNITY_FLATTEN [flatten]
#define UNITY_UNROLL [unroll]
#define UNITY_UNROLLX(_x) [unroll(_x)]
#define UNITY_LOOP [loop]
// GLES 3.1 + AEP shader feature https://docs.unity3d.com/Manual/SL-ShaderCompileTargets.html
#if (SHADER_TARGET >= 40)
#define GLES3_1_AEP 1

ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/API/Metal.hlsl (7)


#define CBUFFER_START(name) cbuffer name {
#define CBUFFER_END };
// flow control attributes
#define UNITY_BRANCH [branch]
#define UNITY_FLATTEN [flatten]
#define UNITY_UNROLL [unroll]
#define UNITY_UNROLLX(_x) [unroll(_x)]
#define UNITY_LOOP [loop]
// Initialize arbitrary structure with zero values.
// Do not exist on some platform, in this case we need to have a standard name that call a function that will initialize all parameters to 0
#define ZERO_INITIALIZE(type, name) name = (type)0;

ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/API/Vulkan.hlsl (7)


#define CBUFFER_START(name) cbuffer name {
#define CBUFFER_END };
// flow control attributes
#define UNITY_BRANCH [branch]
#define UNITY_FLATTEN [flatten]
#define UNITY_UNROLL [unroll]
#define UNITY_UNROLLX(_x) [unroll(_x)]
#define UNITY_LOOP [loop]
// Initialize arbitrary structure with zero values.
// Do not exist on some platform, in this case we need to have a standard name that call a function that will initialize all parameters to 0
#define ZERO_INITIALIZE(type, name) name = (type)0;

ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/BSDF.hlsl (2)


}
// Evaluate the reflectance for a thin-film layer on top of a dielectric medum.
real3 EvalIridescence(real eta_1, real cosTheta1, real iridescenceThickness, real baseLayerFresnel0)
real3 EvalIridescence(real eta_1, real cosTheta1, real iridescenceThickness, real3 baseLayerFresnel0)
{
// iridescenceThickness unit is micrometer for this equation here. Mean 0.5 is 500nm.
real Dinc = 3.0 * iridescenceThickness;

ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/Common.hlsl (6)


#define TEMPLATE_2_REAL TEMPLATE_2_HALF
#define TEMPLATE_3_REAL TEMPLATE_3_HALF
#define HAS_HALF 1
#else
#define real float

#define TEMPLATE_2_REAL TEMPLATE_2_FLT
#define TEMPLATE_3_REAL TEMPLATE_3_FLT
#define HAS_HALF 0
#endif // SHADER_API_MOBILE
#endif // #ifndef real

#ifdef SHADER_STAGE_COMPUTE
# ifndef SHADER_TARGET
# if defined(SHADER_API_METAL) || defined(SHADER_API_VULKAN)
# if defined(SHADER_API_METAL)
# define SHADER_TARGET 45
# else
# define SHADER_TARGET 50

ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/EntityLighting.hlsl (68)


return x2 + x3;
}
real3 SampleSH9(real4 SHCoefficients[7], real3 N)
#if HAS_HALF
half3 SampleSH9(half4 SHCoefficients[7], half3 N)
real4 shAr = SHCoefficients[0];
real4 shAg = SHCoefficients[1];
real4 shAb = SHCoefficients[2];
real4 shBr = SHCoefficients[3];
real4 shBg = SHCoefficients[4];
real4 shBb = SHCoefficients[5];
real4 shCr = SHCoefficients[6];
half4 shAr = SHCoefficients[0];
half4 shAg = SHCoefficients[1];
half4 shAb = SHCoefficients[2];
half4 shBr = SHCoefficients[3];
half4 shBg = SHCoefficients[4];
half4 shBb = SHCoefficients[5];
half4 shCr = SHCoefficients[6];
real3 res = SHEvalLinearL0L1(N, shAr, shAg, shAb);
half3 res = SHEvalLinearL0L1(N, shAr, shAg, shAb);
// Quadratic polynomials
res += SHEvalLinearL2(N, shBr, shBg, shBb, shCr);

#endif
float3 SampleSH9(float4 SHCoefficients[7], float3 N)
{
float4 shAr = SHCoefficients[0];
float4 shAg = SHCoefficients[1];
float4 shAb = SHCoefficients[2];
float4 shBr = SHCoefficients[3];
float4 shBg = SHCoefficients[4];
float4 shBb = SHCoefficients[5];
float4 shCr = SHCoefficients[6];
// Linear + constant polynomial terms
float3 res = SHEvalLinearL0L1(N, shAr, shAg, shAb);
// Quadratic polynomials
res += SHEvalLinearL2(N, shBr, shBg, shBb, shCr);
return res;
}
// This sample a 3D volume storing SH
// Volume is store as 3D texture with 4 R, G, B, Occ set of 4 coefficient store atlas in same 3D texture. Occ is use for occlusion.

// It is required for other platform that aren't supporting this format to implement variant of these functions
// (But these kind of platform should use regular render loop and not news shaders).
// RGBM lightmaps are currently always gamma encoded, so we use a constant of range^2.2 = 5^2.2
#define LIGHTMAP_RGBM_RANGE 34.493242
// DLDR lightmaps are currently always gamma encoded, so we use a constant of 2.0^2.2 = 4.59
#define LIGHTMAP_DLDR_RANGE 4.59
// TODO: This is the max value allowed for emissive (bad name - but keep for now to retrieve it) (It is 8^2.2 (gamma) and 8 is the limit of punctual light slider...), comme from UnityCg.cginc. Fix it!
// Ask Jesper if this can be change for HDRenderPipeline
#define EMISSIVE_RGBM_SCALE 97.0

return rgbm;
}
real3 UnpackLightmapRGBM(real4 rgbmInput)
real3 UnpackLightmapRGBM(real4 rgbmInput, real4 decodeInstructions)
// RGBM lightmaps are always gamma encoded for now, so decode with that in mind:
return rgbmInput.rgb * pow(rgbmInput.a, 2.2) * LIGHTMAP_RGBM_RANGE;
return rgbmInput.rgb * pow(rgbmInput.a, decodeInstructions.y) * decodeInstructions.x;
real3 UnpackLightmapDoubleLDR(real4 encodedColor)
real3 UnpackLightmapDoubleLDR(real4 encodedColor, real4 decodeInstructions)
return encodedColor.rgb * LIGHTMAP_DLDR_RANGE;
return encodedColor.rgb * decodeInstructions.x;
real3 DecodeLightmap(real4 encodedIlluminance)
real3 DecodeLightmap(real4 encodedIlluminance, real4 decodeInstructions)
return UnpackLightmapRGBM(encodedIlluminance);
return UnpackLightmapRGBM(encodedIlluminance, decodeInstructions);
return UnpackLightmapDoubleLDR(encodedIlluminance);
return UnpackLightmapDoubleLDR(encodedIlluminance, decodeInstructions);
#endif
}

return (decodeInstructions.x * pow(alpha, decodeInstructions.y)) * encodedIrradiance.rgb;
}
real3 SampleSingleLightmap(TEXTURE2D_ARGS(lightmapTex, lightmapSampler), float2 uv, float4 transform, bool encodedLightmap)
real3 SampleSingleLightmap(TEXTURE2D_ARGS(lightmapTex, lightmapSampler), float2 uv, float4 transform, bool encodedLightmap, real4 decodeInstructions)
{
// transform is scale and bias
uv = uv * transform.xy + transform.zw;

{
real4 encodedIlluminance = SAMPLE_TEXTURE2D(lightmapTex, lightmapSampler, uv).rgba;
illuminance = DecodeLightmap(encodedIlluminance);
illuminance = DecodeLightmap(encodedIlluminance, decodeInstructions);
}
else
{

}
real3 SampleDirectionalLightmap(TEXTURE2D_ARGS(lightmapTex, lightmapSampler), TEXTURE2D_ARGS(lightmapDirTex, lightmapDirSampler), float2 uv, float4 transform, float3 normalWS, bool encodedLightmap)
real3 SampleDirectionalLightmap(TEXTURE2D_ARGS(lightmapTex, lightmapSampler), TEXTURE2D_ARGS(lightmapDirTex, lightmapDirSampler), float2 uv, float4 transform, float3 normalWS, bool encodedLightmap, real4 decodeInstructions)
{
// In directional mode Enlighten bakes dominant light direction
// in a way, that using it for half Lambert and then dividing by a "rebalancing coefficient"

if (encodedLightmap)
{
real4 encodedIlluminance = SAMPLE_TEXTURE2D(lightmapTex, lightmapSampler, uv).rgba;
illuminance = DecodeLightmap(encodedIlluminance);
illuminance = DecodeLightmap(encodedIlluminance, decodeInstructions);
}
else
{
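The lightmap decode now takes its range and exponent from decodeInstructions instead of hard-coded constants; the arithmetic itself is unchanged. A small C# sketch of the same math, using the legacy constants from the file above as the instruction values (class and method names are illustrative):

```
using UnityEngine;

public static class RgbmDecodeExample
{
    // decodeInstructions.x = range multiplier, decodeInstructions.y = alpha exponent,
    // matching the pair the shader function now receives.
    public static Color DecodeRGBM(Color rgbm, Vector4 decodeInstructions)
    {
        float multiplier = decodeInstructions.x * Mathf.Pow(rgbm.a, decodeInstructions.y);
        return new Color(rgbm.r * multiplier, rgbm.g * multiplier, rgbm.b * multiplier);
    }

    public static void Run()
    {
        // Legacy behaviour: gamma-encoded RGBM lightmap, range 5^2.2 = 34.493242, exponent 2.2.
        var legacy = new Vector4(34.493242f, 2.2f, 0f, 0f);
        Debug.Log(DecodeRGBM(new Color(0.5f, 0.5f, 0.5f, 0.25f), legacy));
    }
}
```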

ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/Filtering.hlsl (27)


// Compute weights & offsets for 4x bilinear taps for the biquadratic B-Spline filter.
// The fractional coordinate should be in the [0, 1] range (centered on 0.5).
// Inspired by: http://vec3.ca/bicubic-filtering-in-fewer-taps/
void BiquadraticFilter(real2 fracCoord, out real2 weights[2], out real2 offsets[2])
void BiquadraticFilter(float2 fracCoord, out float2 weights[2], out float2 offsets[2])
{
float2 l = BSpline2IntLeft(fracCoord);
float2 m = BSpline2IntMiddle(fracCoord);
float2 r = 1 - l - m;
// Compute offsets for 4x bilinear taps for the quadratic B-Spline reconstruction kernel.
// 0: lerp between left and middle
// 1: lerp between middle and right
weights[0] = l + 0.5 * m;
weights[1] = r + 0.5 * m;
offsets[0] = -0.5 + 0.5 * m * rcp(weights[0]);
offsets[1] = 0.5 + r * rcp(weights[1]);
}
// If half is natively supported, create another variant
#if HAS_HALF
void BiquadraticFilter(half2 fracCoord, out half2 weights[2], out half2 offsets[2])
real2 l = BSpline2IntLeft(fracCoord);
real2 m = BSpline2IntMiddle(fracCoord);
real2 r = 1 - l - m;
half2 l = BSpline2IntLeft(fracCoord);
half2 m = BSpline2IntMiddle(fracCoord);
half2 r = 1 - l - m;
// Compute offsets for 4x bilinear taps for the quadratic B-Spline reconstruction kernel.
// 0: lerp between left and middle

offsets[0] = -0.5 + 0.5 * m * rcp(weights[0]);
offsets[1] = 0.5 + r * rcp(weights[1]);
}
#endif
#endif // UNITY_FILTERING_INCLUDED
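The filter above folds three quadratic B-spline taps into two bilinear taps. A C# sketch of the same weight/offset computation follows; the l/m weight formulas are the standard quadratic B-spline weights and are an assumption here, since BSpline2IntLeft and BSpline2IntMiddle are not part of this diff:

```
using UnityEngine;

public static class BiquadraticFilterExample
{
    // fracCoord is the fractional texel coordinate in [0, 1], centered on 0.5.
    public static void GetWeightsAndOffsets(float fracCoord, out float[] weights, out float[] offsets)
    {
        // Assumed standard quadratic B-spline weights for the left/middle/right texels.
        float l = 0.5f * (1.0f - fracCoord) * (1.0f - fracCoord);
        float m = 0.75f - (fracCoord - 0.5f) * (fracCoord - 0.5f);
        float r = 1.0f - l - m;

        weights = new float[2];
        offsets = new float[2];

        // Fold three taps into two bilinear taps: split the middle weight between them.
        weights[0] = l + 0.5f * m;
        weights[1] = r + 0.5f * m;
        offsets[0] = -0.5f + 0.5f * m / weights[0];
        offsets[1] =  0.5f + r / weights[1];
    }

    public static void Run()
    {
        float[] w, o;
        GetWeightsAndOffsets(0.5f, out w, out o);
        // At the texel center: weights 0.5 / 0.5, offsets 0.25 / 0.75.
        Debug.Log(string.Format("weights: {0}, {1}  offsets: {2}, {3}", w[0], w[1], o[0], o[1]));
    }
}
```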

ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/Macros.hlsl (76)


half FunctionName(half Parameter1) { FunctionBody; } \
half2 FunctionName(half2 Parameter1) { FunctionBody; } \
half3 FunctionName(half3 Parameter1) { FunctionBody; } \
half4 FunctionName(half4 Parameter1) { FunctionBody; }
half4 FunctionName(half4 Parameter1) { FunctionBody; } \
float FunctionName(float Parameter1) { FunctionBody; } \
float2 FunctionName(float2 Parameter1) { FunctionBody; } \
float3 FunctionName(float3 Parameter1) { FunctionBody; } \
float4 FunctionName(float4 Parameter1) { FunctionBody; }
#ifdef SHADER_API_GLES
#define TEMPLATE_1_INT(FunctionName, Parameter1, FunctionBody) \

half FunctionName(half Parameter1, half Parameter2) { FunctionBody; } \
half2 FunctionName(half2 Parameter1, half2 Parameter2) { FunctionBody; } \
half3 FunctionName(half3 Parameter1, half3 Parameter2) { FunctionBody; } \
half4 FunctionName(half4 Parameter1, half4 Parameter2) { FunctionBody; }
half4 FunctionName(half4 Parameter1, half4 Parameter2) { FunctionBody; } \
float FunctionName(float Parameter1, float Parameter2) { FunctionBody; } \
float2 FunctionName(float2 Parameter1, float2 Parameter2) { FunctionBody; } \
float3 FunctionName(float3 Parameter1, float3 Parameter2) { FunctionBody; } \
float4 FunctionName(float4 Parameter1, float4 Parameter2) { FunctionBody; }
#ifdef SHADER_API_GLES

half FunctionName(half Parameter1, half Parameter2, half Parameter3) { FunctionBody; } \
half2 FunctionName(half2 Parameter1, half2 Parameter2, half2 Parameter3) { FunctionBody; } \
half3 FunctionName(half3 Parameter1, half3 Parameter2, half3 Parameter3) { FunctionBody; } \
half4 FunctionName(half4 Parameter1, half4 Parameter2, half4 Parameter3) { FunctionBody; }
half4 FunctionName(half4 Parameter1, half4 Parameter2, half4 Parameter3) { FunctionBody; } \
float FunctionName(float Parameter1, float Parameter2, float Parameter3) { FunctionBody; } \
float2 FunctionName(float2 Parameter1, float2 Parameter2, float2 Parameter3) { FunctionBody; } \
float3 FunctionName(float3 Parameter1, float3 Parameter2, float3 Parameter3) { FunctionBody; } \
float4 FunctionName(float4 Parameter1, float4 Parameter2, float4 Parameter3) { FunctionBody; }
#ifdef SHADER_API_GLES
#define TEMPLATE_3_INT(FunctionName, Parameter1, Parameter2, Parameter3, FunctionBody) \

void FunctionName(inout bool3 a, inout bool3 b) { bool3 t = a; a = b; b = t; } \
void FunctionName(inout bool4 a, inout bool4 b) { bool4 t = a; a = b; b = t; }
#else
#define TEMPLATE_SWAP(FunctionName) \
void FunctionName(inout real a, inout real b) { real t = a; a = b; b = t; } \
void FunctionName(inout real2 a, inout real2 b) { real2 t = a; a = b; b = t; } \
void FunctionName(inout real3 a, inout real3 b) { real3 t = a; a = b; b = t; } \
void FunctionName(inout real4 a, inout real4 b) { real4 t = a; a = b; b = t; } \
void FunctionName(inout int a, inout int b) { int t = a; a = b; b = t; } \
void FunctionName(inout int2 a, inout int2 b) { int2 t = a; a = b; b = t; } \
void FunctionName(inout int3 a, inout int3 b) { int3 t = a; a = b; b = t; } \
void FunctionName(inout int4 a, inout int4 b) { int4 t = a; a = b; b = t; } \
void FunctionName(inout uint a, inout uint b) { uint t = a; a = b; b = t; } \
void FunctionName(inout uint2 a, inout uint2 b) { uint2 t = a; a = b; b = t; } \
void FunctionName(inout uint3 a, inout uint3 b) { uint3 t = a; a = b; b = t; } \
void FunctionName(inout uint4 a, inout uint4 b) { uint4 t = a; a = b; b = t; } \
void FunctionName(inout bool a, inout bool b) { bool t = a; a = b; b = t; } \
void FunctionName(inout bool2 a, inout bool2 b) { bool2 t = a; a = b; b = t; } \
void FunctionName(inout bool3 a, inout bool3 b) { bool3 t = a; a = b; b = t; } \
void FunctionName(inout bool4 a, inout bool4 b) { bool4 t = a; a = b; b = t; }
#if HAS_HALF
#define TEMPLATE_SWAP(FunctionName) \
void FunctionName(inout real a, inout real b) { real t = a; a = b; b = t; } \
void FunctionName(inout real2 a, inout real2 b) { real2 t = a; a = b; b = t; } \
void FunctionName(inout real3 a, inout real3 b) { real3 t = a; a = b; b = t; } \
void FunctionName(inout real4 a, inout real4 b) { real4 t = a; a = b; b = t; } \
void FunctionName(inout float a, inout float b) { float t = a; a = b; b = t; } \
void FunctionName(inout float2 a, inout float2 b) { float2 t = a; a = b; b = t; } \
void FunctionName(inout float3 a, inout float3 b) { float3 t = a; a = b; b = t; } \
void FunctionName(inout float4 a, inout float4 b) { float4 t = a; a = b; b = t; } \
void FunctionName(inout int a, inout int b) { int t = a; a = b; b = t; } \
void FunctionName(inout int2 a, inout int2 b) { int2 t = a; a = b; b = t; } \
void FunctionName(inout int3 a, inout int3 b) { int3 t = a; a = b; b = t; } \
void FunctionName(inout int4 a, inout int4 b) { int4 t = a; a = b; b = t; } \
void FunctionName(inout uint a, inout uint b) { uint t = a; a = b; b = t; } \
void FunctionName(inout uint2 a, inout uint2 b) { uint2 t = a; a = b; b = t; } \
void FunctionName(inout uint3 a, inout uint3 b) { uint3 t = a; a = b; b = t; } \
void FunctionName(inout uint4 a, inout uint4 b) { uint4 t = a; a = b; b = t; } \
void FunctionName(inout bool a, inout bool b) { bool t = a; a = b; b = t; } \
void FunctionName(inout bool2 a, inout bool2 b) { bool2 t = a; a = b; b = t; } \
void FunctionName(inout bool3 a, inout bool3 b) { bool3 t = a; a = b; b = t; } \
void FunctionName(inout bool4 a, inout bool4 b) { bool4 t = a; a = b; b = t; }
#else
#define TEMPLATE_SWAP(FunctionName) \
void FunctionName(inout real a, inout real b) { real t = a; a = b; b = t; } \
void FunctionName(inout real2 a, inout real2 b) { real2 t = a; a = b; b = t; } \
void FunctionName(inout real3 a, inout real3 b) { real3 t = a; a = b; b = t; } \
void FunctionName(inout real4 a, inout real4 b) { real4 t = a; a = b; b = t; } \
void FunctionName(inout int a, inout int b) { int t = a; a = b; b = t; } \
void FunctionName(inout int2 a, inout int2 b) { int2 t = a; a = b; b = t; } \
void FunctionName(inout int3 a, inout int3 b) { int3 t = a; a = b; b = t; } \
void FunctionName(inout int4 a, inout int4 b) { int4 t = a; a = b; b = t; } \
void FunctionName(inout uint a, inout uint b) { uint t = a; a = b; b = t; } \
void FunctionName(inout uint2 a, inout uint2 b) { uint2 t = a; a = b; b = t; } \
void FunctionName(inout uint3 a, inout uint3 b) { uint3 t = a; a = b; b = t; } \
void FunctionName(inout uint4 a, inout uint4 b) { uint4 t = a; a = b; b = t; } \
void FunctionName(inout bool a, inout bool b) { bool t = a; a = b; b = t; } \
void FunctionName(inout bool2 a, inout bool2 b) { bool2 t = a; a = b; b = t; } \
void FunctionName(inout bool3 a, inout bool3 b) { bool3 t = a; a = b; b = t; } \
void FunctionName(inout bool4 a, inout bool4 b) { bool4 t = a; a = b; b = t; }
#endif
#endif

ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/Packing.hlsl (5)


return normalize(normal);
}
real3 UnpackNormalRGBNoScale(real4 packedNormal)
{
return packedNormal.rgb * 2.0 - 1.0;
}
real3 UnpackNormalAG(real4 packedNormal, real scale = 1.0)
{
real3 normal;

ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/Shadow/ShadowAlgorithms.hlsl (55)


return EvalShadow_GetTexcoords( sd, positionWS, ndc, perspProj );
}
uint2 EvalShadow_GetTexcoords( ShadowData sd, real3 positionWS, out real2 closestSampleNDC, bool perspProj )
real2 EvalShadow_GetTexcoords( ShadowData sd, real3 positionWS, out real2 closestSampleNDC, bool perspProj )
{
real4 posCS = EvalShadow_WorldToShadow( sd, positionWS, perspProj );
real2 posNDC = perspProj ? (posCS.xy / posCS.w) : posCS.xy;

return uint2( (posTC * sd.scaleOffset.xy + sd.scaleOffset.zw) * sd.textureSize.xy );
return posTC * sd.scaleOffset.xy + sd.scaleOffset.zw;
uint2 EvalShadow_GetIntTexcoords( ShadowData sd, real3 positionWS, out real2 closestSampleNDC, bool perspProj )
{
real2 texCoords = EvalShadow_GetTexcoords(sd, positionWS, closestSampleNDC, perspProj);
return uint2(texCoords * sd.textureSize.xy);
}
//
// Biasing functions

wposDir = -sphere.xyz + positionWS;
float distSq = dot( wposDir, wposDir );
relDistance = distSq / sphere.w;
if( relDistance <= 1.0 )
if( relDistance > 0.0 && relDistance <= 1.0 )
{
splitSphere = sphere.xyz;
wposDir /= sqrt( distSq );

sd = shadowContext.shadowDatas[index + faceIndex];
real4 closestNDC = { 0,0,0,1 };
uint2 texelIdx = EvalShadow_GetTexcoords( sd, positionWS, closestNDC.xy, true );
uint2 texelIdx = EvalShadow_GetIntTexcoords( sd, positionWS, closestNDC.xy, true );
// load the texel
uint texIdx, sampIdx;

sd = shadowContext.shadowDatas[index + faceIndex];
real4 closestNDC = { 0,0,0,1 };
uint2 texelIdx = EvalShadow_GetTexcoords( sd, positionWS, closestNDC.xy, true );
uint2 texelIdx = EvalShadow_GetIntTexcoords( sd, positionWS, closestNDC.xy, true );
// load the texel
closestNDC.z = LOAD_TEXTURE2D_ARRAY_LOD( tex, texelIdx, sd.slice, 0 ).x;

ShadowData sd = shadowContext.shadowDatas[index];
real4 closestNDC = { 0,0,0,1 };
uint2 texelIdx = EvalShadow_GetTexcoords( sd, positionWS, closestNDC.xy, true );
uint2 texelIdx = EvalShadow_GetIntTexcoords( sd, positionWS, closestNDC.xy, true );
// load the texel
uint texIdx, sampIdx;

ShadowData sd = shadowContext.shadowDatas[index];
real4 closestNDC = { 0,0,0,1 };
uint2 texelIdx = EvalShadow_GetTexcoords( sd, positionWS, closestNDC.xy, true );
uint2 texelIdx = EvalShadow_GetIntTexcoords( sd, positionWS, closestNDC.xy, true );
// load the texel
closestNDC.z = LOAD_TEXTURE2D_ARRAY_LOD( tex, texelIdx, sd.slice, 0 ).x;

sd = shadowContext.shadowDatas[index + faceIndex];
real4 closestNDC = { 0,0,0,1 };
uint2 texelIdx = EvalShadow_GetTexcoords( sd, positionWS, closestNDC.xy, true );
uint2 texelIdx = EvalShadow_GetIntTexcoords( sd, positionWS, closestNDC.xy, true );
// load the texel
uint texIdx, sampIdx;

sd = shadowContext.shadowDatas[index + faceIndex];
real4 closestNDC = { 0,0,0,1 };
uint2 texelIdx = EvalShadow_GetTexcoords( sd, positionWS, closestNDC.xy, true );
uint2 texelIdx = EvalShadow_GetIntTexcoords( sd, positionWS, closestNDC.xy, true );
// load the texel
closestNDC.z = LOAD_TEXTURE2D_ARRAY_LOD( tex, texelIdx, sd.slice, 0 ).x;

return closestWS.xyz / closestWS.w;
}
real EvalShadow_SampleClosestDistance_Punctual( ShadowContext shadowContext, Texture2DArray tex, SamplerState sampl,
real3 positionWS, int index, real3 L, real3 lightPositionWS )
{
// get the algorithm
ShadowData sd = shadowContext.shadowDatas[index];
uint shadowType;
UnpackShadowType( sd.shadowType, shadowType );
// load the right shadow data for the current face
int faceIndex = shadowType == GPUSHADOWTYPE_POINT ? (CubeMapFaceID( -L ) + 1) : 0;
sd = shadowContext.shadowDatas[index + faceIndex];
real4 closestNDC = { 0,0,0,1 };
real2 texelIdx = EvalShadow_GetTexcoords( sd, positionWS, closestNDC.xy, true );
// sample the shadow map
closestNDC.z = SAMPLE_TEXTURE2D_ARRAY_LOD( tex, sampl, texelIdx, sd.slice, 0 ).x;
// reconstruct depth position
real4 closestWS = mul( closestNDC, sd.shadowToWorld );
real3 occluderPosWS = closestWS.xyz / closestWS.w;
return distance( occluderPosWS, lightPositionWS );
}
real3 EvalShadow_GetClosestSample_Cascade( ShadowContext shadowContext, real3 positionWS, real3 normalWS, int index, real4 L )
{
// load the right shadow data for the current face

if( shadowSplitIndex < 0 )
return 1.0;
return 0.0;
uint2 texelIdx = EvalShadow_GetTexcoords( sd, positionWS, closestNDC.xy, false );
uint2 texelIdx = EvalShadow_GetIntTexcoords( sd, positionWS, closestNDC.xy, false );
// load the texel
uint texIdx, sampIdx;

int shadowSplitIndex = EvalShadow_GetSplitIndex( shadowContext, index, positionWS, payloadOffset, alpha );
if( shadowSplitIndex < 0 )
return 1.0;
return 0.0;
uint2 texelIdx = EvalShadow_GetTexcoords( sd, positionWS, closestNDC.xy, false );
uint2 texelIdx = EvalShadow_GetIntTexcoords( sd, positionWS, closestNDC.xy, false );
// load the texel
uint texIdx, sampIdx;

ScriptableRenderPipeline/Core/CoreRP/Shadow/Shadow.cs (37)


// UI stuff
protected struct ValRange
{
GUIContent Name;
#if UNITY_EDITOR
GUIContent Name;
float ValDef;
#endif
float ValDef;
public ValRange( string name, float valMin, float valDef, float valMax, float valScale ) { Name = new GUIContent( name ); ValMin = valMin; ValDef = valDef; ValMax = valMax; ValScale = valScale; }
public ValRange( string name, float valMin, float valDef, float valMax, float valScale )
{
#if UNITY_EDITOR
Name = new GUIContent( name );
ValMin = valMin;
ValMax = valMax;
#endif
ValDef = valDef;
ValScale = valScale;
}
#if UNITY_EDITOR
public void Slider( ref int currentVal ) { currentVal = ShadowUtils.Asint( ValScale * UnityEditor.EditorGUILayout.Slider( Name, ShadowUtils.Asfloat( currentVal ) / ValScale, ValMin, ValMax ) ); }
#else

if( sr.shadowType == GPUShadowType.Directional )
{
asd.GetShadowCascades( out cascadeCnt, out cascadeRatios, out cascadeBorders );
for( int i = 0; i < m_TmpSplits.Length; i++ )
m_TmpSplits[i].w = -1.0f;
}

}
// read
float texelSizeX = 1.0f, texelSizeY = 1.0f;
CachedEntry ce = m_EntryCache[ceIdx];
ce.zclip = sr.shadowType != GPUShadowType.Directional;

if( ce.current.splitData.cullingSphere.w != float.NegativeInfinity )
{
int face = (int)key.faceIdx;
texelSizeX = 2.0f / ce.current.proj.m00;
texelSizeY = 2.0f / ce.current.proj.m11;
m_TmpBorders[face] = cascadeBorders[face];
m_TmpSplits[key.faceIdx].w *= ce.current.splitData.cullingSphere.w;
}

uint first = k_MaxCascadesInShader, second = k_MaxCascadesInShader;
for( uint i = 0; i < k_MaxCascadesInShader; i++, payloadOffset++ )
{
first = (first == k_MaxCascadesInShader && m_TmpSplits[i].w > 0.0f) ? i : first;
second = (second == k_MaxCascadesInShader && m_TmpSplits[i].w > 0.0f) ? i : second;
first = ( first == k_MaxCascadesInShader && m_TmpSplits[i].w > 0.0f) ? i : first;
second = ((second == k_MaxCascadesInShader || second == first) && m_TmpSplits[i].w > 0.0f) ? i : second;
sp.Set( m_TmpSplits[i] );
payload[payloadOffset] = sp;
}

}
if( curx + vp.width > xmax || cury + curh > ymax || curslice == m_Slices )
{
Debug.LogError( "ERROR! Shadow atlasing failed." );
Debug.LogWarning( "Shadow atlasing has failed." );
return false;
}
vp.x = curx;

int sa, sv, sp;
asd.GetShadowAlgorithm( out sa, out sv, out sp );
sreq.shadowAlgorithm = ShadowUtils.Pack( (ShadowAlgorithm) sa, (ShadowVariant) sv, (ShadowPrecision) sp );
GPUShadowAlgorithm packed_algo = ShadowUtils.Pack( (ShadowAlgorithm) sa, (ShadowVariant) sv, (ShadowPrecision) sp );
GetGlobalShadowOverride( shadowType, ref packed_algo );
sreq.shadowAlgorithm = packed_algo;
totalRequestCount += (uint) facecount;
requestsGranted.AddUnchecked( sreq );
totalSlots--;

}
if( smidx == k_MaxShadowmapPerType )
{
Debug.LogError("The requested shadows do not fit into any shadowmap.");
Debug.LogWarning("The requested shadows do not fit into any shadowmap.");
return false;
}
}

{
if( !sm.ReserveFinalize( frameId, ref shadowDatas, ref shadowmapPayload ) )
{
Debug.LogError("Shadow allocation failed in the ReserveFinalize step." );
Debug.LogWarning("Shadow allocation failed in the ReserveFinalize step." );
return false;
}
}
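The first/second change above fixes a selection bug: with the old condition, second was assigned on the same iteration as first and therefore always ended up equal to it. A standalone sketch of the corrected selection, with a simple bool[] validity mask standing in for the m_TmpSplits[i].w > 0 test (helper and names are illustrative):

```
using UnityEngine;

public static class CascadeSelectionExample
{
    // Finds the indices of the first two valid cascades, or 'invalid' when not found.
    public static void FindFirstTwo(bool[] valid, int invalid, out int first, out int second)
    {
        first = invalid;
        second = invalid;
        for (int i = 0; i < valid.Length; i++)
        {
            first  = (first == invalid && valid[i]) ? i : first;
            // Allow 'second' to move past 'first' instead of sticking to the same index.
            second = ((second == invalid || second == first) && valid[i]) ? i : second;
        }
    }

    public static void Run()
    {
        int invalid = 4; // stands in for k_MaxCascadesInShader
        int first, second;
        FindFirstTwo(new[] { false, true, false, true }, invalid, out first, out second);
        Debug.Log(string.Format("first = {0}, second = {1}", first, second)); // first = 1, second = 3
    }
}
```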

ScriptableRenderPipeline/Core/CoreRP/Shadow/ShadowBase.cs (8)


{
m_GlobalOverrides[(int)shadowType].enabled = enabled;
}
public bool GetGlobalShadowOverride( GPUShadowType shadowType, ref GPUShadowAlgorithm algo )
{
if( m_GlobalOverrides[(int)shadowType].enabled )
algo = ShadowUtils.Pack( m_GlobalOverrides[(int)shadowType].algorithm, m_GlobalOverrides[(int)shadowType].variant, m_GlobalOverrides[(int)shadowType].precision );
return m_GlobalOverrides[(int)shadowType].enabled;
}
}
// This is the struct passed into shaders

ScriptableRenderPipeline/Core/CoreRP/TextureCache.cs (23)


{
private Texture2DArray m_Cache;
public TextureCache2D(string cacheName = "")
: base(cacheName)
{
}
public override void TransferToSlice(CommandBuffer cmd, int sliceIndex, Texture texture)
{
var mismatch = (m_Cache.width != texture.width) || (m_Cache.height != texture.height);

m_Cache = new Texture2DArray(width, height, numTextures, format, isMipMapped)
{
hideFlags = HideFlags.HideAndDontSave,
wrapMode = TextureWrapMode.Clamp
wrapMode = TextureWrapMode.Clamp,
name = CoreUtils.GetTextureAutoName(width, height, format, TextureDimension.Tex2DArray, depth: numTextures, name: m_CacheName)
};
return res;

private int m_CubeMipLevelPropName;
private int m_cubeSrcTexPropName;
public TextureCacheCubemap(string cacheName = "")
: base(cacheName)
{
}
public override void TransferToSlice(CommandBuffer cmd, int sliceIndex, Texture texture)
{
if (!TextureCache.supportsCubemapArrayTextures)

wrapMode = TextureWrapMode.Repeat,
wrapModeV = TextureWrapMode.Clamp,
filterMode = FilterMode.Trilinear,
anisoLevel = 0
anisoLevel = 0,
name = CoreUtils.GetTextureAutoName(panoWidthTop, panoHeightTop, format, TextureDimension.Tex2DArray, depth: numCubeMaps, name: m_CacheName)
};
m_NumPanoMipLevels = isMipMapped ? GetNumMips(panoWidthTop, panoHeightTop) : 1;

hideFlags = HideFlags.HideAndDontSave,
wrapMode = TextureWrapMode.Clamp,
filterMode = FilterMode.Trilinear,
anisoLevel = 0 // It is important to set this to 0, otherwise Unity forces anisotropic filtering
anisoLevel = 0, // It is important to set this to 0, otherwise Unity forces anisotropic filtering
name = CoreUtils.GetTextureAutoName(width, width, format, TextureDimension.CubeArray, depth: numCubeMaps, name: m_CacheName)
};
}

public abstract class TextureCache
{
protected int m_NumMipLevels;
protected string m_CacheName;
public static bool isMobileBuildTarget
{

// assert(m_SliceArray[m_SortedIdxArray[q-1]].CountLRU>=m_SliceArray[m_SortedIdxArray[q]].CountLRU);
}
protected TextureCache()
protected TextureCache(string cacheName)
m_CacheName = cacheName;
m_NumTextures = 0;
m_NumMipLevels = 0;
}
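With this change every cache forwards a name to the base class, and the backing array textures pick up a debug name via CoreUtils.GetTextureAutoName. A small sketch of the intended call pattern; FetchSlice usage is taken from the DecalSystem hunks later in this diff, and someTexture is a placeholder:
// Minimal sketch, assuming the constructor and FetchSlice shown elsewhere in this diff.
var atlas = new TextureCache2D("DecalAtlas"); // backing Texture2DArray is auto-named for debugging
int slice = atlas.FetchSlice(cmd, someTexture); // slice index the texture was cached into
// (The allocation call that creates the Texture2DArray is elided; its exact signature is not shown in this diff.)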

2
ScriptableRenderPipeline/Core/package.json


"version": "0.1.33",
"unity": "2018.1",
"dependencies": {
"com.unity.postprocessing": "0.2.0"
"com.unity.postprocessing": "2.0.2-preview"
}
}

8
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Camera/HDAdditionalCameraData.cs


using UnityEngine.Serialization;
namespace UnityEngine.Experimental.Rendering.HDPipeline
{

void UnRegisterDebug()
{
if (m_camera == null)
return;
if (m_IsDebugRegistered)
{
if (m_camera.cameraType != CameraType.Preview && m_camera.cameraType != CameraType.Reflection)

// When LDR, Unity renders in 8-bit sRGB, then runs a final shader with an sRGB conversion
// What should be done instead is to convert to sRGB in our post process and store in a linear 10-bit format, but that requires a C++ change...
m_camera = GetComponent<Camera>();
if (m_camera == null)
return;
m_camera.allowHDR = false;
// Tag as dirty so frameSettings are correctly initialized at the next HDRenderPipeline.Render() call

1
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/DebugColorPicker.shader


{
SubShader
{
Tags{ "RenderPipeline" = "HDRenderPipeline" }
Pass
{
ZWrite Off

2
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/DebugDisplay.cs


{
children =
{
new DebugUI.UIntField { displayName = "Shadow Atlas Index", getter = () => lightingDebugSettings.shadowMapIndex, setter = value => lightingDebugSettings.shadowAtlasIndex = value, min = () => 0u, max = () => (uint)(RenderPipelineManager.currentPipeline as HDRenderPipeline).GetShadowAtlasCount() - 1u }
new DebugUI.UIntField { displayName = "Shadow Atlas Index", getter = () => lightingDebugSettings.shadowAtlasIndex, setter = value => lightingDebugSettings.shadowAtlasIndex = value, min = () => 0u, max = () => (uint)(RenderPipelineManager.currentPipeline as HDRenderPipeline).GetShadowAtlasCount() - 1u }
}
});
}

7
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/DebugDisplay.hlsl


}
// 4. Display leading 0
for (int i = 0; i < leading0; ++i)
if (leading0 > 0)
DrawCharacter('0', fontColor, currentUnormCoord, fixedUnormCoord, flipY, color, -1);
for (int i = 0; i < leading0; ++i)
{
DrawCharacter('0', fontColor, currentUnormCoord, fixedUnormCoord, flipY, color, -1);
}
}
// 5. Display sign

1
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/DebugDisplayLatlong.shader


{
SubShader
{
Tags{ "RenderPipeline" = "HDRenderPipeline" }
Pass
{
ZWrite On

1
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/DebugFullScreen.shader


{
SubShader
{
Tags{ "RenderPipeline" = "HDRenderPipeline" }
Pass
{
ZWrite Off

1
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/DebugViewMaterialGBuffer.shader


{
SubShader
{
Tags{ "RenderPipeline" = "HDRenderPipeline" }
Pass
{
ZWrite Off

2
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/DebugViewTiles.shader


{
SubShader
{
Tags{ "RenderPipeline" = "HDRenderPipeline" }
Pass
{
ZWrite Off

9
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/MaterialDebug.cs


VertexNormalWS,
VertexColor,
VertexColorAlpha,
Last,
// if you add more values here, fix the first entry of next enum
};
// Number must be contiguous

None = 0,
Depth = DebugViewVarying.Last,
Depth = DebugViewVarying.VertexColorAlpha + 1,
Last,
// if you add more values here, fix the first entry of next enum
}
// Number must be contiguous

None = 0,
Tessellation = DebugViewGbuffer.Last,
Tessellation = DebugViewGbuffer.BakeShadowMask3 + 1,
PixelDisplacement,
VertexDisplacement,
TessellationDisplacement,

Last,
}
}

32
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Decal/DecalProjectorComponent.cs


public float m_DrawDistance = 1000.0f;
public float m_FadeScale = 0.9f;
private Material m_OldMaterial = null;
public const int kInvalidIndex = -1;
private int m_CullIndex = kInvalidIndex;
private DecalSystem.DecalHandle m_Handle = null;
public int CullIndex
public DecalSystem.DecalHandle Handle
return this.m_CullIndex;
return this.m_Handle;
this.m_CullIndex = value;
this.m_Handle = value;
}
}

m_Material = hdrp != null ? hdrp.GetDefaultDecalMaterial() : null;
}
DecalSystem.instance.AddDecal(this);
}
public void Start()
{
DecalSystem.instance.AddDecal(this);
if(m_Handle != null)
DecalSystem.instance.RemoveDecal(m_Handle);
m_Handle = DecalSystem.instance.AddDecal(transform, m_DrawDistance, m_FadeScale, m_Material);
DecalSystem.instance.RemoveDecal(this);
DecalSystem.instance.RemoveDecal(m_Handle);
m_Handle = null;
}
// Declare the method signature of the delegate to call.

// handle material changes
if (m_OldMaterial != m_Material)
{
Material tempMaterial = m_Material;
m_Material = m_OldMaterial;
if(m_Material != null)
DecalSystem.instance.RemoveDecal(this);
m_Material = tempMaterial;
DecalSystem.instance.AddDecal(this);
if( m_Handle != null)
DecalSystem.instance.RemoveDecal(m_Handle);
m_Handle = DecalSystem.instance.AddDecal(transform, m_DrawDistance, m_FadeScale, m_Material);
m_OldMaterial = m_Material;
// notify the editor that material has changed so it can update the shader foldout

{
DrawGizmo(true);
// if this object is selected there is a chance the transform was changed so update culling info
DecalSystem.instance.UpdateCachedData(this);
DecalSystem.instance.UpdateCachedData(transform, m_DrawDistance, m_FadeScale, m_Handle);
}
public void OnDrawGizmos()

146
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Decal/DecalSystem.cs


{
public class DecalSystem
{
public const int kInvalidIndex = -1;
public class DecalHandle
{
public DecalHandle(int index, int materialID)
{
m_MaterialID = materialID;
m_Index = index;
}
public static bool IsValid(DecalHandle handle)
{
if (handle == null)
return false;
if (handle.m_Index == kInvalidIndex)
return false;
return true;
}
public int m_MaterialID; // identifies decal set
public int m_Index; // identifies decal within the set
}
static DecalSystem m_Instance;
static public DecalSystem instance
{

{
if (m_DecalAtlas == null)
{
m_DecalAtlas = new TextureCache2D();
m_DecalAtlas = new TextureCache2D("DecalAtlas");
public Camera CurrentCamera
{

return res;
}
public void UpdateCachedData(DecalProjectorComponent decal)
public void UpdateCachedData(Transform transform, float drawDistance, float fadeScale, DecalHandle handle)
m_CachedDecalToWorld[decal.CullIndex] = decal.transform.localToWorldMatrix;
int index = handle.m_Index;
m_CachedDecalToWorld[index] = transform.localToWorldMatrix;
Matrix4x4 decalRotation = Matrix4x4.Rotate(decal.transform.rotation);
Matrix4x4 decalRotation = Matrix4x4.Rotate(transform.rotation);
// z/y axis swap for normal to decal space, Unity is column major
float y0 = decalRotation.m01;
float y1 = decalRotation.m11;

decalRotation.m12 = y1;
decalRotation.m22 = y2;
m_CachedNormalToWorld[decal.CullIndex] = decalRotation;
m_CachedNormalToWorld[index] = decalRotation;
m_CachedDrawDistances[decal.CullIndex].x = decal.m_DrawDistance < instance.DrawDistance
? decal.m_DrawDistance
m_CachedDrawDistances[index].x = drawDistance < instance.DrawDistance
? drawDistance
m_CachedDrawDistances[decal.CullIndex].y = decal.m_FadeScale;
m_BoundingSpheres[decal.CullIndex] = GetDecalProjectBoundingSphere(m_CachedDecalToWorld[decal.CullIndex]);
m_CachedDrawDistances[index].y = fadeScale;
m_BoundingSpheres[index] = GetDecalProjectBoundingSphere(m_CachedDecalToWorld[index]);
public void AddDecal(DecalProjectorComponent decal)
public DecalHandle AddDecal(Transform transform, float drawDistance, float fadeScale, int materialID)
if (m_DecalsCount == m_Decals.Length)
if (m_DecalsCount == m_Handles.Length)
DecalProjectorComponent[] newDecals = new DecalProjectorComponent[m_DecalsCount + kDecalBlockSize];
DecalHandle[] newHandles = new DecalHandle[m_DecalsCount + kDecalBlockSize];
BoundingSphere[] newSpheres = new BoundingSphere[m_DecalsCount + kDecalBlockSize];
Matrix4x4[] newCachedTransforms = new Matrix4x4[m_DecalsCount + kDecalBlockSize];
Matrix4x4[] newCachedNormalToWorld = new Matrix4x4[m_DecalsCount + kDecalBlockSize];

m_Decals.CopyTo(newDecals, 0);
m_Handles.CopyTo(newHandles, 0);
m_Decals = newDecals;
m_Handles = newHandles;
m_BoundingSpheres = newSpheres;
m_CachedDecalToWorld = newCachedTransforms;
m_CachedNormalToWorld = newCachedNormalToWorld;

m_Decals[m_DecalsCount] = decal;
m_Decals[m_DecalsCount].CullIndex = m_DecalsCount;
UpdateCachedData(m_Decals[m_DecalsCount]);
DecalHandle decalHandle = new DecalHandle(m_DecalsCount, materialID);
m_Handles[m_DecalsCount] = decalHandle;
UpdateCachedData(transform, drawDistance, fadeScale, decalHandle);
return decalHandle;
public void RemoveDecal(DecalProjectorComponent decal)
public void RemoveDecal(DecalHandle handle)
int removeAtIndex = decal.CullIndex;
int removeAtIndex = handle.m_Index;
m_Decals[removeAtIndex] = m_Decals[m_DecalsCount - 1]; // move the last decal in list
m_Decals[removeAtIndex].CullIndex = removeAtIndex;
m_Decals[m_DecalsCount - 1] = null;
m_Handles[removeAtIndex] = m_Handles[m_DecalsCount - 1]; // move the last decal in list
m_Handles[removeAtIndex].m_Index = removeAtIndex;
m_Handles[m_DecalsCount - 1] = null;
// update the bounding spheres array
// update cached data
decal.CullIndex = DecalProjectorComponent.kInvalidIndex;
handle.m_Index = kInvalidIndex;
}
public void BeginCull()

private void GetDecalVolumeDataAndBound(Matrix4x4 decalToWorld, Matrix4x4 worldToView)
{
var influenceX = decalToWorld.GetColumn(0) * 0.5f;
var influenceY = decalToWorld.GetColumn(1) * 0.5f;
var influenceZ = decalToWorld.GetColumn(2) * 0.5f;

normalToWorldBatch[instanceCount].m23 = m_NormalTexIndex;
normalToWorldBatch[instanceCount].m33 = m_MaskTexIndex;
// clustered forward data
m_DecalDatas[m_DecalDatasCount].worldToDecal = decalToWorldBatch[instanceCount].inverse;
m_DecalDatas[m_DecalDatasCount].normalToWorld = normalToWorldBatch[instanceCount];

void UpdateTextureCache(CommandBuffer cmd)
{
if (m_DiffuseTexture != null)
{
m_DiffuseTexIndex = instance.TextureAtlas.FetchSlice(cmd, m_DiffuseTexture);
}
else
{
m_DiffuseTexIndex = -1;
}
if (m_NormalTexture != null)
{
m_NormalTexIndex = instance.TextureAtlas.FetchSlice(cmd, m_NormalTexture);
}
else
{
m_NormalTexIndex = -1;
}
if (m_MaskTexture != null)
{
m_MaskTexIndex = instance.TextureAtlas.FetchSlice(cmd, m_MaskTexture);
}
else
{
m_MaskTexIndex = -1;
}
m_DiffuseTexIndex = (m_DiffuseTexture != null) ? instance.TextureAtlas.FetchSlice(cmd, m_DiffuseTexture) : -1;
m_NormalTexIndex = (m_NormalTexture != null) ? instance.TextureAtlas.FetchSlice(cmd, m_NormalTexture) : -1;
m_MaskTexIndex = (m_MaskTexture != null) ? instance.TextureAtlas.FetchSlice(cmd, m_MaskTexture) : -1;
}
public void RemoveFromTextureCache()

private CullingGroup m_CullingGroup = null;
private BoundingSphere[] m_BoundingSpheres = new BoundingSphere[kDecalBlockSize];
private DecalProjectorComponent[] m_Decals = new DecalProjectorComponent[kDecalBlockSize];
private DecalHandle[] m_Handles = new DecalHandle[kDecalBlockSize];
private int[] m_ResultIndices = new int[kDecalBlockSize];
private int m_NumResults = 0;
private int m_DecalsCount = 0;

private int m_MaskTexIndex = -1;
}
public void AddDecal(DecalProjectorComponent decal)
{
if (decal.CullIndex != DecalProjectorComponent.kInvalidIndex) //do not add the same decal more than once
return;
if(!decal.IsValid())
return;
public DecalHandle AddDecal(Transform transform, float drawDistance, float fadeScale, Material material)
{
int key = decal.m_Material.GetInstanceID();
int key = material.GetInstanceID();
decalSet = new DecalSet(decal.m_Material);
decalSet = new DecalSet(material);
decalSet.AddDecal(decal);
return decalSet.AddDecal(transform, drawDistance, fadeScale, key);
public void RemoveDecal(DecalProjectorComponent decal)
public void RemoveDecal(DecalHandle handle)
if (decal.CullIndex == DecalProjectorComponent.kInvalidIndex) // check if we have this decal
return;
if (!DecalHandle.IsValid(handle))
return;
int key = decal.m_Material.GetInstanceID();
int key = handle.m_MaterialID;
decalSet.RemoveDecal(decal);
decalSet.RemoveDecal(handle);
if (decalSet.Count == 0)
{
decalSet.RemoveFromTextureCache();

}
public void UpdateCachedData(DecalProjectorComponent decal)
public void UpdateCachedData(Transform transform, float drawDistance, float fadeScale, DecalHandle handle)
if (decal.CullIndex == DecalProjectorComponent.kInvalidIndex) // check if we have this decal
if(!DecalHandle.IsValid(handle))
int key = decal.m_Material.GetInstanceID();
int key = handle.m_MaterialID;
decalSet.UpdateCachedData(decal);
decalSet.UpdateCachedData(transform, drawDistance, fadeScale, handle);
}
}

pair.Value.EndCull();
}
}
// need a better way than passing light loop here
public void RenderIntoDBuffer(CommandBuffer cmd)
{
if (m_DecalMesh == null)

if (m_DecalAtlas != null)
m_DecalAtlas.Release();
CoreUtils.Destroy(m_DecalMesh);
// set to null so that they get recreated
m_DecalAtlas = null;
m_DecalMesh = null;
}
}
}
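The refactor above replaces the component-indexed API with an opaque DecalHandle keyed by material instance ID. A sketch of the new lifecycle as DecalProjectorComponent now drives it, using only names from the hunks above:
// Minimal sketch of the handle-based lifecycle introduced by this change.
DecalSystem.DecalHandle handle = DecalSystem.instance.AddDecal(transform, m_DrawDistance, m_FadeScale, m_Material);
// Transform or parameters changed (e.g. the projector is selected and moved):
DecalSystem.instance.UpdateCachedData(transform, m_DrawDistance, m_FadeScale, handle);
// Tear-down; handles are validated before removal.
if (DecalSystem.DecalHandle.IsValid(handle))
    DecalSystem.instance.RemoveDecal(handle);
handle = null;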

15
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Camera/HDCameraEditor.cs


using System;
using UnityEngine.Assertions;
using UnityEngine.Experimental.Rendering;
using UnityEngine.Experimental.Rendering.HDPipeline;
using UnityEngine.Rendering.PostProcessing;

[CanEditMultipleObjects]
partial class HDCameraEditor : Editor
{
[MenuItem("CONTEXT/Camera/Remove HD Camera", false, 0)]
static void RemoveLight(MenuCommand menuCommand)
{
GameObject go = ((Camera)menuCommand.context).gameObject;
Assert.IsNotNull(go);
Undo.SetCurrentGroupName("Remove HD Camera");
Undo.DestroyObjectImmediate(go.GetComponent<Camera>());
Undo.DestroyObjectImmediate(go.GetComponent<HDAdditionalCameraData>());
}
SerializedHDCamera m_SerializedCamera;
HDCameraUI m_UIState = new HDCameraUI();

3
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Camera/HDCameraUI.cs


Inspector = new []
{
SectionPrimarySettings,
SectionPhysicalSettings,
// Not used for now
//SectionPhysicalSettings,
SectionCaptureSettings,
SectionOutputSettings,
SectionXRSettings,

5
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/EditorRenderPipelineResources/ReflectionProbesPreview.shader


_Exposure("_Exposure", Range(-10.0,10.0)) = 0.0
}
SubShader
SubShader
Tags{ "RenderType" = "Opaque" "Queue" = "Transparent" }
Tags{ "RenderPipeline" = "HDRenderPipeline" "RenderType" = "Opaque" "Queue" = "Transparent" }
ZWrite On
Cull Back

2
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/HDAssetFactory.cs


newAsset.computeGgxIblSampleData = Load<ComputeShader>(HDRenderPipelinePath + "Material/GGXConvolution/ComputeGgxIblSampleData.compute");
newAsset.GGXConvolve = Load<Shader>(HDRenderPipelinePath + "Material/GGXConvolution/GGXConvolve.shader");
newAsset.opaqueAtmosphericScattering = Load<Shader>(HDRenderPipelinePath + "Sky/OpaqueAtmosphericScattering.shader");
newAsset.hdriSky = Load<Shader>(HDRenderPipelinePath + "Sky/HDRISky/HDRISky.shader");
newAsset.proceduralSky = Load<Shader>(HDRenderPipelinePath + "Sky/ProceduralSky/ProceduralSky.shader");
// Utilities / Core
newAsset.encodeBC6HCS = Load<ComputeShader>(CorePath + "CoreResources/EncodeBC6H.compute");

2
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/HDRenderPipelineMenuItems.cs


}
else if (add.lightTypeExtent == LightTypeExtent.Line)
{
add.areaIntensity = l.intensity / LightUtils.calculateLineLightArea(1.0f, add.shapeWidth);
add.areaIntensity = l.intensity / LightUtils.CalculateLineLightIntensity(1.0f, add.shapeWidth);
}
}

6
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Lighting/HDLightEditor.Styles.cs


public readonly GUIContent indirectBounceShadowWarning = new GUIContent("Realtime indirect bounce shadowing is not supported for Spot and Point lights.");
// Additional light data
public readonly GUIContent directionalIntensity = new GUIContent("Intensity (Lux)", "");
public readonly GUIContent punctualIntensity = new GUIContent("Intensity (Lumen)", "");
public readonly GUIContent areaIntensity = new GUIContent("Intensity (Lumen)", "");
public readonly GUIContent directionalIntensity = new GUIContent("Intensity (Lux)", "Illuminance of the directional light at ground level in lux.");
public readonly GUIContent punctualIntensity = new GUIContent("Intensity (Lumen)", "Luminous power of the light in lumen. Spotlight are considered as point light with barndoor so match intensity of a point light.");
public readonly GUIContent areaIntensity = new GUIContent("Intensity (Lumen)", "Luminous power of the light in lumen.");
public readonly GUIContent maxSmoothness = new GUIContent("Max Smoothness", "Very low cost way of faking spherical area lighting. This will modify the roughness of the material lit. This is useful when the specular highlight is too small or too sharp.");
public readonly GUIContent affectDiffuse = new GUIContent("Affect Diffuse", "This will disable diffuse lighting for this light. Doesn't save performance, diffuse lighting is still computed.");

29
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Lighting/HDLightEditor.cs


using System;
using UnityEngine;
using UnityEngine.Assertions;
using UnityEngine.Experimental.Rendering;
using UnityEngine.Experimental.Rendering.HDPipeline;

[CustomEditorForRenderPipeline(typeof(Light), typeof(HDRenderPipelineAsset))]
sealed partial class HDLightEditor : LightEditor
{
[MenuItem("CONTEXT/Light/Remove HD Light", false,0)]
static void RemoveLight(MenuCommand menuCommand)
{
GameObject go = ( (Light) menuCommand.context ).gameObject;
Assert.IsNotNull(go);
Undo.IncrementCurrentGroup();
Undo.DestroyObjectImmediate(go.GetComponent<Light>());
Undo.DestroyObjectImmediate(go.GetComponent<HDAdditionalLightData>());
Undo.DestroyObjectImmediate(go.GetComponent<AdditionalShadowData>());
}
sealed class SerializedLightData
{
public SerializedProperty directionalIntensity;

base.OnEnable();
// Get & automatically add additional HD data if not present
var lightData = CoreEditorUtils.GetAdditionalData<HDAdditionalLightData>(targets);
var lightData = CoreEditorUtils.GetAdditionalData<HDAdditionalLightData>(targets, HDAdditionalLightData.InitDefaultHDAdditionalLightData);
var shadowData = CoreEditorUtils.GetAdditionalData<AdditionalShadowData>(targets, HDAdditionalShadowData.InitDefaultHDAdditionalShadowData);
m_SerializedAdditionalLightData = new SerializedObject(lightData);
m_SerializedAdditionalShadowData = new SerializedObject(shadowData);

m_SerializedAdditionalLightData.Update();
m_SerializedAdditionalShadowData.Update();
// Disable the default light editor for the release, it is just used for development
/*
// Temporary toggle to go back to the old editor & separate additional data
bool useOldInspector = m_AdditionalLightData.useOldInspector.boolValue;

m_SerializedAdditionalLightData.ApplyModifiedProperties();
return;
}
*/
// New editor
ApplyAdditionalComponentsVisibility(true);

case LightShape.Rectangle:
// TODO: Currently, if we use the Area type (an offline-only light in legacy), the light will not exist at runtime
//m_BaseData.type.enumValueIndex = (int)LightType.Area;
// In case of change, think to update InitDefaultHDAdditionalLightData()
settings.lightType.enumValueIndex = (int)LightType.Point;
m_AdditionalLightData.lightTypeExtent.enumValueIndex = (int)LightTypeExtent.Rectangle;
EditorGUILayout.PropertyField(m_AdditionalLightData.shapeWidth, s_Styles.shapeWidthRect);

}
}
// Caution: this function must match the one in HDAdditionalLightData.ConvertPhysicalLightIntensityToLightIntensity - any change needs to be replicated
void UpdateLightIntensity()
{
switch (m_LightShape)

break;
case LightShape.Line:
settings.intensity.floatValue = LightUtils.calculateLineLightArea(m_AdditionalLightData.areaIntensity.floatValue, m_AdditionalLightData.shapeWidth.floatValue);
settings.intensity.floatValue = LightUtils.CalculateLineLightIntensity(m_AdditionalLightData.areaIntensity.floatValue, m_AdditionalLightData.shapeWidth.floatValue);
break;
}
}

if (EditorGUI.EndChangeCheck())
{
m_AdditionalLightData.fadeDistance.floatValue = Mathf.Max(m_AdditionalLightData.fadeDistance.floatValue, 0.01f);
((Light)target).SetLightDirty(); // Should be applied only to parameters that affect GI, but this keeps the code cleaner
}
}

if (EditorGUI.EndChangeCheck())
{
// Link min to max and don't expose normalBiasScale (useless when min == max)
m_AdditionalShadowData.normalBiasMax = m_AdditionalShadowData.normalBiasMin;
m_AdditionalShadowData.normalBiasMax.floatValue = m_AdditionalShadowData.normalBiasMin.floatValue;
}
//EditorGUILayout.PropertyField(m_AdditionalShadowData.normalBiasMax, s_Styles.normalBiasMax);
//EditorGUILayout.PropertyField(m_AdditionalShadowData.normalBiasScale, s_Styles.normalBiasScale);

var type = settings.lightType;
// Special case for multi-selection: don't resolve light shape or it'll corrupt lights
if (type.hasMultipleDifferentValues)
if (type.hasMultipleDifferentValues
|| m_AdditionalLightData.lightTypeExtent.hasMultipleDifferentValues)
{
m_LightShape = (LightShape)(-1);
return;

11
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Lighting/Reflection/HDReflectionProbeEditor.ProbeUtility.cs


using UnityEngine;
using UnityEngine.Experimental.Rendering;
using Object = UnityEngine.Object;

{
void ChangeVisibilityOfAllTargets(bool visibility)
{
if (targets.Length == 0) return;
var p = (ReflectionProbe)targets[i];
HDReflectionProbeEditorUtility.ChangeVisibility(p, visibility);
if (targets[i] != null)
{
var p = (ReflectionProbe)targets[i];
HDReflectionProbeEditorUtility.ChangeVisibility(p, visibility);
}
}
}

17
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Lighting/Reflection/HDReflectionProbeEditor.cs


using System.Collections.Generic;
using UnityEngine.Assertions;
using UnityEngine.Experimental.Rendering;
using UnityEngine.Experimental.Rendering.HDPipeline;
using UnityEngine.Rendering;

[CanEditMultipleObjects]
partial class HDReflectionProbeEditor : Editor
{
[MenuItem("CONTEXT/ReflectionProbe/Remove HD Reflection Probe", false, 0)]
static void RemoveLight(MenuCommand menuCommand)
{
GameObject go = ((ReflectionProbe)menuCommand.context).gameObject;
Assert.IsNotNull(go);
Undo.SetCurrentGroupName("Remove HD Reflection Probe");
Undo.DestroyObjectImmediate(go.GetComponent<ReflectionProbe>());
Undo.DestroyObjectImmediate(go.GetComponent<HDAdditionalReflectionData>());
Undo.DestroyObjectImmediate(go.GetComponent<MeshRenderer>());
Undo.DestroyObjectImmediate(go.GetComponent<MeshFilter>());
}
static Dictionary<ReflectionProbe, HDReflectionProbeEditor> s_ReflectionProbeEditors = new Dictionary<ReflectionProbe, HDReflectionProbeEditor>();
static HDReflectionProbeEditor GetEditorFor(ReflectionProbe p)

17
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Lighting/Reflection/HDReflectionProbeUI.Drawers.cs


static void Drawer_AdditionalSettings(HDReflectionProbeUI s, SerializedHDReflectionProbe p, Editor owner)
{
EditorGUILayout.PropertyField(p.dimmer);
EditorGUILayout.PropertyField(p.weight);
EditorGUI.BeginChangeCheck();
EditorGUILayout.PropertyField(p.multiplier);
if (EditorGUI.EndChangeCheck())
p.multiplier.floatValue = Mathf.Max(0.0f, p.multiplier.floatValue);
if (p.so.targetObjects.Length == 1)
{

var blendDistance = p.blendDistancePositive.vector3Value.x;
EditorGUI.BeginChangeCheck();
EditorGUI.showMixedValue = p.blendDistancePositive.hasMultipleDifferentValues;
blendDistance = EditorGUILayout.Slider(CoreEditorUtils.GetContent("Blend Distance|Area around the probe where it is blended with other probes. Only used in deferred probes."), blendDistance, 0, maxBlendDistance);
if (EditorGUI.EndChangeCheck())
{

var blendNormalDistance = p.blendNormalDistancePositive.vector3Value.x;
EditorGUI.BeginChangeCheck();
EditorGUI.showMixedValue = p.blendNormalDistancePositive.hasMultipleDifferentValues;
blendNormalDistance = EditorGUILayout.Slider(CoreEditorUtils.GetContent("Blend Normal Distance|Area around the probe where the normals influence the probe. Only used in deferred probes."), blendNormalDistance, 0, maxBlendDistance);
if (EditorGUI.EndChangeCheck())
{

EditorGUI.showMixedValue = false;
EditorGUILayout.PropertyField(p.influenceSphereRadius, CoreEditorUtils.GetContent("Radius"));
EditorGUILayout.PropertyField(p.boxOffset, CoreEditorUtils.GetContent("Sphere Offset|The center of the sphere in which the reflections will be applied to objects. The value is relative to the position of the Game Object."));

{
EditorGUILayout.PropertyField(p.renderDynamicObjects, CoreEditorUtils.GetContent("Dynamic Objects|If enabled dynamic objects are also rendered into the cubemap"));
p.customBakedTexture.objectReferenceValue = EditorGUILayout.ObjectField(CoreEditorUtils.GetContent("Cubemap"), p.customBakedTexture.objectReferenceValue, typeof(Cubemap), false);
EditorGUI.showMixedValue = p.customBakedTexture.hasMultipleDifferentValues;
EditorGUI.BeginChangeCheck();
var customBakedTexture = EditorGUILayout.ObjectField(CoreEditorUtils.GetContent("Cubemap"), p.customBakedTexture.objectReferenceValue, typeof(Cubemap), false);
EditorGUI.showMixedValue = false;
if (EditorGUI.EndChangeCheck())
p.customBakedTexture.objectReferenceValue = customBakedTexture;
}
static void Drawer_ModeSettingsRealtime(HDReflectionProbeUI s, SerializedHDReflectionProbe p, Editor owner)

4
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Lighting/Reflection/HDReflectionProbeUI.cs


{
operations = 0;
SetModeTarget(data.mode.intValue);
SetShapeTarget(data.influenceShape.intValue);
SetModeTarget(data.mode.hasMultipleDifferentValues ? -1 : data.mode.intValue);
SetShapeTarget(data.influenceShape.hasMultipleDifferentValues ? -1 : data.influenceShape.intValue);
isSectionExpandedSeparateProjection.value = data.useSeparateProjectionVolume.boolValue;
base.Update();

8
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Lighting/Reflection/PlanarReflectionProbeUI.Drawers.cs


static void Drawer_SectionInfluenceSettings(PlanarReflectionProbeUI s, SerializedPlanarReflectionProbe d, Editor o)
{
EditorGUILayout.PropertyField(d.dimmer, _.GetContent("Dimmer"));
EditorGUILayout.PropertyField(d.weight, _.GetContent("Weight"));
EditorGUI.BeginChangeCheck();
EditorGUILayout.PropertyField(d.multiplier, _.GetContent("Multiplier"));
if (EditorGUI.EndChangeCheck())
d.multiplier.floatValue = Mathf.Max(0.0f, d.multiplier.floatValue);
}
static void Drawer_FieldCaptureType(PlanarReflectionProbeUI s, SerializedPlanarReflectionProbe d, Editor o)

6
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Lighting/Reflection/SerializedHDReflectionProbe.cs


internal SerializedProperty blendNormalDistanceNegative;
internal SerializedProperty boxSideFadePositive;
internal SerializedProperty boxSideFadeNegative;
internal SerializedProperty dimmer;
internal SerializedProperty weight;
internal SerializedProperty multiplier;
internal SerializedProperty proxyVolumeComponent;

boxReprojectionVolumeSize = addso.Find((HDAdditionalReflectionData d) => d.boxReprojectionVolumeSize);
boxReprojectionVolumeCenter = addso.Find((HDAdditionalReflectionData d) => d.boxReprojectionVolumeCenter);
sphereReprojectionVolumeRadius = addso.Find((HDAdditionalReflectionData d) => d.sphereReprojectionVolumeRadius);
dimmer = addso.Find((HDAdditionalReflectionData d) => d.dimmer);
weight = addso.Find((HDAdditionalReflectionData d) => d.weight);
multiplier = addso.Find((HDAdditionalReflectionData d) => d.multiplier);
blendDistancePositive = addso.Find((HDAdditionalReflectionData d) => d.blendDistancePositive);
blendDistanceNegative = addso.Find((HDAdditionalReflectionData d) => d.blendDistanceNegative);
blendNormalDistancePositive = addso.Find((HDAdditionalReflectionData d) => d.blendNormalDistancePositive);

6
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Lighting/Reflection/SerializedPlanarReflectionProbe.cs


public SerializedProperty capturePositionMode;
public SerializedProperty captureMirrorPlaneLocalPosition;
public SerializedProperty captureMirrorPlaneLocalNormal;
public SerializedProperty dimmer;
public SerializedProperty weight;
public SerializedProperty multiplier;
public SerializedProperty mode;
public SerializedProperty refreshMode;
public SerializedProperty customTexture;

capturePositionMode = serializedObject.Find((PlanarReflectionProbe p) => p.capturePositionMode);
captureMirrorPlaneLocalPosition = serializedObject.Find((PlanarReflectionProbe p) => p.captureMirrorPlaneLocalPosition);
captureMirrorPlaneLocalNormal = serializedObject.Find((PlanarReflectionProbe p) => p.captureMirrorPlaneLocalNormal);
dimmer = serializedObject.Find((PlanarReflectionProbe p) => p.dimmer);
weight = serializedObject.Find((PlanarReflectionProbe p) => p.weight);
multiplier = serializedObject.Find((PlanarReflectionProbe p) => p.multiplier);
mode = serializedObject.Find((PlanarReflectionProbe p) => p.mode);
refreshMode = serializedObject.Find((PlanarReflectionProbe p) => p.refreshMode);
customTexture = serializedObject.Find((PlanarReflectionProbe p) => p.customTexture);

1
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Material/DiffusionProfile/DrawDiffusionProfile.shader


{
SubShader
{
Tags{ "RenderPipeline" = "HDRenderPipeline" }
Pass
{
Cull Off

1
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Material/DiffusionProfile/DrawTransmittanceGraph.shader


{
SubShader
{
Tags{ "RenderPipeline" = "HDRenderPipeline" }
Pass
{
Cull Off

4
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Material/Lit/LitUI.cs


float amplitude = (heightMax[layerIndex].floatValue - heightMin[layerIndex].floatValue);
heightAmplitude[layerIndex].floatValue = amplitude * 0.01f; // Conversion centimeters to meters.
heightCenter[layerIndex].floatValue = -(heightMin[layerIndex].floatValue + offset) / amplitude;
heightCenter[layerIndex].floatValue = -(heightMin[layerIndex].floatValue + offset) / Mathf.Max(1e-6f, amplitude);
heightCenter[layerIndex].floatValue = -heightOffset[layerIndex].floatValue / amplitude + heightTessCenter[layerIndex].floatValue;
heightCenter[layerIndex].floatValue = -heightOffset[layerIndex].floatValue / Mathf.Max(1e-6f, amplitude) + heightTessCenter[layerIndex].floatValue;
}
}
}

44
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Material/Lit/StandardsToHDLitMaterialUpgrader.cs


RenameTexture("_SpecGlossMap", "_SpecularColorMap");
}
}
public override void Convert(Material srcMaterial, Material dstMaterial)
{
dstMaterial.hideFlags = HideFlags.DontUnloadUnusedAsset;

// Metallic
bool hasMetallic = false;
Texture metallicMap;
Texture metallicMap = Texture2D.blackTexture;
if (hasMetallic) metallicMap = TextureCombiner.GetTextureSafe(srcMaterial, "_MetallicGlossMap", Color.white);
if (hasMetallic)
{
metallicMap = TextureCombiner.GetTextureSafe(srcMaterial, "_MetallicGlossMap", Color.white);
}
else
{
float metallicValue = Mathf.Pow(srcMaterial.GetFloat("_Metallic"), 2.2f); // Convert _Metallic value from Gamma to Linear
dstMaterial.SetFloat("_Metallic", metallicValue);
metallicMap = TextureCombiner.TextureFromColor(Color.white * metallicValue);
}
else
metallicMap = Texture2D.blackTexture;
Texture occlusionMap;
Texture occlusionMap = Texture2D.whiteTexture;
Texture detailMaskMap;
Texture detailMaskMap = Texture2D.whiteTexture;
if (hasDetailMask) detailMaskMap = TextureCombiner.GetTextureSafe(srcMaterial, "_DetailMask", Color.white);
// Smoothness

if (hasSmoothness)
smoothnessMap = (Texture2D)TextureCombiner.GetTextureSafe(srcMaterial, "_SpecGlossMap", Color.grey);
}
}
else
{
string smoothnessTextureChannel = "_MainTex";

Texture2D maskMap;
TextureCombiner maskMapCombiner = new TextureCombiner(
TextureCombiner.GetTextureSafe(srcMaterial, "_MetallicGlossMap", Color.white), 4, // Metallic
TextureCombiner.GetTextureSafe(srcMaterial, "_OcclusionMap", Color.white), 4, // Occlusion
TextureCombiner.GetTextureSafe(srcMaterial, "_DetailMask", Color.white), 4, // Detail Mask
smoothnessMap, (srcMaterial.shader.name == Standard_Rough)?-4:3 // Smoothness Texture
metallicMap, 0, // R: Metallic from red
occlusionMap, 0, // G: Occlusion from red
detailMaskMap, 0, // B: Detail Mask from red
smoothnessMap, (srcMaterial.shader.name == Standard_Rough)?-4:3 // A: Smoothness Texture from inverse greyscale for roughness setup, or alpha
dstMaterial.SetFloat("_Metallic", 1f); // Force _Metallic value to 1, to use the value stored in the mask map without modification
string maskMapPath = AssetDatabase.GetAssetPath(srcMaterial);
maskMapPath = maskMapPath.Remove(maskMapPath.Length-4) + "_MaskMap.png";
maskMap = maskMapCombiner.Combine( maskMapPath );

dstMaterial.SetFloat("_AlphaCutoffEnable", 0);
dstMaterial.SetFloat("_EnableBlendModePreserveSpecularLighting", 0);
break;
case 3: // Transparent -> Alpha
case 3: // Transparent -> Alpha
dstMaterial.SetFloat("_SurfaceType", 1);
dstMaterial.SetFloat("_BlendMode", 0);
dstMaterial.SetFloat("_AlphaCutoffEnable", 0);

// Emission: Convert the HDR emissive color to ldr color + intensity
Color hdrEmission = srcMaterial.GetColor("_EmissionColor");
float intensity = Mathf.Max(hdrEmission.r, Mathf.Max(hdrEmission.g, hdrEmission.b));
if (intensity > 1f)
{
hdrEmission.r /= intensity;

intensity = 1f;
intensity = Mathf.Pow(intensity, 2.2f); // Gamma to Linear conversion
}
}
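When the source material has no _MetallicGlossMap, the upgrader above converts the scalar _Metallic value from gamma to linear and bakes it into a solid texture that feeds the R channel of the mask map. A compact restatement of that scalar path, using only calls shown in the hunk:
// Minimal sketch of the scalar-metallic path from the hunk above.
float metallicValue = Mathf.Pow(srcMaterial.GetFloat("_Metallic"), 2.2f); // gamma to linear
dstMaterial.SetFloat("_Metallic", metallicValue);
Texture metallicMap = TextureCombiner.TextureFromColor(Color.white * metallicValue); // solid texture for the mask map's R channel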

12
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Material/Unlit/BaseUnlitUI.cs


protected const string kEnableMotionVectorForVertexAnimation = "_EnableMotionVectorForVertexAnimation";
protected const string kZTestDepthEqualForOpaque = "_ZTestDepthEqualForOpaque";
protected const string kZTestGBuffer = "_ZTestGBuffer";
protected const string kZTestModeDistortion = "_ZTestModeDistortion";
// See comment in LitProperties.hlsl

CoreUtils.SetKeyword(material, "_BLENDMODE_ALPHA", false);
CoreUtils.SetKeyword(material, "_BLENDMODE_ADD", false);
CoreUtils.SetKeyword(material, "_BLENDMODE_PRE_MULTIPLY", false);
// Alpha tested materials always have a prepass where we perform the clip.
// Then during Gbuffer pass we don't perform the clip test, so we need to use depth equal in this case.
if (alphaTestEnable)
{
material.SetInt(kZTestGBuffer, (int)UnityEngine.Rendering.CompareFunction.Equal);
}
else
{
material.SetInt(kZTestGBuffer, (int)UnityEngine.Rendering.CompareFunction.LessEqual);
}
// If the material uses kZTestDepthEqualForOpaque, it means it requires a depth-equal test for opaque, but transparents are not affected
if (material.HasProperty(kZTestDepthEqualForOpaque))

4
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Material/Unlit/UnlitsToHDUnlitUpgrader.cs


public class UnlitsToHDUnlitUpgrader : MaterialUpgrader
{
string Unlit_Color = "Unlit/Color";
string Unlit_Texture = "Unlit/Texture";
//string Unlit_Texture = "Unlit/Texture";
string Unlit_Transparent = "Unlit/Transparent";
string Unlit_Cutout = "Unlit/Transparent Cutout";

//dstMaterial.hideFlags = HideFlags.DontUnloadUnusedAsset;
base.Convert(srcMaterial, dstMaterial);
HDEditorUtils.ResetMaterialKeywords(dstMaterial);
}
}

28
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/RenderLoopSettings/FrameSettingsUI.cs


CED.FadeGroup(
(s, d, o, i) => s.isSectionExpandedUseForwardOnly,
FadeOption.None,
CED.Action(Drawer_FieldUseDepthPrepassWithDefferedRendering),
CED.FadeGroup(
(s, d, o, i) => s.isSectionExpandedUseDepthPrepass,
FadeOption.Indent,
CED.Action(Drawer_FieldRenderAlphaTestOnlyInDeferredPrepass))),
CED.Action(Drawer_FieldUseDepthPrepassWithDefferedRendering)
),
CED.Action(Drawer_SectionOtherRenderingSettings)
)
);

public AnimBool isSectionExpandedXRSettings { get { return m_AnimBools[3]; } }
public AnimBool isSectionExpandedXRSupported { get { return m_AnimBools[4]; } }
public AnimBool isSectionExpandedUseForwardOnly { get { return m_AnimBools[5]; } }
public AnimBool isSectionExpandedUseDepthPrepass { get { return m_AnimBools[6]; } }
public LightLoopSettingsUI lightLoopSettings = new LightLoopSettingsUI();

{
isSectionExpandedXRSupported.target = PlayerSettings.virtualRealitySupported;
isSectionExpandedUseForwardOnly.target = !data.enableForwardRenderingOnly.boolValue;
isSectionExpandedUseDepthPrepass.target = data.enableDepthPrepassWithDeferredRendering.boolValue;
lightLoopSettings.Update();
}

EditorGUILayout.PropertyField(p.enablePostprocess, _.GetContent("Enable Postprocess"));
}
static void Drawer_SectionRenderingSettings(FrameSettingsUI s, SerializedFrameSettings p, Editor owner)
{
EditorGUILayout.PropertyField(p.enableForwardRenderingOnly, _.GetContent("Enable Forward Rendering Only"));
EditorGUILayout.PropertyField(p.enableDepthPrepassWithDeferredRendering, _.GetContent("Enable Depth Prepass With Deferred Rendering"));
EditorGUILayout.PropertyField(p.enableAlphaTestOnlyInDeferredPrepass, _.GetContent("Enable Alpha Test Only In Deferred Prepass"));
EditorGUILayout.PropertyField(p.enableAsyncCompute, _.GetContent("Enable Async Compute"));
EditorGUILayout.PropertyField(p.enableOpaqueObjects, _.GetContent("Enable Opaque Objects"));
EditorGUILayout.PropertyField(p.enableTransparentObjects, _.GetContent("Enable Transparent Objects"));
EditorGUILayout.PropertyField(p.enableMSAA, _.GetContent("Enable MSAA"));
}
static void Drawer_FieldForwardRenderingOnly(FrameSettingsUI s, SerializedFrameSettings p, Editor owner)
{
EditorGUILayout.PropertyField(p.enableForwardRenderingOnly, _.GetContent("Enable Forward Rendering Only"));

{
EditorGUILayout.PropertyField(p.enableDepthPrepassWithDeferredRendering, _.GetContent("Enable Depth Prepass With Deferred Rendering"));
}
static void Drawer_FieldRenderAlphaTestOnlyInDeferredPrepass(FrameSettingsUI s, SerializedFrameSettings p, Editor owner)
{
EditorGUILayout.PropertyField(p.enableAlphaTestOnlyInDeferredPrepass, _.GetContent("Enable Alpha Test Only In Deferred Prepass"));
}
static void Drawer_SectionOtherRenderingSettings(FrameSettingsUI s, SerializedFrameSettings p, Editor owner)

9
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/RenderLoopSettings/LightLoopSettingsUI.cs


static void Drawer_SectionLightLoopSettings(LightLoopSettingsUI s, SerializedLightLoopSettings p, Editor owner)
{
EditorGUILayout.PropertyField(p.enableTileAndCluster, _.GetContent("Enable Tile And Cluster"));
// Uncomment if you re-enable LIGHTLOOP_SINGLE_PASS multi_compile in lit*.shader
//EditorGUILayout.PropertyField(p.enableTileAndCluster, _.GetContent("Enable Tile And Cluster"));
//EditorGUI.indentLevel++;
EditorGUI.indentLevel++;
EditorGUILayout.PropertyField(p.enableFptlForForwardOpaque, _.GetContent("Enable FPTL For Forward Opaque"));
EditorGUILayout.PropertyField(p.enableBigTilePrepass, _.GetContent("Enable Big Tile Prepass"));
EditorGUILayout.PropertyField(p.enableComputeLightEvaluation, _.GetContent("Enable Compute Light Evaluation"));

}
EditorGUILayout.EndFadeGroup();
GUILayout.EndVertical();
EditorGUI.indentLevel--;
//EditorGUI.indentLevel--;
}
}
}

2
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/RenderLoopSettings/RenderPipelineSettingsUI.cs


EditorGUILayout.PropertyField(d.supportForwardOnly, _.GetContent("Support Forward Only"));
EditorGUILayout.PropertyField(d.supportMotionVectors, _.GetContent("Support Motion Vectors"));
EditorGUILayout.PropertyField(d.supportStereo, _.GetContent("Support Stereo Rendering"));
EditorGUILayout.PropertyField(d.enableUltraQualitySSS, _.GetContent("Enable Ultra Quality SSS"));
EditorGUILayout.PropertyField(d.enableUltraQualitySSS, _.GetContent("Increase SSS Sample Count"));
--EditorGUI.indentLevel;
}
}

2
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/RenderLoopSettings/SerializedFrameSettings.cs


public SerializedProperty enableForwardRenderingOnly;
public SerializedProperty enableDepthPrepassWithDeferredRendering;
public SerializedProperty enableAlphaTestOnlyInDeferredPrepass;
public SerializedProperty enableTransparentPrepass;
public SerializedProperty enableMotionVectors;

specularGlobalDimmer = root.Find((FrameSettings d) => d.specularGlobalDimmer);
enableForwardRenderingOnly = root.Find((FrameSettings d) => d.enableForwardRenderingOnly);
enableDepthPrepassWithDeferredRendering = root.Find((FrameSettings d) => d.enableDepthPrepassWithDeferredRendering);
enableAlphaTestOnlyInDeferredPrepass = root.Find((FrameSettings d) => d.enableAlphaTestOnlyInDeferredPrepass);
enableTransparentPrepass = root.Find((FrameSettings d) => d.enableTransparentPrepass);
enableMotionVectors = root.Find((FrameSettings d) => d.enableMotionVectors);
enableObjectMotionVectors = root.Find((FrameSettings d) => d.enableObjectMotionVectors);

6
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Editor/Sky/HDRISky/HDRISkyEditor.cs


public class HDRISkyEditor
: SkySettingsEditor
{
SerializedDataParameter m_SkyHDRI;
SerializedDataParameter m_hdriSky;
public override void OnEnable()
{

m_SkyHDRI = Unpack(o.Find(x => x.skyHDRI));
m_hdriSky = Unpack(o.Find(x => x.hdriSky));
PropertyField(m_SkyHDRI);
PropertyField(m_hdriSky);
EditorGUILayout.Space();

101
ScriptableRenderPipeline/HDRenderPipeline/HDRP/HDRenderPipeline.cs


}
RenderStateBlock m_DepthStateOpaque;
RenderStateBlock m_DepthStateOpaqueWithPrepass;
// Detect when the window size is changing
int m_CurrentWidth;

InitializeDebugMaterials();
m_MaterialList.ForEach(material => material.Build(asset));
m_IBLFilterGGX = new IBLFilterGGX(asset.renderPipelineResources);

m_DebugDisplaySettings.RegisterDebug();
#if UNITY_EDITOR
// We don't need the debug settings of the Default camera at runtime (each camera has its own debug settings)
FrameSettings.RegisterDebug("Default Camera", m_Asset.GetFrameSettings());
// We don't need the debug settings of the Scene View at runtime (each camera has its own debug settings)
FrameSettings.RegisterDebug("Scene View", m_Asset.GetFrameSettings());
#endif
InitializeRenderTextures();

void SetRenderingFeatures()
{
// Set subshader pipeline tag
Shader.globalRenderPipeline = "HDRenderPipeline";
// HD use specific GraphicsSettings
GraphicsSettings.lightsUseLinearIntensity = true;
GraphicsSettings.lightsUseColorTemperature = true;

supportedLightmapsModes = LightmapsMode.NonDirectional | LightmapsMode.CombinedDirectional,
rendererSupportsLightProbeProxyVolumes = true,
rendererSupportsMotionVectors = true,
rendererSupportsReceiveShadows = true,
rendererSupportsReceiveShadows = false,
rendererSupportsReflectionProbes = true
};

#endif
}
void UnsetRenderingFeatures()
{
Shader.globalRenderPipeline = "";
SupportedRenderingFeatures.active = new SupportedRenderingFeatures();
Lightmapping.ResetDelegate();
}
void InitializeDebugMaterials()
{
m_DebugViewMaterialGBuffer = CoreUtils.CreateEngineMaterial(m_Asset.renderPipelineResources.debugViewMaterialGBufferShader);

depthState = new DepthState(true, CompareFunction.LessEqual),
mask = RenderStateMask.Depth
};
// When doing a prepass, we don't need to write the depth anymore.
// Moreover, we need to use DepthEqual because for alpha tested materials we don't do the clip in the shader anymore (otherwise HiZ does not work on PS4)
m_DepthStateOpaqueWithPrepass = new RenderStateBlock
{
depthState = new DepthState(false, CompareFunction.Equal),
mask = RenderStateMask.Depth
};
}
public void OnSceneLoad()

m_SSSBufferManager.Cleanup();
m_SkyManager.Cleanup();
m_VolumetricLightingModule.Cleanup();
m_IBLFilterGGX.Cleanup();
SupportedRenderingFeatures.active = new SupportedRenderingFeatures();
Lightmapping.ResetDelegate();
UnsetRenderingFeatures();
FrameSettings.UnRegisterDebug("Scene View");
#endif
}

}
}
bool IsConsolePlatform()
{
return SystemInfo.graphicsDeviceType == GraphicsDeviceType.PlayStation4 ||
SystemInfo.graphicsDeviceType == GraphicsDeviceType.XboxOne ||
SystemInfo.graphicsDeviceType == GraphicsDeviceType.XboxOneD3D12;
}
// For now we consider only PS4 to be able to read from a bound depth buffer.
// TODO: test/implement for other platforms.
return SystemInfo.graphicsDeviceType != GraphicsDeviceType.PlayStation4 &&
SystemInfo.graphicsDeviceType != GraphicsDeviceType.XboxOne &&
SystemInfo.graphicsDeviceType != GraphicsDeviceType.XboxOneD3D12;
// For now we consider all consoles to be able to read from a bound depth buffer.
return !IsConsolePlatform();
}
bool NeedStencilBufferCopy()

}
}
// Disable postprocess if we enable debug mode
if (m_CurrentDebugDisplaySettings.fullScreenDebugMode == FullScreenDebugMode.None && m_CurrentDebugDisplaySettings.IsDebugDisplayEnabled())
{
m_FrameSettings.enablePostprocess = false;
}
var postProcessLayer = camera.GetComponent<PostProcessLayer>();
var hdCamera = HDCamera.Get(camera, postProcessLayer, m_FrameSettings);

// RenderDepthPrepass render both opaque and opaque alpha tested based on engine configuration.
// Forward only renderer: We always render everything
// Deferred renderer: We render a depth prepass only if the engine requests it. We can decide whether we render everything or only opaque alpha-tested objects.
// Deferred renderer: We always render a depth prepass for alpha-tested objects (optimization); other objects are rendered based on engine configuration.
// Forward opaque with deferred renderer (DepthForwardOnly pass): We always render everything
void RenderDepthPrepass(CullResults cull, HDCamera hdCamera, ScriptableRenderContext renderContext, CommandBuffer cmd, bool forcePrepass)
{

// Guidelines: In deferred, by default there is no opaque in forward. However it is possible to force an opaque material to render in forward
// by using the "ForwardOnly" pass. In this case the .shader should not have a "Forward" pass but only a "ForwardOnly" pass.
// It must also have a "DepthForwardOnly" and no "DepthOnly" pass, as forward materials (in either deferred or forward-only rendering) always have a depth pass.
// In case of forward-only rendering we have a depth prepass. In case of the deferred renderer, it is optional.
bool addFullDepthPrepass = m_FrameSettings.enableForwardRenderingOnly || m_FrameSettings.enableDepthPrepassWithDeferredRendering;
bool addAlphaTestedOnly = !m_FrameSettings.enableForwardRenderingOnly && m_FrameSettings.enableDepthPrepassWithDeferredRendering && m_FrameSettings.enableAlphaTestOnlyInDeferredPrepass;
// If a forward material has no depth prepass, lighting can be incorrect (deferred shadowing, SSAO); this may be acceptable depending on usage
bool addFullDepthPrepass = forcePrepass || m_FrameSettings.enableForwardRenderingOnly || m_FrameSettings.enableDepthPrepassWithDeferredRendering;
using (new ProfilingSample(cmd, addAlphaTestedOnly ? "Depth Prepass alpha test" : "Depth Prepass", CustomSamplerId.DepthPrepass.GetSampler()))
using (new ProfilingSample(cmd, !addFullDepthPrepass ? "Depth Prepass alpha test" : "Depth Prepass", CustomSamplerId.DepthPrepass.GetSampler()))
if (forcePrepass || (addFullDepthPrepass && !addAlphaTestedOnly)) // Always true in case of forward rendering, use in case of deferred rendering if requesting a full depth prepass
if (addFullDepthPrepass) // Always true in case of forward rendering, use in case of deferred rendering if requesting a full depth prepass
{
// We render the plain opaque objects first, as opaque alpha-tested objects are more costly to render and can be rejected by early-z (but not Hi-Z, as it is disabled by the clip instruction)
// This is handled automatically with the RenderQueue value (OpaqueAlphaTested have a different value and thus are sorted after Opaque)

// We always do a DepthForwardOnly pass with all the opaque (including alpha test)
RenderOpaqueRenderList(cull, camera, renderContext, cmd, m_DepthForwardOnlyPassNames, 0, HDRenderQueue.k_RenderQueue_AllOpaque);
// Render Alpha test only if requested
if (addAlphaTestedOnly)
{
var renderQueueRange = new RenderQueueRange { min = (int)RenderQueue.AlphaTest, max = (int)RenderQueue.GeometryLast - 1 };
RenderOpaqueRenderList(cull, camera, renderContext, cmd, m_DepthOnlyPassNames, 0, renderQueueRange);
}
// Alpha tested materials always have a prepass.
var renderQueueRange = new RenderQueueRange { min = (int)RenderQueue.AlphaTest, max = (int)RenderQueue.GeometryLast - 1 };
RenderOpaqueRenderList(cull, camera, renderContext, cmd, m_DepthOnlyPassNames, 0, renderQueueRange);
}
}
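After this change the prepass decision collapses to a single flag, and alpha-tested materials still receive a prepass when no full prepass is requested. A condensed restatement of the hunk above, with all names taken from it and the surrounding control flow simplified:
// Condensed restatement of the new depth-prepass inputs (control flow simplified).
bool addFullDepthPrepass = forcePrepass || m_FrameSettings.enableForwardRenderingOnly || m_FrameSettings.enableDepthPrepassWithDeferredRendering;
// Full prepass: render all opaques (plain opaques first, then alpha-tested, sorted via RenderQueue).
// Otherwise: alpha-tested materials still get a prepass over the range below.
var renderQueueRange = new RenderQueueRange { min = (int)RenderQueue.AlphaTest, max = (int)RenderQueue.GeometryLast - 1 };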

{
// setup GBuffer for rendering
HDUtils.SetRenderTarget(cmd, hdCamera, m_GbufferManager.GetBuffersRTI(enableShadowMask), m_CameraDepthStencilBuffer);
// Render opaque objects into GBuffer
if (m_FrameSettings.enableDepthPrepassWithDeferredRendering)
{
// When using the depth prepass for opaque alpha test only, we need to use a regular depth test for normal opaque objects.
RenderOpaqueRenderList(cull, camera, renderContext, cmd, HDShaderPassNames.s_GBufferName, m_currentRendererConfigurationBakedLighting, HDRenderQueue.k_RenderQueue_OpaqueNoAlphaTest, m_FrameSettings.enableAlphaTestOnlyInDeferredPrepass ? m_DepthStateOpaque : m_DepthStateOpaqueWithPrepass);
// but for opaque alpha-tested objects we use a depth-equal test and no depth write, and we rely on the GbufferWithDepthPrepass shader pass
RenderOpaqueRenderList(cull, camera, renderContext, cmd, HDShaderPassNames.s_GBufferWithPrepassName, m_currentRendererConfigurationBakedLighting, HDRenderQueue.k_RenderQueue_OpaqueAlphaTest, m_DepthStateOpaqueWithPrepass);
}
else
{
// No depth prepass, use regular depth test - Note that we will render opaque then opaque alpha tested (based on the RenderQueue system)
RenderOpaqueRenderList(cull, camera, renderContext, cmd, HDShaderPassNames.s_GBufferName, m_currentRendererConfigurationBakedLighting, HDRenderQueue.k_RenderQueue_AllOpaque, m_DepthStateOpaque);
}
RenderOpaqueRenderList(cull, camera, renderContext, cmd, HDShaderPassNames.s_GBufferName, m_currentRendererConfigurationBakedLighting, HDRenderQueue.k_RenderQueue_AllOpaque);
m_GbufferManager.BindBufferAsTextures(cmd);
}

Vector2 pyramidScale = m_BufferPyramid.GetPyramidToScreenScale(hdCamera);
PushFullScreenDebugTextureMip(cmd, m_BufferPyramid.depthPyramid, m_BufferPyramid.GetPyramidLodCount(hdCamera), new Vector4(pyramidScale.x, pyramidScale.y, 0.0f, 0.0f), hdCamera, debugMode);
cmd.SetGlobalTexture(HDShaderIDs._PyramidDepthTexture, m_BufferPyramid.depthPyramid);
}
void RenderPostProcess(HDCamera hdcamera, CommandBuffer cmd, PostProcessLayer layer)

RenderTargetIdentifier source = m_CameraColorBuffer;
#if UNITY_EDITOR
bool tempHACK = true;
#else
// In theory in the player the only place where we have post process is the main camera with the RTHandle reference size, so we won't need to copy.
bool tempHACK = false;
#endif
// For consoles we are not allowed to resize the window, so don't use our hack.
bool tempHACK = !IsConsolePlatform();
if (tempHACK)
{
// TEMPORARY:

2
ScriptableRenderPipeline/HDRenderPipeline/HDRP/HDRenderPipelineAsset.asset


specularGlobalDimmer: 1
enableForwardRenderingOnly: 0
enableDepthPrepassWithDeferredRendering: 0
enableAlphaTestOnlyInDeferredPrepass: 0
enableTransparentPrepass: 1
enableMotionVectors: 1
enableObjectMotionVectors: 1

supportSSAO: 1
supportSubsurfaceScattering: 1
supportForwardOnly: 0
enableUltraQualitySSS: 0
supportDBuffer: 1
supportMSAA: 0
msaaSampleCount: 1

4
ScriptableRenderPipeline/HDRenderPipeline/HDRP/HDStringConstants.cs


public static readonly int _DensityVolumeIndexShift = Shader.PropertyToID("_DensityVolumeIndexShift");
public static readonly int g_isOrthographic = Shader.PropertyToID("g_isOrthographic");
public static readonly int g_iNrVisibLights = Shader.PropertyToID("g_iNrVisibLights");
public static readonly int g_mScrProjectionArr = Shader.PropertyToID("g_mScrProjectionArr");
public static readonly int g_mInvScrProjectionArr = Shader.PropertyToID("g_mInvScrProjectionArr");
public static readonly int g_iLog2NumClusters = Shader.PropertyToID("g_iLog2NumClusters");
public static readonly int g_screenSize = Shader.PropertyToID("g_screenSize");
public static readonly int g_iNumSamplesMSAA = Shader.PropertyToID("g_iNumSamplesMSAA");

1
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/Deferred.shader


SubShader
{
Tags{ "RenderPipeline" = "HDRenderPipeline" }
Pass
{
Stencil

55
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/Light/HDAdditionalLightData.cs


}
#endif
// Caution: this function must match the one in HDLightEditor.UpdateLightIntensity - any change needs to be replicated
public void ConvertPhysicalLightIntensityToLightIntensity()
{
var light = gameObject.GetComponent<Light>();
if (lightTypeExtent == LightTypeExtent.Punctual)
{
switch (light.type)
{
case LightType.Directional:
light.intensity = directionalIntensity;
break;
case LightType.Point:
light.intensity = LightUtils.ConvertPointLightIntensity(punctualIntensity);
break;
case LightType.Spot:
// Spot should use a conversion that takes the cone angle into account, so the intensity would vary with the angle.
// This is not easy for lighting artists to manipulate, so we simply treat any spot light as an occluded point light and reuse the same code.
light.intensity = LightUtils.ConvertPointLightIntensity(punctualIntensity);
// TODO: What to do with box shape ?
// var spotLightShape = (SpotLightShape)m_AdditionalspotLightShape.enumValueIndex;
break;
}
}
else if (lightTypeExtent == LightTypeExtent.Rectangle)
{
light.intensity = LightUtils.ConvertRectLightIntensity(areaIntensity, shapeWidth, shapeHeight);
}
else if (lightTypeExtent == LightTypeExtent.Line)
{
light.intensity = LightUtils.CalculateLineLightIntensity(areaIntensity, shapeWidth);
}
}
// As we have our own default value, we need to initialize the light intensity correctly
public static void InitDefaultHDAdditionalLightData(HDAdditionalLightData lightData)
{
// At first init we need to initialize correctly the default value
lightData.ConvertPhysicalLightIntensityToLightIntensity();
// Special treatment for Unity builtin area light. Change it to our rectangle light
var light = lightData.gameObject.GetComponent<Light>();
// Sanity check: lightData.lightTypeExtent is initialized to LightTypeExtent.Punctual (in case, for unknown reasons, we recreate additional data on an existing line)
if (light.type == LightType.Area && lightData.lightTypeExtent == LightTypeExtent.Punctual)
{
lightData.lightTypeExtent = LightTypeExtent.Rectangle;
light.type = LightType.Point; // Same as in HDLightEditor
}
}
}
}
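The editor-side UpdateLightIntensity and the runtime conversion above must stay in sync, as both "Caution" comments note. For reference, the punctual case is the standard lumen-to-candela relation; the helper below is only a hedged sketch of what LightUtils.ConvertPointLightIntensity is assumed to compute, since its implementation is not part of this diff:
// Assumed behaviour, for illustration only: an isotropic point light emitting phi lumen
// over the full sphere has an intensity of phi / (4 * pi) candela.
static float ConvertPointLightIntensitySketch(float lumen)
{
    return lumen / (4.0f * Mathf.PI);
}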

263
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/LightDefinition.cs


public struct EnvLightData
{
// Packing order depends on chronological access to avoid cache misses
// Caution: The struct needs to be aligned on 16 bytes (not strictly needed for a structured buffer, but better if we turn it into an array later).
public float capturePositionWSX;
public float capturePositionWSY;
public float capturePositionWSZ;
public Vector3 capturePositionWS;
public float proxyExtentsX;
public float proxyExtentsY;
public float proxyExtentsZ;
public Vector3 proxyExtents;
public float proxyPositionWSX;
public float proxyPositionWSY;
public float proxyPositionWSZ;
public float proxyForwardX;
public float proxyForwardY;
public float proxyForwardZ;
public float proxyUpX;
public float proxyUpY;
public float proxyUpZ;
public float proxyRightX;
public float proxyRightY;
public float proxyRightZ;
public Vector3 proxyPositionWS;
public Vector3 proxyForward;
public Vector3 proxyUp;
public Vector3 proxyRight;
public float influencePositionWSX;
public float influencePositionWSY;
public float influencePositionWSZ;
public float influenceForwardX;
public float influenceForwardY;
public float influenceForwardZ;
public float influenceUpX;
public float influenceUpY;
public float influenceUpZ;
public float influenceRightX;
public float influenceRightY;
public float influenceRightZ;
public Vector3 influencePositionWS;
public Vector3 influenceForward;
public Vector3 influenceUp;
public Vector3 influenceRight;
public float influenceExtentsX;
public float influenceExtentsY;
public float influenceExtentsZ;
public Vector3 influenceExtents;
public float blendDistancePositiveX;
public float blendDistancePositiveY;
public float blendDistancePositiveZ;
public float blendDistanceNegativeX;
public float blendDistanceNegativeY;
public float blendDistanceNegativeZ;
public float blendNormalDistancePositiveX;
public float blendNormalDistancePositiveY;
public float blendNormalDistancePositiveZ;
public float blendNormalDistanceNegativeX;
public float blendNormalDistanceNegativeY;
public float blendNormalDistanceNegativeZ;
public Vector3 blendDistancePositive;
public Vector3 blendDistanceNegative;
public Vector3 blendNormalDistancePositive;
public Vector3 blendNormalDistanceNegative;
public float boxSideFadePositiveX;
public float boxSideFadePositiveY;
public float boxSideFadePositiveZ;
public float boxSideFadeNegativeX;
public float boxSideFadeNegativeY;
public float boxSideFadeNegativeZ;
public float dimmer;
public float unused01;
public Vector3 boxSideFadePositive;
public Vector3 boxSideFadeNegative;
public float weight;
public float multiplier;
public float sampleDirectionDiscardWSX;
public float sampleDirectionDiscardWSY;
public float sampleDirectionDiscardWSZ;
public Vector3 sampleDirectionDiscardWS;
public Vector3 capturePositionWS
{
get { return new Vector3(capturePositionWSX, capturePositionWSY, capturePositionWSZ); }
set
{
capturePositionWSX = value.x;
capturePositionWSY = value.y;
capturePositionWSZ = value.z;
}
}
public Vector3 proxyExtents
{
get { return new Vector3(proxyExtentsX, proxyExtentsY, proxyExtentsZ); }
set
{
proxyExtentsX = value.x;
proxyExtentsY = value.y;
proxyExtentsZ = value.z;
}
}
public Vector3 proxyPositionWS
{
get { return new Vector3(proxyPositionWSX, proxyPositionWSY, proxyPositionWSZ); }
set
{
proxyPositionWSX = value.x;
proxyPositionWSY = value.y;
proxyPositionWSZ = value.z;
}
}
public Vector3 proxyForward
{
get { return new Vector3(proxyForwardX, proxyForwardY, proxyForwardZ); }
set
{
proxyForwardX = value.x;
proxyForwardY = value.y;
proxyForwardZ = value.z;
}
}
public Vector3 proxyUp
{
get { return new Vector3(proxyUpX, proxyUpY, proxyUpZ); }
set
{
proxyUpX = value.x;
proxyUpY = value.y;
proxyUpZ = value.z;
}
}
public Vector3 proxyRight
{
get { return new Vector3(proxyRightX, proxyRightY, proxyRightZ); }
set
{
proxyRightX = value.x;
proxyRightY = value.y;
proxyRightZ = value.z;
}
}
public Vector3 influenceExtents
{
get { return new Vector3(influenceExtentsX, influenceExtentsY, influenceExtentsZ); }
set
{
influenceExtentsX = value.x;
influenceExtentsY = value.y;
influenceExtentsZ = value.z;
}
}
public Vector3 influencePositionWS
{
get { return new Vector3(influencePositionWSX, influencePositionWSY, influencePositionWSZ); }
set
{
influencePositionWSX = value.x;
influencePositionWSY = value.y;
influencePositionWSZ = value.z;
}
}
public Vector3 influenceForward
{
get { return new Vector3(influenceForwardX, influenceForwardY, influenceForwardZ); }
set
{
influenceForwardX = value.x;
influenceForwardY = value.y;
influenceForwardZ = value.z;
}
}
public Vector3 influenceUp
{
get { return new Vector3(influenceUpX, influenceUpY, influenceUpZ); }
set
{
influenceUpX = value.x;
influenceUpY = value.y;
influenceUpZ = value.z;
}
}
public Vector3 influenceRight
{
get { return new Vector3(influenceRightX, influenceRightY, influenceRightZ); }
set
{
influenceRightX = value.x;
influenceRightY = value.y;
influenceRightZ = value.z;
}
}
public Vector3 blendDistancePositive
{
get { return new Vector3(blendDistancePositiveX, blendDistancePositiveY, blendDistancePositiveZ); }
set
{
blendDistancePositiveX = value.x;
blendDistancePositiveY = value.y;
blendDistancePositiveZ = value.z;
}
}
public Vector3 blendDistanceNegative
{
get { return new Vector3(blendDistanceNegativeX, blendDistanceNegativeY, blendDistanceNegativeZ); }
set
{
blendDistanceNegativeX = value.x;
blendDistanceNegativeY = value.y;
blendDistanceNegativeZ = value.z;
}
}
public Vector3 blendNormalDistancePositive
{
get { return new Vector3(blendNormalDistancePositiveX, blendNormalDistancePositiveY, blendNormalDistancePositiveZ); }
set
{
blendNormalDistancePositiveX = value.x;
blendNormalDistancePositiveY = value.y;
blendNormalDistancePositiveZ = value.z;
}
}
public Vector3 blendNormalDistanceNegative
{
get { return new Vector3(blendNormalDistanceNegativeX, blendNormalDistanceNegativeY, blendNormalDistanceNegativeZ); }
set
{
blendNormalDistanceNegativeX = value.x;
blendNormalDistanceNegativeY = value.y;
blendNormalDistanceNegativeZ = value.z;
}
}
public Vector3 boxSideFadePositive
{
get { return new Vector3(boxSideFadePositiveX, boxSideFadePositiveY, boxSideFadePositiveZ); }
set
{
boxSideFadePositiveX = value.x;
boxSideFadePositiveY = value.y;
boxSideFadePositiveZ = value.z;
}
}
public Vector3 boxSideFadeNegative
{
get { return new Vector3(boxSideFadeNegativeX, boxSideFadeNegativeY, boxSideFadeNegativeZ); }
set
{
boxSideFadeNegativeX = value.x;
boxSideFadeNegativeY = value.y;
boxSideFadeNegativeZ = value.z;
}
}
public Vector3 sampleDirectionDiscardWS
{
get { return new Vector3(sampleDirectionDiscardWSX, sampleDirectionDiscardWSY, sampleDirectionDiscardWSZ); }
set
{
sampleDirectionDiscardWSX = value.x;
sampleDirectionDiscardWSY = value.y;
sampleDirectionDiscardWSZ = value.z;
}
}
};
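Given the packing caution at the top of the struct, a small editor-only sanity check can catch stride regressions early. This is a hypothetical sketch (not part of this change), assuming the struct is uploaded through a structured ComputeBuffer as elsewhere in LightLoop.cs:

static void CheckEnvLightDataStride()
{
    // A 16-byte multiple keeps the layout safe if the data is ever moved into a constant buffer array.
    int stride = System.Runtime.InteropServices.Marshal.SizeOf(typeof(EnvLightData));
    UnityEngine.Debug.Assert(stride % 16 == 0, "EnvLightData stride should be a multiple of 16 bytes, got " + stride);
}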
[GenerateHLSL]

301
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/LightDefinition.cs.hlsl


// PackingRules = Exact
struct EnvLightData
{
float capturePositionWSX;
float capturePositionWSY;
float capturePositionWSZ;
float3 capturePositionWS;
float proxyExtentsX;
float proxyExtentsY;
float proxyExtentsZ;
float3 proxyExtents;
float proxyPositionWSX;
float proxyPositionWSY;
float proxyPositionWSZ;
float proxyForwardX;
float proxyForwardY;
float proxyForwardZ;
float proxyUpX;
float proxyUpY;
float proxyUpZ;
float proxyRightX;
float proxyRightY;
float proxyRightZ;
float influencePositionWSX;
float influencePositionWSY;
float influencePositionWSZ;
float influenceForwardX;
float influenceForwardY;
float influenceForwardZ;
float influenceUpX;
float influenceUpY;
float influenceUpZ;
float influenceRightX;
float influenceRightY;
float influenceRightZ;
float influenceExtentsX;
float influenceExtentsY;
float influenceExtentsZ;
float3 proxyPositionWS;
float3 proxyForward;
float3 proxyUp;
float3 proxyRight;
float3 influencePositionWS;
float3 influenceForward;
float3 influenceUp;
float3 influenceRight;
float3 influenceExtents;
float blendDistancePositiveX;
float blendDistancePositiveY;
float blendDistancePositiveZ;
float blendDistanceNegativeX;
float blendDistanceNegativeY;
float blendDistanceNegativeZ;
float blendNormalDistancePositiveX;
float blendNormalDistancePositiveY;
float blendNormalDistancePositiveZ;
float blendNormalDistanceNegativeX;
float blendNormalDistanceNegativeY;
float blendNormalDistanceNegativeZ;
float boxSideFadePositiveX;
float boxSideFadePositiveY;
float boxSideFadePositiveZ;
float boxSideFadeNegativeX;
float boxSideFadeNegativeY;
float boxSideFadeNegativeZ;
float dimmer;
float unused01;
float sampleDirectionDiscardWSX;
float sampleDirectionDiscardWSY;
float sampleDirectionDiscardWSZ;
float3 blendDistancePositive;
float3 blendDistanceNegative;
float3 blendNormalDistancePositive;
float3 blendNormalDistanceNegative;
float3 boxSideFadePositive;
float3 boxSideFadeNegative;
float weight;
float multiplier;
float3 sampleDirectionDiscardWS;
int envIndex;
};

//
// Accessors for UnityEngine.Experimental.Rendering.HDPipeline.EnvLightData
//
float GetCapturePositionWSX(EnvLightData value)
{
return value.capturePositionWSX;
}
float GetCapturePositionWSY(EnvLightData value)
{
return value.capturePositionWSY;
}
float GetCapturePositionWSZ(EnvLightData value)
{
return value.capturePositionWSZ;
}
float3 GetCapturePositionWS(EnvLightData value)
{
return value.capturePositionWS;
}
float GetProxyExtentsX(EnvLightData value)
{
return value.proxyExtentsX;
}
float GetProxyExtentsY(EnvLightData value)
{
return value.proxyExtentsY;
}
float GetProxyExtentsZ(EnvLightData value) { return value.proxyExtentsZ; }
float GetProxyPositionWSX(EnvLightData value) { return value.proxyPositionWSX; }
float GetProxyPositionWSY(EnvLightData value) { return value.proxyPositionWSY; }
float GetProxyPositionWSZ(EnvLightData value) { return value.proxyPositionWSZ; }
float GetProxyForwardX(EnvLightData value) { return value.proxyForwardX; }
float GetProxyForwardY(EnvLightData value) { return value.proxyForwardY; }
float GetProxyForwardZ(EnvLightData value) { return value.proxyForwardZ; }
float GetProxyUpX(EnvLightData value) { return value.proxyUpX; }
float GetProxyUpY(EnvLightData value) { return value.proxyUpY; }
float3 GetProxyExtents(EnvLightData value) { return value.proxyExtents; }
float3 GetProxyPositionWS(EnvLightData value) { return value.proxyPositionWS; }
float3 GetProxyForward(EnvLightData value) { return value.proxyForward; }
float3 GetProxyUp(EnvLightData value) { return value.proxyUp; }
float3 GetProxyRight(EnvLightData value) { return value.proxyRight; }
float3 GetInfluencePositionWS(EnvLightData value) { return value.influencePositionWS; }
float3 GetInfluenceForward(EnvLightData value) { return value.influenceForward; }
float3 GetInfluenceUp(EnvLightData value) { return value.influenceUp; }
float3 GetInfluenceRight(EnvLightData value) { return value.influenceRight; }
float GetProxyUpZ(EnvLightData value)
{
return value.proxyUpZ;
}
float GetProxyRightX(EnvLightData value)
{
return value.proxyRightX;
}
float GetProxyRightY(EnvLightData value)
{
return value.proxyRightY;
}
float GetProxyRightZ(EnvLightData value)
{
return value.proxyRightZ;
}
float GetInfluencePositionWSX(EnvLightData value)
{
return value.influencePositionWSX;
}
float GetInfluencePositionWSY(EnvLightData value)
{
return value.influencePositionWSY;
}
float GetInfluencePositionWSZ(EnvLightData value)
{
return value.influencePositionWSZ;
}
float GetInfluenceForwardX(EnvLightData value)
{
return value.influenceForwardX;
}
float GetInfluenceForwardY(EnvLightData value)
{
return value.influenceForwardY;
}
float GetInfluenceForwardZ(EnvLightData value)
{
return value.influenceForwardZ;
}
float GetInfluenceUpX(EnvLightData value)
{
return value.influenceUpX;
}
float GetInfluenceUpY(EnvLightData value)
{
return value.influenceUpY;
}
float GetInfluenceUpZ(EnvLightData value)
{
return value.influenceUpZ;
}
float GetInfluenceRightX(EnvLightData value)
{
return value.influenceRightX;
}
float GetInfluenceRightY(EnvLightData value)
{
return value.influenceRightY;
}
float GetInfluenceRightZ(EnvLightData value)
{
return value.influenceRightZ;
}
float GetInfluenceExtentsX(EnvLightData value)
{
return value.influenceExtentsX;
}
float GetInfluenceExtentsY(EnvLightData value)
{
return value.influenceExtentsY;
}
float GetInfluenceExtentsZ(EnvLightData value)
{
return value.influenceExtentsZ;
}
float3 GetInfluenceExtents(EnvLightData value)
{
return value.influenceExtents;
}
float GetBlendDistancePositiveX(EnvLightData value)
{
return value.blendDistancePositiveX;
}
float GetBlendDistancePositiveY(EnvLightData value)
{
return value.blendDistancePositiveY;
}
float GetBlendDistancePositiveZ(EnvLightData value)
{
return value.blendDistancePositiveZ;
}
float GetBlendDistanceNegativeX(EnvLightData value)
{
return value.blendDistanceNegativeX;
}
float GetBlendDistanceNegativeY(EnvLightData value)
{
return value.blendDistanceNegativeY;
}
float GetBlendDistanceNegativeZ(EnvLightData value) { return value.blendDistanceNegativeZ; }
float GetBlendNormalDistancePositiveX(EnvLightData value) { return value.blendNormalDistancePositiveX; }
float GetBlendNormalDistancePositiveY(EnvLightData value) { return value.blendNormalDistancePositiveY; }
float GetBlendNormalDistancePositiveZ(EnvLightData value) { return value.blendNormalDistancePositiveZ; }
float GetBlendNormalDistanceNegativeX(EnvLightData value) { return value.blendNormalDistanceNegativeX; }
float GetBlendNormalDistanceNegativeY(EnvLightData value) { return value.blendNormalDistanceNegativeY; }
float GetBlendNormalDistanceNegativeZ(EnvLightData value) { return value.blendNormalDistanceNegativeZ; }
float GetBoxSideFadePositiveX(EnvLightData value) { return value.boxSideFadePositiveX; }
float GetBoxSideFadePositiveY(EnvLightData value) { return value.boxSideFadePositiveY; }
float3 GetBlendDistancePositive(EnvLightData value) { return value.blendDistancePositive; }
float3 GetBlendDistanceNegative(EnvLightData value) { return value.blendDistanceNegative; }
float3 GetBlendNormalDistancePositive(EnvLightData value) { return value.blendNormalDistancePositive; }
float3 GetBlendNormalDistanceNegative(EnvLightData value) { return value.blendNormalDistanceNegative; }
float3 GetBoxSideFadePositive(EnvLightData value) { return value.boxSideFadePositive; }
float3 GetBoxSideFadeNegative(EnvLightData value) { return value.boxSideFadeNegative; }
float GetWeight(EnvLightData value) { return value.weight; }
float GetMultiplier(EnvLightData value) { return value.multiplier; }
float3 GetSampleDirectionDiscardWS(EnvLightData value) { return value.sampleDirectionDiscardWS; }
float GetBoxSideFadePositiveZ(EnvLightData value)
{
return value.boxSideFadePositiveZ;
}
float GetBoxSideFadeNegativeX(EnvLightData value)
{
return value.boxSideFadeNegativeX;
}
float GetBoxSideFadeNegativeY(EnvLightData value)
{
return value.boxSideFadeNegativeY;
}
float GetBoxSideFadeNegativeZ(EnvLightData value)
{
return value.boxSideFadeNegativeZ;
}
float GetDimmer(EnvLightData value)
{
return value.dimmer;
}
float GetUnused01(EnvLightData value)
{
return value.unused01;
}
float GetSampleDirectionDiscardWSX(EnvLightData value)
{
return value.sampleDirectionDiscardWSX;
}
float GetSampleDirectionDiscardWSY(EnvLightData value)
{
return value.sampleDirectionDiscardWSY;
}
float GetSampleDirectionDiscardWSZ(EnvLightData value)
{
return value.sampleDirectionDiscardWSZ;
}
int GetEnvIndex(EnvLightData value)
{
return value.envIndex;
}
#endif
#include "LightDefinition.cs.custom.hlsl"

16
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/LightLoop/ClusteredUtils.hlsl


return max(g_fClustBase, suggested_base);
}
uint GenerateLogBaseBufferIndex(uint2 tileIndex, uint numTilesX, uint numTilesY, uint eyeIndex)
{
uint eyeOffset = eyeIndex * numTilesX * numTilesY;
return (eyeOffset + (tileIndex.y * numTilesX) + tileIndex.x);
}
uint GenerateLayeredOffsetBufferIndex(uint lightCategory, uint2 tileIndex, uint clusterIndex, uint numTilesX, uint numTilesY, int numClusters, uint eyeIndex)
{
// Each eye is split into category, cluster, x, y
uint eyeOffset = eyeIndex * LIGHTCATEGORY_COUNT * numClusters * numTilesX * numTilesY;
int lightOffset = ((lightCategory * numClusters + clusterIndex) * numTilesY + tileIndex.y) * numTilesX + tileIndex.x;
return (eyeOffset + lightOffset);
}
#endif
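The per-eye index math above is easy to sanity-check on the CPU. Below is a C# transcription of the same formulas (a sketch for illustration only; the tile counts, cluster count and category count in the trailing comment are arbitrary example values):

static uint GenerateLogBaseBufferIndex(uint tileX, uint tileY, uint numTilesX, uint numTilesY, uint eyeIndex)
{
    // One scalar per clustered tile, eyes stored back to back.
    uint eyeOffset = eyeIndex * numTilesX * numTilesY;
    return eyeOffset + tileY * numTilesX + tileX;
}

static uint GenerateLayeredOffsetBufferIndex(uint lightCategory, uint tileX, uint tileY, uint clusterIndex,
                                             uint numTilesX, uint numTilesY, uint numClusters,
                                             uint lightCategoryCount, uint eyeIndex)
{
    // Each eye is split into category, cluster, y, x - exactly as in the HLSL above.
    uint eyeOffset = eyeIndex * lightCategoryCount * numClusters * numTilesX * numTilesY;
    uint lightOffset = ((lightCategory * numClusters + clusterIndex) * numTilesY + tileY) * numTilesX + tileX;
    return eyeOffset + lightOffset;
}

// Example: with a 120x68 tile grid, 64 clusters and 5 light categories,
// the second eye's data starts 5 * 64 * 120 * 68 = 2,611,200 entries into the layered offset buffer.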

157
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/LightLoop/LightLoop.cs


atlasInit.shadowClearShader = resources.shadowClearShader;
atlasInit.shadowBlurMoments = resources.shadowBlurMoments;
/*
// Code kept here for reference if we want to add VSM/MSM later on
varianceInit.baseInit.shadowmapFormat = ShadowVariance.GetFormat( false, false, true );
varianceInit.baseInit.shadowmapFormat = ShadowVariance.GetFormat(false, false, true);
varianceInit2.baseInit.shadowmapFormat = ShadowVariance.GetFormat( true, true, false );
varianceInit2.baseInit.shadowmapFormat = ShadowVariance.GetFormat(true, true, false);
varianceInit3.baseInit.shadowmapFormat = ShadowVariance.GetFormat( true, false, true );
varianceInit3.baseInit.shadowmapFormat = ShadowVariance.GetFormat(true, false, true);
m_Shadowmaps = new ShadowmapBase[] { new ShadowAtlas(ref atlasInit), new ShadowVariance(ref varianceInit), new ShadowVariance(ref varianceInit2), new ShadowVariance(ref varianceInit3) };
*/
m_Shadowmaps = new ShadowmapBase[] { new ShadowVariance(ref varianceInit), new ShadowVariance(ref varianceInit2), new ShadowVariance(ref varianceInit3), new ShadowAtlas(ref atlasInit) };
m_Shadowmaps = new ShadowmapBase[] { new ShadowAtlas(ref atlasInit) };
ShadowContext.SyncDel syncer = (ShadowContext sc) =>
{

cb.SetGlobalBuffer(HDShaderIDs._ShadowDatasExp, s_ShadowDataBuffer);
cb.SetGlobalBuffer(HDShaderIDs._ShadowPayloads, s_ShadowPayloadBuffer);
// bind textures
cb.SetGlobalTexture(HDShaderIDs._ShadowmapExp_VSM_0, tex[0]);
cb.SetGlobalTexture(HDShaderIDs._ShadowmapExp_VSM_1, tex[1]);
cb.SetGlobalTexture(HDShaderIDs._ShadowmapExp_VSM_2, tex[2]);
cb.SetGlobalTexture(HDShaderIDs._ShadowmapExp_PCF, tex[3]);
cb.SetGlobalTexture(HDShaderIDs._ShadowmapExp_PCF, tex[0]);
// Code kept here for reference if we want to add VSM/MSM later on
//cb.SetGlobalTexture(HDShaderIDs._ShadowmapExp_VSM_0, tex[1]);
//cb.SetGlobalTexture(HDShaderIDs._ShadowmapExp_VSM_1, tex[2]);
//cb.SetGlobalTexture(HDShaderIDs._ShadowmapExp_VSM_2, tex[3])
// TODO: Currently samplers are hard coded in ShadowContext.hlsl, so we can't really set them here
};

{
if (m_Shadowmaps != null)
{
(m_Shadowmaps[0] as ShadowAtlas).Dispose();
(m_Shadowmaps[1] as ShadowAtlas).Dispose();
(m_Shadowmaps[2] as ShadowAtlas).Dispose();
(m_Shadowmaps[3] as ShadowAtlas).Dispose();
foreach(var shadowMap in m_Shadowmaps)
{
(shadowMap as ShadowAtlas).Dispose();
}
m_Shadowmaps = null;
}
m_ShadowMgr = null;

public static readonly Vector3 k_BoxCullingExtentThreshold = Vector3.one * 0.01f;
// The static keyword is required here, else we get a "DestroyBuffer can only be called from the main thread" error
static ComputeBuffer s_DirectionalLightDatas = null;
static ComputeBuffer s_LightDatas = null;
static ComputeBuffer s_EnvLightDatas = null;
static ComputeBuffer s_shadowDatas = null;
static ComputeBuffer s_DecalDatas = null;
ComputeBuffer m_DirectionalLightDatas = null;
ComputeBuffer m_LightDatas = null;
ComputeBuffer m_EnvLightDatas = null;
ComputeBuffer m_shadowDatas = null;
ComputeBuffer m_DecalDatas = null;
static Texture2DArray s_DefaultTexture2DArray;
static Cubemap s_DefaultTextureCube;
Texture2DArray m_DefaultTexture2DArray;
Cubemap m_DefaultTextureCube;
PlanarReflectionProbeCache m_ReflectionPlanarProbeCache;
ReflectionProbeCache m_ReflectionProbeCache;

};
// clustered light list specific buffers and data end
static int[] s_TempIntArray = new int[2]; // Used to avoid GC stress when calling SetComputeIntParams
static int[] s_TempScreenDimArray = new int[2]; // Used to avoid GC stress when calling SetComputeIntParams
FrameSettings m_FrameSettings = null;
RenderPipelineResources m_Resources = null;
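The temp array comment above is about per-frame allocations: passing a freshly allocated int[] to SetComputeIntParams every frame creates garbage, while the cached static array does not. A minimal sketch of the two patterns (the helper names are hypothetical):

void SetScreenDims_Allocating(CommandBuffer cmd, ComputeShader cs, int w, int h)
{
    // Allocates a new array on every call -> GC pressure in the render loop.
    cmd.SetComputeIntParams(cs, HDShaderIDs.g_viDimensions, new int[] { w, h });
}

void SetScreenDims_Cached(CommandBuffer cmd, ComputeShader cs, int w, int h)
{
    // Reuses the static scratch array declared above, so no per-frame allocation.
    s_TempScreenDimArray[0] = w;
    s_TempScreenDimArray[1] = h;
    cmd.SetComputeIntParams(cs, HDShaderIDs.g_viDimensions, s_TempScreenDimArray);
}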

for (int i = 0, c = Mathf.Max(1, hdAsset.renderPipelineSettings.lightLoopSettings.maxPlanarReflectionProbes); i < c; ++i)
m_Env2DCaptureVP.Add(Matrix4x4.identity);
s_DirectionalLightDatas = new ComputeBuffer(k_MaxDirectionalLightsOnScreen, System.Runtime.InteropServices.Marshal.SizeOf(typeof(DirectionalLightData)));
s_LightDatas = new ComputeBuffer(k_MaxPunctualLightsOnScreen + k_MaxAreaLightsOnScreen, System.Runtime.InteropServices.Marshal.SizeOf(typeof(LightData)));
s_EnvLightDatas = new ComputeBuffer(k_MaxEnvLightsOnScreen, System.Runtime.InteropServices.Marshal.SizeOf(typeof(EnvLightData)));
s_shadowDatas = new ComputeBuffer(k_MaxCascadeCount + k_MaxShadowOnScreen, System.Runtime.InteropServices.Marshal.SizeOf(typeof(ShadowData)));
s_DecalDatas = new ComputeBuffer(k_MaxDecalsOnScreen, System.Runtime.InteropServices.Marshal.SizeOf(typeof(DecalData)));
m_DirectionalLightDatas = new ComputeBuffer(k_MaxDirectionalLightsOnScreen, System.Runtime.InteropServices.Marshal.SizeOf(typeof(DirectionalLightData)));
m_LightDatas = new ComputeBuffer(k_MaxPunctualLightsOnScreen + k_MaxAreaLightsOnScreen, System.Runtime.InteropServices.Marshal.SizeOf(typeof(LightData)));
m_EnvLightDatas = new ComputeBuffer(k_MaxEnvLightsOnScreen, System.Runtime.InteropServices.Marshal.SizeOf(typeof(EnvLightData)));
m_shadowDatas = new ComputeBuffer(k_MaxCascadeCount + k_MaxShadowOnScreen, System.Runtime.InteropServices.Marshal.SizeOf(typeof(ShadowData)));
m_DecalDatas = new ComputeBuffer(k_MaxDecalsOnScreen, System.Runtime.InteropServices.Marshal.SizeOf(typeof(DecalData)));
m_CookieTexArray = new TextureCache2D();
m_CookieTexArray = new TextureCache2D("Cookie");
m_CubeCookieTexArray = new TextureCacheCubemap();
m_CubeCookieTexArray = new TextureCacheCubemap("Cookie");
m_CubeCookieTexArray.AllocTextureArray(gLightLoopSettings.cubeCookieTexArraySize, gLightLoopSettings.pointCookieSize, TextureFormat.RGBA32, true, m_CubeToPanoMaterial);
TextureFormat probeCacheFormat = gLightLoopSettings.reflectionCacheCompressed ? TextureFormat.BC6H : TextureFormat.RGBAHalf;

int index = GetDeferredLightingMaterialIndex(outputSplitLighting, lightLoopTilePass, shadowMask, debugDisplay);
m_deferredLightingMaterial[index] = CoreUtils.CreateEngineMaterial(m_Resources.deferredShader);
m_deferredLightingMaterial[index].name = string.Format("{0}_{1}", m_Resources.deferredShader.name, index);
CoreUtils.SetKeyword(m_deferredLightingMaterial[index], "OUTPUT_SPLIT_LIGHTING", outputSplitLighting == 1);
CoreUtils.SelectKeyword(m_deferredLightingMaterial[index], "LIGHTLOOP_TILE_PASS", "LIGHTLOOP_SINGLE_PASS", lightLoopTilePass == 1);
CoreUtils.SetKeyword(m_deferredLightingMaterial[index], "SHADOWS_SHADOWMASK", shadowMask == 1);

}
}
s_DefaultTexture2DArray = new Texture2DArray(1, 1, 1, TextureFormat.ARGB32, false);
s_DefaultTexture2DArray.SetPixels32(new Color32[1] { new Color32(128, 128, 128, 128) }, 0);
s_DefaultTexture2DArray.Apply();
m_DefaultTexture2DArray = new Texture2DArray(1, 1, 1, TextureFormat.ARGB32, false);
m_DefaultTexture2DArray.hideFlags = HideFlags.HideAndDontSave;
m_DefaultTexture2DArray.name = CoreUtils.GetTextureAutoName(1, 1, TextureFormat.ARGB32, depth: 1, dim: TextureDimension.Tex2DArray, name: "LightLoopDefault");
m_DefaultTexture2DArray.SetPixels32(new Color32[1] { new Color32(128, 128, 128, 128) }, 0);
m_DefaultTexture2DArray.Apply();
s_DefaultTextureCube = new Cubemap(16, TextureFormat.ARGB32, false);
s_DefaultTextureCube.Apply();
m_DefaultTextureCube = new Cubemap(16, TextureFormat.ARGB32, false);
m_DefaultTextureCube.Apply();
InitShadowSystem(hdAsset, shadowSettings);
}

DeinitShadowSystem();
CoreUtils.SafeRelease(s_DirectionalLightDatas);
CoreUtils.SafeRelease(s_LightDatas);
CoreUtils.SafeRelease(s_EnvLightDatas);
CoreUtils.SafeRelease(s_shadowDatas);
CoreUtils.SafeRelease(s_DecalDatas);
CoreUtils.Destroy(m_DefaultTexture2DArray);
CoreUtils.Destroy(m_DefaultTextureCube);
CoreUtils.SafeRelease(m_DirectionalLightDatas);
CoreUtils.SafeRelease(m_LightDatas);
CoreUtils.SafeRelease(m_EnvLightDatas);
CoreUtils.SafeRelease(m_shadowDatas);
CoreUtils.SafeRelease(m_DecalDatas);
if (m_ReflectionProbeCache != null)
{

m_CurrentSunLightShadowIndex = shadowIdx;
}
// TODO: Currently m_maxShadowDistance is based on the shadow settings, but this value is defined for a whole level. We should be able to change it during gameplay
float scale;
float bias;
GetScaleAndBiasForLinearDistanceFade(m_maxShadowDistance, out scale, out bias);
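For readers unfamiliar with the scale/bias form of the fade: one simple way to realize a linear distance fade over, say, the last 10% of the range is sketched below. This is illustrative only, with an assumed fade window; the actual HDRP GetScaleAndBiasForLinearDistanceFade/ComputeLinearDistanceFade may use a different curve:

static void GetScaleAndBiasForLinearDistanceFadeSketch(float fadeDistance, out float scale, out float bias)
{
    // Fade from 1 at 90% of the distance down to 0 at the full distance (assumed window).
    float fadeStart = 0.9f * fadeDistance;
    scale = -1.0f / Mathf.Max(fadeDistance - fadeStart, 1e-4f);
    bias  = -fadeDistance * scale;
}

static float ComputeLinearDistanceFadeSketch(float distance, float fadeDistance)
{
    float scale, bias;
    GetScaleAndBiasForLinearDistanceFadeSketch(fadeDistance, out scale, out bias);
    return Mathf.Clamp01(distance * scale + bias); // 1 = fully shadowed range, 0 = faded out
}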

if (additionalshadowData)
{
float shadowDistanceFade = ComputeLinearDistanceFade(distanceToCamera, additionalshadowData.shadowFadeDistance);
float shadowDistanceFade = ComputeLinearDistanceFade(distanceToCamera, Mathf.Min(shadowSettings.maxShadowDistance, additionalshadowData.shadowFadeDistance));
lightData.shadowDimmer = additionalshadowData.shadowDimmer * shadowDistanceFade;
}
else

envLightData.influenceShapeType = probe.influenceShapeType;
envLightData.dimmer = probe.dimmer;
envLightData.weight = probe.weight;
envLightData.multiplier = probe.multiplier;
envLightData.influenceExtents = probe.influenceExtents;
envLightData.blendNormalDistancePositive = probe.blendNormalDistancePositive;
envLightData.blendNormalDistanceNegative = probe.blendNormalDistanceNegative;

{
// If any light requires it, we need to enable the baked shadow mask feature
m_enableBakeShadowMask = false;
m_maxShadowDistance = shadowSettings.maxShadowDistance;
m_lightList.Clear();

UpdateDataBuffers();
m_maxShadowDistance = shadowSettings.maxShadowDistance;
return m_enableBakeShadowMask;
}
}

return (uint)logVolume << 20 | (uint)lightVolumeType << 17 | listType << 16 | ((uint)probeIndex & 0xFFFF);
}
void VoxelLightListGeneration(CommandBuffer cmd, HDCamera hdCamera, Matrix4x4 projscr, Matrix4x4 invProjscr, RenderTargetIdentifier cameraDepthBufferRT)
void VoxelLightListGeneration(CommandBuffer cmd, HDCamera hdCamera, Matrix4x4[] projscrArr, Matrix4x4[] invProjscrArr, RenderTargetIdentifier cameraDepthBufferRT)
{
Camera camera = hdCamera.camera;
// clear atomic offset index

cmd.SetComputeIntParam(buildPerVoxelLightListShader, HDShaderIDs._DecalIndexShift, m_lightList.lights.Count + m_lightList.envLights.Count);
cmd.SetComputeIntParam(buildPerVoxelLightListShader, HDShaderIDs._DensityVolumeIndexShift, m_lightList.lights.Count + m_lightList.envLights.Count + decalDatasCount);
cmd.SetComputeIntParam(buildPerVoxelLightListShader, HDShaderIDs.g_iNrVisibLights, m_lightCount);
cmd.SetComputeMatrixParam(buildPerVoxelLightListShader, HDShaderIDs.g_mScrProjection, projscr);
cmd.SetComputeMatrixParam(buildPerVoxelLightListShader, HDShaderIDs.g_mInvScrProjection, invProjscr);
cmd.SetComputeMatrixArrayParam(buildPerVoxelLightListShader, HDShaderIDs.g_mScrProjectionArr, projscrArr);
cmd.SetComputeMatrixArrayParam(buildPerVoxelLightListShader, HDShaderIDs.g_mInvScrProjectionArr, invProjscrArr);
cmd.SetComputeIntParam(buildPerVoxelLightListShader, HDShaderIDs.g_iLog2NumClusters, k_Log2NumClusters);

var numTilesX = GetNumTileClusteredX(hdCamera);
var numTilesY = GetNumTileClusteredY(hdCamera);
cmd.DispatchCompute(buildPerVoxelLightListShader, s_GenListPerVoxelKernel, numTilesX, numTilesY, 1);
int numEyes = m_FrameSettings.enableStereo ? 2 : 1;
//cmd.DispatchCompute(buildPerVoxelLightListShader, s_GenListPerVoxelKernel, numTilesX, numTilesY, 1);
cmd.DispatchCompute(buildPerVoxelLightListShader, s_GenListPerVoxelKernel, numTilesX, numTilesY, numEyes);
}
public void BuildGPULightListsCommon(HDCamera hdCamera, CommandBuffer cmd, RenderTargetIdentifier cameraDepthBufferRT, RenderTargetIdentifier stencilTextureRT, bool skyEnabled)

var w = (int)hdCamera.screenSize.x;
var h = (int)hdCamera.screenSize.y;
s_TempIntArray[0] = w;
s_TempIntArray[1] = h;
s_TempScreenDimArray[0] = w;
s_TempScreenDimArray[1] = h;
var numBigTilesX = (w + 63) / 64;
var numBigTilesY = (h + 63) / 64;

var invProjscrArr = new Matrix4x4[2];
if (m_FrameSettings.enableStereo)
{
// XRTODO: If possible, we could generate a non-oblique stereo projection
// matrix. It's ok if it's not the exact same matrix, as long as it encompasses
// the same FOV as the original projection matrix (which would mean padding each half
// of the frustum with the max half-angle). We don't need the light information in
// real projection space. We just use screen space to figure out what is proximal
// to a cluster or tile.
// Once we generate this non-oblique projection matrix, it can be shared across both eyes (un-array)
for (int eyeIndex = 0; eyeIndex < 2; eyeIndex++)
{
projArr[eyeIndex] = CameraProjectionStereoLHS(hdCamera.camera, (Camera.StereoscopicEye)eyeIndex);
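To make the XRTODO above concrete, the per-eye screen-space projections end up being uploaded as matrix arrays for the culling shaders. A simplified sketch (the screenRemap matrix stands in for the clip-to-screen remap built earlier in this method, which is not shown in this hunk):

for (int eyeIndex = 0; eyeIndex < 2; eyeIndex++)
{
    Matrix4x4 proj = CameraProjectionStereoLHS(hdCamera.camera, (Camera.StereoscopicEye)eyeIndex);
    projscrArr[eyeIndex] = screenRemap * proj;              // per-eye screen-space projection
    invProjscrArr[eyeIndex] = projscrArr[eyeIndex].inverse; // and its inverse for depth reconstruction
}
cmd.SetComputeMatrixArrayParam(buildPerVoxelLightListShader, HDShaderIDs.g_mScrProjectionArr, projscrArr);
cmd.SetComputeMatrixArrayParam(buildPerVoxelLightListShader, HDShaderIDs.g_mInvScrProjectionArr, invProjscrArr);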

// enable coarse 2D pass on 64x64 tiles (used for both fptl and clustered).
if (m_FrameSettings.lightLoopSettings.enableBigTilePrepass)
{
cmd.SetComputeIntParam(buildPerBigTileLightListShader, HDShaderIDs.g_iNrVisibLights, m_lightCount);
cmd.SetComputeIntParams(buildPerBigTileLightListShader, HDShaderIDs.g_viDimensions, s_TempIntArray);
cmd.SetComputeIntParams(buildPerBigTileLightListShader, HDShaderIDs.g_viDimensions, s_TempScreenDimArray);
// TODO: These two aren't actually used...
cmd.SetComputeIntParam(buildPerBigTileLightListShader, HDShaderIDs.g_iNrVisibLights, m_lightCount);
cmd.SetComputeMatrixParam(buildPerBigTileLightListShader, HDShaderIDs.g_mScrProjection, projscrArr[0]);
cmd.SetComputeMatrixParam(buildPerBigTileLightListShader, HDShaderIDs.g_mInvScrProjection, invProjscrArr[0]);
cmd.SetComputeMatrixArrayParam(buildPerBigTileLightListShader, HDShaderIDs.g_mScrProjectionArr, projscrArr);
cmd.SetComputeMatrixArrayParam(buildPerBigTileLightListShader, HDShaderIDs.g_mInvScrProjectionArr, invProjscrArr);
cmd.SetComputeFloatParam(buildPerBigTileLightListShader, HDShaderIDs.g_fNearPlane, camera.nearClipPlane);
cmd.SetComputeFloatParam(buildPerBigTileLightListShader, HDShaderIDs.g_fFarPlane, camera.farClipPlane);

cmd.SetComputeBufferParam(buildPerBigTileLightListShader, s_GenListPerBigTileKernel, HDShaderIDs.g_data, s_ConvexBoundsBuffer);
cmd.DispatchCompute(buildPerBigTileLightListShader, s_GenListPerBigTileKernel, numBigTilesX, numBigTilesY, 1);
int tgZ = m_FrameSettings.enableStereo ? 2 : 1;
cmd.DispatchCompute(buildPerBigTileLightListShader, s_GenListPerBigTileKernel, numBigTilesX, numBigTilesY, tgZ);
}
var numTilesX = GetNumTileFtplX(hdCamera);

if (m_FrameSettings.lightLoopSettings.isFptlEnabled)
{
cmd.SetComputeIntParam(buildPerTileLightListShader, HDShaderIDs.g_isOrthographic, isOrthographic ? 1 : 0);
cmd.SetComputeIntParams(buildPerTileLightListShader, HDShaderIDs.g_viDimensions, s_TempIntArray);
cmd.SetComputeIntParams(buildPerTileLightListShader, HDShaderIDs.g_viDimensions, s_TempScreenDimArray);
cmd.SetComputeIntParam(buildPerTileLightListShader, HDShaderIDs._EnvLightIndexShift, m_lightList.lights.Count);
cmd.SetComputeIntParam(buildPerTileLightListShader, HDShaderIDs._DecalIndexShift, m_lightList.lights.Count + m_lightList.envLights.Count);
cmd.SetComputeIntParam(buildPerTileLightListShader, HDShaderIDs.g_iNrVisibLights, m_lightCount);

}
// Cluster
VoxelLightListGeneration(cmd, hdCamera, projscrArr[0], invProjscrArr[0], cameraDepthBufferRT);
VoxelLightListGeneration(cmd, hdCamera, projscrArr, invProjscrArr, cameraDepthBufferRT);
if (enableFeatureVariants)
{

}
cmd.SetComputeIntParam(buildMaterialFlagsShader, HDShaderIDs.g_BaseFeatureFlags, (int)baseFeatureFlags);
cmd.SetComputeIntParams(buildMaterialFlagsShader, HDShaderIDs.g_viDimensions, s_TempIntArray);
cmd.SetComputeIntParams(buildMaterialFlagsShader, HDShaderIDs.g_viDimensions, s_TempScreenDimArray);
cmd.SetComputeBufferParam(buildMaterialFlagsShader, buildMaterialFlagsKernel, HDShaderIDs.g_TileFeatureFlags, s_TileFeatureFlags);
cmd.SetComputeTextureParam(buildMaterialFlagsShader, buildMaterialFlagsKernel, HDShaderIDs._StencilTexture, stencilTextureRT);

void UpdateDataBuffers()
{
s_DirectionalLightDatas.SetData(m_lightList.directionalLights);
s_LightDatas.SetData(m_lightList.lights);
s_EnvLightDatas.SetData(m_lightList.envLights);
s_shadowDatas.SetData(m_lightList.shadows);
s_DecalDatas.SetData(DecalSystem.m_DecalDatas, 0, 0, Math.Min(DecalSystem.m_DecalDatasCount, k_MaxDecalsOnScreen)); // don't add more than the size of the buffer
m_DirectionalLightDatas.SetData(m_lightList.directionalLights);
m_LightDatas.SetData(m_lightList.lights);
m_EnvLightDatas.SetData(m_lightList.envLights);
m_shadowDatas.SetData(m_lightList.shadows);
m_DecalDatas.SetData(DecalSystem.m_DecalDatas, 0, 0, Math.Min(DecalSystem.m_DecalDatasCount, k_MaxDecalsOnScreen)); // don't add more than the size of the buffer
// These two buffers have been set in Rebuild()
s_ConvexBoundsBuffer.SetData(m_lightList.bounds);

cmd.SetGlobalTexture(HDShaderIDs._Env2DTextures, m_ReflectionPlanarProbeCache.GetTexCache());
cmd.SetGlobalMatrixArray(HDShaderIDs._Env2DCaptureVP, m_Env2DCaptureVP);
cmd.SetGlobalBuffer(HDShaderIDs._DirectionalLightDatas, s_DirectionalLightDatas);
cmd.SetGlobalBuffer(HDShaderIDs._DirectionalLightDatas, m_DirectionalLightDatas);
cmd.SetGlobalBuffer(HDShaderIDs._LightDatas, s_LightDatas);
cmd.SetGlobalBuffer(HDShaderIDs._LightDatas, m_LightDatas);
cmd.SetGlobalBuffer(HDShaderIDs._EnvLightDatas, s_EnvLightDatas);
cmd.SetGlobalBuffer(HDShaderIDs._EnvLightDatas, m_EnvLightDatas);
cmd.SetGlobalBuffer(HDShaderIDs._DecalDatas, s_DecalDatas);
cmd.SetGlobalBuffer(HDShaderIDs._DecalDatas, m_DecalDatas);
cmd.SetGlobalBuffer(HDShaderIDs._ShadowDatas, s_shadowDatas);
cmd.SetGlobalBuffer(HDShaderIDs._ShadowDatas, m_shadowDatas);
cmd.SetGlobalInt(HDShaderIDs._NumTileFtplX, GetNumTileFtplX(hdCamera));
cmd.SetGlobalInt(HDShaderIDs._NumTileFtplY, GetNumTileFtplY(hdCamera));

14
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/LightLoop/LightLoopDef.hlsl


// These uniforms are only needed when OPAQUES_ONLY is NOT defined,
// but there's a problem with our front-end compilation of compute shaders with multiple kernels, which causes it to error out
//#ifdef USE_CLUSTERED_LIGHTLIST
float4x4 g_mInvScrProjection;
float4x4 g_mInvScrProjection; // TODO: remove, unused in HDRP
float g_fClustScale;
float g_fClustBase;

TEXTURE2D_ARRAY(_Env2DTextures);
float4x4 _Env2DCaptureVP[MAX_ENV2D_LIGHT];
// XRTODO: Need to stereo-ize access
TEXTURE2D(_DeferredShadowTexture);
CBUFFER_START(UnityPerLightLoop)

float logBase = g_fClustBase;
if (g_isLogBaseBufferEnabled)
{
// XRTODO: Stereo-ize access to g_logBaseBuffer
logBase = g_logBaseBuffer[tileIndex.y * _NumTileClusteredX + tileIndex.x];
}

float logBase = g_fClustBase;
if (g_isLogBaseBufferEnabled)
{
logBase = g_logBaseBuffer[tileIndex.y * _NumTileClusteredX + tileIndex.x];
const uint logBaseIndex = GenerateLogBaseBufferIndex(tileIndex, _NumTileClusteredX, _NumTileClusteredY, unity_StereoEyeIndex);
logBase = g_logBaseBuffer[logBaseIndex];
}
return SnapToClusterIdxFlex(linearDepth, logBase, g_isLogBaseBufferEnabled != 0);

{
int nrClusters = (1 << g_iLog2NumClusters);
const int idx = ((lightCategory * nrClusters + clusterIndex) * _NumTileClusteredY + tileIndex.y) * _NumTileClusteredX + tileIndex.x;
const int idx = GenerateLayeredOffsetBufferIndex(lightCategory, tileIndex, clusterIndex, _NumTileClusteredX, _NumTileClusteredY, nrClusters, unity_StereoEyeIndex);
uint dataPair = g_vLayeredOffsetsBuffer[idx];
start = dataPair & 0x7ffffff;
lightCount = (dataPair >> 27) & 31;
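The decode above implies the packing scheme used when the clustered list is generated: the low 27 bits hold the start offset into g_vLayeredLightList and the high 5 bits hold the light count (which is why the generation side clamps counts to 31). A C# sketch of the implied encode/decode pair:

static uint PackClusterRange(uint start, uint count)
{
    // count is expected to be <= 31 (5 bits); start must fit in 27 bits.
    return (count << 27) | (start & 0x07FFFFFFu);
}

static void UnpackClusterRange(uint dataPair, out uint start, out uint lightCount)
{
    start = dataPair & 0x07FFFFFFu;
    lightCount = (dataPair >> 27) & 31u;
}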

{
// Note: XR depends on unity_StereoEyeIndex already being defined,
// which means ShaderVariables.hlsl needs to be included ahead of this!
uint2 tileIndex = posInput.tileCoord;
uint clusterIndex = GetLightClusterIndex(tileIndex, posInput.linearDepth);

40
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/LightLoop/LightLoopSettings.cs


using System;
using System.Collections.Generic;
namespace UnityEngine.Experimental.Rendering.HDPipeline
{

// Setup by system
public bool isFptlEnabled = true;
static DebugUI.Widget[] s_DebugEntries;
public void CopyTo(LightLoopSettings lightLoopSettings)
{

aggregate.isFptlEnabled = !aggregateFrameSettings.enableForwardRenderingOnly || aggregate.enableFptlForForwardOpaque;
}
public static void RegisterDebug(string menuName, LightLoopSettings lightLoopSettings)
public static void RegisterDebug(LightLoopSettings lightLoopSettings, List<DebugUI.Widget> widgets)
s_DebugEntries = new DebugUI.Widget[]
widgets.AddRange(new []
new DebugUI.BoolField { displayName = "Enable Fptl for Forward Opaque", getter = () => lightLoopSettings.enableFptlForForwardOpaque, setter = value => lightLoopSettings.enableFptlForForwardOpaque = value },
new DebugUI.BoolField { displayName = "Enable Tile/Cluster", getter = () => lightLoopSettings.enableTileAndCluster, setter = value => lightLoopSettings.enableTileAndCluster = value },
new DebugUI.BoolField { displayName = "Enable Big Tile", getter = () => lightLoopSettings.enableBigTilePrepass, setter = value => lightLoopSettings.enableBigTilePrepass = value },
new DebugUI.BoolField { displayName = "Enable Compute Lighting", getter = () => lightLoopSettings.enableComputeLightEvaluation, setter = value => lightLoopSettings.enableComputeLightEvaluation = value },
new DebugUI.BoolField { displayName = "Enable Light Classification", getter = () => lightLoopSettings.enableComputeLightVariants, setter = value => lightLoopSettings.enableComputeLightVariants = value },
new DebugUI.BoolField { displayName = "Enable Material Classification", getter = () => lightLoopSettings.enableComputeMaterialVariants, setter = value => lightLoopSettings.enableComputeMaterialVariants = value }
};
var panel = DebugManager.instance.GetPanel(menuName, true);
panel.children.Add(s_DebugEntries);
}
public static void UnRegisterDebug(string menuName)
{
var panel = DebugManager.instance.GetPanel(menuName);
if (panel != null)
panel.children.Remove(s_DebugEntries);
new DebugUI.Container
{
displayName = "Lighting Settings",
children =
{
// Uncomment if you re-enable LIGHTLOOP_SINGLE_PASS multi_compile in lit*.shader
//new DebugUI.BoolField { displayName = "Enable Tile/Cluster", getter = () => lightLoopSettings.enableTileAndCluster, setter = value => lightLoopSettings.enableTileAndCluster = value },
new DebugUI.BoolField { displayName = "Enable Fptl for Forward Opaque", getter = () => lightLoopSettings.enableFptlForForwardOpaque, setter = value => lightLoopSettings.enableFptlForForwardOpaque = value },
new DebugUI.BoolField { displayName = "Enable Big Tile", getter = () => lightLoopSettings.enableBigTilePrepass, setter = value => lightLoopSettings.enableBigTilePrepass = value },
new DebugUI.BoolField { displayName = "Enable Compute Lighting", getter = () => lightLoopSettings.enableComputeLightEvaluation, setter = value => lightLoopSettings.enableComputeLightEvaluation = value },
new DebugUI.BoolField { displayName = "Enable Light Classification", getter = () => lightLoopSettings.enableComputeLightVariants, setter = value => lightLoopSettings.enableComputeLightVariants = value },
new DebugUI.BoolField { displayName = "Enable Material Classification", getter = () => lightLoopSettings.enableComputeMaterialVariants, setter = value => lightLoopSettings.enableComputeMaterialVariants = value }
}
}
});
}
}
}

25
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/LightLoop/Shadow.hlsl


//#define SHADOW_DISPATCH_USE_SEPARATE_PUNC_ALGOS // enables separate resources and algorithms for spot and point lights
// directional
#define SHADOW_DISPATCH_DIR_TEX 3
#define SHADOW_DISPATCH_DIR_TEX 0
#define SHADOW_DISPATCH_DIR_SMP 0
#define SHADOW_DISPATCH_DIR_ALG GPUSHADOWALGORITHM_PCF_TENT_5X5 // all cascades
#define SHADOW_DISPATCH_DIR_ALG_0 GPUSHADOWALGORITHM_PCF_TENT_7X7 // 1st cascade

// point
#define SHADOW_DISPATCH_POINT_TEX 3
#define SHADOW_DISPATCH_POINT_TEX 0
#define SHADOW_DISPATCH_SPOT_TEX 3
#define SHADOW_DISPATCH_SPOT_TEX 0
#define SHADOW_DISPATCH_PUNC_TEX 3
#define SHADOW_DISPATCH_PUNC_TEX 0
#define SHADOW_DISPATCH_PUNC_SMP 0
#define SHADOW_DISPATCH_PUNC_ALG GPUSHADOWALGORITHM_PCF_TENT_3X3

{
return GetDirectionalShadowAttenuation( shadowContext, positionWS, normalWS, shadowDataIndex, L );
}
float3 GetDirectionalShadowClosestSample( ShadowContext shadowContext, real3 positionWS, real3 normalWS, int index, real4 L )
{
return EvalShadow_GetClosestSample_Cascade( shadowContext, shadowContext.tex2DArray[SHADOW_DISPATCH_DIR_TEX], positionWS, normalWS, index, L );
}
#endif

{
return GetPunctualShadowAttenuation( shadowContext, positionWS, normalWS, shadowDataIndex, L, L_dist );
}
float3 GetPunctualShadowClosestSample( ShadowContext shadowContext, real3 positionWS, int index, real3 L )
{
return EvalShadow_GetClosestSample_Punctual( shadowContext, shadowContext.tex2DArray[SHADOW_DISPATCH_PUNC_TEX], positionWS, index, L );
}
float GetPunctualShadowClosestDistance( ShadowContext shadowContext, SamplerState sampl, real3 positionWS, int index, float3 L, float3 lightPositionWS)
{
return EvalShadow_SampleClosestDistance_Punctual( shadowContext, shadowContext.tex2DArray[SHADOW_DISPATCH_PUNC_TEX], sampl, positionWS, index, L, lightPositionWS );
}
#endif
// cleanup the defines

22
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/LightLoop/ShadowContext.hlsl


#ifndef LIGHTLOOP_SHADOW_CONTEXT_HLSL
#define LIGHTLOOP_SHADOW_CONTEXT_HLSL
#define SHADOWCONTEXT_MAX_TEX2DARRAY 4
#define SHADOWCONTEXT_MAX_TEX2DARRAY 1
#define SHADOWCONTEXT_MAX_SAMPLER 3
#define SHADOWCONTEXT_MAX_SAMPLER 0
#if SHADOWCONTEXT_MAX_TEX2DARRAY == 4
TEXTURE2D_ARRAY(_ShadowmapExp_VSM_0);
SAMPLER(sampler_ShadowmapExp_VSM_0);

TEXTURE2D_ARRAY(_ShadowmapExp_VSM_2);
SAMPLER(sampler_ShadowmapExp_VSM_2);
#endif
TEXTURE2D_ARRAY(_ShadowmapExp_PCF);
SAMPLER_CMP(sampler_ShadowmapExp_PCF);

// Currently we only use the PCF atlas.
// Keeping all other bindings for reference and for future PC dynamic shadow configuration as it's harmless anyway.
sc.tex2DArray[0] = _ShadowmapExp_VSM_0;
sc.tex2DArray[1] = _ShadowmapExp_VSM_1;
sc.tex2DArray[2] = _ShadowmapExp_VSM_2;
sc.tex2DArray[3] = _ShadowmapExp_PCF;
sc.tex2DArray[0] = _ShadowmapExp_PCF;
sc.compSamplers[0] = sampler_ShadowmapExp_PCF;
#if SHADOWCONTEXT_MAX_TEX2DARRAY == 4
sc.tex2DArray[1] = _ShadowmapExp_VSM_0;
sc.tex2DArray[2] = _ShadowmapExp_VSM_1;
sc.tex2DArray[3] = _ShadowmapExp_VSM_2;
#endif
#if SHADOWCONTEXT_MAX_SAMPLER == 3
sc.compSamplers[0] = sampler_ShadowmapExp_PCF;
#endif
return sc;
}

71
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/LightLoop/lightlistbuild-bigtile.compute


#include "LightLoop.cs.hlsl"
#include "LightingConvexHullUtils.hlsl"
#include "SortingComputeUtils.hlsl"
#include "LightCullUtils.hlsl"
#pragma only_renderers d3d11 ps4 xboxone vulkan metal

uniform int g_iNrVisibLights;
uniform uint2 g_viDimensions;
uniform float4x4 g_mInvScrProjection;
uniform float4x4 g_mScrProjection;
uniform float4x4 g_mInvScrProjectionArr[2];
uniform float4x4 g_mScrProjectionArr[2];
uniform float g_fNearPlane;
uniform float g_fFarPlane;
uniform uint g_isOrthographic;

groupshared unsigned int lightsListLDS[MAX_NR_BIG_TILE_LIGHTS_PLUS_ONE];
groupshared uint lightOffs;
// TODO: Remove this function and g_mInvScrProjectionArr from the constants; this is the only usage of that constant.
float4x4 g_mInvScrProjection = g_mInvScrProjectionArr[0];
// for perspective projection m22 is zero and m23 is +1/-1 (depends on left/right hand proj)
// however this function must also work for orthographic projection so we keep it like this.
float m22 = g_mInvScrProjection[2].z, m23 = g_mInvScrProjection[2].w;

//return v4Pres.z / v4Pres.w;
}
float3 GetViewPosFromLinDepth(float2 v2ScrPos, float fLinDepth)
float3 GetViewPosFromLinDepth(float2 v2ScrPos, float fLinDepth, uint eyeIndex)
float4x4 g_mScrProjection = g_mScrProjectionArr[eyeIndex];
bool isOrthographic = g_isOrthographic!=0;
float fSx = g_mScrProjection[0].x;
float fSy = g_mScrProjection[1].y;

return float3(isOrthographic ? p.xy : (fLinDepth*p.xy), fLinDepth);
}
float GetOnePixDiagWorldDistAtDepthOne()
float GetOnePixDiagWorldDistAtDepthOne(uint eyeIndex)
float4x4 g_mScrProjection = g_mScrProjectionArr[eyeIndex];
float fSx = g_mScrProjection[0].x;
float fSy = g_mScrProjection[1].y;

#ifdef PERFORM_SPHERICAL_INTERSECTION_TESTS
void SphericalIntersectionTests(uint threadID, int iNrCoarseLights, float2 screenCoordinate);
void SphericalIntersectionTests(uint threadID, int iNrCoarseLights, float2 screenCoordinate, uint eyeIndex);
void CullByExactEdgeTests(uint threadID, int iNrCoarseLights, uint2 viTilLL, uint2 viTilUR);
void CullByExactEdgeTests(uint threadID, int iNrCoarseLights, uint2 viTilLL, uint2 viTilUR, uint eyeIndex);
#endif

void BigTileLightListGen(uint threadID : SV_GroupIndex, uint3 u3GroupID : SV_GroupID)
{
uint eyeIndex = u3GroupID.z;
uint2 tileIDX = u3GroupID.xy;
uint t=threadID;

GroupMemoryBarrierWithGroupSync();
#endif
// Raw pixel coordinates of tile
// 'Normalized' coordinates of tile, for use with AABB bounds in g_vBoundsBuffer
float2 vTileLL = float2(viTilLL.x/(float) iWidth, viTilLL.y/(float) iHeight);
float2 vTileUR = float2(viTilUR.x/(float) iWidth, viTilUR.y/(float) iHeight);

const float2 vMi = g_vBoundsBuffer[l].xy;
const float2 vMa = g_vBoundsBuffer[l+g_iNrVisibLights].xy;
const ScreenSpaceBoundsIndices boundsIndices = GenerateScreenSpaceBoundsIndices(l, g_iNrVisibLights, eyeIndex);
const float2 vMi = g_vBoundsBuffer[boundsIndices.min].xy;
const float2 vMa = g_vBoundsBuffer[boundsIndices.max].xy;
if( all(vMa>vTileLL) && all(vMi<vTileUR))
{

if(uIndex<MAX_NR_BIGTILE_LIGHTS) lightsListLDS[uIndex] = l; // add to light list
}
}
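The per-eye bounds lookup replaces the old "min at l, max at l + g_iNrVisibLights" addressing visible in the removed lines. A hypothetical C# mirror of that indexing, where the per-eye block layout is an assumption consistent with the single-eye scheme rather than a copy of the actual LightCullUtils.hlsl:

struct ScreenSpaceBoundsIndicesSketch { public int min; public int max; }

static ScreenSpaceBoundsIndicesSketch GenerateScreenSpaceBoundsIndicesSketch(int lightIndex, int numVisibleLights, int eyeIndex)
{
    // Assumed layout: each eye owns a contiguous [mins | maxs] block pair of 2 * numVisibleLights entries.
    int eyeOffset = eyeIndex * 2 * numVisibleLights;
    return new ScreenSpaceBoundsIndicesSketch
    {
        min = eyeOffset + lightIndex,
        max = eyeOffset + lightIndex + numVisibleLights,
    };
}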

int iNrCoarseLights = min(lightOffs,MAX_NR_BIGTILE_LIGHTS);
#ifdef PERFORM_SPHERICAL_INTERSECTION_TESTS
SphericalIntersectionTests( t, iNrCoarseLights, float2(min(viTilLL.xy+uint2(64/2,64/2), uint2(iWidth-1, iHeight-1))) );
SphericalIntersectionTests( t, iNrCoarseLights, float2(min(viTilLL.xy+uint2(64/2,64/2), uint2(iWidth-1, iHeight-1))), eyeIndex );
CullByExactEdgeTests(t, iNrCoarseLights, viTilLL.xy, viTilUR.xy);
CullByExactEdgeTests(t, iNrCoarseLights, viTilLL.xy, viTilUR.xy, eyeIndex);
#endif

GroupMemoryBarrierWithGroupSync();
iNrCoarseLights = lightOffs;
int offs = tileIDX.y*nrBigTilesX + tileIDX.x;
int offs = tileIDX.y*nrBigTilesX + tileIDX.x + (eyeIndex * nrBigTilesX * nrBigTilesY);
for(i=t; i<(iNrCoarseLights+1); i+=NR_THREADS)
g_vLightList[MAX_NR_BIG_TILE_LIGHTS_PLUS_ONE*offs + i] = i==0 ? iNrCoarseLights : lightsListLDS[max(i-1, 0)];
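The write above defines the per-tile layout of g_vLightList: for each 64x64 big tile (and each eye), entry 0 is the light count and entries 1..N are the light indices. A debugging-oriented CPU reader, assuming the buffer has already been read back into a uint[] (sketch only):

static uint[] GetBigTileLights(uint[] lightList, int tileX, int tileY,
                               int nrBigTilesX, int nrBigTilesY, int eyeIndex, int maxLightsPlusOne)
{
    // Same offset math as the compute shader: per-eye block, then row-major tile index.
    int offs = tileY * nrBigTilesX + tileX + eyeIndex * nrBigTilesX * nrBigTilesY;
    int baseIndex = maxLightsPlusOne * offs;
    uint count = lightList[baseIndex];
    var lights = new uint[count];
    System.Array.Copy(lightList, baseIndex + 1, lights, 0, (int)count);
    return lights;
}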

#ifdef PERFORM_SPHERICAL_INTERSECTION_TESTS
void SphericalIntersectionTests(uint threadID, int iNrCoarseLights, float2 screenCoordinate)
void SphericalIntersectionTests(uint threadID, int iNrCoarseLights, float2 screenCoordinate, uint eyeIndex)
float3 V = GetViewPosFromLinDepth( screenCoordinate, 1.0);
float3 V = GetViewPosFromLinDepth( screenCoordinate, 1.0, eyeIndex);
float3 V = GetViewPosFromLinDepth( screenCoordinate, -1.0);
float3 V = GetViewPosFromLinDepth( screenCoordinate, -1.0, eyeIndex);
float onePixDiagDist = GetOnePixDiagWorldDistAtDepthOne();
float onePixDiagDist = GetOnePixDiagWorldDistAtDepthOne(eyeIndex);
SFiniteLightBound lgtDat = g_data[lightsListLDS[l]];
const int boundIndex = GenerateLightCullDataIndex(lightsListLDS[l], g_iNrVisibLights, eyeIndex);
SFiniteLightBound lgtDat = g_data[boundIndex];
if( !DoesSphereOverlapTile(V, halfTileSizeAtZDistOne, lgtDat.center.xyz, lgtDat.radius, g_isOrthographic!=0) )
lightsListLDS[l]=UINT_MAX;

#ifdef EXACT_EDGE_TESTS
float3 GetTileVertex(uint2 viTilLL, uint2 viTilUR, int i, float fTileFarPlane)
float3 GetTileVertex(uint2 viTilLL, uint2 viTilUR, int i, float fTileFarPlane, uint eyeIndex)
{
float x = (i&1)==0 ? viTilLL.x : viTilUR.x;
float y = (i&2)==0 ? viTilLL.y : viTilUR.y;

#endif
return GetViewPosFromLinDepth( float2(x, y), z);
return GetViewPosFromLinDepth( float2(x, y), z, eyeIndex);
void GetFrustEdge(out float3 vP0, out float3 vE0, const int e0, uint2 viTilLL, uint2 viTilUR, float fTileFarPlane)
void GetFrustEdge(out float3 vP0, out float3 vE0, const int e0, uint2 viTilLL, uint2 viTilUR, float fTileFarPlane, uint eyeIndex)
vP0 = GetTileVertex(uint2(viTilLL.x, viTilUR.y), uint2(viTilUR.x, viTilLL.y), i, fTileFarPlane);
vP0 = GetTileVertex(uint2(viTilLL.x, viTilUR.y), uint2(viTilUR.x, viTilLL.y), i, fTileFarPlane, eyeIndex);
#if USE_LEFT_HAND_CAMERA_SPACE
float3 edgeSectionZero = g_isOrthographic==0 ? vP0 : float3(0.0,0.0,1.0);

vE0 = iSection == 0 ? edgeSectionZero : (((iSwizzle & 0x2) == 0 ? 1.0f : (-1.0f)) * ((int)(iSwizzle & 0x1) == (iSwizzle >> 1) ? float3(1, 0, 0) : float3(0, 1, 0)));
}
void CullByExactEdgeTests(uint threadID, int iNrCoarseLights, uint2 viTilLL, uint2 viTilUR)
void CullByExactEdgeTests(uint threadID, int iNrCoarseLights, uint2 viTilLL, uint2 viTilUR, uint eyeIndex)
{
const bool bOnlyNeedFrustumSideEdges = true;
const int nrFrustEdges = bOnlyNeedFrustumSideEdges ? 4 : 8; // max 8 since we never need to test 4 far edges of frustum since they are identical vectors to near edges and plane is placed at vP0 on light hull.

{
const uint idxCoarse = lightsListLDS[l];
const int bufIdxCoarse = GenerateLightCullDataIndex(idxCoarse, g_iNrVisibLights, eyeIndex);
if(canEnter) canEnter = _LightVolumeData[idxCoarse].lightVolume != LIGHTVOLUMETYPE_SPHERE; // don't bother doing edge tests for sphere lights since these have camera aligned bboxes.
if(canEnter) canEnter = _LightVolumeData[bufIdxCoarse].lightVolume != LIGHTVOLUMETYPE_SPHERE; // don't bother doing edge tests for sphere lights since these have camera aligned bboxes.
SFiniteLightBound lgtDat = g_data[idxCoarse];
SFiniteLightBound lgtDat = g_data[bufIdxCoarse];
const float3 boxX = lgtDat.boxAxisX.xyz;
const float3 boxY = lgtDat.boxAxisY.xyz;

float3 vP1, vE1;
GetFrustEdge(vP1, vE1, e1, viTilLL, viTilUR, g_fFarPlane);
GetFrustEdge(vP1, vE1, e1, viTilLL, viTilUR, g_fFarPlane, eyeIndex);
// potential separation plane
float3 vN = cross(vE0, vE1);

positive=0; negative=0;
for(int j=0; j<8; j++)
{
float3 vPf = GetTileVertex(viTilLL, viTilUR, j, g_fFarPlane);
float3 vPf = GetTileVertex(viTilLL, viTilUR, j, g_fFarPlane, eyeIndex);
float fSignDist = dot(vN, vPf-vP0);
if(fSignDist>0) ++positive; else if(fSignDist<0) ++negative;
}

155
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/LightLoop/lightlistbuild-clustered.compute


#include "ShaderBase.hlsl"
#include "LightLoop.cs.hlsl"
#include "LightingConvexHullUtils.hlsl"
#include "LightCullUtils.hlsl"
#if !defined(SHADER_API_XBOXONE) && !defined(SHADER_API_PSSL)
#include "SortingComputeUtils.hlsl"

CBUFFER_START(UnityLightListClustered)
int g_iNrVisibLights;
float4x4 g_mInvScrProjection;
float4x4 g_mScrProjection;
float4x4 g_mInvScrProjectionArr[2];
float4x4 g_mScrProjectionArr[2];
uint g_isOrthographic;
int _EnvLightIndexShift;
int _DecalIndexShift;

CBUFFER_END
// ClusteredUtils.hlsl is dependent on the constants declared in UnityLightListClustered :/
// g_fClustBase, g_fNearPlane, g_fFarPlane, g_iLog2NumClusters
#ifdef MSAA_ENABLED
Texture2DMS<float> g_depth_tex : register( t0 );

#define NR_THREADS 64
// output buffer
RWStructuredBuffer<uint> g_vLayeredLightList : register( u0 ); // don't support RWBuffer yet in unity
RWStructuredBuffer<uint> g_LayeredOffset : register( u1 ); // don't support RWBuffer yet in unity
RWStructuredBuffer<uint> g_LayeredSingleIdxBuffer : register( u2 ); // don't support RWBuffer yet in unity

groupshared unsigned int coarseList[MAX_NR_COARSE_ENTRIES];
groupshared unsigned int clusterIdxs[MAX_NR_COARSE_ENTRIES/2];
groupshared float4 lightPlanes[4*6];
groupshared float4 lightPlanes[4*6]; // Each plane is defined by a float4. 6 planes per light, 4 lights (24 planes)
groupshared uint lightOffs;

groupshared uint lightOffsSph;
#endif
float GetLinearDepth(float zDptBufSpace) // 0 is near 1 is far
float GetLinearDepth(float zDptBufSpace, uint eyeIndex) // 0 is near 1 is far
float4x4 g_mInvScrProjection = g_mInvScrProjectionArr[eyeIndex];
// for perspective projection m22 is zero and m23 is +1/-1 (depends on left/right hand proj)
// however this function must also work for orthographic projection so we keep it like this.
float m22 = g_mInvScrProjection[2].z, m23 = g_mInvScrProjection[2].w;

//return v4Pres.z / v4Pres.w;
}
float3 GetViewPosFromLinDepth(float2 v2ScrPos, float fLinDepth)
float3 GetViewPosFromLinDepth(float2 v2ScrPos, float fLinDepth, uint eyeIndex)
float4x4 g_mScrProjection = g_mScrProjectionArr[eyeIndex];
bool isOrthographic = g_isOrthographic!=0;
float fSx = g_mScrProjection[0].x;
float fSy = g_mScrProjection[1].y;

return float3(isOrthographic ? p.xy : (fLinDepth*p.xy), fLinDepth);
}
float GetOnePixDiagWorldDistAtDepthOne()
float GetOnePixDiagWorldDistAtDepthOne(uint eyeIndex)
float4x4 g_mScrProjection = g_mScrProjectionArr[eyeIndex];
float fSx = g_mScrProjection[0].x;
float fSy = g_mScrProjection[1].y;

// SphericalIntersectionTests and CullByExactEdgeTests are close to the versions
// in lightlistbuild-bigtile.compute. But would need more re-factoring than needed
// right now.
int CullByExactEdgeTests(uint threadID, int iNrCoarseLights, uint2 viTilLL, uint2 viTilUR, float fTileFarPlane);
int CullByExactEdgeTests(uint threadID, int iNrCoarseLights, uint2 viTilLL, uint2 viTilUR, float fTileFarPlane, uint eyeIndex);
int SphericalIntersectionTests(uint threadID, int iNrCoarseLights, float2 screenCoordinate);
int SphericalIntersectionTests(uint threadID, int iNrCoarseLights, float2 screenCoordinate, uint eyeIndex);
float4 FetchPlane(int l, int p);
float4 FetchPlane(int l, int p, uint eyeIndex);
bool CheckIntersection(int l, int k, uint2 viTilLL, uint2 viTilUR, float suggestedBase)
bool CheckIntersection(int l, int k, uint2 viTilLL, uint2 viTilUR, float suggestedBase, uint eyeIndex)
// If this light's screen space depth bounds intersect this cluster...simple cluster test
// TODO: Unify this code with the code in CheckIntersectionBasic...
unsigned int val = (clusterIdxs[l>>1]>>(16*(l&1)))&0xffff;
bool bIsHit = ((val>>0)&0xff)<=((uint) k) && ((uint) k)<=((val>>8)&0xff);
if(bIsHit)

float x = (i&1)==0 ? viTilLL.x : viTilUR.x;
float y = (i&2)==0 ? viTilLL.y : viTilUR.y;
float z = (i&4)==0 ? depthAtNearZ : depthAtFarZ;
float3 vP = GetViewPosFromLinDepth( float2(x, y), z);
float3 vP = GetViewPosFromLinDepth( float2(x, y), z, eyeIndex);
// Test each corner of the cluster against the light bounding box planes
bAllInvisib = bAllInvisib && dot(plane, float4(vP,1.0))>0;
}

return bIsHit;
}
// l is the coarse light index, k is the cluster index
bool CheckIntersectionBasic(int l, int k)
{
unsigned int val = (clusterIdxs[l>>1]>>(16*(l&1)))&0xffff;

[numthreads(NR_THREADS, 1, 1)]
void LIGHTLISTGEN(uint threadID : SV_GroupIndex, uint3 u3GroupID : SV_GroupID)
{
uint eyeIndex = u3GroupID.z;
uint2 tileIDX = u3GroupID.xy;
uint t=threadID;

// Screen space coordinates of clustered tile
uint2 viTilLL = TILE_SIZE_CLUSTERED*tileIDX;
uint2 viTilUR = min( viTilLL+uint2(TILE_SIZE_CLUSTERED,TILE_SIZE_CLUSTERED), uint2(g_screenSize.x, g_screenSize.y) ); // not width and height minus 1, since viTilUR represents the exclusive far corner of the tile.

for(int idx=t; idx<(TILE_SIZE_CLUSTERED*TILE_SIZE_CLUSTERED); idx+=NR_THREADS)
{
// XRTODO: We need to stereo-ize access to g_depth_tex for texture arrays.
// TODO: For stereo double-wide, I need a proper way to insert the second eye width offset. Right now, I can just
// use g_screenSize.x, but that's kinda cheating.
// Additionally, we're going to have a method to select between a doublewide texture or texture array. Doubling
// the kernels seems like a bad idea. We could branch our texture read to switch between different texture declarations.
uint stereoDWOffset = eyeIndex * g_screenSize.x;
uPixCrd.x += stereoDWOffset;
#ifdef MSAA_ENABLED
for(int i=0; i<g_iNumSamplesMSAA; i++)
{

#endif
}
// Why is this a uint? Doesn't InterlockedMax support shared mem floats?
InterlockedMax(ldsZMax, asuint(dpt_ma) );

if(dpt_ma<=0.0) dpt_ma = VIEWPORT_SCALE_Z; // assume sky pixel
#endif
// 'Normalized' coordinates of tile, for use with AABB bounds in g_vBoundsBuffer
float2 vTileLL = float2(viTilLL.x/g_screenSize.x, viTilLL.y/g_screenSize.y);
float2 vTileUR = float2(viTilUR.x/g_screenSize.x, viTilUR.y/g_screenSize.y);

int NrBigTilesX = (nrTilesX+((1<<log2BigTileToClustTileRatio)-1))>>log2BigTileToClustTileRatio;
const int bigTileIdx = (tileIDX.y>>log2BigTileToClustTileRatio)*NrBigTilesX + (tileIDX.x>>log2BigTileToClustTileRatio); // map the idx to 64x64 tiles
int nrBigTileLights = g_vBigTileLightList[MAX_NR_BIG_TILE_LIGHTS_PLUS_ONE*bigTileIdx+0];
int NrBigTilesX = (nrTilesX + ((1<<log2BigTileToClustTileRatio)-1)) >> log2BigTileToClustTileRatio;
int NrBigTilesY = (nrTilesY + ((1<<log2BigTileToClustTileRatio)-1)) >> log2BigTileToClustTileRatio;
const int bigTileBase = eyeIndex * NrBigTilesX * NrBigTilesY;
const int bigTileIdx = bigTileBase + ((tileIDX.y>>log2BigTileToClustTileRatio)*NrBigTilesX) + (tileIDX.x>>log2BigTileToClustTileRatio); // map the idx to 64x64 tiles
int nrBigTileLights = g_vBigTileLightList[MAX_NR_BIG_TILE_LIGHTS_PLUS_ONE*bigTileIdx+0];
for(int l0=(int) t; l0<(int) nrBigTileLights; l0 += NR_THREADS)
{
int l = g_vBigTileLightList[MAX_NR_BIG_TILE_LIGHTS_PLUS_ONE*bigTileIdx+l0+1];

#endif
const float2 vMi = g_vBoundsBuffer[l].xy;
const float2 vMa = g_vBoundsBuffer[l+g_iNrVisibLights].xy;
// TODO: Seems kinda funny that we repeat this exact code here, bigtile, and FPTL...
const ScreenSpaceBoundsIndices boundsIndices = GenerateScreenSpaceBoundsIndices(l, g_iNrVisibLights, eyeIndex);
const float2 vMi = g_vBoundsBuffer[boundsIndices.min].xy;
const float2 vMa = g_vBoundsBuffer[boundsIndices.max].xy;
if( all(vMa>vTileLL) && all(vMi<vTileUR))
{

int iNrCoarseLights = min(lightOffs,MAX_NR_COARSE_ENTRIES);
#ifdef PERFORM_SPHERICAL_INTERSECTION_TESTS
iNrCoarseLights = SphericalIntersectionTests( t, iNrCoarseLights, float2(min(viTilLL.xy+uint2(TILE_SIZE_CLUSTERED/2,TILE_SIZE_CLUSTERED/2), uint2(g_screenSize.x-1, g_screenSize.y-1))) );
iNrCoarseLights = SphericalIntersectionTests( t, iNrCoarseLights, float2(min(viTilLL.xy+uint2(TILE_SIZE_CLUSTERED/2,TILE_SIZE_CLUSTERED/2), uint2(g_screenSize.x-1, g_screenSize.y-1))), eyeIndex );
float fTileFarPlane = GetLinearDepth(dpt_ma);
#else
float fTileFarPlane = -GetLinearDepth(dpt_ma);
float fTileFarPlane = GetLinearDepth(dpt_ma, eyeIndex);
#else // USE_LEFT_HAND_CAMERA_SPACE
float fTileFarPlane = -GetLinearDepth(dpt_ma, eyeIndex);
#else
#else // ENABLE_DEPTH_TEXTURE_BACKPLANE
float fTileFarPlane = g_fFarPlane;
float suggestedBase = g_fClustBase;
#endif

iNrCoarseLights = CullByExactEdgeTests(t, iNrCoarseLights, viTilLL.xy, viTilUR.xy, fTileFarPlane);
iNrCoarseLights = CullByExactEdgeTests(t, iNrCoarseLights, viTilLL.xy, viTilUR.xy, fTileFarPlane, eyeIndex);
// NOTE: Why not sort on console?
#if !defined(SHADER_API_XBOXONE) && !defined(SHADER_API_PSSL)
SORTLIST(coarseList, iNrCoarseLights, MAX_NR_COARSE_ENTRIES, t, NR_THREADS);
#endif

// TODO: We should write some encode/decode functions to help put cluster indices into the shared mem buffer,
// and extract them later. The code that reads from clusterIdx is hairy.
const unsigned int clustIdxMi0 = (const unsigned int) min(255,SnapToClusterIdx(GetLinearDepth(g_vBoundsBuffer[l0].z), suggestedBase));
const unsigned int clustIdxMa0 = (const unsigned int) min(255,SnapToClusterIdx(GetLinearDepth(g_vBoundsBuffer[l0+g_iNrVisibLights].z), suggestedBase));
const unsigned int clustIdxMi1 = (const unsigned int) min(255,SnapToClusterIdx(GetLinearDepth(g_vBoundsBuffer[l1].z), suggestedBase));
const unsigned int clustIdxMa1 = (const unsigned int) min(255,SnapToClusterIdx(GetLinearDepth(g_vBoundsBuffer[l1+g_iNrVisibLights].z), suggestedBase));
const ScreenSpaceBoundsIndices l0Bounds = GenerateScreenSpaceBoundsIndices(l0, g_iNrVisibLights, eyeIndex);
const ScreenSpaceBoundsIndices l1Bounds = GenerateScreenSpaceBoundsIndices(l1, g_iNrVisibLights, eyeIndex);
const unsigned int clustIdxMi0 = (const unsigned int)min(255, SnapToClusterIdx(GetLinearDepth(g_vBoundsBuffer[l0Bounds.min].z, eyeIndex), suggestedBase));
const unsigned int clustIdxMa0 = (const unsigned int)min(255, SnapToClusterIdx(GetLinearDepth(g_vBoundsBuffer[l0Bounds.max].z, eyeIndex), suggestedBase));
const unsigned int clustIdxMi1 = (const unsigned int)min(255, SnapToClusterIdx(GetLinearDepth(g_vBoundsBuffer[l1Bounds.min].z, eyeIndex), suggestedBase));
const unsigned int clustIdxMa1 = (const unsigned int)min(255, SnapToClusterIdx(GetLinearDepth(g_vBoundsBuffer[l1Bounds.max].z, eyeIndex), suggestedBase));
clusterIdxs[l] = (clustIdxMa1<<24) | (clustIdxMi1<<16) | (clustIdxMa0<<8) | (clustIdxMi0<<0);
}
}
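The TODO above asks for encode/decode helpers around the byte packing written into clusterIdxs[]. A minimal sketch of what such helpers could look like, mirroring the shift/or layout of the line above; PackClusterMinMaxPair and UnpackClusterMinMax are hypothetical names that do not exist in the source:

uint PackClusterMinMaxPair(uint clustIdxMi0, uint clustIdxMa0, uint clustIdxMi1, uint clustIdxMa1)
{
    // Byte layout, low to high: min0, max0, min1, max1 - same as the expression above.
    return (clustIdxMa1 << 24) | (clustIdxMi1 << 16) | (clustIdxMa0 << 8) | (clustIdxMi0 << 0);
}

void UnpackClusterMinMax(uint packedPair, bool second, out uint clustIdxMi, out uint clustIdxMa)
{
    // 'second' selects the upper 16 bits (light 2*l+1) instead of the lower 16 bits (light 2*l).
    uint shift = second ? 16 : 0;
    clustIdxMi = (packedPair >> (shift + 0)) & 0xff;
    clustIdxMa = (packedPair >> (shift + 8)) & 0xff;
}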

int iSum = 0;
if(i<nrClusters)
{
// Each thread checks its respective cluster against all coarse lights for intersection.
// At the end, 'iSum' represents the number of lights that intersect this cluster!
// We have a limit to the number of lights we will track in a cluster (128). This is how much memory we
// want to allocate out of g_LayeredSingleIdxBuffer.
iSpaceAvail = min(iSum,MAX_NR_COARSE_ENTRIES); // combined storage for both direct lights and reflection
InterlockedAdd(g_LayeredSingleIdxBuffer[0], (uint) iSpaceAvail, start); // alloc list memory
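// InterlockedAdd writes the pre-add value of g_LayeredSingleIdxBuffer[0] into 'start', so this cluster owns the contiguous slice [start, start + iSpaceAvail) of the layered light index list.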
}

int shiftIndex[LIGHTCATEGORY_COUNT];
ZERO_INITIALIZE_ARRAY(int, shiftIndex, LIGHTCATEGORY_COUNT);
// NOTE: Why is this indexed like this?
shiftIndex[LIGHTCATEGORY_COUNT - 3] = _EnvLightIndexShift;
shiftIndex[LIGHTCATEGORY_COUNT - 2] = _DecalIndexShift;
shiftIndex[LIGHTCATEGORY_COUNT - 1] = _DensityVolumeIndexShift;

if(i<24) lightPlanes[6*m+p] = FetchPlane(min(iNrCoarseLights-1,ll+m), p);
if(i<24) lightPlanes[6*m+p] = FetchPlane(min(iNrCoarseLights-1,ll+m), p, eyeIndex);
#if !defined(SHADER_API_XBOXONE) && !defined(SHADER_API_PSSL)
GroupMemoryBarrierWithGroupSync();
#endif

if(offs<(start+iSpaceAvail) && i<nrClusters && CheckIntersection(l, i, viTilLL.xy, viTilUR.xy, suggestedBase) )
if(offs<(start+iSpaceAvail) && i<nrClusters && CheckIntersection(l, i, viTilLL.xy, viTilUR.xy, suggestedBase, eyeIndex) )
uint lightCategory = _LightVolumeData[coarseList[l]].lightCategory;
const int lightVolIndex = GenerateLightCullDataIndex(coarseList[l], g_iNrVisibLights, eyeIndex);
uint lightCategory = _LightVolumeData[lightVolIndex].lightCategory;
++categoryListCount[lightCategory];
g_vLayeredLightList[offs++] = coarseList[l] - shiftIndex[lightCategory];
}

}
uint localOffs=0;
offs = i*nrTilesX*nrTilesY + tileIDX.y*nrTilesX + tileIDX.x;
offs = GenerateLayeredOffsetBufferIndex(0, tileIDX, i, nrTilesX, nrTilesY, nrClusters, eyeIndex);
for(int category=0; category<LIGHTCATEGORY_COUNT; category++)
{
int numLights = min(categoryListCount[category],31); // only allow 5 bits

}
#ifdef ENABLE_DEPTH_TEXTURE_BACKPLANE
if(threadID==0) g_logBaseBuffer[tileIDX.y*nrTilesX + tileIDX.x] = suggestedBase;
const uint logBaseIndex = GenerateLogBaseBufferIndex(tileIDX, nrTilesX, nrTilesY, eyeIndex);
if(threadID==0) g_logBaseBuffer[logBaseIndex] = suggestedBase;
float4 FetchPlane(int l, int p)
float4 FetchPlane(int l, int p, uint eyeIndex)
SFiniteLightBound lgtDat = g_data[coarseList[l]];
const int lightBoundIndex = GenerateLightCullDataIndex(coarseList[l], g_iNrVisibLights, eyeIndex);
SFiniteLightBound lgtDat = g_data[lightBoundIndex];
const float3 boxX = lgtDat.boxAxisX.xyz;
const float3 boxY = lgtDat.boxAxisY.xyz;

int SphericalIntersectionTests(uint threadID, int iNrCoarseLights, float2 screenCoordinate)
int SphericalIntersectionTests(uint threadID, int iNrCoarseLights, float2 screenCoordinate, uint eyeIndex)
float3 V = GetViewPosFromLinDepth( screenCoordinate, 1.0);
float3 V = GetViewPosFromLinDepth( screenCoordinate, 1.0, eyeIndex);
float3 V = GetViewPosFromLinDepth( screenCoordinate, -1.0);
float3 V = GetViewPosFromLinDepth( screenCoordinate, -1.0, eyeIndex);
float onePixDiagDist = GetOnePixDiagWorldDistAtDepthOne();
float onePixDiagDist = GetOnePixDiagWorldDistAtDepthOne(eyeIndex);
SFiniteLightBound lgtDat = g_data[coarseList[l]];
const int lightBoundIndex = GenerateLightCullDataIndex(coarseList[l], g_iNrVisibLights, eyeIndex);
SFiniteLightBound lgtDat = g_data[lightBoundIndex];
if( !DoesSphereOverlapTile(V, halfTileSizeAtZDistOne, lgtDat.center.xyz, lgtDat.radius, g_isOrthographic!=0) )
coarseList[l]=UINT_MAX;

#ifdef EXACT_EDGE_TESTS
float3 GetTileVertex(uint2 viTilLL, uint2 viTilUR, int i, float fTileFarPlane)
float3 GetTileVertex(uint2 viTilLL, uint2 viTilUR, int i, float fTileFarPlane, uint eyeIndex)
{
float x = (i&1)==0 ? viTilLL.x : viTilUR.x;
float y = (i&2)==0 ? viTilLL.y : viTilUR.y;

#endif
return GetViewPosFromLinDepth( float2(x, y), z);
return GetViewPosFromLinDepth( float2(x, y), z, eyeIndex);
void GetFrustEdge(out float3 vP0, out float3 vE0, const int e0, uint2 viTilLL, uint2 viTilUR, float fTileFarPlane)
void GetFrustEdge(out float3 vP0, out float3 vE0, const int e0, uint2 viTilLL, uint2 viTilUR, float fTileFarPlane, uint eyeIndex)
vP0 = GetTileVertex(uint2(viTilLL.x, viTilUR.y), uint2(viTilUR.x, viTilLL.y), i, fTileFarPlane);
vP0 = GetTileVertex(uint2(viTilLL.x, viTilUR.y), uint2(viTilUR.x, viTilLL.y), i, fTileFarPlane, eyeIndex);
#if USE_LEFT_HAND_CAMERA_SPACE
float3 edgeSectionZero = g_isOrthographic==0 ? vP0 : float3(0.0,0.0,1.0);

vE0 = iSection == 0 ? edgeSectionZero : (((iSwizzle & 0x2) == 0 ? 1.0f : (-1.0f)) * ((int)(iSwizzle & 0x1) == (iSwizzle >> 1) ? float3(1, 0, 0) : float3(0, 1, 0)));
}
int CullByExactEdgeTests(uint threadID, int iNrCoarseLights, uint2 viTilLL, uint2 viTilUR, float fTileFarPlane)
int CullByExactEdgeTests(uint threadID, int iNrCoarseLights, uint2 viTilLL, uint2 viTilUR, float fTileFarPlane, uint eyeIndex)
{
if(threadID==0) lightOffs2 = 0;

#if !defined(SHADER_API_XBOXONE) && !defined(SHADER_API_PSSL)
GroupMemoryBarrierWithGroupSync();
#endif
const int idxCoarse = coarseList[l];
UNITY_BRANCH if (_LightVolumeData[idxCoarse].lightVolume != LIGHTVOLUMETYPE_SPHERE) // don't bother doing edge tests for sphere lights since these have camera aligned bboxes.
const int lightCullIndex = GenerateLightCullDataIndex(coarseList[l], g_iNrVisibLights, eyeIndex);
UNITY_BRANCH if (_LightVolumeData[lightCullIndex].lightVolume != LIGHTVOLUMETYPE_SPHERE) // don't bother doing edge tests for sphere lights since these have camera aligned bboxes.
SFiniteLightBound lgtDat = g_data[idxCoarse];
SFiniteLightBound lgtDat = g_data[lightCullIndex];
const float3 boxX = lgtDat.boxAxisX.xyz;
const float3 boxY = lgtDat.boxAxisY.xyz;

float3 vP1, vE1;
GetFrustEdge(vP1, vE1, e1, viTilLL, viTilUR, fTileFarPlane);
GetFrustEdge(vP1, vE1, e1, viTilLL, viTilUR, fTileFarPlane, eyeIndex);
// potential separation plane
float3 vN = cross(vE0, vE1);

positive=0; negative=0;
for(int j=0; j<8; j++)
{
float3 vPf = GetTileVertex(viTilLL, viTilUR, j, fTileFarPlane);
float3 vPf = GetTileVertex(viTilLL, viTilUR, j, fTileFarPlane, eyeIndex);
float fSignDist = dot(vN, vPf-vP0);
if(fSignDist>0) ++positive; else if(fSignDist<0) ++negative;
}

12
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/LightLoop/scrbound.compute


#include "CoreRP/ShaderLibrary/common.hlsl"
#include "LightLoop.cs.hlsl"
#include "LightCullUtils.hlsl"
#pragma only_renderers d3d11 ps4 xboxone vulkan metal

const int lgtIndex = subLigt+(int) g*8;
const int sideIndex = (int) (t%8);
const int eyeAdjustedLgtIndex = lgtIndex + (eyeIndex * g_iNrVisibLights);
const int eyeAdjustedLgtIndex = GenerateLightCullDataIndex(lgtIndex, g_iNrVisibLights, eyeIndex);
SFiniteLightBound lgtDat = g_data[eyeAdjustedLgtIndex];
const float3 boxX = lgtDat.boxAxisX.xyz;

// Each light's AABB is represented by two float3s, the min and max of the box.
// And for stereo, we have two sets of lights. Therefore, each eye has a set of mins, followed by
// a set of maxs, and each set is equal to g_iNrVisibLights.
const int eyeBaseIndex = eyeIndex * g_iNrVisibLights * 2;
const int minIndex = eyeBaseIndex + lgtIndex + 0;
const int maxIndex = eyeBaseIndex + lgtIndex + (int)g_iNrVisibLights;
g_vBoundsBuffer[minIndex] = float3(0.5*vMin.x + 0.5, 0.5*vMin.y + 0.5, vMin.z*VIEWPORT_SCALE_Z);
g_vBoundsBuffer[maxIndex] = float3(0.5*vMax.x + 0.5, 0.5*vMax.y + 0.5, vMax.z*VIEWPORT_SCALE_Z);
const ScreenSpaceBoundsIndices boundsIndices = GenerateScreenSpaceBoundsIndices(lgtIndex, g_iNrVisibLights, eyeIndex);
g_vBoundsBuffer[boundsIndices.min] = float3(0.5*vMin.x + 0.5, 0.5*vMin.y + 0.5, vMin.z*VIEWPORT_SCALE_Z);
g_vBoundsBuffer[boundsIndices.max] = float3(0.5*vMax.x + 0.5, 0.5*vMax.y + 0.5, vMax.z*VIEWPORT_SCALE_Z);
}
}
}
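For reference, GenerateScreenSpaceBoundsIndices comes from LightCullUtils.hlsl (included at the top of this file). A minimal sketch of what it presumably computes, based on the layout described in the comment above and on the explicit eyeBaseIndex/minIndex/maxIndex indexing it replaces; the actual definition may differ:

struct ScreenSpaceBoundsIndices
{
    int min;
    int max;
};

ScreenSpaceBoundsIndices GenerateScreenSpaceBoundsIndices(int lightIndex, int numVisibleLights, uint eyeIndex)
{
    // Per eye: numVisibleLights mins followed by numVisibleLights maxs.
    const int eyeBase = (int)eyeIndex * numVisibleLights * 2;
    ScreenSpaceBoundsIndices indices;
    indices.min = eyeBase + lightIndex;
    indices.max = eyeBase + lightIndex + numVisibleLights;
    return indices;
}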

24
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/LightUtilities.hlsl


#include "LightDefinition.cs.hlsl"
#define SETTER_FLOAT3(data, field, value)\
data.##field##X = value.x;\
data.##field##Y = value.y;\
data.##field##Z = value.z
// The EnvLightData of the sky light contains a bunch of compile-time constants.
// This function sets them directly to allow the compiler to propagate them and optimize the code.
EnvLightData InitSkyEnvLightData(int envIndex)

output.influenceShapeType = ENVSHAPETYPE_SKY;
output.envIndex = envIndex;
SETTER_FLOAT3(output, influenceForward, float3(0.0, 0.0, 1.0));
SETTER_FLOAT3(output, influenceUp, float3(0.0, 1.0, 0.0));
SETTER_FLOAT3(output, influenceRight, float3(1.0, 0.0, 0.0));
SETTER_FLOAT3(output, influencePositionWS, float3(0.0, 0.0, 0.0));
output.influenceForward = float3(0.0, 0.0, 1.0);
output.influenceUp = float3(0.0, 1.0, 0.0);
output.influenceRight = float3(1.0, 0.0, 0.0);
output.influencePositionWS = float3(0.0, 0.0, 0.0);
output.dimmer = 1.0;
output.weight = 1.0;
output.multiplier = 1.0;
SETTER_FLOAT3(output, proxyForward, float3(0.0, 0.0, 1.0));
SETTER_FLOAT3(output, proxyUp, float3(0.0, 1.0, 0.0));
SETTER_FLOAT3(output, proxyRight, float3(1.0, 0.0, 0.0));
output.proxyForward = float3(0.0, 0.0, 1.0);
output.proxyUp = float3(0.0, 1.0, 0.0);
output.proxyRight = float3(1.0, 0.0, 0.0);
#undef SETTER_FLOAT3
#endif // UNITY_LIGHT_UTILITIES_INCLUDED

33
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/LightUtils.cs


// Physical light unit helper
// All light units are in lumen (luminous power)
// Punctual lights (point, spot) are converted to candela (cd = lumens / steradian)
// Area lights are converted to luminance (cd/(m^2*steradian)) with the following formulation: Luminous Power / (Area * PI * steradian)
// For our isotropic area lights which expect radiance(W / (sr* m^2)) in the shader:
// power = Integral{area, Integral{hemisphere, radiance * <N, L>}},
// power = area * Pi * radiance,
// radiance = power / (area * Pi).
// We use photometric units, so radiance is luminance and power is luminous power
// Ref: Moving Frostbite to PBR
// Also good ref: https://www.radiance-online.org/community/workshops/2004-fribourg/presentations/Wandachowicz_paper.pdf
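A quick worked example of the conversions above (illustrative numbers, not from the source): an isotropic point light emitting 600 lumens over the full sphere (4*PI sr) gives 600 / (4*PI) ~= 47.7 candela, while a 1 m^2 area light emitting 1000 lumens gives 1000 / (1 * PI) ~= 318 in the luminance units quoted in the comment above.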

}
// convert intensity (lumen) to nits
public static float calculateLineLightArea(float intensity, float lineWidth)
public static float CalculateLineLightIntensity(float intensity, float lineWidth)
// The area of a cylinder is this:
// float lineRadius = 0.01f; // 1cm
//return intensity / (2.0f * Mathf.PI * lineRadius * lineWidth * Mathf.PI);
// But with our current line light algorithm we get an insane gap in intensity; the
// following formula (fully empirical) gives a better match to a rect light of 1cm width.
// It is basically point light intensity / line width.
//Line lights expect radiance (W / (sr * m^2)) in the shader.
//In the UI, we specify luminous flux (power) in lumens.
//First, it needs to be converted to radiometric units (radiant flux, W).
//Then we must recall how to compute power from radiance:
//radiance = differential_power / (differential_projected_area * differential_solid_angle),
//radiance = differential_power / (differential_area * differential_solid_angle * <N, L>),
//power = Integral{area, Integral{hemisphere, radiance * <N, L>}}.
//Unlike tube lights, our line lights have no surface area, so the integral becomes:
//power = Integral{length, Integral{sphere, radiance}}.
//For an isotropic line light, radiance is constant, therefore:
//power = length * (4 * Pi) * radiance,
//radiance = power / (length * (4 * Pi)).
return intensity / (4.0f * Mathf.PI * lineWidth);
}
}
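Worked example for the formula above (illustrative numbers, not from the source): a line light of length 2 m emitting 300 lumens returns 300 / (4 * PI * 2) ~= 11.9, the luminance value handed to the shader; the 'lineWidth' argument plays the role of the length in the formula above.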

9
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/Reflection/HDAdditionalReflectionData.cs


using UnityEngine.Experimental.Rendering.HDPipeline;
using UnityEngine.Serialization;
using UnityEngine.Experimental.Rendering.HDPipeline;
namespace UnityEngine.Experimental.Rendering
{

#pragma warning restore 414
public ShapeType influenceShape;
[Range(0.0f,1.0f)]
public float dimmer = 1.0f;
[FormerlySerializedAsAttribute("dimmer")]
public float multiplier = 1.0f;
[Range(0.0f, 1.0f)]
public float weight = 1.0f;
public float influenceSphereRadius = 3.0f;
public float sphereReprojectionVolumeRadius = 1.0f;
public bool useSeparateProjectionVolume = false;

13
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/Reflection/PlanarReflectionProbe.cs


using UnityEngine.Rendering;
using UnityEngine.Serialization;
using UnityEngine.Rendering;
namespace UnityEngine.Experimental.Rendering.HDPipeline
{

[SerializeField]
Vector3 m_CaptureLocalPosition;
[SerializeField]
[Range(0, 1)]
float m_Dimmer = 1;
[FormerlySerializedAsAttribute("m_Dimmer")]
float m_Multiplier = 1.0f;
[SerializeField]
[Range(0.0f, 1.0f)]
float m_Weight = 1.0f;
[SerializeField]
ReflectionProbeMode m_Mode = ReflectionProbeMode.Baked;
[SerializeField]

}
public Bounds bounds { get { return m_InfluenceVolume.GetBoundsAt(transform); } }
public Vector3 captureLocalPosition { get { return m_CaptureLocalPosition; } set { m_CaptureLocalPosition = value; } }
public float dimmer { get { return m_Dimmer; } }
public float weight { get { return m_Weight; } }
public float multiplier { get { return m_Multiplier; } }
public ReflectionProbeMode mode { get { return m_Mode; } }
public Matrix4x4 influenceToWorld
{

23
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/Reflection/PlanarReflectionProbeCache.cs


m_ProbeSize = probeSize;
m_CacheSize = cacheSize;
m_TextureCache = new TextureCache2D();
m_TextureCache = new TextureCache2D("PlanarReflectionProbe");
m_TextureCache.AllocTextureArray(cacheSize, probeSize, probeSize, probeFormat, isMipmaped);
m_IBLFilterGGX = iblFilter;

m_TempRenderTexture.dimension = TextureDimension.Tex2D;
m_TempRenderTexture.useMipMap = true;
m_TempRenderTexture.autoGenerateMips = false;
m_TempRenderTexture.name = CoreUtils.GetRenderTargetAutoName(m_ProbeSize, m_ProbeSize, RenderTextureFormat.ARGBHalf, "PlanarReflection", mips : true);
m_TempRenderTexture.name = CoreUtils.GetRenderTargetAutoName(m_ProbeSize, m_ProbeSize, RenderTextureFormat.ARGBHalf, "PlanarReflectionTemp", mips : true);
m_TempRenderTexture.Create();
m_ConvolutionTargetTexture = new RenderTexture(m_ProbeSize, m_ProbeSize, 1, RenderTextureFormat.ARGBHalf);

public void Release()
{
if (m_TextureCache != null)
{
m_TextureCache.Release();
m_TextureCache = null;
}
if (m_TempRenderTexture != null)
{
m_TempRenderTexture.Release();
m_TempRenderTexture = null;
}
if (m_ConvolutionTargetTexture != null)
{
m_ConvolutionTargetTexture.Release();
m_ConvolutionTargetTexture = null;
}
m_TextureCache.Release();
CoreUtils.Destroy(m_TempRenderTexture);
CoreUtils.Destroy(m_ConvolutionTargetTexture);
m_ProbeBakingState = null;
CoreUtils.Destroy(m_ConvertTextureMaterial);

9
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/Reflection/ProbeWrapper.cs


public abstract ReflectionProbeMode mode { get; }
public abstract Texture texture { get; }
// Position of the center of the probe in capture space
public abstract float dimmer { get; }
public abstract float weight { get; }
public abstract float multiplier { get; }
public abstract Matrix4x4 influenceToWorld { get; }
public abstract EnvShapeType influenceShapeType { get; }
public abstract Vector3 influenceExtents { get; }

public override Texture texture { get { return probe.texture; } }
public override ReflectionProbeMode mode { get { return probe.probe.mode; } }
public override EnvShapeType influenceShapeType { get { return ConvertShape(additional.influenceShape); } }
public override float dimmer { get { return additional.dimmer; } }
public override float weight { get { return additional.weight; } }
public override float multiplier { get { return additional.multiplier; } }
public override Vector3 influenceExtents
{
get

public override Matrix4x4 influenceToWorld { get { return planarReflectionProbe.influenceToWorld; } }
public override Texture texture { get { return planarReflectionProbe.texture; } }
public override EnvShapeType influenceShapeType { get { return ConvertShape(planarReflectionProbe.influenceVolume.shapeType); } }
public override float dimmer { get { return planarReflectionProbe.dimmer; } }
public override float weight { get { return planarReflectionProbe.weight; } }
public override float multiplier { get { return planarReflectionProbe.multiplier; } }
public override Vector3 influenceExtents
{
get

30
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/Reflection/ReflectionProbeCache.cs


m_ProbeSize = probeSize;
m_CacheSize = cacheSize;
m_TextureCache = new TextureCacheCubemap();
m_TextureCache = new TextureCacheCubemap("ReflectionProbe");
m_TextureCache.AllocTextureArray(cacheSize, probeSize, probeFormat, isMipmaped, m_CubeToPano);
m_IBLFilterGGX = iblFilter;

m_TempRenderTexture.dimension = TextureDimension.Cube;
m_TempRenderTexture.useMipMap = true;
m_TempRenderTexture.autoGenerateMips = false;
m_TempRenderTexture.name = CoreUtils.GetRenderTargetAutoName(m_ProbeSize, m_ProbeSize, RenderTextureFormat.ARGBHalf, "PlanarReflection", mips : true);
m_TempRenderTexture.name = CoreUtils.GetRenderTargetAutoName(m_ProbeSize, m_ProbeSize, RenderTextureFormat.ARGBHalf, "ReflectionProbeTemp", mips : true);
m_TempRenderTexture.Create();
m_ConvolutionTargetTexture = new RenderTexture(m_ProbeSize, m_ProbeSize, 1, RenderTextureFormat.ARGBHalf);

m_ConvolutionTargetTexture.autoGenerateMips = false;
m_ConvolutionTargetTexture.name = CoreUtils.GetRenderTargetAutoName(m_ProbeSize, m_ProbeSize, RenderTextureFormat.ARGBHalf, "PlanarReflection", mips : true);
m_ConvolutionTargetTexture.name = CoreUtils.GetRenderTargetAutoName(m_ProbeSize, m_ProbeSize, RenderTextureFormat.ARGBHalf, "ReflectionProbeConvolution", mips : true);
m_ConvolutionTargetTexture.Create();
InitializeProbeBakingStates();

public void Release()
{
if (m_TextureCache != null)
{
m_TextureCache.Release();
m_TextureCache = null;
}
if (m_TempRenderTexture != null)
{
m_TempRenderTexture.Release();
m_TempRenderTexture = null;
}
if (m_ConvolutionTargetTexture != null)
{
m_ConvolutionTargetTexture.Release();
m_ConvolutionTargetTexture = null;
}
m_TextureCache.Release();
CoreUtils.Destroy(m_TempRenderTexture);
CoreUtils.Destroy(m_ConvolutionTargetTexture);
m_ProbeBakingState = null;
CoreUtils.Destroy(m_ConvertTextureMaterial);

bool formatMismatch = cubeTexture.format != TextureFormat.RGBAHalf; // Temporary RT for convolution is always FP16
if (formatMismatch || sizeMismatch)
{
// We comment out the following warnings as they have no impact on the result but spam the console; all we lose is some offline time and a bit of quality.
Debug.LogWarningFormat("Baked Reflection Probe {0} does not match HDRP Reflection Probe Cache size of {1}. Consider baking it at the same size for better loading performance.", texture.name, m_ProbeSize);
// Debug.LogWarningFormat("Baked Reflection Probe {0} does not match HDRP Reflection Probe Cache size of {1}. Consider baking it at the same size for better loading performance.", texture.name, m_ProbeSize);
Debug.LogWarningFormat("Baked Reflection Probe {0} is compressed but the HDRP Reflection Probe Cache is not. Consider removing compression from the input texture for better quality.", texture.name);
// Debug.LogWarningFormat("Baked Reflection Probe {0} is compressed but the HDRP Reflection Probe Cache is not. Consider removing compression from the input texture for better quality.", texture.name);
}
ConvertTexture(cmd, cubeTexture, m_TempRenderTexture);
}

48
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/Reflection/VolumeProjection.hlsl


{
return transpose(
float3x3(
GetProxyRight(lightData),
GetProxyUp(lightData),
GetProxyForward(lightData)
lightData.proxyRight,
lightData.proxyUp,
lightData.proxyForward
)
); // worldToLocal assumes no scaling
}

float3 positionPS = positionWS - GetProxyPositionWS(lightData);
float3 positionPS = positionWS - lightData.proxyPositionWS;
positionPS = mul(positionPS, worldToPS).xyz;
return positionPS;
}

float sphereOuterDistance = lightData.proxyExtentsX;
float sphereOuterDistance = lightData.proxyExtents.x;
float projectionDistance = IntersectRaySphereSimple(positionPS, dirPS, sphereOuterDistance);
projectionDistance = max(projectionDistance, lightData.minProjectionDistance); // Set the projection to infinite if requested (means no projection shape)

float IntersectBoxProxy(EnvLightData lightData, float3 dirPS, float3 positionPS)
{
float3 boxOuterDistance = GetProxyExtents(lightData);
float3 boxOuterDistance = lightData.proxyExtents;
float projectionDistance = IntersectRayAABBSimple(positionPS, dirPS, -boxOuterDistance, boxOuterDistance);
projectionDistance = max(projectionDistance, lightData.minProjectionDistance); // Set the projection to infinite if requested (means no projection shape)

float InfluenceSphereWeight(EnvLightData lightData, BSDFData bsdfData, float3 positionWS, float3 positionLS, float3 dirLS)
{
float lengthPositionLS = length(positionLS);
float sphereInfluenceDistance = lightData.influenceExtentsX - lightData.blendDistancePositiveX;
float sphereInfluenceDistance = lightData.influenceExtents.x - lightData.blendDistancePositive.x;
float alpha = saturate(1.0 - distFade / max(lightData.blendDistancePositiveX, 0.0001)); // avoid divide by zero
float alpha = saturate(1.0 - distFade / max(lightData.blendDistancePositive.x, 0.0001)); // avoid divide by zero
float insideInfluenceNormalVolume = lengthPositionLS <= (lightData.influenceExtentsX - lightData.blendNormalDistancePositiveX) ? 1.0 : 0.0;
float insideWeight = InfluenceFadeNormalWeight(bsdfData.normalWS, normalize(positionWS - GetCapturePositionWS(lightData)));
float insideInfluenceNormalVolume = lengthPositionLS <= (lightData.influenceExtents.x - lightData.blendNormalDistancePositive.x) ? 1.0 : 0.0;
float insideWeight = InfluenceFadeNormalWeight(bsdfData.normalWS, normalize(positionWS - lightData.capturePositionWS));
alpha *= insideInfluenceNormalVolume ? 1.0 : insideWeight;
#endif

float InfluenceBoxWeight(EnvLightData lightData, BSDFData bsdfData, float3 positionWS, float3 positionIS, float3 dirIS)
{
float3 influenceExtents = GetInfluenceExtents(lightData);
float3 influenceExtents = lightData.influenceExtents;
// 2. Process the position influence
// Calculate falloff value, so reflections on the edges of the volume would gradually blend to previous reflection.
#if defined(ENVMAP_FEATURE_PERFACEINFLUENCE) || defined(ENVMAP_FEATURE_INFLUENCENORMAL) || defined(ENVMAP_FEATURE_PERFACEFADE)

#if defined(ENVMAP_FEATURE_PERFACEINFLUENCE)
// Influence falloff for each face
float3 negativeFalloff = negativeDistance / max(0.0001, GetBlendDistanceNegative(lightData));
float3 positiveFalloff = positiveDistance / max(0.0001, GetBlendDistancePositive(lightData));
float3 negativeFalloff = negativeDistance / max(0.0001, lightData.blendDistanceNegative);
float3 positiveFalloff = positiveDistance / max(0.0001, lightData.blendDistancePositive);
// Falloff is the min over all faces
float influenceFalloff = min(

float alpha = saturate(influenceFalloff);
#else
float distFace = DistancePointBox(positionIS, -influenceExtents + lightData.blendDistancePositiveX, influenceExtents - lightData.blendDistancePositiveX);
float alpha = saturate(1.0 - distFace / max(lightData.blendDistancePositiveX, 0.0001));
float distFace = DistancePointBox(positionIS, -influenceExtents + lightData.blendDistancePositive.x, influenceExtents - lightData.blendDistancePositive.x);
float alpha = saturate(1.0 - distFace / max(lightData.blendDistancePositive.x, 0.0001));
float3 belowPositiveInfluenceNormalVolume = positiveDistance / max(0.0001, GetBlendNormalDistancePositive(lightData));
float3 aboveNegativeInfluenceNormalVolume = negativeDistance / max(0.0001, GetBlendNormalDistanceNegative(lightData));
float3 belowPositiveInfluenceNormalVolume = positiveDistance / max(0.0001, lightData.blendNormalDistancePositive);
float3 aboveNegativeInfluenceNormalVolume = negativeDistance / max(0.0001, lightData.blendNormalDistanceNegative);
float insideWeight = InfluenceFadeNormalWeight(bsdfData.normalWS, normalize(positionWS - GetCapturePositionWS(lightData)));
float insideWeight = InfluenceFadeNormalWeight(bsdfData.normalWS, normalize(positionWS - lightData.capturePositionWS));
alpha *= insideInfluenceNormalVolume ? 1.0 : insideWeight;
#endif

// We consider R.x as cos(X) and then fade as angle from 60°(=acos(1/2)) to 75°(=acos(1/4))
// For positive axes: axisFade = (R - 1/4) / (1/2 - 1/4)
// <=> axisFade = 4 * R - 1;
float3 faceFade = saturate((4 * dirIS - 1) * GetBoxSideFadePositive(lightData))
+ saturate((-4 * dirIS - 1) * GetBoxSideFadeNegative(lightData));
float3 faceFade = saturate((4 * dirIS - 1) * lightData.boxSideFadePositive)
+ saturate((-4 * dirIS - 1) * lightData.boxSideFadeNegative);
alpha *= saturate(faceFade.x + faceFade.y + faceFade.z);
#endif
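A quick check of the remap above: at 60 degrees, R = cos 60 = 1/2, so axisFade = 4 * (1/2) - 1 = 1 (no fade); at roughly 75 degrees, R ~= 1/4, so axisFade = 4 * (1/4) - 1 = 0 (fully faded); the saturate calls clamp everything outside [0, 1].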

{
return transpose(
float3x3(
GetInfluenceRight(lightData),
GetInfluenceUp(lightData),
GetInfluenceForward(lightData)
lightData.influenceRight,
lightData.influenceUp,
lightData.influenceForward
)
); // worldToLocal assumes no scaling
}

float3 positionIS = positionWS - GetInfluencePositionWS(lightData);
float3 positionIS = positionWS - lightData.influencePositionWS;
positionIS = mul(positionIS, worldToIS).xyz;
return positionIS;
}

2
ScriptableRenderPipeline/HDRenderPipeline/HDRP/MRTBufferManager.cs


public abstract void CreateBuffers();
public void BindBufferAsTextures(CommandBuffer cmd)
public virtual void BindBufferAsTextures(CommandBuffer cmd)
{
for (int i = 0; i < m_BufferCount; ++i)
{

6
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Decal/Decal.cs.hlsl


// PackingRules = Exact
struct DecalData
{
float4x4 worldToDecal;
float4x4 worldToDecal;
float4x4 normalToWorld;
};

float4x4 GetWorldToDecal(DecalData value)
{
return value.worldToDecal;
}
float4x4 GetNormalToWorld(DecalData value)
{
return value.normalToWorld;

12
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Decal/Decal.shader


Shader "HDRenderPipeline/Decal"
{
Properties
{
{
_MaskMap("MaskMap", 2D) = "white" {}
_MaskMap("MaskMap", 2D) = "white" {}
_DecalBlend("_DecalBlend", Range(0.0, 1.0)) = 0.5
}

//-------------------------------------------------------------------------------------
// Define
//-------------------------------------------------------------------------------------
#define UNITY_MATERIAL_DECAL
#define UNITY_MATERIAL_DECAL
//-------------------------------------------------------------------------------------
// Include
//-------------------------------------------------------------------------------------

SubShader
{
Tags{ "RenderPipeline" = "HDRenderPipeline"}
Pass
{
Name "DBuffer" // Name is not used

#define SHADERPASS SHADERPASS_DBUFFER
#include "../../ShaderVariables.hlsl"
#include "Decal.hlsl"
#include "ShaderPass/DecalSharePass.hlsl"
#include "ShaderPass/DecalSharePass.hlsl"
#include "DecalData.hlsl"
#include "../../ShaderPass/ShaderPassDBuffer.hlsl"

20
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Decal/DecalUtilities.hlsl


return _DecalDatas[j];
}
// Caution: We can't compute the LOD inside a dynamic loop; the gradients are not accessible.
// We need to find a way to calculate mips. For now, just fetch the first mip of the decals
src.xyz = mul(decalToWorld, UnpackNormalmapRGorAG(SAMPLE_TEXTURE2D_ARRAY(_DecalAtlas, sampler_DecalAtlas, texCoords, sliceIndex))) * 0.5f + 0.5f;
src.xyz = mul(decalToWorld, UnpackNormalmapRGorAG(SAMPLE_TEXTURE2D_ARRAY_LOD(_DecalAtlas, sampler_DecalAtlas, texCoords, sliceIndex, 0 /* ComputeTextureLOD(texCoords) */))) * 0.5f + 0.5f;
src.w = blend;
dst.xyz = src.xyz * src.w + dst.xyz * (1.0f - src.w);
dst.w = dst.w * (1.0f - src.w);

void ApplyBlendDiffuse(inout float4 dst, inout int matMask, float2 texCoords, int sliceIndex, int mapMask, float blend)
{
float4 src = SAMPLE_TEXTURE2D_ARRAY(_DecalAtlas, sampler_DecalAtlas, texCoords, sliceIndex);
float4 src = SAMPLE_TEXTURE2D_ARRAY_LOD(_DecalAtlas, sampler_DecalAtlas, texCoords, sliceIndex, 0 /* ComputeTextureLOD(texCoords) */);
src.w *= blend;
dst.xyz = src.xyz * src.w + dst.xyz * (1.0f - src.w);
dst.w = dst.w * (1.0f - src.w);

void ApplyBlendMask(inout float4 dst, inout int matMask, float2 texCoords, int sliceIndex, int mapMask, float blend)
{
float4 src = SAMPLE_TEXTURE2D_ARRAY(_DecalAtlas, sampler_DecalAtlas, texCoords, sliceIndex);
float4 src = SAMPLE_TEXTURE2D_ARRAY_LOD(_DecalAtlas, sampler_DecalAtlas, texCoords, sliceIndex, 0 /* ComputeTextureLOD(texCoords) */);
src.z = src.w;
src.w = blend;
dst.xyz = src.xyz * src.w + dst.xyz * (1.0f - src.w);

{
if(_EnableDBuffer)
{
DecalSurfaceData decalSurfaceData;
DecalSurfaceData decalSurfaceData;
int mask = 0;
// the code in the macros gets moved inside the conditionals by the compiler
FETCH_DBUFFER(DBuffer, _DBufferTexture, posInput.positionSS);

decalStart = 0;
#endif
float3 positionWS = GetAbsolutePositionWS(posInput.positionWS);
uint i = 0;
uint i = 0;
DecalData decalData = FetchDecal(decalStart, i);
DecalData decalData = FetchDecal(decalStart, i);
float3 positionDS = mul(decalData.worldToDecal, float4(positionWS, 1.0)).xyz;
positionDS = positionDS * float3(1.0, -1.0, 1.0) + float3(0.5, 0.0f, 0.5);
float decalBlend = decalData.normalToWorld[0][3];

}
}
#else
mask = UnpackByte(LOAD_TEXTURE2D(_DecalHTileTexture, posInput.positionSS / 8));
mask = UnpackByte(LOAD_TEXTURE2D(_DecalHTileTexture, posInput.positionSS / 8).r);
DECODE_FROM_DBUFFER(DBuffer, decalSurfaceData);
DECODE_FROM_DBUFFER(DBuffer, decalSurfaceData);
if(mask & DBUFFERHTILEBIT_NORMAL)
{
surfaceData.normalWS.xyz = normalize(surfaceData.normalWS.xyz * decalSurfaceData.normalWS.w + decalSurfaceData.normalWS.xyz);

21
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/GBufferManager.cs


using UnityEngine.Rendering;
using UnityEngine.Rendering.PostProcessing;
namespace UnityEngine.Experimental.Rendering.HDPipeline
{

bool m_EnableShadowMask = false;
bool m_SupportShadowMask = false;
RenderPipelineMaterial m_DeferredMaterial;
protected RenderTargetIdentifier[] m_RTIDsNoShadowMask;

m_DeferredMaterial = deferredMaterial;
m_GBufferCount = deferredMaterial.GetMaterialGBufferCount();
m_EnableShadowMask = enableBakeShadowMask;
m_SupportShadowMask = enableBakeShadowMask;
m_RTIDsNoShadowMask = new RenderTargetIdentifier[m_GBufferCount];
}

m_RTIDsNoShadowMask[gbufferIndex] = HDShaderIDs._GBufferTexture[gbufferIndex];
}
if (m_EnableShadowMask)
if (m_SupportShadowMask)
}
}
public override void BindBufferAsTextures(CommandBuffer cmd)
{
for (int i = 0; i < m_BufferCount; ++i)
{
cmd.SetGlobalTexture(m_TextureShaderIDs[i], m_RTs[i]);
}
// When shadow masks are disabled (i.e. we haven't created a shadow mask texture), bind a white texture.
if (!m_SupportShadowMask)
{
cmd.SetGlobalTexture(HDShaderIDs._ShadowMaskTexture, RuntimeUtilities.whiteTexture);
}
}

1
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/GGXConvolution/GGXConvolve.shader


{
SubShader
{
Tags{ "RenderPipeline" = "HDRenderPipeline" }
Pass
{
Cull Off

7
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/GGXConvolution/RuntimeFilterIBL.cs


m_GgxIblSampleData.enableRandomWrite = true;
m_GgxIblSampleData.filterMode = FilterMode.Point;
m_GgxIblSampleData.name = CoreUtils.GetRenderTargetAutoName(m_GgxIblMaxSampleCount, k_GgxIblMipCountMinusOne, RenderTextureFormat.ARGBHalf, "GGXIblSampleData");
m_GgxIblSampleData.hideFlags = HideFlags.HideAndDontSave;
m_GgxIblSampleData.Create();
m_ComputeGgxIblSampleDataCS.SetTexture(m_ComputeGgxIblSampleDataKernel, "output", m_GgxIblSampleData);

var lookAt = Matrix4x4.LookAt(Vector3.zero, CoreUtils.lookAtList[i], CoreUtils.upVectorList[i]);
m_faceWorldToViewMatrixMatrices[i] = lookAt * Matrix4x4.Scale(new Vector3(1.0f, 1.0f, -1.0f)); // Need to scale -1.0 on Z to match what is being done in the camera.worldToCameraMatrix API. ...
}
}
public void Cleanup()
{
CoreUtils.Destroy(m_GgxConvolveMaterial);
CoreUtils.Destroy(m_GgxIblSampleData);
}
void FilterCubemapCommon( CommandBuffer cmd,

49
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/LayeredLit/LayeredLit.shader


[HideInInspector] _ZWrite ("__zw", Float) = 1.0
[HideInInspector] _CullMode("__cullmode", Float) = 2.0
[HideInInspector] _ZTestDepthEqualForOpaque("_ZTestDepthEqualForOpaque", Int) = 4 // Less equal
[HideInInspector] _ZTestGBuffer("_ZTestGBuffer", Int) = 4
[ToggleUI] _EnableFogOnTransparent("Enable Fog", Float) = 1.0
[ToggleUI] _EnableBlendModePreserveSpecularLighting("Enable Blend Mode Preserve Specular Lighting", Float) = 1.0

SubShader
{
// These tags allow use of the shader replacement features
Tags{ "RenderType" = "HDLitShader" }
Tags{ "RenderPipeline" = "HDRenderPipeline" "RenderType" = "HDLitShader" }
// Caution: The outline selection in the editor uses the vertex/hull/domain shader of the first pass declared, so it should not be the meta pass.
Pass

Cull [_CullMode]
ZTest[_ZTestGBuffer]
Stencil
{

#pragma multi_compile _ DYNAMICLIGHTMAP_ON
#pragma multi_compile _ SHADOWS_SHADOWMASK
#define SHADERPASS SHADERPASS_GBUFFER
#include "../../ShaderVariables.hlsl"
#ifdef DEBUG_DISPLAY
#include "../../Debug/DebugDisplay.hlsl"
#endif
#include "../../Material/Material.hlsl"
#include "../Lit/ShaderPass/LitSharePass.hlsl"
#include "LayeredLitData.hlsl"
#include "../../ShaderPass/ShaderPassGBuffer.hlsl"
ENDHLSL
}
// This pass is the same as GBuffer, only it does not do the alpha test (the clip instruction is removed).
// This is because on GCN any shader with a clip instruction cannot benefit from HiZ, so when we do a prepass we need to make a special case in the subsequent GBuffer pass to get the most performance.
Pass
{
Name "GBufferWithPrepass" // Name is not used
Tags { "LightMode" = "GBufferWithPrepass" } // This will be only for opaque object based on the RenderQueue index
Cull [_CullMode]
Stencil
{
WriteMask [_StencilWriteMask]
Ref [_StencilRef]
Comp Always
Pass Replace
}
HLSLPROGRAM
#pragma multi_compile _ DEBUG_DISPLAY
#pragma multi_compile _ LIGHTMAP_ON
#pragma multi_compile _ DIRLIGHTMAP_COMBINED
#pragma multi_compile _ DYNAMICLIGHTMAP_ON
#pragma multi_compile _ SHADOWS_SHADOWMASK
#ifdef _ALPHATEST_ON
// When we have alpha test, we will force a depth prepass so we always bypass the clip instruction in the GBuffer
#define SHADERPASS_GBUFFER_BYPASS_ALPHA_TEST
#endif
#define SHADERPASS_GBUFFER_BYPASS_ALPHA_TEST
#include "../../ShaderVariables.hlsl"
#ifdef DEBUG_DISPLAY
#include "../../Debug/DebugDisplay.hlsl"

#pragma multi_compile _ DYNAMICLIGHTMAP_ON
#pragma multi_compile _ SHADOWS_SHADOWMASK
// #include "../../Lighting/Forward.hlsl"
#pragma multi_compile LIGHTLOOP_SINGLE_PASS LIGHTLOOP_TILE_PASS
//#pragma multi_compile LIGHTLOOP_SINGLE_PASS LIGHTLOOP_TILE_PASS
#define LIGHTLOOP_TILE_PASS
#pragma multi_compile USE_FPTL_LIGHTLIST USE_CLUSTERED_LIGHTLIST
#define SHADERPASS SHADERPASS_FORWARD

52
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/LayeredLit/LayeredLitTessellation.shader


[HideInInspector] _ZWrite ("__zw", Float) = 1.0
[HideInInspector] _CullMode("__cullmode", Float) = 2.0
[HideInInspector] _ZTestDepthEqualForOpaque("_ZTestDepthEqualForOpaque", Int) = 4 // Less equal
[HideInInspector] _ZTestGBuffer("_ZTestGBuffer", Int) = 4
[ToggleUI] _EnableFogOnTransparent("Enable Fog", Float) = 1.0
[ToggleUI] _EnableBlendModePreserveSpecularLighting("Enable Blend Mode Preserve Specular Lighting", Float) = 1.0

SubShader
{
// These tags allow use of the shader replacement features
Tags{ "RenderType" = "HDLitShader" }
Tags{ "RenderPipeline" = "HDRenderPipeline" "RenderType" = "HDLitShader" }
// Caution: The outline selection in the editor uses the vertex/hull/domain shader of the first pass declared, so it should not be the meta pass.
Pass

Cull [_CullMode]
ZTest[_ZTestGBuffer]
Stencil
{

#pragma multi_compile _ DYNAMICLIGHTMAP_ON
#pragma multi_compile _ SHADOWS_SHADOWMASK
#define SHADERPASS SHADERPASS_GBUFFER
#include "../../ShaderVariables.hlsl"
#ifdef DEBUG_DISPLAY
#include "../../Debug/DebugDisplay.hlsl"
#endif
#include "../../Material/Material.hlsl"
#include "../Lit/ShaderPass/LitSharePass.hlsl"
#include "LayeredLitData.hlsl"
#include "../../ShaderPass/ShaderPassGBuffer.hlsl"
ENDHLSL
}
// This pass is the same as GBuffer, only it does not do the alpha test (the clip instruction is removed).
// This is because on GCN any shader with a clip instruction cannot benefit from HiZ, so when we do a prepass we need to make a special case in the subsequent GBuffer pass to get the most performance.
Pass
{
Name "GBufferWithPrepass" // Name is not used
Tags { "LightMode" = "GBufferWithPrepass" } // This will be only for opaque object based on the RenderQueue index
Cull [_CullMode]
Stencil
{
WriteMask [_StencilWriteMask]
Ref [_StencilRef]
Comp Always
Pass Replace
}
HLSLPROGRAM
#pragma hull Hull
#pragma domain Domain
#pragma multi_compile _ DEBUG_DISPLAY
#pragma multi_compile _ LIGHTMAP_ON
#pragma multi_compile _ DIRLIGHTMAP_COMBINED
#pragma multi_compile _ DYNAMICLIGHTMAP_ON
#pragma multi_compile _ SHADOWS_SHADOWMASK
#ifdef _ALPHATEST_ON
// When we have alpha test, we will force a depth prepass so we always bypass the clip instruction in the GBuffer
#define SHADERPASS_GBUFFER_BYPASS_ALPHA_TEST
#endif
#define SHADERPASS_GBUFFER_BYPASS_ALPHA_TEST
#include "../../ShaderVariables.hlsl"
#ifdef DEBUG_DISPLAY
#include "../../Debug/DebugDisplay.hlsl"

#pragma multi_compile _ DYNAMICLIGHTMAP_ON
#pragma multi_compile _ SHADOWS_SHADOWMASK
// #include "../../Lighting/Forward.hlsl"
#pragma multi_compile LIGHTLOOP_SINGLE_PASS LIGHTLOOP_TILE_PASS
//#pragma multi_compile LIGHTLOOP_SINGLE_PASS LIGHTLOOP_TILE_PASS
#define LIGHTLOOP_TILE_PASS
#pragma multi_compile USE_FPTL_LIGHTLIST USE_CLUSTERED_LIGHTLIST
#define SHADERPASS SHADERPASS_FORWARD

5
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Lit/Lit.cs


{
hideFlags = HideFlags.HideAndDontSave,
wrapMode = TextureWrapMode.Clamp,
filterMode = FilterMode.Bilinear
filterMode = FilterMode.Bilinear,
name = CoreUtils.GetTextureAutoName(k_LtcLUTResolution, k_LtcLUTResolution, TextureFormat.RGBAHalf, depth: 3, dim: TextureDimension.Tex2DArray, name: "LTC_LUT")
};
LoadLUT(m_LtcData, 0, TextureFormat.RGBAHalf, s_LtcGGXMatrixData);

public override void Cleanup()
{
CoreUtils.Destroy(m_InitPreFGD);
CoreUtils.Destroy(m_PreIntegratedFGD);
CoreUtils.Destroy(m_LtcData);
// TODO: how to delete RenderTexture ? or do we need to do it ?
m_isInit = false;

191
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Lit/Lit.hlsl


// Additional bits set in 'bsdfData.materialFeatures' to save registers and simplify feature tracking.
#define MATERIAL_FEATURE_FLAGS_SSS_OUTPUT_SPLIT_LIGHTING ((MATERIAL_FEATURE_MASK_FLAGS + 1) << 0)
#define MATERIAL_FEATURE_FLAGS_SSS_TEXTURING_MODE_OFFSET FastLog2((MATERIAL_FEATURE_MASK_FLAGS + 1) << 1) // 2 bits
#define MATERIAL_FEATURE_FLAGS_TRANSMISSION_MODE_AUTO_THICKNESS ((MATERIAL_FEATURE_MASK_FLAGS + 1) << 3)
#define MATERIAL_FEATURE_FLAGS_TRANSMISSION_MODE_MIXED_THICKNESS ((MATERIAL_FEATURE_MASK_FLAGS + 1) << 3)
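// (MATERIAL_FEATURE_MASK_FLAGS + 1) is the first bit above the regular material feature mask, so these extra flags sit just above it: bit offset 0 for split lighting, offsets 1-2 for the SSS texturing mode, offset 3 for the transmission thickness mode.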
uint FeatureFlagsToTileVariant(uint featureFlags)
{

// the current object. That's not a problem, since large thickness will result in low intensity.
bool useThinObjectMode = IsBitSet(asuint(_TransmissionFlags), diffusionProfile);
bsdfData.materialFeatures |= useThinObjectMode ? 0 : MATERIAL_FEATURE_FLAGS_TRANSMISSION_MODE_AUTO_THICKNESS;
bsdfData.materialFeatures |= useThinObjectMode ? 0 : MATERIAL_FEATURE_FLAGS_TRANSMISSION_MODE_MIXED_THICKNESS;
// Compute transmittance using baked thickness here. It may be overridden for direct lighting
// in the auto-thickness mode (but it is always used for indirect lighting).

// Decompress feature-agnostic data from the G-Buffer.
float3 baseColor = inGBuffer0.rgb;
bsdfData.specularOcclusion = inGBuffer0.a; // Later possibly overwritten by SSS
bsdfData.perceptualRoughness = inGBuffer1.a;
bsdfData.perceptualRoughness = inGBuffer1.a;
bakeDiffuseLighting = inGBuffer3.rgb;

{
FillMaterialTransmission(sssData.diffusionProfile, inGBuffer2.g, bsdfData);
}
}
else
{
bsdfData.specularOcclusion = inGBuffer0.a;
}
// Special handling for anisotropy: When anisotropy is present in a tile, the whole tile will use anisotropy to avoid divergent evaluation of GGX that increase the cost

// bake lighting function
//-----------------------------------------------------------------------------
// GetBakedDiffuseLigthing function compute the bake lighting + emissive color to be store in emissive buffer (Deferred case)
// The GetBakedDiffuseLighting function computes the baked lighting + emissive color to be stored in the emissive buffer (deferred case)
float3 GetBakedDiffuseLigthing(SurfaceData surfaceData, BuiltinData builtinData, BSDFData bsdfData, PreLightData preLightData)
float3 GetBakedDiffuseLighting(SurfaceData surfaceData, BuiltinData builtinData, BSDFData bsdfData, PreLightData preLightData)
{
if (HasFeatureFlag(bsdfData.materialFeatures, MATERIALFEATUREFLAGS_LIT_SUBSURFACE_SCATTERING))
{

// - we integrate the diffuse reflectance profile w.r.t. the radius (while also accounting
// for the thickness) to compute the transmittance;
// - we multiply the transmitted radiance by the transmittance.
float3 EvaluateTransmission(BSDFData bsdfData, float3 transmittance, float NdotL, float NdotV, float attenuation)
float3 EvaluateTransmission(BSDFData bsdfData, float3 transmittance, float NdotL, float NdotV, float LdotV, float attenuation)
// Apply wrapped lighting to better handle thin objects at grazing angles.
float negatedNdotL = -NdotL;
// Apply wrapped lighting to better handle thin objects (cards) at grazing angles.
bool autoThicknessMode = HasFeatureFlag(bsdfData.materialFeatures, MATERIAL_FEATURE_FLAGS_TRANSMISSION_MODE_AUTO_THICKNESS);
float backNdotL = autoThicknessMode ? negatedNdotL : wrappedNdotL;
// Apply BSDF-specific diffuse transmission to attenuation. See also: [SSS-NOTE-TRSM]
// We don't multiply by 'bsdfData.diffuseColor' here. It's done only once in PostEvaluateBSDF().

attenuation *= INV_PI * F_Transm_Schlick(0, 0.5, NdotV) * F_Transm_Schlick(0, 0.5, abs(backNdotL));
attenuation *= DisneyDiffuse(NdotV, max(0, -NdotL), LdotV, bsdfData.perceptualRoughness);
float intensity = max(0, attenuation * backNdotL); // Warning: attenuation can be greater than 1 due to the inverse square attenuation (when position is close to light)
float intensity = attenuation * wrappedNdotL;
return intensity * transmittance;
}

float3 N = bsdfData.normalWS;
float3 L = -lightData.forward; // Lights point backward in Unity
float NdotL = dot(N, L); // Note: Ideally this N should be the vertex normal - used for transmission
float3 transmittance = bsdfData.transmittance;
bool autoThicknessMode = HasFeatureFlag(bsdfData.materialFeatures, MATERIAL_FEATURE_FLAGS_TRANSMISSION_MODE_AUTO_THICKNESS);
UNITY_BRANCH
if (autoThicknessMode && NdotL < 0 && lightData.shadowIndex >= 0)
{
// TODO: perform bilinear filtering of the shadow map.
// Recompute transmittance using the thickness value computed from the shadow map.
#if 0
// Does not work, I get a compiler crash...
float3 occluderPosWS = EvalShadow_GetClosestSample_Cascade(lightLoopContext.shadowContext, posInput.positionWS, bsdfData.normalWS, lightData.shadowIndex, float4(L, 0));
#else
#define SHADOW_DISPATCH_DIR_TEX 3 // Manually keep it in sync with Shadow.hlsl...
float3 occluderPosWS = EvalShadow_GetClosestSample_Cascade(lightLoopContext.shadowContext, lightLoopContext.shadowContext.tex2DArray[SHADOW_DISPATCH_DIR_TEX], posInput.positionWS, bsdfData.normalWS, lightData.shadowIndex, float4(L, 0));
#endif
float thicknessInUnits = distance(posInput.positionWS, occluderPosWS);
float thicknessInMeters = thicknessInUnits * _WorldScales[bsdfData.diffusionProfile].x;
float thicknessInMillimeters = thicknessInMeters * MILLIMETERS_PER_METER;
// TODO: optimize.
#if SHADEROPTIONS_USE_DISNEY_SSS
transmittance = ComputeTransmittanceDisney(_ShapeParams[bsdfData.diffusionProfile].rgb,
_TransmissionTintsAndFresnel0[bsdfData.diffusionProfile].rgb,
thicknessInMillimeters);
#else
transmittance = ComputeTransmittanceJimenez(_HalfRcpVariancesAndWeights[bsdfData.diffusionProfile][0].rgb,
_HalfRcpVariancesAndWeights[bsdfData.diffusionProfile][0].a,
_HalfRcpVariancesAndWeights[bsdfData.diffusionProfile][1].rgb,
_HalfRcpVariancesAndWeights[bsdfData.diffusionProfile][1].a,
_TransmissionTintsAndFresnel0[bsdfData.diffusionProfile].rgb,
thicknessInMillimeters);
#endif
// Make sure we do not sample the shadow map twice.
lightData.shadowIndex = -1;
// Note: we do not modify the distance to the light, or the light angle for the back face.
// This is a performance-saving optimization which makes sense as long as the thickness is small.
}
float NdotV = ClampNdotV(preLightData.NdotV);
float NdotL = dot(N, L);
float LdotV = dot(L, V);
float3 color;
float attenuation;

lighting.specular *= intensity * lightData.specularScale;
}
// TODO: move this before BSDF() to save VGPRs.
if (HasFeatureFlag(bsdfData.materialFeatures, MATERIALFEATUREFLAGS_LIT_TRANSMISSION))
// The mixed thickness mode is not supported by directional lights due to poor quality and high performance impact.
bool mixedThicknessMode = HasFeatureFlag(bsdfData.materialFeatures, MATERIAL_FEATURE_FLAGS_TRANSMISSION_MODE_MIXED_THICKNESS);
if (HasFeatureFlag(bsdfData.materialFeatures, MATERIALFEATUREFLAGS_LIT_TRANSMISSION) && !mixedThicknessMode)
lighting.diffuse += EvaluateTransmission(bsdfData, transmittance, NdotL, ClampNdotV(preLightData.NdotV), attenuation * lightData.diffuseScale);
lighting.diffuse += EvaluateTransmission(bsdfData, bsdfData.transmittance, NdotL, NdotV, LdotV, attenuation * lightData.diffuseScale);
}
// Save ALU by applying light and cookie colors only once.

}
float3 N = bsdfData.normalWS;
float NdotV = ClampNdotV(preLightData.NdotV);
float LdotV = dot(L, V);
float3 transmittance = bsdfData.transmittance;
bool autoThicknessMode = HasFeatureFlag(bsdfData.materialFeatures, MATERIAL_FEATURE_FLAGS_TRANSMISSION_MODE_AUTO_THICKNESS);
bool mixedThicknessMode = HasFeatureFlag(bsdfData.materialFeatures, MATERIAL_FEATURE_FLAGS_TRANSMISSION_MODE_MIXED_THICKNESS)
&& NdotL < 0 && lightData.shadowIndex >= 0;
// Save the original version for the transmission code below.
int originalShadowIndex = lightData.shadowIndex;
UNITY_BRANCH
if (autoThicknessMode && NdotL < 0 && lightData.shadowIndex >= 0)
if (mixedThicknessMode)
// TODO: perform bilinear filtering of the shadow map.
// Recompute transmittance using the thickness value computed from the shadow map.
#if 0
// Does not work, I get a compiler crash...
float3 occluderPosWS = EvalShadow_GetClosestSample_Punctual(lightLoopContext.shadowContext, posInput.positionWS, lightData.shadowIndex, L);
#else
#define SHADOW_DISPATCH_PUNC_TEX 3 // Manually keep it in sync with Shadow.hlsl...
float3 occluderPosWS = EvalShadow_GetClosestSample_Punctual(lightLoopContext.shadowContext, lightLoopContext.shadowContext.tex2DArray[SHADOW_DISPATCH_PUNC_TEX], posInput.positionWS, lightData.shadowIndex, L);
#endif
float thicknessInUnits = distance(posInput.positionWS, occluderPosWS);
float thicknessInMeters = thicknessInUnits * _WorldScales[bsdfData.diffusionProfile].x;
float thicknessInMillimeters = thicknessInMeters * MILLIMETERS_PER_METER;
// TODO: optimize.
#if SHADEROPTIONS_USE_DISNEY_SSS
transmittance = ComputeTransmittanceDisney(_ShapeParams[bsdfData.diffusionProfile].rgb,
_TransmissionTintsAndFresnel0[bsdfData.diffusionProfile].rgb,
thicknessInMillimeters);
#else
transmittance = ComputeTransmittanceJimenez(_HalfRcpVariancesAndWeights[bsdfData.diffusionProfile][0].rgb,
_HalfRcpVariancesAndWeights[bsdfData.diffusionProfile][0].a,
_HalfRcpVariancesAndWeights[bsdfData.diffusionProfile][1].rgb,
_HalfRcpVariancesAndWeights[bsdfData.diffusionProfile][1].a,
_TransmissionTintsAndFresnel0[bsdfData.diffusionProfile].rgb,
thicknessInMillimeters);
#endif
// Note: we do not modify the distance to the light, or the light angle for the back face.
// This is a performance-saving optimization which makes sense as long as the thickness is small.
}
float3 color;

// Restore the original shadow index.
lightData.shadowIndex = originalShadowIndex;
float intensity = max(0, attenuation * NdotL); // Warning: attenuation can be greater than 1 due to the inverse square attenuation (when position is close to light)

lighting.specular *= intensity * lightData.specularScale;
}
// TODO: move this before BSDF() to save VGPRs.
float3 transmittance = bsdfData.transmittance;
if (mixedThicknessMode)
{
// Recompute transmittance using the thickness value computed from the shadow map.
// Compute the distance from the light to the back face of the object along the light direction.
float distBackFaceToLight = GetPunctualShadowClosestDistance(lightLoopContext.shadowContext, s_linear_clamp_sampler,
posInput.positionWS, lightData.shadowIndex, L, lightData.positionWS);
// Our subsurface scattering models use the semi-infinite planar slab assumption.
// Therefore, we need to find the thickness along the normal.
float distFrontFaceToLight = distances.x;
float thicknessInUnits = (distFrontFaceToLight - distBackFaceToLight) * -NdotL;
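// (distFrontFaceToLight - distBackFaceToLight) is measured along L; multiplying by -NdotL (= |cos(N, L)|, since NdotL < 0 on the back face) projects it onto the surface normal.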
float thicknessInMeters = thicknessInUnits * _WorldScales[bsdfData.diffusionProfile].x;
float thicknessInMillimeters = thicknessInMeters * MILLIMETERS_PER_METER;
#if SHADEROPTIONS_USE_DISNEY_SSS
// We need to make sure it's not less than the baked thickness to minimize light leaking.
float thicknessDelta = max(0, thicknessInMillimeters - bsdfData.thickness);
float3 S = _ShapeParams[bsdfData.diffusionProfile];
// Approximate the decrease of transmittance by e^(-1/3 * dt * S).
#if 0
float3 expOneThird = exp(((-1.0 / 3.0) * thicknessDelta) * S);
#else
// Help the compiler.
float k = (-1.0 / 3.0) * LOG2_E;
float3 p = (k * thicknessDelta) * S;
float3 expOneThird = exp2(p);
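// exp(x) == exp2(LOG2_E * x), so exp((-1.0/3.0) * thicknessDelta * S) == exp2(((-1.0/3.0) * LOG2_E) * thicknessDelta * S);
// exp2 maps to the native GPU instruction, hence the "Help the compiler" branch above.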
#endif
transmittance *= expOneThird;
#else // SHADEROPTIONS_USE_DISNEY_SSS
// We need to make sure it's not less than the baked thickness to minimize light leaking.
thicknessInMillimeters = max(thicknessInMillimeters, bsdfData.thickness);
transmittance = ComputeTransmittanceJimenez(_HalfRcpVariancesAndWeights[bsdfData.diffusionProfile][0].rgb,
_HalfRcpVariancesAndWeights[bsdfData.diffusionProfile][0].a,
_HalfRcpVariancesAndWeights[bsdfData.diffusionProfile][1].rgb,
_HalfRcpVariancesAndWeights[bsdfData.diffusionProfile][1].a,
_TransmissionTintsAndFresnel0[bsdfData.diffusionProfile].rgb,
thicknessInMillimeters);
#endif // SHADEROPTIONS_USE_DISNEY_SSS
}
// Note: we do not modify the distance to the light, or the light angle for the back face.
// This is a performance-saving optimization which makes sense as long as the thickness is small.
lighting.diffuse += EvaluateTransmission(bsdfData, transmittance, NdotL, ClampNdotV(preLightData.NdotV), attenuation * lightData.diffuseScale);
lighting.diffuse += EvaluateTransmission(bsdfData, transmittance, NdotL, NdotV, LdotV, attenuation * lightData.diffuseScale);
}
// Save ALU by applying light and cookie colors only once.

{
projectionDistance = IntersectSphereProxy(lightData, dirPS, positionPS);
// We can reuse the distance calculated in LS directly in WS as there is no scaling. Also, the offset is already included in lightData.capturePositionWS
float3 capturePositionWS = GetCapturePositionWS(lightData);
float3 capturePositionWS = lightData.capturePositionWS;
R = (positionWS + projectionDistance * R) - capturePositionWS;
// Test again for clear coat

projectionDistance = IntersectBoxProxy(lightData, dirPS, positionPS);
// No need to normalize for fetching cubemap
// We can reuse the distance calculated in LS directly in WS as there is no scaling. Also, the offset is already included in lightData.capturePositionWS
float3 capturePositionWS = GetCapturePositionWS(lightData);
float3 capturePositionWS = lightData.capturePositionWS;
R = (positionWS + projectionDistance * R) - capturePositionWS;
// TODO: add distance based roughness

float roughness = PerceptualRoughnessToRoughness(preLightData.iblPerceptualRoughness);
R = lerp(R, preLightData.iblR, saturate(smoothstep(0, 1, roughness * roughness)));
float3 sampleDirectionDiscardWS = GetSampleDirectionDiscardWS(lightData);
float3 sampleDirectionDiscardWS = lightData.sampleDirectionDiscardWS;
if (dot(sampleDirectionDiscardWS, R) < 0)
return lighting;

#endif // LIT_DISPLAY_REFERENCE_IBL
weight *= lightData.weight;
envLighting *= weight * lightData.dimmer;
envLighting *= weight * lightData.multiplier;
if (GPUImageBasedLightingType == GPUIMAGEBASEDLIGHTINGTYPE_REFLECTION)
lighting.specularReflected = envLighting;

float3 modifiedDiffuseColor = ApplySubsurfaceScatteringTexturingMode(texturingMode, bsdfData.diffuseColor);
// Apply the albedo to the direct diffuse lighting (only once). The indirect (baked)
// diffuse lighting has already had the albedo applied in GetBakedDiffuseLigthing().
// diffuse lighting has already had the albedo applied in GetBakedDiffuseLighting().
diffuseLighting = modifiedDiffuseColor * lighting.direct.diffuse + bakeDiffuseLighting;
// If refraction is enabled, we use the transmittanceMask to lerp between the current diffuse lighting and the refraction value

52
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Lit/Lit.shader


[HideInInspector] _CullModeForward("__cullmodeForward", Float) = 2.0 // This mode is dedicated to Forward to correctly handle backface then front face rendering thin transparent
[HideInInspector] _ZTestDepthEqualForOpaque("_ZTestDepthEqualForOpaque", Int) = 4 // Less equal
[HideInInspector] _ZTestModeDistortion("_ZTestModeDistortion", Int) = 8
[HideInInspector] _ZTestGBuffer("_ZTestGBuffer", Int) = 4
[ToggleUI] _EnableFogOnTransparent("Enable Fog", Float) = 1.0
[ToggleUI] _EnableBlendModePreserveSpecularLighting("Enable Blend Mode Preserve Specular Lighting", Float) = 1.0

SubShader
{
// These tags allow use of the shader replacement features
Tags{ "RenderType" = "HDLitShader" }
Tags{ "RenderPipeline"="HDRenderPipeline" "RenderType" = "HDLitShader" }
// Caution: The outline selection in the editor uses the vertex/hull/domain shader of the first pass declared, so it should not be the meta pass.
Pass

Cull [_CullMode]
ZTest [_ZTestGBuffer]
Stencil
{

#pragma multi_compile _ DYNAMICLIGHTMAP_ON
#pragma multi_compile _ SHADOWS_SHADOWMASK
#define SHADERPASS SHADERPASS_GBUFFER
#include "../../ShaderVariables.hlsl"
#ifdef DEBUG_DISPLAY
#include "../../Debug/DebugDisplay.hlsl"
#endif
#include "../../Material/Material.hlsl"
#include "ShaderPass/LitSharePass.hlsl"
#include "LitData.hlsl"
#include "../../ShaderPass/ShaderPassGBuffer.hlsl"
ENDHLSL
}
// This pass is the same as GBuffer, only it does not do the alpha test (the clip instruction is removed).
// This is because on GCN any shader with a clip instruction cannot benefit from HiZ, so when we do a prepass we need to make a special case in the subsequent GBuffer pass to get the most performance.
Pass
{
Name "GBufferWithPrepass" // Name is not used
Tags { "LightMode" = "GBufferWithPrepass" } // This will be only for opaque object based on the RenderQueue index
Cull [_CullMode]
Stencil
{
WriteMask [_StencilWriteMask]
Ref [_StencilRef]
Comp Always
Pass Replace
}
HLSLPROGRAM
#pragma multi_compile _ DEBUG_DISPLAY
#pragma multi_compile _ LIGHTMAP_ON
#pragma multi_compile _ DIRLIGHTMAP_COMBINED
#pragma multi_compile _ DYNAMICLIGHTMAP_ON
#pragma multi_compile _ SHADOWS_SHADOWMASK
#ifdef _ALPHATEST_ON
// When we have alpha test, we will force a depth prepass so we always bypass the clip instruction in the GBuffer
#define SHADERPASS_GBUFFER_BYPASS_ALPHA_TEST
#endif
#define SHADERPASS_GBUFFER_BYPASS_ALPHA_TEST // This define allows skipping the alpha test (the alpha test is done during the depth prepass)
#include "../../ShaderVariables.hlsl"
#ifdef DEBUG_DISPLAY
#include "../../Debug/DebugDisplay.hlsl"

#pragma multi_compile _ DYNAMICLIGHTMAP_ON
#pragma multi_compile _ SHADOWS_SHADOWMASK
// #include "../../Lighting/Forward.hlsl"
#pragma multi_compile LIGHTLOOP_SINGLE_PASS LIGHTLOOP_TILE_PASS
//#pragma multi_compile LIGHTLOOP_SINGLE_PASS LIGHTLOOP_TILE_PASS
#define LIGHTLOOP_TILE_PASS
#pragma multi_compile USE_FPTL_LIGHTLIST USE_CLUSTERED_LIGHTLIST
#define SHADERPASS SHADERPASS_FORWARD

#pragma multi_compile _ DYNAMICLIGHTMAP_ON
#pragma multi_compile _ SHADOWS_SHADOWMASK
// #include "../../Lighting/Forward.hlsl"
#pragma multi_compile LIGHTLOOP_SINGLE_PASS LIGHTLOOP_TILE_PASS
//#pragma multi_compile LIGHTLOOP_SINGLE_PASS LIGHTLOOP_TILE_PASS
#define LIGHTLOOP_TILE_PASS
#pragma multi_compile USE_FPTL_LIGHTLIST USE_CLUSTERED_LIGHTLIST
#define SHADERPASS SHADERPASS_FORWARD

55
ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Lit/LitTessellation.shader


[HideInInspector] _CullModeForward("__cullmodeForward", Float) = 2.0 // This mode is dedicated to Forward to correctly handle backface then front face rendering thin transparent
[HideInInspector] _ZTestDepthEqualForOpaque("_ZTestDepthEqualForOpaque", Int) = 4 // Less equal
[HideInInspector] _ZTestModeDistortion("_ZTestModeDistortion", Int) = 8
[HideInInspector] _ZTestGBuffer("_ZTestGBuffer", Int) = 4
[ToggleUI] _EnableFogOnTransparent("Enable Fog", Float) = 1.0
[ToggleUI] _EnableBlendModePreserveSpecularLighting("Enable Blend Mode Preserve Specular Lighting", Float) = 1.0

SubShader
{
// These tags allow use of the shader replacement features
Tags{ "RenderType" = "HDLitShader" }
Tags{ "RenderPipeline" = "HDRenderPipeline" "RenderType" = "HDLitShader" }
// Caution: The outline selection in the editor uses the vertex/hull/domain shader of the first pass declared, so it should not be the meta pass.
Pass

Cull [_CullMode]
ZTest[_ZTestGBuffer]
Stencil
{

#pragma multi_compile _ DYNAMICLIGHTMAP_ON
#pragma multi_compile _ SHADOWS_SHADOWMASK
#define SHADERPASS SHADERPASS_GBUFFER
#include "../../ShaderVariables.hlsl"
#ifdef DEBUG_DISPLAY
#include "../../Debug/DebugDisplay.hlsl"
#endif
#include "../../Material/Material.hlsl"
#include "ShaderPass/LitSharePass.hlsl"
#include "LitData.hlsl"
#include "../../ShaderPass/ShaderPassGBuffer.hlsl"
ENDHLSL
}
// This pass is the same as GBuffer, only it does not do the alpha test (the clip instruction is removed).
// This is because on GCN any shader with a clip instruction cannot benefit from HiZ, so when we do a prepass we need to make a special case in the subsequent GBuffer pass to get the most performance.
Pass
{
Name "GBufferWithPrepass" // Name is not used
Tags { "LightMode" = "GBufferWithPrepass" } // This will be only for opaque object based on the RenderQueue index
Cull [_CullMode]
Stencil
{
WriteMask [_StencilWriteMask]
Ref [_StencilRef]
Comp Always
Pass Replace
}
HLSLPROGRAM
#pragma hull Hull
#pragma domain Domain
#pragma multi_compile _ DEBUG_DISPLAY
#pragma multi_compile _ LIGHTMAP_ON
#pragma multi_compile _ DIRLIGHTMAP_COMBINED
#pragma multi_compile _ DYNAMICLIGHTMAP_ON
#pragma multi_compile _ SHADOWS_SHADOWMASK
#ifdef _ALPHATEST_ON
// When we have alpha test, we will force a depth prepass so we always bypass the clip instruction in the GBuffer
#define SHADERPASS_GBUFFER_BYPASS_ALPHA_TEST
#endif
#define SHADERPASS_GBUFFER_BYPASS_ALPHA_TEST // This define allows skipping the alpha test (the alpha test is done during the depth prepass)
#include "../../ShaderVariables.hlsl"
#ifdef DEBUG_DISPLAY
#include "../../Debug/DebugDisplay.hlsl"

#pragma multi_compile _ DYNAMICLIGHTMAP_ON
#pragma multi_compile _ SHADOWS_SHADOWMASK
// #include "../../Lighting/Forward.hlsl"
#pragma multi_compile LIGHTLOOP_SINGLE_PASS LIGHTLOOP_TILE_PASS
//#pragma multi_compile LIGHTLOOP_SINGLE_PASS LIGHTLOOP_TILE_PASS
#define LIGHTLOOP_TILE_PASS
#pragma multi_compile USE_FPTL_LIGHTLIST USE_CLUSTERED_LIGHTLIST
#define SHADERPASS SHADERPASS_FORWARD

#pragma multi_compile _ DYNAMICLIGHTMAP_ON
#pragma multi_compile _ SHADOWS_SHADOWMASK
// #include "../../Lighting/Forward.hlsl"
#pragma multi_compile LIGHTLOOP_SINGLE_PASS LIGHTLOOP_TILE_PASS
//#pragma multi_compile LIGHTLOOP_SINGLE_PASS LIGHTLOOP_TILE_PASS
#define LIGHTLOOP_TILE_PASS
#pragma multi_compile USE_FPTL_LIGHTLIST USE_CLUSTERED_LIGHTLIST
#define SHADERPASS SHADERPASS_FORWARD

Some files were not shown because too many files changed in this diff
