
Merge pull request #749 from Unity-Technologies/feature/mip-debug

Added mip debug visualisation
/main
GitHub, 7 years ago
Current commit ed6465fd
20 changed files with 697 additions and 340 deletions
  1. ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/Common.hlsl (33 changes)
  2. ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/Debug.hlsl (178 changes)
  3. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/DebugDisplay.cs (53 changes)
  4. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/DebugDisplay.hlsl (2 changes)
  5. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/MaterialDebug.cs (5 changes)
  6. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/MaterialDebug.cs.hlsl (1 change)
  7. ScriptableRenderPipeline/HDRenderPipeline/HDRP/HDRenderPipeline.cs (7 changes)
  8. ScriptableRenderPipeline/HDRenderPipeline/HDRP/HDStringConstants.cs (1 change)
  9. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/LayeredLit/LayeredLitData.hlsl (8 changes)
  10. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Lit/Lit.hlsl (5 changes)
  11. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Lit/LitData.hlsl (588 changes)
  12. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Lit/LitProperties.hlsl (5 changes)
  13. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Unlit/UnlitData.hlsl (7 changes)
  14. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Unlit/UnlitProperties.hlsl (70 changes)
  15. ScriptableRenderPipeline/HDRenderPipeline/HDRP/ShaderPass/FragInputs.hlsl (1 change)
  16. ScriptableRenderPipeline/HDRenderPipeline/HDRP/ShaderPass/ShaderPassForward.hlsl (3 changes)
  17. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/MipMapDebug.cs (32 changes)
  18. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/MipMapDebug.cs.hlsl (18 changes)
  19. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/MipMapDebug.cs.hlsl.meta (9 changes)
  20. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/MipMapDebug.cs.meta (11 changes)

ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/Common.hlsl (33 changes)


// The reason is that for compute shaders we need to guarantee that the layout of CBs is consistent across kernels, something we can't control with the global namespace (uniforms get optimized out if not used, modifying the global CBuffer layout per kernel).
// Structure definitions that are shared between C# and hlsl.
// These structures need to be aligned on float4 to respect the various packing rules of the shader language. This means that these structures need to be padded.
// Rules: When declaring an array of constant buffer variables, we always use float4 to avoid any packing issue, particularly between compute and pixel shaders
// i.e. don't use SetGlobalFloatArray or SetComputeFloatParams
// The array can be aliased in hlsl. Example:
// uniform float4 packedArray[3];
// static float unpackedArray[12] = (float[12])packedArray;
// The functions of the shader library are stateless: no uniforms are declared in it.
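As a side note, the float4-packing rule described above can be written out as a small, self-contained sketch (not part of this change; the cbuffer name and accessor are illustrative only):

// Minimal sketch of the float4-packed array aliasing described above.
CBUFFER_START(MyPackedConstants)        // hypothetical cbuffer name
float4 packedArray[3];                  // declared as float4[3] to avoid per-element padding
CBUFFER_END

static float unpackedArray[12] = (float[12])packedArray;   // alias the packed data as 12 scalars

float GetUnpackedValue(uint index)      // hypothetical accessor
{
    return unpackedArray[index];        // scalar indexing with a layout that is stable across stages
}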

uv *= texelSize;
return ComputeTextureLOD(uv);
}
uint GetMipCount(Texture2D tex)
{
#if defined(SHADER_API_D3D11) || defined(SHADER_API_D3D12) || defined(SHADER_API_D3D11_9X) || defined(SHADER_API_XBOXONE) || defined(SHADER_API_PSSL)
#define MIP_COUNT_SUPPORTED 1
#endif
#if (defined(SHADER_API_OPENGL) || defined(SHADER_API_VULKAN)) && !defined(SHADER_STAGE_COMPUTE)
// OpenGL only supports textureSize for width, height, depth
// textureQueryLevels (GL_ARB_texture_query_levels) needs OpenGL 4.3 or above and doesn't compile in compute shaders
// tex.GetDimensions converted to textureQueryLevels
#define MIP_COUNT_SUPPORTED 1
#endif
// Metal doesn't support high enough OpenGL version
#if defined(MIP_COUNT_SUPPORTED)
uint width, height, depth, mipCount;
width = height = depth = mipCount = 0;
tex.GetDimensions(width, height, depth, mipCount);
return mipCount;
#else
return 0;
#endif
}
// ----------------------------------------------------------------------------
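For reference, the mip visualisation in Debug.hlsl below leans on ComputeTextureLOD from this file. A minimal sketch of the derivative-based LOD estimate it is assumed to compute (the actual Core RP implementation may differ in details such as clamping):

// Assumed shape of the LOD helper: estimate the mip level from screen-space
// UV derivatives, with uv already scaled into texel units by the caller.
float ComputeTextureLOD(float2 uvTexels)
{
    float2 dx = ddx(uvTexels);
    float2 dy = ddy(uvTexels);
    float deltaMaxSq = max(dot(dx, dx), dot(dy, dy)); // largest squared pixel footprint
    return max(0.5 * log2(deltaMaxSq), 0.0);          // log2 of the footprint length, clamped at 0
}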

ScriptableRenderPipeline/Core/CoreRP/ShaderLibrary/Debug.hlsl (178 changes)


if (number <= 9)
{
return SampleDebugFont(pixCoord - int2(6, 0), number);
}
else
{

float4 GetStreamingMipColor(uint mipCount, float4 mipInfo)
{
// alpha is amount to blend with source color (0.0 = use original, 1.0 = use new color)
// mipInfo :
// x = quality settings minStreamingMipLevel
// y = original mip count for texture
// z = desired on screen mip level
// w = 0
uint originalTextureMipCount = uint(mipInfo.y);
// If the material/shader mip info (original mip level) has not been set, it's not a streamed texture
if (originalTextureMipCount == 0)
return float4(1.0, 1.0, 1.0, 0.0);
uint desiredMipLevel = uint(mipInfo.z);
uint mipCountDesired = uint(originalTextureMipCount)-uint(desiredMipLevel);
if (mipCount == 0)
{
// Magenta if mip count invalid
return float4(1.0, 0.0, 1.0, 1.0);
}
else if (mipCount < mipCountDesired)
{
// red tones when not at the desired mip level (reduction due to budget). Brighter is further from original, alpha 0 when at desired
float ratioToDesired = float(mipCount) / float(mipCountDesired);
return float4(1.0, 0.0, 0.0, 1.0 - ratioToDesired);
}
else if (mipCount >= originalTextureMipCount)
{
// original color when at (or beyond) original mip count
return float4(1.0, 1.0, 1.0, 0.0);
}
else
{
// green tones when not at the original mip level. Brighter is closer to original, alpha 0 when at original
float ratioToOriginal = float(mipCount) / float(originalTextureMipCount);
return float4(0.0, 1.0, 0.0, 1.0 - ratioToOriginal);
}
}
float4 GetSimpleMipCountColor(uint mipCount)
{
// Grey scale for mip counts where mip count of 12 = white
float mipCountColor = float(mipCount) / 12.0;
float4 color = float4(mipCountColor, mipCountColor, mipCountColor, 1.0f);
// alpha is amount to blend with source color (0.0 = use original, 1.0 = use new color)
// Magenta is no valid mip count
// Original colour if greater than 12
return mipCount==0 ? float4(1.0, 0.0, 1.0, 1.0) : (mipCount > 12 ? float4(1.0, 1.0, 1.0, 0.0) : color );
}
float4 GetMipLevelColor(float2 uv, float4 texelSize)
{
// Push down into colors list to "optimal level" in following table.
// .zw is texture width,height so *2 is down one mip, *4 is down two mips
texelSize.zw *= 4.0;
float mipLevel = ComputeTextureLOD(uv, texelSize.wz);
mipLevel = clamp(mipLevel, 0.0, 5.0 - 0.0001);
float4 colors[6] = {
float4(0.0, 0.0, 1.0, 0.8), // 0 BLUE = too little texture detail
float4(0.0, 0.5, 1.0, 0.4), // 1
float4(1.0, 1.0, 1.0, 0.0), // 2 = optimal level
float4(1.0, 0.7, 0.0, 0.2), // 3 (YELLOW tint)
float4(1.0, 0.3, 0.0, 0.6), // 4 (clamped mipLevel 4.9999)
float4(1.0, 0.0, 0.0, 0.8) // 5 RED = too much texture detail (max blended value)
};
int mipLevelInt = floor(mipLevel);
float t = frac(mipLevel);
float4 a = colors[mipLevelInt];
float4 b = colors[mipLevelInt + 1];
float4 color = lerp(a, b, t);
return color;
}
float3 GetDebugMipColor(float3 originalColor, Texture2D tex, float4 texelSize, float2 uv)
{
// https://aras-p.info/blog/2011/05/03/a-way-to-visualize-mip-levels/
float4 mipColor= GetMipLevelColor(uv, texelSize);
return lerp(originalColor, mipColor.rgb, mipColor.a);
}
float3 GetDebugMipCountColor(float3 originalColor, Texture2D tex)
{
uint mipCount = GetMipCount(tex);
float4 mipColor = GetSimpleMipCountColor(mipCount);
return lerp(originalColor, mipColor.rgb, mipColor.a);
}
float3 GetDebugStreamingMipColor(Texture2D tex, float4 mipInfo)
{
uint mipCount = GetMipCount(tex);
return GetStreamingMipColor(mipCount, mipInfo).xyz;
}
float3 GetDebugStreamingMipColorBlended(float3 originalColor, Texture2D tex, float4 mipInfo)
{
uint mipCount = GetMipCount(tex);
float4 mipColor = GetStreamingMipColor(mipCount, mipInfo);
return lerp(originalColor, mipColor.rgb, mipColor.a);
}
float3 GetDebugMipColorIncludingMipReduction(float3 originalColor, Texture2D tex, float4 texelSize, float2 uv, float4 mipInfo)
{
uint originalTextureMipCount = uint(mipInfo.y);
if (originalTextureMipCount != 0)
{
// mipInfo :
// x = quality settings minStreamingMipLevel
// y = original mip count for texture
// z = desired on screen mip level
// w = 0
// Mip count has been reduced but the texelSize was not updated to take that into account
uint mipCount = GetMipCount(tex);
uint mipReductionLevel = originalTextureMipCount - mipCount;
uint mipReductionFactor = 1 << mipReductionLevel;
if (mipReductionFactor)
{
float oneOverMipReductionFactor = 1.0 / mipReductionFactor;
// texelSize.xy *= mipReductionRatio; // Unused in GetDebugMipColor so lets not re-calculate it
texelSize.zw *= oneOverMipReductionFactor;
}
}
return GetDebugMipColor(originalColor, tex, texelSize, uv);
}
float3 GetDebugMipReductionColor(Texture2D tex, float4 mipInfo)
{
uint originalTextureMipCount = uint(mipInfo.y);
if (originalTextureMipCount != 0)
{
// mipInfo :
// x = quality settings minStreamingMipLevel
// y = original mip count for texture
// z = desired on screen mip level
// w = 0
// Mip count has been reduced but the texelSize was not updated to take that into account
uint mipCount = GetMipCount(tex);
uint mipReductionLevel = originalTextureMipCount - mipCount;
float mipCol = float(mipReductionLevel) / 12.0;
return float3(0, mipCol, 0);
}
// Can't calculate without original mip count - return magenta
return float3(1.0, 0.0, 1.0);
}
#ifdef DEBUG_DISPLAY
float3 GetTextureDataDebug(uint paramId, float2 uv, Texture2D tex, float4 texelSize, float4 mipInfo, float3 originalColor)
{
switch (paramId)
{
case DEBUGMIPMAPMODE_MIP_RATIO:
return GetDebugMipColorIncludingMipReduction(originalColor, tex, texelSize, uv, mipInfo);
case DEBUGMIPMAPMODE_MIP_COUNT:
return GetDebugMipCountColor(originalColor, tex);
case DEBUGMIPMAPMODE_MIP_COUNT_REDUCTION:
return GetDebugMipReductionColor(tex, mipInfo);
case DEBUGMIPMAPMODE_STREAMING_MIP_BUDGET:
return GetDebugStreamingMipColor(tex, mipInfo);
case DEBUGMIPMAPMODE_STREAMING_MIP:
return GetDebugStreamingMipColorBlended(originalColor, tex, mipInfo);
}
return originalColor;
}
#endif // DEBUG_DISPLAY
#endif // UNITY_DEBUG_INCLUDED
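GetTextureDataDebug is the single entry point keyed on the _DebugMipMapMode global; the material hunks later in this diff (LayeredLitData.hlsl, LitData.hlsl, UnlitData.hlsl) all hook into it with the same pattern, condensed here as a sketch (the texture names are those of the Lit shader):

#if defined(DEBUG_DISPLAY)
if (_DebugMipMapMode != DEBUGMIPMAPMODE_NONE)
{
    // Replace the sampled base colour with the selected mip visualisation.
    surfaceData.baseColor = GetTextureDataDebug(_DebugMipMapMode, layerTexCoord.base.uv,
        _BaseColorMap, _BaseColorMap_TexelSize, _BaseColorMap_MipInfo, surfaceData.baseColor);
    surfaceData.metallic = 0; // flatten the specular response so the debug colour stays readable
}
#endif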

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/DebugDisplay.cs (53 changes)


using System.Collections.Generic;
using UnityEngine;
using System;

public static string kSkyReflectionMipmapDebug = "Sky Reflection Mipmap";
public static string kTileClusterCategoryDebug = "Tile/Cluster Debug By Category";
public static string kTileClusterDebug = "Tile/Cluster Debug";
public static string kMipMapDebugMode = "Mip Map Debug Mode";
public float debugOverlayRatio = 0.33f;

public MaterialDebugSettings materialDebugSettings = new MaterialDebugSettings();
public LightingDebugSettings lightingDebugSettings = new LightingDebugSettings();
public MipMapDebugSettings mipMapDebugSettings = new MipMapDebugSettings();
public static GUIContent[] lightingFullScreenDebugStrings = null;
public static int[] lightingFullScreenDebugValues = null;

return lightingDebugSettings.debugLightingMode;
}
public DebugMipMapMode GetDebugMipMapMode()
{
return mipMapDebugSettings.debugMipMapMode;
}
return materialDebugSettings.IsDebugDisplayEnabled() || lightingDebugSettings.IsDebugDisplayEnabled();
return materialDebugSettings.IsDebugDisplayEnabled() || lightingDebugSettings.IsDebugDisplayEnabled() || mipMapDebugSettings.IsDebugDisplayEnabled();
}
public bool IsDebugMaterialDisplayEnabled()

public bool IsDebugMipMapDisplayEnabled()
{
return mipMapDebugSettings.IsDebugDisplayEnabled();
}
private void DisableNonMaterialDebugSettings()
{
lightingDebugSettings.debugLightingMode = DebugLightingMode.None;
mipMapDebugSettings.debugMipMapMode = DebugMipMapMode.None;
}
lightingDebugSettings.debugLightingMode = DebugLightingMode.None;
DisableNonMaterialDebugSettings();
materialDebugSettings.SetDebugViewMaterial(value);
}

lightingDebugSettings.debugLightingMode = DebugLightingMode.None;
DisableNonMaterialDebugSettings();
materialDebugSettings.SetDebugViewEngine(value);
}

lightingDebugSettings.debugLightingMode = DebugLightingMode.None;
DisableNonMaterialDebugSettings();
materialDebugSettings.SetDebugViewVarying(value);
}

lightingDebugSettings.debugLightingMode = DebugLightingMode.None;
DisableNonMaterialDebugSettings();
materialDebugSettings.SetDebugViewProperties(value);
}

lightingDebugSettings.debugLightingMode = DebugLightingMode.None;
DisableNonMaterialDebugSettings();
if(value != 0)
if (value != 0)
{
mipMapDebugSettings.debugMipMapMode = DebugMipMapMode.None;
}
public void SetMipMapMode(DebugMipMapMode value)
{
if (value != 0)
{
materialDebugSettings.DisableMaterialDebug();
lightingDebugSettings.debugLightingMode = DebugLightingMode.None;
}
mipMapDebugSettings.debugMipMapMode = value;
}
public void UpdateMaterials()
{
//if (mipMapDebugSettings.debugMipMapMode != 0)
// Texture.SetStreamingTextureMaterialDebugProperties();
}
public void RegisterDebug()
{
DebugMenuManager.instance.AddDebugItem<float>("Display Stats", "Frame Rate", () => 1.0f / Time.smoothDeltaTime, null, DebugItemFlag.DynamicDisplay);

DebugMenuManager.instance.AddDebugItem<LightingDebugPanel, LightLoop.TileClusterCategoryDebug>(kTileClusterCategoryDebug,() => lightingDebugSettings.tileClusterDebugByCategory, (value) => lightingDebugSettings.tileClusterDebugByCategory = (LightLoop.TileClusterCategoryDebug)value);
DebugMenuManager.instance.AddDebugItem<int>("Rendering", kFullScreenDebugMode, () => (int)fullScreenDebugMode, (value) => fullScreenDebugMode = (FullScreenDebugMode)value, DebugItemFlag.None, new DebugItemHandlerIntEnum(DebugDisplaySettings.renderingFullScreenDebugStrings, DebugDisplaySettings.renderingFullScreenDebugValues));
DebugMenuManager.instance.AddDebugItem<DebugMipMapMode>("Rendering", "MipMaps", () => mipMapDebugSettings.debugMipMapMode, (value) => SetMipMapMode((DebugMipMapMode)value));
}
public void OnValidate()
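The "Rendering / MipMaps" menu item registered above routes into SetMipMapMode, which clears the material and lighting debug modes so that only one visualisation is active at a time, while DisableNonMaterialDebugSettings clears the mip mode in the other direction. In other words, enabling any one debug view implicitly switches the others off.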

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/DebugDisplay.hlsl (2 changes)


#include "DebugDisplay.cs.hlsl"
#include "MaterialDebug.cs.hlsl"
#include "LightingDebug.cs.hlsl"
#include "MipMapDebug.cs.hlsl"
int _DebugMipMapMode; // Match enum DebugMipMapMode
float4 _DebugLightingAlbedo; // xyz = albedo for diffuse, w unused
float4 _DebugLightingSmoothness; // x == bool override, y == override value

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/MaterialDebug.cs (5 changes)


using System.Collections.Generic;
using UnityEngine;
using System;

TessellationDisplacement,
DepthOffset,
Lightmap,
Last,
}
}

public static int[] debugViewMaterialVaryingValues = null;
public static GUIContent[] debugViewMaterialPropertiesStrings = null;
public static int[] debugViewMaterialPropertiesValues = null;
public static GUIContent[] debugViewMaterialTextureStrings = null;
public static int[] debugViewMaterialTextureValues = null;
public static GUIContent[] debugViewMaterialGBufferStrings = null;
public static int[] debugViewMaterialGBufferValues = null;

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/MaterialDebug.cs.hlsl (1 change)


#define DEBUGVIEWPROPERTIES_TESSELLATION_DISPLACEMENT (19)
#define DEBUGVIEWPROPERTIES_DEPTH_OFFSET (20)
#define DEBUGVIEWPROPERTIES_LIGHTMAP (21)
#define DEBUGVIEWPROPERTIES_LAST (22)
#endif

ScriptableRenderPipeline/HDRenderPipeline/HDRP/HDRenderPipeline.cs (7 changes)


Resize(hdCamera);
if (m_CurrentDebugDisplaySettings.IsDebugDisplayEnabled())
{
m_CurrentDebugDisplaySettings.UpdateMaterials();
}
renderContext.SetupCameraProperties(camera);
PushGlobalParams(hdCamera, cmd, diffusionProfileSettings);

UpdateSkyEnvironment(hdCamera, cmd);
RenderPyramidDepth(camera, cmd, renderContext, FullScreenDebugMode.DepthPyramid);
if (m_CurrentDebugDisplaySettings.IsDebugMaterialDisplayEnabled())
{

cmd.SetGlobalInt(HDShaderIDs._DebugViewMaterial, (int)m_CurrentDebugDisplaySettings.GetDebugMaterialIndex());
cmd.SetGlobalInt(HDShaderIDs._DebugLightingMode, (int)m_CurrentDebugDisplaySettings.GetDebugLightingMode());
cmd.SetGlobalInt(HDShaderIDs._DebugMipMapMode, (int)m_CurrentDebugDisplaySettings.GetDebugMipMapMode());
cmd.SetGlobalVector(HDShaderIDs._DebugLightingAlbedo, debugAlbedo);
cmd.SetGlobalVector(HDShaderIDs._DebugLightingSmoothness, debugSmoothness);
}

ScriptableRenderPipeline/HDRenderPipeline/HDRP/HDStringConstants.cs (1 change)


public static readonly int _DebugLightingAlbedo = Shader.PropertyToID("_DebugLightingAlbedo");
public static readonly int _DebugLightingSmoothness = Shader.PropertyToID("_DebugLightingSmoothness");
public static readonly int _AmbientOcclusionTexture = Shader.PropertyToID("_AmbientOcclusionTexture");
public static readonly int _DebugMipMapMode = Shader.PropertyToID("_DebugMipMapMode");
public static readonly int _UseTileLightList = Shader.PropertyToID("_UseTileLightList");
public static readonly int _Time = Shader.PropertyToID("_Time");

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/LayeredLit/LayeredLitData.hlsl (8 changes)


AddDecalContribution(posInput.positionSS, surfaceData);
#if defined(DEBUG_DISPLAY)
if (_DebugMipMapMode != DEBUGMIPMAPMODE_NONE)
{
surfaceData.baseColor = GetTextureDataDebug(_DebugMipMapMode, layerTexCoord.base0.uv, _BaseColorMap0, _BaseColorMap0_TexelSize, _BaseColorMap0_MipInfo, surfaceData.baseColor);
surfaceData.metallic = 0;
}
#endif
GetBuiltinData(input, surfaceData, alpha, bentNormalWS, depthOffset, builtinData);
}

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Lit/Lit.hlsl (5 changes)


specularLighting = float3(0.0, 0.0, 0.0); // Disable specular lighting
}
#endif
else if (_DebugMipMapMode != DEBUGMIPMAPMODE_NONE)
{
diffuseLighting = bsdfData.diffuseColor;
specularLighting = float3(0.0, 0.0, 0.0); // Disable specular lighting
}
#endif
}

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Lit/LitData.hlsl (588 changes)


//-------------------------------------------------------------------------------------
// Fill SurfaceData/Builtin data function
//-------------------------------------------------------------------------------------
#include "CoreRP/ShaderLibrary/Sampling/SampleUVMapping.hlsl"
#include "../MaterialUtilities.hlsl"
#include "../Decal/DecalUtilities.hlsl"
// TODO: move this function to commonLighting.hlsl once validated it work correctly
float GetSpecularOcclusionFromBentAO(float3 V, float3 bentNormalWS, SurfaceData surfaceData)
{
// Retrieve cone angle
// Ambient occlusion is cosine weighted, thus use following equation. See slide 129
float cosAv = sqrt(1.0 - surfaceData.ambientOcclusion);
float roughness = max(PerceptualSmoothnessToRoughness(surfaceData.perceptualSmoothness), 0.01); // Clamp to 0.01 to avoid edge cases
float cosAs = exp2((-log(10.0)/log(2.0)) * Sq(roughness));
float cosB = dot(bentNormalWS, reflect(-V, surfaceData.normalWS));
return SphericalCapIntersectionSolidArea(cosAv, cosAs, cosB) / (TWO_PI * (1.0 - cosAs));
}
void GetBuiltinData(FragInputs input, SurfaceData surfaceData, float alpha, float3 bentNormalWS, float depthOffset, out BuiltinData builtinData)
{
// Builtin Data
builtinData.opacity = alpha;
// TODO: Sample lightmap/lightprobe/volume proxy
// This should also handle projective lightmap
builtinData.bakeDiffuseLighting = SampleBakedGI(input.positionWS, bentNormalWS, input.texCoord1, input.texCoord2);
// It is safe to call this function here as surfaceData have been filled
// We want to know if we must enable transmission on GI for SSS material, if the material have no SSS, this code will be remove by the compiler.
BSDFData bsdfData = ConvertSurfaceDataToBSDFData(surfaceData);
if (HasMaterialFeatureFlag(bsdfData.materialFeatures, MATERIALFEATUREFLAGS_LIT_TRANSMISSION))
{
// For now simply recall the function with inverted normal, the compiler should be able to optimize the lightmap case to not resample the directional lightmap
// however it will not optimize the lightprobe case due to the proxy volume relying on dynamic if (we rely must get right of this dynamic if), not a problem for SH9, but a problem for proxy volume.
// TODO: optimize more this code.
// Add GI transmission contribution by resampling the GI for inverted vertex normal
builtinData.bakeDiffuseLighting += SampleBakedGI(input.positionWS, -input.worldToTangent[2], input.texCoord1, input.texCoord2) * bsdfData.transmittance;
}
#ifdef SHADOWS_SHADOWMASK
float4 shadowMask = SampleShadowMask(input.positionWS, input.texCoord1);
builtinData.shadowMask0 = shadowMask.x;
builtinData.shadowMask1 = shadowMask.y;
builtinData.shadowMask2 = shadowMask.z;
builtinData.shadowMask3 = shadowMask.w;
#else
builtinData.shadowMask0 = 0.0;
builtinData.shadowMask1 = 0.0;
builtinData.shadowMask2 = 0.0;
builtinData.shadowMask3 = 0.0;
#endif
// Emissive Intensity is only use here, but is part of BuiltinData to enforce UI parameters as we want the users to fill one color and one intensity
builtinData.emissiveIntensity = _EmissiveIntensity; // We still store intensity here so we can reuse it with debug code
builtinData.emissiveColor = _EmissiveColor * builtinData.emissiveIntensity * lerp(float3(1.0, 1.0, 1.0), surfaceData.baseColor.rgb, _AlbedoAffectEmissive);
#ifdef _EMISSIVE_COLOR_MAP
builtinData.emissiveColor *= SAMPLE_TEXTURE2D(_EmissiveColorMap, sampler_EmissiveColorMap, TRANSFORM_TEX(input.texCoord0, _EmissiveColorMap)).rgb;
#endif
builtinData.velocity = float2(0.0, 0.0);
#if (SHADERPASS == SHADERPASS_DISTORTION) || defined(DEBUG_DISPLAY)
float3 distortion = SAMPLE_TEXTURE2D(_DistortionVectorMap, sampler_DistortionVectorMap, input.texCoord0).rgb;
distortion.rg = distortion.rg * _DistortionVectorScale.xx + _DistortionVectorBias.xx;
builtinData.distortion = distortion.rg * _DistortionScale;
builtinData.distortionBlur = clamp(distortion.b * _DistortionBlurScale, 0.0, 1.0) * (_DistortionBlurRemapMax - _DistortionBlurRemapMin) + _DistortionBlurRemapMin;
#else
builtinData.distortion = float2(0.0, 0.0);
builtinData.distortionBlur = 0.0;
#endif
builtinData.depthOffset = depthOffset;
}
// Struct that gather UVMapping info of all layers + common calculation
// This is use to abstract the mapping that can differ on layers
struct LayerTexCoord
{
#ifndef LAYERED_LIT_SHADER
UVMapping base;
UVMapping details;
#else
// Regular texcoord
UVMapping base0;
UVMapping base1;
UVMapping base2;
UVMapping base3;
UVMapping details0;
UVMapping details1;
UVMapping details2;
UVMapping details3;
// Dedicated for blend mask
UVMapping blendMask;
#endif
// Store information that will be share by all UVMapping
float3 vertexNormalWS; // TODO: store also object normal map for object triplanar
float3 triplanarWeights;
#ifdef SURFACE_GRADIENT
// tangent basis for each UVSet - up to 4 for now
float3 vertexTangentWS0, vertexBitangentWS0;
float3 vertexTangentWS1, vertexBitangentWS1;
float3 vertexTangentWS2, vertexBitangentWS2;
float3 vertexTangentWS3, vertexBitangentWS3;
#endif
};
#ifdef SURFACE_GRADIENT
void GenerateLayerTexCoordBasisTB(FragInputs input, inout LayerTexCoord layerTexCoord)
{
float3 vertexNormalWS = input.worldToTangent[2];
layerTexCoord.vertexTangentWS0 = input.worldToTangent[0];
layerTexCoord.vertexBitangentWS0 = input.worldToTangent[1];
// TODO: We should use relative camera position here - This will be automatic when we will move to camera relative space.
float3 dPdx = ddx_fine(input.positionWS);
float3 dPdy = ddy_fine(input.positionWS);
float3 sigmaX = dPdx - dot(dPdx, vertexNormalWS) * vertexNormalWS;
float3 sigmaY = dPdy - dot(dPdy, vertexNormalWS) * vertexNormalWS;
//float flipSign = dot(sigmaY, cross(vertexNormalWS, sigmaX) ) ? -1.0 : 1.0;
float flipSign = dot(dPdy, cross(vertexNormalWS, dPdx)) < 0.0 ? -1.0 : 1.0; // gives same as the commented out line above
// TODO: Optimize! The compiler will not be able to remove the tangent space that are not use because it can't know due to our UVMapping constant we use for both base and details
// To solve this we should track which UVSet is use for normal mapping... Maybe not as simple as it sounds
SurfaceGradientGenBasisTB(vertexNormalWS, sigmaX, sigmaY, flipSign, input.texCoord1, layerTexCoord.vertexTangentWS1, layerTexCoord.vertexBitangentWS1);
#if defined(_REQUIRE_UV2) || defined(_REQUIRE_UV3)
SurfaceGradientGenBasisTB(vertexNormalWS, sigmaX, sigmaY, flipSign, input.texCoord2, layerTexCoord.vertexTangentWS2, layerTexCoord.vertexBitangentWS2);
#endif
#if defined(_REQUIRE_UV3)
SurfaceGradientGenBasisTB(vertexNormalWS, sigmaX, sigmaY, flipSign, input.texCoord3, layerTexCoord.vertexTangentWS3, layerTexCoord.vertexBitangentWS3);
#endif
}
#endif
#ifndef LAYERED_LIT_SHADER
// Want to use only one sampler for normalmap/bentnormalmap either we use OS or TS. And either we have normal map or bent normal or both.
#ifdef _NORMALMAP_TANGENT_SPACE
#if defined(_NORMALMAP)
#define SAMPLER_NORMALMAP_IDX sampler_NormalMap
#elif defined(_BENTNORMALMAP)
#define SAMPLER_NORMALMAP_IDX sampler_BentNormalMap
#endif
#else
#if defined(_NORMALMAP)
#define SAMPLER_NORMALMAP_IDX sampler_NormalMapOS
#elif defined(_BENTNORMALMAP)
#define SAMPLER_NORMALMAP_IDX sampler_BentNormalMapOS
#endif
#endif
#define SAMPLER_DETAILMAP_IDX sampler_DetailMap
#define SAMPLER_MASKMAP_IDX sampler_MaskMap
#define SAMPLER_HEIGHTMAP_IDX sampler_HeightMap
#define SAMPLER_SUBSURFACE_MASKMAP_IDX sampler_SubsurfaceMaskMap
#define SAMPLER_THICKNESSMAP_IDX sampler_ThicknessMap
// include LitDataIndividualLayer to define GetSurfaceData
#define LAYER_INDEX 0
#define ADD_IDX(Name) Name
#define ADD_ZERO_IDX(Name) Name
#ifdef _NORMALMAP
#define _NORMALMAP_IDX
#endif
#ifdef _NORMALMAP_TANGENT_SPACE
#define _NORMALMAP_TANGENT_SPACE_IDX
#endif
#ifdef _DETAIL_MAP
#define _DETAIL_MAP_IDX
#endif
#ifdef _SUBSURFACE_MASK_MAP
#define _SUBSURFACE_MASK_MAP_IDX
#endif
#ifdef _THICKNESSMAP
#define _THICKNESSMAP_IDX
#endif
#ifdef _MASKMAP
#define _MASKMAP_IDX
#endif
#ifdef _BENTNORMALMAP
#define _BENTNORMALMAP_IDX
#endif
#include "LitDataIndividualLayer.hlsl"
// This maybe call directly by tessellation (domain) shader, thus all part regarding surface gradient must be done
// in function with FragInputs input as parameters
// layerTexCoord must have been initialize to 0 outside of this function
void GetLayerTexCoord(float2 texCoord0, float2 texCoord1, float2 texCoord2, float2 texCoord3,
float3 positionWS, float3 vertexNormalWS, inout LayerTexCoord layerTexCoord)
{
layerTexCoord.vertexNormalWS = vertexNormalWS;
layerTexCoord.triplanarWeights = ComputeTriplanarWeights(vertexNormalWS);
int mappingType = UV_MAPPING_UVSET;
#if defined(_MAPPING_PLANAR)
mappingType = UV_MAPPING_PLANAR;
#elif defined(_MAPPING_TRIPLANAR)
mappingType = UV_MAPPING_TRIPLANAR;
#endif
// Be sure that the compiler is aware that we don't use UV1 to UV3 for main layer so it can optimize code
ComputeLayerTexCoord( texCoord0, texCoord1, texCoord2, texCoord3, float4(1.0, 0.0, 0.0, 0.0), _UVDetailsMappingMask,
_BaseColorMap_ST.xy, _BaseColorMap_ST.zw, _DetailMap_ST.xy, _DetailMap_ST.zw, 1.0, _LinkDetailsWithBase,
positionWS, _TexWorldScale,
mappingType, layerTexCoord);
}
// This is call only in this file
// layerTexCoord must have been initialize to 0 outside of this function
void GetLayerTexCoord(FragInputs input, inout LayerTexCoord layerTexCoord)
{
#ifdef SURFACE_GRADIENT
GenerateLayerTexCoordBasisTB(input, layerTexCoord);
#endif
GetLayerTexCoord( input.texCoord0, input.texCoord1, input.texCoord2, input.texCoord3,
input.positionWS, input.worldToTangent[2].xyz, layerTexCoord);
}
#include "LitDataDisplacement.hlsl"
void GetSurfaceAndBuiltinData(FragInputs input, float3 V, inout PositionInputs posInput, out SurfaceData surfaceData, out BuiltinData builtinData)
{
#ifdef LOD_FADE_CROSSFADE // enable dithering LOD transition if user select CrossFade transition in LOD group
LODDitheringTransition(posInput.positionSS, unity_LODFade.x);
#endif
ApplyDoubleSidedFlipOrMirror(input); // Apply double sided flip on the vertex normal
LayerTexCoord layerTexCoord;
ZERO_INITIALIZE(LayerTexCoord, layerTexCoord);
GetLayerTexCoord(input, layerTexCoord);
float depthOffset = ApplyPerPixelDisplacement(input, V, layerTexCoord);
#ifdef _DEPTHOFFSET_ON
ApplyDepthOffsetPositionInput(V, depthOffset, GetWorldToHClipMatrix(), posInput);
#endif
// We perform the conversion to world of the normalTS outside of the GetSurfaceData
// so it allow us to correctly deal with detail normal map and optimize the code for the layered shaders
float3 normalTS;
float3 bentNormalTS;
float3 bentNormalWS;
float alpha = GetSurfaceData(input, layerTexCoord, surfaceData, normalTS, bentNormalTS);
GetNormalWS(input, V, normalTS, surfaceData.normalWS);
// Ensure that the normal is front-facing.
float NdotV;
surfaceData.normalWS = GetViewReflectedNormal(surfaceData.normalWS, V, NdotV);
// Use bent normal to sample GI if available
#ifdef _BENTNORMALMAP
GetNormalWS(input, V, bentNormalTS, bentNormalWS);
#else
bentNormalWS = surfaceData.normalWS;
#endif
// By default we use the ambient occlusion with Tri-ace trick (apply outside) for specular occlusion.
// If user provide bent normal then we process a better term
#if defined(_BENTNORMALMAP) && defined(_ENABLESPECULAROCCLUSION)
// If we have bent normal and ambient occlusion, process a specular occlusion
surfaceData.specularOcclusion = GetSpecularOcclusionFromBentAO(V, bentNormalWS, surfaceData);
#elif defined(_MASKMAP)
surfaceData.specularOcclusion = GetSpecularOcclusionFromAmbientOcclusion(NdotV, surfaceData.ambientOcclusion, PerceptualSmoothnessToRoughness(surfaceData.perceptualSmoothness));
#else
surfaceData.specularOcclusion = 1.0;
#endif
// This is use with anisotropic material
surfaceData.tangentWS = Orthonormalize(surfaceData.tangentWS, surfaceData.normalWS);
AddDecalContribution(posInput.positionSS, surfaceData);
#if defined(DEBUG_DISPLAY)
if (_DebugMipMapMode != DEBUGMIPMAPMODE_NONE)
{
surfaceData.baseColor = GetTextureDataDebug(_DebugMipMapMode, layerTexCoord.base.uv, _BaseColorMap, _BaseColorMap_TexelSize, _BaseColorMap_MipInfo, surfaceData.baseColor);
surfaceData.metallic = 0;
}
#endif
// Caution: surfaceData must be fully initialize before calling GetBuiltinData
GetBuiltinData(input, surfaceData, alpha, bentNormalWS, depthOffset, builtinData);
}
#include "LitDataMeshModification.hlsl"
#endif // #ifndef LAYERED_LIT_SHADER

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Lit/LitProperties.hlsl (5 changes)


// Set of users variables
float4 _BaseColor;
float4 _BaseColorMap_ST;
float4 _BaseColorMap_TexelSize;
float4 _BaseColorMap_MipInfo;
float _Metallic;
float _Smoothness;

float4 _BaseColorMap1_ST;
float4 _BaseColorMap2_ST;
float4 _BaseColorMap3_ST;
float4 _BaseColorMap0_TexelSize;
float4 _BaseColorMap0_MipInfo;
PROP_DECL(float, _Metallic);
PROP_DECL(float, _Smoothness);
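Note that the *_TexelSize vectors added here are filled automatically by Unity ({TextureName}_TexelSize is x = 1/width, y = 1/height, z = width, w = height), which is why GetMipLevelColor in Debug.hlsl reads the texture dimensions from texelSize.zw. The new *_MipInfo vectors, by contrast, are expected to be pushed from the C# side; the corresponding call in UpdateMaterials in DebugDisplay.cs is still commented out in this change.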

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Unlit/UnlitData.hlsl (7 changes)


#endif
builtinData.depthOffset = 0.0;
#if defined(DEBUG_DISPLAY)
if (_DebugMipMapMode != DEBUGMIPMAPMODE_NONE)
{
surfaceData.color = GetTextureDataDebug(_DebugMipMapMode, unlitColorMapUv, _UnlitColorMap, _UnlitColorMap_TexelSize, _UnlitColorMap_MipInfo, surfaceData.color);
}
#endif
}

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Unlit/UnlitProperties.hlsl (70 changes)


TEXTURE2D(_DistortionVectorMap);
SAMPLER(sampler_DistortionVectorMap);
TEXTURE2D(_UnlitColorMap);
SAMPLER(sampler_UnlitColorMap);
TEXTURE2D(_EmissiveColorMap);
SAMPLER(sampler_EmissiveColorMap);
CBUFFER_START(UnityPerMaterial)
float4 _UnlitColor;
float4 _UnlitColorMap_ST;
float4 _UnlitColorMap_TexelSize;
float4 _UnlitColorMap_MipInfo;
float3 _EmissiveColor;
float4 _EmissiveColorMap_ST;
float _EmissiveIntensity;
float _AlphaCutoff;
float _DistortionScale;
float _DistortionVectorScale;
float _DistortionVectorBias;
float _DistortionBlurScale;
float _DistortionBlurRemapMin;
float _DistortionBlurRemapMax;
// Caution: C# code in BaseLitUI.cs call LightmapEmissionFlagsProperty() which assume that there is an existing "_EmissionColor"
// value that exist to identify if the GI emission need to be enabled.
// In our case we don't use such a mechanism but need to keep the code quiet. We declare the value and always enable it.
// TODO: Fix the code in legacy unity so we can customize the behavior for GI
float3 _EmissionColor;
CBUFFER_END

ScriptableRenderPipeline/HDRenderPipeline/HDRP/ShaderPass/FragInputs.hlsl (1 change)


break;
}
}

ScriptableRenderPipeline/HDRenderPipeline/HDRP/ShaderPass/ShaderPassForward.hlsl (3 changes)


// We need to skip lighting when doing the debug pass because the debug pass is done before lighting, so some buffers may not be properly initialized, potentially causing crashes on PS4.
#ifdef DEBUG_DISPLAY
if (_DebugLightingMode != DEBUGLIGHTINGMODE_NONE)
if (_DebugLightingMode != DEBUGLIGHTINGMODE_NONE || _DebugMipMapMode != DEBUGMIPMAPMODE_NONE)
#endif
{
#ifdef _SURFACE_TYPE_TRANSPARENT

if (_DebugViewMaterial != 0)
{
float3 result = float3(1.0, 0.0, 1.0);
bool needLinearToSRGB = false;
GetPropertiesDataDebug(_DebugViewMaterial, result, needLinearToSRGB);

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/MipMapDebug.cs (32 changes)


using System.Collections.Generic;
using UnityEngine;
using System;
namespace UnityEngine.Experimental.Rendering.HDPipeline
{
[GenerateHLSL]
public enum DebugMipMapMode
{
None,
MipRatio,
MipCount,
MipCountReduction,
StreamingMipBudget,
StreamingMip
}
[Serializable]
public class MipMapDebugSettings
{
public DebugMipMapMode debugMipMapMode = DebugMipMapMode.None;
public bool IsDebugDisplayEnabled()
{
return debugMipMapMode != DebugMipMapMode.None;
}
public void OnValidate()
{
}
}
}

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/MipMapDebug.cs.hlsl (18 changes)


//
// This file was automatically generated. Please don't edit by hand.
//
#ifndef MIPMAPDEBUG_CS_HLSL
#define MIPMAPDEBUG_CS_HLSL
//
// UnityEngine.Experimental.Rendering.HDPipeline.DebugMipMapMode: static fields
//
#define DEBUGMIPMAPMODE_NONE (0)
#define DEBUGMIPMAPMODE_MIP_RATIO (1)
#define DEBUGMIPMAPMODE_MIP_COUNT (2)
#define DEBUGMIPMAPMODE_MIP_COUNT_REDUCTION (3)
#define DEBUGMIPMAPMODE_STREAMING_MIP_BUDGET (4)
#define DEBUGMIPMAPMODE_STREAMING_MIP (5)
#endif

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/MipMapDebug.cs.hlsl.meta (9 changes)


fileFormatVersion: 2
guid: b70bba64289151b40845a49cf9f32d18
ShaderImporter:
externalObjects: {}
defaultTextures: []
nonModifiableTextures: []
userData:
assetBundleName:
assetBundleVariant:

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Debug/MipMapDebug.cs.meta (11 changes)


fileFormatVersion: 2
guid: 2f696bd7f8848384c985304f7e4f8fe8
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant: