float m = PerceptualRoughnessToRoughness(perceptualRoughness);
// Remap to spec power. See eq. 21 in --> https://dl.dropboxusercontent.com/u/55891920/papers/mm_brdf.pdf
float n = (2.0 / max(FLT_EPS, m * m)) - 2.0;
n /= (4.0 * max(NdotR, FLT_EPS));
// remap back to the square root of real roughness (0.25 includes both the sqrt of the conversion and the sqrt for going from roughness to perceptualRoughness)
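// A sketch of the line the comment above describes (not shown in this excerpt), assuming the
// Blinn-Phong relation m^2 = 2 / (n + 2): pow(x, 0.25) folds roughness = sqrt(2 / (n + 2)) and
// perceptualRoughness = sqrt(roughness) into a single operation.
perceptualRoughness = pow(2.0 / (n + 2.0), 0.25);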
// First, ShadowContext.hlsl provides a macro SHADOWCONTEXT_DECLARE that must be used to define the specific ShadowContext struct and the accompanying loader.
// Second, there are two headers for shadow algorithms, whose function signatures must match the Get...Attenuation function prototypes.
// The first header contains the engine defaults, whereas the second header is empty by default. All project-specific custom shadow algorithms should go in the second header; otherwise leave it empty.
//
// Last, there's a dispatcher include. By default the Get...Attenuation functions are routed to their default implementations. This can be overridden for each
// shadow type in the dispatcher source. For each overridden shadow type a specific define must be set to prevent falling back to the default functions.
//
/* Required defines: (define these to the desired numbers - must be in sync with loading and resource setup from C#)
#define SHADOWCONTEXT_MAX_TEX2DARRAY 0
#define SHADOWCONTEXT_MAX_TEXCUBEARRAY 0
#define SHADOWCONTEXT_MAX_SAMPLER 0
#define SHADOWCONTEXT_MAX_COMPSAMPLER 0
*/
/* Default values for optional defines:
#define SHADOW_SUPPORTS_DYNAMIC_INDEXING 0 // Dynamic indexing only works on >= sm 5.1
#define SHADOW_OPTIMIZE_REGISTER_USAGE 0 // Redefine this as 1 in your ShadowContext.hlsl to optimize for register usage over instruction count
// #define SHADOW_DISPATCH_USE_CUSTOM_PUNCTUAL // Enable custom implementations of GetPunctualShadowAttenuation. If not defined, a default implementation will be used.
// #define SHADOW_DISPATCH_USE_CUSTOM_DIRECTIONAL // Enable custom implementations of GetDirectionalShadowAttenuation. If not defined, a default implementation will be used.
*/
#ifndef SHADOW_SUPPORTS_DYNAMIC_INDEXING
#define SHADOW_SUPPORTS_DYNAMIC_INDEXING 0
#endif
#ifndef SHADOW_OPTIMIZE_REGISTER_USAGE
#define SHADOW_OPTIMIZE_REGISTER_USAGE 0
#endif
#include "../../../Core/Shadow/ShadowBase.cs.hlsl" // ShadowData definition, auto generated (don't modify)
#include "Shadow/ShadowBase.cs.hlsl" // ShadowData definition, auto generated (don't modify)
// Declares a shadow context struct with members and sampling code based on whether _...Slots > 0
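// Illustrative project-side ShadowContext.hlsl, assuming SHADOWCONTEXT_DECLARE takes the four slot
// counts as arguments; the numbers below are example values only and must match the C# resource setup.
#define SHADOWCONTEXT_MAX_TEX2DARRAY   4
#define SHADOWCONTEXT_MAX_TEXCUBEARRAY 0
#define SHADOWCONTEXT_MAX_COMPSAMPLER  1
#define SHADOWCONTEXT_MAX_SAMPLER      1

SHADOWCONTEXT_DECLARE(SHADOWCONTEXT_MAX_TEX2DARRAY, SHADOWCONTEXT_MAX_TEXCUBEARRAY, SHADOWCONTEXT_MAX_COMPSAMPLER, SHADOWCONTEXT_MAX_SAMPLER)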
// Be sure the legacy HDR option is disabled on the camera, as it causes banding in the SceneView. Yes, it is a contradiction, but well, Unity...
// When the HDR option is enabled, Unity renders in FP16 then converts to 8 bit with a stretch copy (this causes banding, as it should be converted to sRGB (or another appropriate color space)), then does a final shader pass with sRGB conversion
// When LDR, Unity renders in 8-bit sRGB, then does a final shader pass with sRGB conversion
// What should be done is that in our post process we convert to sRGB and store in a linear 10-bit format, but this requires a C++ change...
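// A minimal sketch of the post-process idea above: apply the sRGB transfer function ourselves at the
// end of the frame (standard sRGB encoding; writing the result to a 10-bit target is the part that
// would need the C++ change).
float3 ExampleLinearToSRGB(float3 c)
{
    // Piecewise sRGB encoding: linear segment near black, gamma segment elsewhere.
    float3 lo = c * 12.92;
    float3 hi = 1.055 * pow(c, 1.0 / 2.4) - 0.055;
    return (c <= 0.0031308) ? lo : hi;
}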
// CAUTION: If deferred lighting needs to support various lighting models statically, we will have to do a multicompile with different defines like UNITY_MATERIAL_LIT
#define UNITY_MATERIAL_LIT // Needs to be defined before including Material.hlsl
public readonly GUIContent renderPipelineResources = new GUIContent("Render Pipeline Resources", "Set of resources that need to be loaded when creating stand alone");
public readonly GUIContent defaultDiffuseMaterial = new GUIContent("Default Diffuse Material", "Material to use when creating objects");
public readonly GUIContent defaultShader = new GUIContent("Default Shader", "Shader to use when creating materials");
// Note: Legacy Unity behaves like this for ShadowMask
// When you select ShadowMask in the Lighting panel it recompiles shaders on the fly with the SHADOW_MASK keyword.
// However there is no C# function we can query to know which mode has been selected in the Lighting panel, and it would be wrong anyway: the Lighting panel sets up what the next bake mode will be, but until the lighting is baked, it is wrong.
// Currently, to know if you need the shadow mask you need to go through all visible lights (of CullResult), check the LightBakingOutput struct and look at lightmapBakeType/mixedLightingMode. If one light has the shadow mask bake mode, then you need the shadow mask features (i.e. an extra GBuffer).
// It means that when we build a standalone player, if we detect a light with a baked shadow mask, we generate all shader variants (with and without shadow mask), and at runtime, when a baked shadow mask light is visible, we dynamically allocate an extra GBuffer and switch the shader.
// So the first thing to do is to go through all the lights: PrepareLightsForGPU
// In both forward and deferred, everything opaque should have been rendered at this point so we can safely copy the depth buffer for later processing.
CopyDepthBufferIfNeeded(cmd);
// Don't update the sky environment if we are rendering a cubemap (it should be updated already)
if (camera.cameraType != CameraType.Reflection)
{
    // Caution: We require the sun light here as some skies use the sun light to render, meaning UpdateSkyEnvironment
    // must be called after BuildGPULightLists.
    // TODO: Try to arrange code so we can trigger this call earlier and use async compute here to run sky convolution during other passes (once we move the convolution shader to compute).
    UpdateSkyEnvironment(hdCamera, cmd);
}
// We compute subsurface scattering here. Therefore, no objects rendered afterwards will exhibit SSS.
// Currently, there is no efficient way to switch between SRT and MRT for the forward pass;
// therefore, forward-rendered objects do not output split lighting required for the SSS pass.
else
{
    RenderVelocity(m_CullResults, hdCamera, renderContext, cmd); // Note: we may have to render velocity earlier if we do temporal AO, temporal volumetrics, etc. Does this mean we will not take forward opaque into account in the case of deferred rendering?
// Rendering distortion here of course has a lot of artifacts.
// But resolving at each object that writes distortion is not possible (we would need to sort transparents, render those that do not distort, then resolve, then etc...)
// Instead we chose to apply distortion at the end, after we have accumulated the distortion vectors and the desired blurriness.
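// Illustrative sketch of the "apply at the end" pass described above: read back the accumulated
// distortion vector and blur amount, then fetch the scene color once, offset and pre-blurred.
// The buffer names, the packing (xy = vector, z = blur) and the blur-to-mip mapping are assumptions,
// not the pipeline's actual implementation.
Texture2D<float4> _ExampleDistortionBuffer; // xy: accumulated distortion vector, z: accumulated blurriness
Texture2D<float4> _ExampleColorPyramid;     // scene color with mips (higher mip = blurrier)
SamplerState      s_ExampleLinearClamp;

float4 ApplyDistortionSketch(float2 uv)
{
    float4 distortion  = _ExampleDistortionBuffer.SampleLevel(s_ExampleLinearClamp, uv, 0);
    float2 distortedUV = uv + distortion.xy;  // offset the lookup by the accumulated vector
    float  mip         = distortion.z * 4.0;  // map accumulated blurriness to a mip level (illustrative scale)
    return _ExampleColorPyramid.SampleLevel(s_ExampleLinearClamp, distortedUV, mip);
}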
// Project specific file to override the default shadow sampling routines.
// We need to define which dispatchers we're overriding, otherwise the compiler will pick default implementations which will lead to compilation errors.
// Check Shadow.hlsl right below where this header is included for the individual defines.
#ifndef TILEPASS_SHADOW_HLSL
#define TILEPASS_SHADOW_HLSL
#define SHADOW_DISPATCH_USE_CUSTOM_DIRECTIONAL
#define SHADOW_DISPATCH_USE_CUSTOM_PUNCTUAL
#include "ShadowContext.hlsl"
// This is an example of how to override the default dynamic resource dispatcher
// by hardcoding the resources used and calling the shadow sampling routines that take an explicit texture and sampler.
#define SHADOW_DISPATCH_USE_CUSTOM_DIRECTIONAL // enables hardcoded resources and algorithm for directional lights
#define SHADOW_DISPATCH_USE_CUSTOM_PUNCTUAL // enables hardcoded resources and algorithm for punctual lights
//#define SHADOW_DISPATCH_USE_SEPARATE_CASCADE_ALGOS // enables separate cascade sampling variants for each cascade
//#define SHADOW_DISPATCH_USE_SEPARATE_PUNC_ALGOS // enables separate resources and algorithms for spot and point lights
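// Sketch of what a hardcoded punctual override could look like once SHADOW_DISPATCH_USE_CUSTOM_PUNCTUAL
// is defined. Only the entry-point name and argument order follow the call site shown further below;
// the parameter types, the hardcoded resources and the shadow-coordinate helper are illustrative
// assumptions, not the engine's actual implementation.
Texture2DArray         _ExampleShadowAtlas;  // assumed project-owned shadow atlas
SamplerComparisonState s_ExampleShadowCmp;   // assumed comparison (PCF) sampler

float GetPunctualShadowAttenuation(ShadowContext shadowContext, float3 positionWS, float3 N,
                                   int shadowIndex, float4 L_dist, float2 unPositionSS)
{
    // Hypothetical helper: transforms the world-space position into the light's shadow map space,
    // returning uv in xy, the atlas slice in z and the comparison depth in w.
    float4 shadowCoord = ExampleComputeShadowCoord(shadowContext, shadowIndex, positionWS, N);

    // Single hardware comparison tap against the hardcoded atlas; a real override would add
    // filtering and biasing here.
    return _ExampleShadowAtlas.SampleCmpLevelZero(s_ExampleShadowCmp,
                                                  float3(shadowCoord.xy, shadowCoord.z),
                                                  shadowCoord.w);
}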
float NdotL = saturate(dot(bsdfData.normalWS, L)); // Must have the same value without the clamp
float NdotV = preLightData.NdotV; // Get the unaltered (geometric) version
float LdotV = dot(L, V);
float invLenLV = rsqrt(max(2 * LdotV + 2, FLT_EPS)); // invLenLV = rcp(length(L + V)) - caution about the case where V and L are opposite, it can happen, use max to avoid this
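// Why the rsqrt(2 * LdotV + 2) form works, and a sketch of the half-vector terms it enables
// (assuming the usual identities with H = (L + V) / |L + V|): for unit vectors, |L + V|^2 = 2 + 2 * LdotV,
// so invLenLV = rcp(length(L + V)), and the dot products with H never require normalizing H explicitly.
float NdotH = saturate((NdotL + NdotV) * invLenLV);  // N.H = (N.L + N.V) / |L + V|
float LdotH = saturate(invLenLV * LdotV + invLenLV); // L.H = (L.V + 1)   / |L + V|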
shadow = GetPunctualShadowAttenuation(lightLoopContext.shadowContext, positionWS + offset, N, lightData.shadowIndex, L_dist, posInput.unPositionSS);
#ifdef SHADOWS_SHADOWMASK
// Note: Legacy Unity has two shadow mask modes. ShadowMask (the ShadowMask contains the static objects' shadows and the ShadowMap contains only the dynamic objects' shadows; the final result is the minimum of both values)
// and ShadowMask_Distance (the ShadowMask contains the static objects' shadows and the ShadowMap contains everything and is blended with the ShadowMask based on distance (the global distance set up in QualitySettings)).
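// A minimal sketch of how the two modes combine the baked mask with the real-time shadow value,
// following the description above (function and parameter names are illustrative, not the engine's):
float ExampleCombineShadowMask(float realtimeShadow, float bakedShadowMask, float fade, bool distanceMode)
{
    // ShadowMask: static shadows live in the mask, dynamic shadows in the shadow map -> take the minimum.
    // ShadowMask_Distance: the shadow map contains everything and fades towards the mask with distance.
    return distanceMode ? lerp(realtimeShadow, bakedShadowMask, fade)
                        : min(realtimeShadow, bakedShadowMask);
}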