float deviceDepth; // Depth from the depth buffer : [0, 1] (typically reversed)
float linearDepth; // View space Z coordinate : [Near, Far]
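// A hedged sketch (not a helper from this file): recovering linearDepth from a reversed-Z deviceDepth,
// with explicit near/far parameters for illustration. Inverting the reversed-Z projection gives
// viewZ = near * far / (near + deviceDepth * (far - near)), i.e. deviceDepth = 1 maps to Near and 0 to Far.
float LinearizeReversedDepth(float deviceDepth, float nearPlane, float farPlane)
{
    return (nearPlane * farPlane) / (nearPlane + deviceDepth * (farPlane - nearPlane));
}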
// If a compute shader calls this function, positionSS is an integer, usually calculated like: uint2 positionSS = groupId.xy * BLOCK_SIZE + groupThreadId.xy
PositionInputs GetPositionInput(float2 positionSS, float2 invScreenSize, uint2 tileCoord) // Specify explicit tile coordinates so that we can easily make it lane invariant for compute evaluation.
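// Minimal usage sketch for the compute path described above. BLOCK_SIZE, _ScreenSize and the kernel name
// are illustrative assumptions; only the positionSS formula comes from the comment above, and groupId.xy
// is used as the tile coordinate so the value is lane invariant within a group.
[numthreads(BLOCK_SIZE, BLOCK_SIZE, 1)]
void ExampleKernel(uint3 groupId : SV_GroupID, uint3 groupThreadId : SV_GroupThreadID)
{
    uint2 positionSS = groupId.xy * BLOCK_SIZE + groupThreadId.xy;                      // per-lane pixel coordinate
    PositionInputs posInput = GetPositionInput(positionSS, _ScreenSize.zw, groupId.xy); // assume _ScreenSize.zw = 1 / resolution
    // ... use posInput ...
}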
float m = PerceptualRoughnessToRoughness(perceptualRoughness);
// Remap to spec power. See eq. 21 in --> https://dl.dropboxusercontent.com/u/55891920/papers/mm_brdf.pdf
float n = (2.0 / max(FLT_EPS, m * m)) - 2.0;
n /= (4.0 * max(NdotR, FLT_EPS));
// Remap back to the square root of the real roughness (0.25 includes both the sqrt of the conversion and the sqrt for going from roughness to perceptualRoughness)
perceptualRoughness = pow(2.0 / (n + 2.0), 0.25);
}
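// Pulled together, the fragments above form a single remap of the following shape (hedged reconstruction,
// reusing PerceptualRoughnessToRoughness, FLT_EPS and NdotR exactly as they appear above):
float RemapPerceptualRoughnessThroughSpecPower(float perceptualRoughness, float NdotR)
{
    float m = PerceptualRoughnessToRoughness(perceptualRoughness);
    float n = (2.0 / max(FLT_EPS, m * m)) - 2.0;   // roughness -> Blinn-Phong specular power (eq. 21)
    n /= (4.0 * max(NdotR, FLT_EPS));              // scale by 4 * NdotR as in the fragment above
    return pow(2.0 / (n + 2.0), 0.25);             // back to perceptualRoughness
}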
// Prevent NaNs arising from the division of 0 by 0.
// For baked probes in the editor we need to factor in the actual hash of the texture, because we can't increment the update count of a texture that's baked on disk.
// This code leaks logic from reflection probe baking into the texture cache which is not good... TODO: Find a way to do that outside of the texture cache.
// Look for the first non-zero entry. It will be the least recently used entry
// since the array was pre-sorted (in linear time) in NewFrame()
{
    idx = m_SortedIdxArray[j];
    if (m_SliceArray[idx].countLRU == 0)
        ++j;    // if entry already snagged by a new texture in this frame then ++j
    else
        bFound = true;
needUpdate = true;
// if we are replacing an existing entry delete it from m_locatorInSliceArray.
if (m_SliceArray[idx].texId != g_InvalidTexID)
{
    m_SliceArray[idx].texId = texId;
    sliceIndex = idx;
    bFoundAvailOrExistingSlice = true;
    bSwapSlice = true;
// wrap up
Debug.Assert(bFoundAvailOrExistingSlice, "The texture cache doesn't have enough space to store all textures. Please either increase the size of the texture cache, or use fewer unique textures.");
if (bFoundAvailOrExistingSlice)
if (sliceIndex != -1)
}
return sliceIndex;
}
if (bSwapSlice) // if this was a miss
// In case the texture content with which we update the cache is not the input texture, we need to provide the right update count.
Debug.Assert(sliceIndex != -1, "The texture cache doesn't have enough space to store all textures. Please either increase the size of the texture cache, or use fewer unique textures.");
// CAUTION: If deferred lighting needs to support various lighting models statically, we will need to do a multicompile with different defines like UNITY_MATERIAL_LIT
#define UNITY_MATERIAL_LIT // Needs to be defined before including Material.hlsl
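// Sketch of the multicompile alternative mentioned in the CAUTION above (the UNLIT keyword is purely
// hypothetical; only UNITY_MATERIAL_LIT and the Material.hlsl include come from this file):
#pragma multi_compile UNITY_MATERIAL_LIT UNITY_MATERIAL_UNLIT
#include "Material.hlsl"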
public bool useForwardRenderingOnly; // TODO: Currently there is no way to strip the extra forward shaders generated by the shader compiler, so we can switch dynamically.
public bool useDepthPrepassWithDeferredRendering;
public bool renderAlphaTestOnlyInDeferredPrepass;
// We have to fall back to forward-only rendering when the scene view is using the wireframe rendering mode,
// as rendering everything in wireframe + deferred does not play well together.
// These need to be Runtime Only because those values are held by the HDRenderPipeline asset, so if the user changes them through the editor debug menu they might change the value in the asset without noticing it.
DebugMenuManager.instance.AddDebugItem<bool>("HDRP", "Enable Big Tile", () => m_Asset.tileSettings.enableBigTilePrepass, (value) => m_Asset.tileSettings.enableBigTilePrepass = (bool)value, DebugItemFlag.RuntimeOnly);
// Note: Legacy Unity behaves like this for ShadowMask:
// When you select ShadowMask in the Lighting panel it recompiles shaders on the fly with the SHADOW_MASK keyword.
// However there is no C# function that we can query to know which mode has been selected in the Lighting panel, and it would be wrong anyway: the Lighting panel sets up what the next bake mode will be, but until lighting is baked, it is wrong.
// Currently, to know if you need shadow mask you need to go through all visible lights (of CullResult), check the LightBakingOutput struct and look at lightmapBakeType/mixedLightingMode. If one light has the shadow mask bake mode, then you need the shadow mask feature (i.e. an extra GBuffer).
// This means that when we build a standalone player, if we detect a light with a baked shadow mask, we generate all shader variants (with and without shadow mask) and at runtime, when a baked shadow mask light is visible, we dynamically allocate an extra GBuffer and switch the shader.
// So the first thing to do is to go through all the lights: PrepareLightsForGPU
// In both forward and deferred, everything opaque should have been rendered at this point so we can safely copy the depth buffer for later processing.
// Don't update the sky environment if we are rendering a cubemap (it should be updated already)
if (camera.cameraType != CameraType.Reflection)
{
// Caution: We require the sun light here as some skies use the sun light to render, which means UpdateSkyEnvironment
// must be called after BuildGPULightLists.
// TODO: Try to arrange code so we can trigger this call earlier and use async compute here to run sky convolution during other passes (once we move convolution shader to compute).
UpdateSkyEnvironment(hdCamera, cmd);
}
RenderDeferredLighting(hdCamera, cmd);
// We compute subsurface scattering here. Therefore, no objects rendered afterwards will exhibit SSS.
// Currently, there is no efficient way to switch between SRT and MRT for the forward pass;
// therefore, forward-rendered objects do not output split lighting required for the SSS pass.
RenderVelocity(m_CullResults, hdCamera, renderContext, cmd); // Note: we may have to render velocity earlier if we do temporal AO, temporal volumetrics, etc. Does that mean we will not take forward opaque into account in the case of deferred rendering?
}
else
{
RenderVelocity(m_CullResults, hdCamera, renderContext, cmd); // Note: we may have to render velocity earlier if we do temporal AO, temporal volumetrics, etc. Does that mean we will not take forward opaque into account in the case of deferred rendering?
// Rendering distortion here of course has a lot of artifacts.
// But resolving at each object that writes distortion is not possible (we would need to sort transparents, render those that do not distort, then resolve, then etc...)
// Instead we chose to apply distortion at the end, after we accumulate the distortion vectors and the desired blurriness.
// Make sure to unbind every render texture here because in the next iteration of the loop we might have to reallocate render textures (if the camera size is different)
// We still need to correctly bind the default camera target with our depth buffer in case we are currently rendering the scene view. It should be the last camera here.
// bind depth surface for editor grid/gizmo/selection rendering
// It must also have a "DepthForwardOnly" and no "DepthOnly" pass as forward material (either deferred or forward only rendering) have always a depth pass.
// In case of forward only rendering we have a depth prepass. In case of deferred renderer, it is optional
// We render first the opaque object as opaque alpha tested are more costly to render and could be reject by early-z (but not Hi-z as it is disable with clip instruction)
// This is handled automatically with the RenderQueue value (OpaqueAlphaTested have a different value and thus are sorted after Opaque)
// Forward opaque material always have a prepass (whether or not we use deferred, whether or not there is option like alpha test only) so we pass the right depth state here.
m_FullScreenDebugPushed = true; // We need this flag because otherwise if no fullscreen debug is pushed, when we render the result in RenderDebug the temporary RT will not exist.
public readonly GUIContent range = new GUIContent("Range", "Controls how far the light is emitted from the center of the object.");
public readonly GUIContent color = new GUIContent("Color", "Controls the color being emitted by the light.");
public readonly GUIContent spotAngle = new GUIContent("Angle", "Controls the angle in degrees at the base of a Spot light's cone.");
public readonly GUIContent useColorTemperature = new GUIContent("Use Color Temperature", "Choose between RGB and temperature mode for light's color.");
public readonly GUIContent colorFilter = new GUIContent("Filter", "A colored gel can be put in front of the light source to tint the light.");
public readonly GUIContent colorTemperature = new GUIContent("Temperature", "Also known as CCT (Correlated color temperature). The color temperature of the electromagnetic radiation emitted from an ideal black body is defined as its surface temperature in Kelvin. White is 6500K");
public readonly GUIContent intensity = new GUIContent("Intensity", "Controls the brightness of the light. Light color is multiplied by this value.");
public readonly GUIContent lightmappingMode = new GUIContent("Mode", "Specifies the light mode used to determine if and how a light will be baked. Possible modes are Baked, Mixed, and Realtime.");
public readonly GUIContent lightBounceIntensity = new GUIContent("Indirect Multiplier", "Controls the intensity of indirect light being contributed to the scene. A value of 0 will cause Realtime lights to be removed from realtime global illumination and Baked and Mixed lights to no longer emit indirect lighting. Has no effect when both Realtime and Baked Global Illumination are disabled.");
public readonly GUIContent cookie = new GUIContent("Cookie", "Specifies the Texture mask to cast shadows, create silhouettes, or patterned illumination for the light.");
public readonly GUIContent cookieSizeX = new GUIContent("Size X", "Controls the size of the cookie mask currently assigned to the light.");
public readonly GUIContent cookieSizeY = new GUIContent("Size Y", "Controls the size of the cookie mask currently assigned to the light.");
public readonly GUIContent shadowBias = new GUIContent("Bias", "Controls the distance at which the shadows will be pushed away from the light. Useful for avoiding false self-shadowing artifacts.");
public readonly GUIContent bakedShadowAngle = new GUIContent("Baked Shadow Angle", "Controls the amount of artificial softening applied to the edges of shadows cast by directional lights.");
public readonly GUIContent bakingWarning = new GUIContent("Light mode is currently overridden to Realtime mode. Enable Baked Global Illumination to use Mixed or Baked light modes.");
public readonly GUIContent indirectBounceShadowWarning = new GUIContent("Realtime indirect bounce shadowing is not supported for Spot and Point lights.");
public readonly GUIContent cookieWarning = new GUIContent("Cookie textures for spot lights should be set to clamp, not repeat, to avoid artifacts.");
// Additional light data
public readonly GUIContent maxSmoothness = new GUIContent("Max Smoothness", "Very low cost way of faking spherical area lighting. This will modify the roughness of the material lit. This is useful when the specular highlight is too small or too sharp.");
// bakeDiffuseLighting is part of the prototype so a user is able to implement a "base pass" with GI and multi-pass direct lighting (aka the old Unity rendering path)
// If rectangle lights are before line lights, the compiler will duplicate light matrices in VGPR because they are used differently between the two types of lights.
//SetKeyword(material, "_MATID_STANDARD", materialId == Lit.MaterialId.LitStandard); // See comment in Lit.shader, it is the default, we don't define it
//CoreUtils.SetKeyword(material, "_MATID_STANDARD", materialId == Lit.MaterialId.LitStandard); // See comment in Lit.shader, it is the default, we don't define it
//SetKeyword(material, "_MATID_STANDARD", materialId == Lit.MaterialId.LitStandard); // See comment in Lit.shader, it is the default, we don't define it
//CoreUtils.SetKeyword(material, "_MATID_STANDARD", materialId == Lit.MaterialId.LitStandard); // See comment in Lit.shader, it is the default, we don't define it
// For GGX aniso and IBL we have done an empirical (eyeballed) approximation compared to the reference.
// We use a single fetch, and we stretch the normal we use based on various criteria.
// NOTE: If we follow the theory we should use the modified normal for the different calculations involving a normal (like NdotV) and pass 'anisoIblNormalWS'
// into functions like GetSpecularDominantDir(). However the modified normal is just a hack. The goal is just to stretch a cubemap, no accuracy is needed here.
// With this in mind and for performance reasons we chose to only use the modified normal to calculate R.
float3 anisoIblNormalWS = GetAnisotropicModifiedNormal(grainDirWS, N, V, stretch);
iblR = reflect(-V, anisoIblNormalWS);
iblN = GetAnisotropicModifiedNormal(grainDirWS, N, V, stretch);
float NdotL = saturate(dot(bsdfData.normalWS, L)); // Must have the same value without the clamp
float NdotV = preLightData.NdotV; // Get the unaltered (geometric) version
float LdotV = dot(L, V);
float invLenLV = rsqrt(max(2 * LdotV + 2, FLT_EPS)); // invLenLV = rcp(length(L + V)) - caution about the case where V and L are opposite, it can happen, use max to avoid this
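// Why invLenLV works: for unit L and V, |L + V|^2 = |L|^2 + |V|^2 + 2 * LdotV = 2 + 2 * LdotV, so
// rsqrt(2 * LdotV + 2) is exactly rcp(length(L + V)). That lets us get the half-vector dot products
// without ever constructing H explicitly (sketch using the names defined above):
float NdotH = saturate((NdotL + NdotV) * invLenLV); // N.H = (N.L + N.V) / |L + V|
float LdotH = saturate(invLenLV * LdotV + invLenLV); // L.H = (L.L + L.V) / |L + V| = (1 + L.V) / |L + V|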
shadow = GetPunctualShadowAttenuation(lightLoopContext.shadowContext, positionWS + offset, N, lightData.shadowIndex, L_dist, posInput.positionSS);
#ifdef SHADOWS_SHADOWMASK
// Note: Legacy Unity has two shadow mask modes: ShadowMask (the ShadowMask contains static objects' shadows and the ShadowMap contains only dynamic objects' shadows; the final result is the minimum of both values)
// and ShadowMask_Distance (the ShadowMask contains static objects' shadows and the ShadowMap contains everything and is blended with the ShadowMask based on distance (global distance set up in QualitySettings)).
#endif
}
illuminance *= shadow;
attenuation *= shadow;
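// A minimal sketch of the two legacy blend behaviours described in the SHADOWS_SHADOWMASK note above;
// the function, keyword and parameter names are illustrative, not the actual shadow code of this file.
float BlendBakedAndRealtimeShadow(float realtimeShadow, float bakedShadowMask, float distanceFade)
{
#if defined(ILLUSTRATIVE_SHADOWMASK_DISTANCE)
    // ShadowMask_Distance: the shadow map contains everything and fades towards the baked mask with distance.
    return lerp(realtimeShadow, bakedShadowMask, distanceFade);
#else
    // ShadowMask: static shadows come from the mask, dynamic ones from the map; take the minimum of both.
    return min(realtimeShadow, bakedShadowMask);
#endif
}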
// Projector lights always have cookies, so we can perform clipping inside the if().
// Guideline for reflection volumes: In HDRenderPipeline we separate the projection volume (the proxy of the scene) from the influence volume (which pixels on the screen are affected).
// However we add the constraint that the shapes of the projection and influence volumes are the same (i.e. if we have a sphere-shaped projection volume, we have a sphere-shaped influence volume).
// We store inverse AO so neutral is black. So whether we sample inside or outside the texture, it returns 0 in the neutral case.
// Ambient occlusion is used for indirect lighting (reflection probes, baked diffuse lighting).
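// Illustrative decode of the inverse AO convention described above (the texture name, load macro and
// coordinate parameter are assumptions, not taken from this file):
float GetIndirectAmbientOcclusion(uint2 positionSS)
{
    float inverseAO = LOAD_TEXTURE2D(_AmbientOcclusionTexture, positionSS).x; // 0 when neutral (black)
    return 1.0 - inverseAO;                                                   // 1 = fully unoccluded
}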
// 3) When this is solved (i.e. previousPositionCS is moved to a free attribute semantic), Unity only supports one pSecondaryFormat. This means that if we have a vertex color instance stream and motion vectors, the motion vectors will overwrite the vertex color stream. See MeshRenderingData.cpp
// All this could be fixed with a new Mesh API, which is not ready yet. Note that this feature only affects animated meshes (vertex or skinned), as the others use depth reprojection.
VelocityInGBuffer = 0,      // Change to 1 to enable the feature, then regenerate hlsl headers.
// TODO: not working yet, waiting for UINT16 RT format support
PackGBufferInU16 = 0,
CameraRelativeRendering = 1 // Rendering sets the origin of the world to the position of the primary (scene view) camera
};
// but it is usually small, so we are fine with it (until someone at Microsoft fixes the debugger).