// Return camera relative probe volume world to object transformation
float4x4 GetProbeVolumeWorldToObject()
{
    return ApplyCameraTranslationToInverseMatrix(unity_ProbeVolumeWorldToObject);
}

// In Unity we can have a mix of a fully baked lightmap (static lightmap) + an Enlighten realtime lightmap (dynamic lightmap),
// and in each case we can have a directional lightmap or not.
// Otherwise we have light probes for dynamic/moving entities: either an SH9 per-object light probe or an SH4 per-pixel per-object probe volume.
float3 SampleBakedGI(float3 positionRWS, float3 normalWS, float2 uvStaticLightmap, float2 uvDynamicLightmap)
{
    // If there is no lightmap, assume light probes
#if !defined(LIGHTMAP_ON) && !defined(DYNAMICLIGHTMAP_ON)
    // TODO: Confirm with Ionut, but it seems that UNITY_LIGHT_PROBE_PROXY_VOLUME is always defined for high end and
    // unity_ProbeVolumeParams is always bound.
    if (unity_ProbeVolumeParams.x == 0.0)
    {
        // TODO: pass an array of coefficients instead!
        real4 SHCoefficients[7];
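        // Note on the packing (Unity's SH convention): unity_SHAr/SHAg/SHAb carry the linear (L1) coefficients in .xyz
        // and the constant (L0) term in .w for each color channel; unity_SHBr/SHBg/SHBb and unity_SHC carry the quadratic (L2) terms.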
        SHCoefficients[0] = unity_SHAr;
        SHCoefficients[1] = unity_SHAg;
        SHCoefficients[2] = unity_SHAb;
        SHCoefficients[3] = unity_SHBr;
        SHCoefficients[4] = unity_SHBg;
        SHCoefficients[5] = unity_SHBb;
        SHCoefficients[6] = unity_SHC;

        return SampleSH9(SHCoefficients, normalWS);
    }
    else
    {
        return SampleProbeVolumeSH4(TEXTURE3D_PARAM(unity_ProbeVolumeSH, samplerunity_ProbeVolumeSH), positionRWS, normalWS, GetProbeVolumeWorldToObject(),
                                    unity_ProbeVolumeParams.y, unity_ProbeVolumeParams.z, unity_ProbeVolumeMin, unity_ProbeVolumeSizeInv);
    }
#else

    float3 bakeDiffuseLighting = float3(0.0, 0.0, 0.0);

#ifdef UNITY_LIGHTMAP_FULL_HDR
    bool useRGBMLightmap = false;
    float4 decodeInstructions = float4(0.0, 0.0, 0.0, 0.0); // Never used but needed for the interface since it supports gamma lightmaps
#else
    bool useRGBMLightmap = true;
    #if defined(UNITY_LIGHTMAP_RGBM_ENCODING)
    float4 decodeInstructions = float4(34.493242, 2.2, 0.0, 0.0); // range^2.2 = 5^2.2, gamma = 2.2
    #else
    float4 decodeInstructions = float4(2.0, 2.2, 0.0, 0.0); // range = 2.0^2.2 = 4.59
    #endif
#endif

#ifdef LIGHTMAP_ON
    #ifdef DIRLIGHTMAP_COMBINED
    bakeDiffuseLighting += SampleDirectionalLightmap(TEXTURE2D_PARAM(unity_Lightmap, samplerunity_Lightmap),
                                                     TEXTURE2D_PARAM(unity_LightmapInd, samplerunity_Lightmap),
                                                     uvStaticLightmap, unity_LightmapST, normalWS, useRGBMLightmap, decodeInstructions);
    #else
    bakeDiffuseLighting += SampleSingleLightmap(TEXTURE2D_PARAM(unity_Lightmap, samplerunity_Lightmap), uvStaticLightmap, unity_LightmapST, useRGBMLightmap, decodeInstructions);
    #endif
#endif

#ifdef DYNAMICLIGHTMAP_ON
    #ifdef DIRLIGHTMAP_COMBINED
    bakeDiffuseLighting += SampleDirectionalLightmap(TEXTURE2D_PARAM(unity_DynamicLightmap, samplerunity_DynamicLightmap),
                                                     TEXTURE2D_PARAM(unity_DynamicDirectionality, samplerunity_DynamicLightmap),
                                                     uvDynamicLightmap, unity_DynamicLightmapST, normalWS, false, decodeInstructions);
    #else
    bakeDiffuseLighting += SampleSingleLightmap(TEXTURE2D_PARAM(unity_DynamicLightmap, samplerunity_DynamicLightmap), uvDynamicLightmap, unity_DynamicLightmapST, false, decodeInstructions);
    #endif
#endif

    return bakeDiffuseLighting;

#endif
}

float4 SampleShadowMask(float3 positionRWS, float2 uvStaticLightmap) // normalWS not used for now
{
#if defined(LIGHTMAP_ON)
    float2 uv = uvStaticLightmap * unity_LightmapST.xy + unity_LightmapST.zw;
    float4 rawOcclusionMask = SAMPLE_TEXTURE2D(unity_ShadowMask, samplerunity_Lightmap, uv); // Reuse the sampler from the lightmap
#else
    float4 rawOcclusionMask;
    if (unity_ProbeVolumeParams.x == 1.0)
    {
        rawOcclusionMask = SampleProbeOcclusion(TEXTURE3D_PARAM(unity_ProbeVolumeSH, samplerunity_ProbeVolumeSH), positionRWS, GetProbeVolumeWorldToObject(),
                                                unity_ProbeVolumeParams.y, unity_ProbeVolumeParams.z, unity_ProbeVolumeMin, unity_ProbeVolumeSizeInv);
    }
    else
    {
        // Note: the default value when the feature is not enabled is float4(1.0, 1.0, 1.0, 1.0) in C++
        rawOcclusionMask = unity_ProbesOcclusion;
    }
#endif

    return rawOcclusionMask;
}

// Calculate velocity in clip space [-1..1]
float2 CalculateVelocity(float4 positionCS, float4 previousPositionCS)
{
    // This test on the define is required to remove a divide-by-zero warning when initializing an empty struct
    // TODO: Add forward opaque MRT case...
#if (SHADERPASS == SHADERPASS_VELOCITY)
    // Encode velocity
    positionCS.xy = positionCS.xy / positionCS.w;
    previousPositionCS.xy = previousPositionCS.xy / previousPositionCS.w;

    float2 velocity = (positionCS.xy - previousPositionCS.xy);
#if UNITY_UV_STARTS_AT_TOP
    velocity.y = -velocity.y;
#endif
    return velocity;
#else
    return float2(0.0, 0.0);
#endif
}
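
// Usage sketch (the interpolator names below are hypothetical, for illustration only): in a velocity pass,
// the vertex stage is expected to output both the current and previous frame clip-space positions, and the
// pixel shader then computes:
//   float2 velocity = CalculateVelocity(input.positionCS, input.previousPositionCS);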

// For builtinData we want to allow the user to overwrite the default GI in the surface shader / shader graph.
// So we perform the following order of operations:
// 1. InitBuiltinData - init bakeDiffuseLighting and backBakeDiffuseLighting
// 2. The user can overwrite these values in the surface shader / shader graph
// 3. PostInitBuiltinData - handle debug mode + allow the current lighting model to update the data with ModifyBakedDiffuseLighting
// This method initializes the usual BuiltinData values; after the caller has updated builtinData, it must be followed by PostInitBuiltinData
// (a usage sketch follows PostInitBuiltinData below).
void InitBuiltinData( float alpha, float3 normalWS, float3 backNormalWS, float3 positionRWS, float2 texCoord1, float2 texCoord2,
                      out BuiltinData builtinData)
{
    ZERO_INITIALIZE(BuiltinData, builtinData);

    builtinData.opacity = alpha;

    // Sample lightmap / light probe / proxy volume
    builtinData.bakeDiffuseLighting = SampleBakedGI(positionRWS, normalWS, texCoord1, texCoord2);
    // We also sample the back lighting in case we have transmission. If not used, this will be optimized out by the compiler.
    // For now, simply call the function again with the inverted normal; the compiler should be able to optimize the lightmap case
    // so the directional lightmap is not resampled. However, it may not optimize the light probe case, as the proxy volume relies
    // on a dynamic if (to verify) - not a problem for SH9, but a problem for the proxy volume.
    // TODO: optimize this code further.
    builtinData.backBakeDiffuseLighting = SampleBakedGI(positionRWS, backNormalWS, texCoord1, texCoord2);

#ifdef SHADOWS_SHADOWMASK
    float4 shadowMask = SampleShadowMask(positionRWS, texCoord1);
    builtinData.shadowMask0 = shadowMask.x;
    builtinData.shadowMask1 = shadowMask.y;
    builtinData.shadowMask2 = shadowMask.z;
    builtinData.shadowMask3 = shadowMask.w;
#endif

    // Use the uniform directly - the float needs to be cast to uint (as Unity doesn't support setting a uint as a uniform)
    builtinData.renderingLayers = _EnableLightLayers ? asuint(unity_RenderingLayer.x) : DEFAULT_LIGHT_LAYERS;
}

// InitBuiltinData must be called before calling PostInitBuiltinData
void PostInitBuiltinData( float3 V, inout PositionInputs posInput, SurfaceData surfaceData,
                          inout BuiltinData builtinData)
{
#ifdef DEBUG_DISPLAY
    if (_DebugLightingMode == DEBUGLIGHTINGMODE_LUX_METER)
    {
        // The lighting in the SH or the lightmap is assumed to contain bounced light only (i.e. no direct lighting),
        // and is divided by PI (i.e. Lambert is applied), so multiply by PI here to get back the illuminance
        builtinData.bakeDiffuseLighting *= PI; // don't take backBakeDiffuseLighting into account
    }
    else
#endif
    {
#ifdef MODIFY_BAKED_DIFFUSE_LIGHTING
        ModifyBakedDiffuseLighting(V, posInput, surfaceData, builtinData);
#endif
    }
}
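
// Usage sketch of the order of operations described above (the surrounding names are hypothetical, for illustration only);
// this would typically live in a material's GetSurfaceAndBuiltinData():
//   InitBuiltinData(alpha, normalWS, backNormalWS, posInput.positionWS, input.texCoord1, input.texCoord2, builtinData);
//   // ... the surface shader / shader graph may overwrite builtinData.bakeDiffuseLighting here ...
//   PostInitBuiltinData(V, posInput, surfaceData, builtinData);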

// Flipping or mirroring a normal can be done directly in tangent space. This has the benefit of applying to the whole process, whether we use surface gradients or not.
// This function modifies FragInputs, and the change is not propagated outside of GetSurfaceAndBuiltinData(). This is ok as the tangent space is not used outside of GetSurfaceAndBuiltinData().
void ApplyDoubleSidedFlipOrMirror(inout FragInputs input)
{
#ifdef _DOUBLESIDED_ON
    // _DoubleSidedConstants is float3(-1, -1, -1) in flip mode and float3(1, 1, -1) in mirror mode
    // To get a flipped normal with the tangent space, we must flip the bitangent (because it is constructed from the normal) and the normal
    // To get a mirrored normal with the tangent space, we only need to flip the normal and not the tangent
    // (a worked example follows this function)
    float2 flipSign = input.isFrontFace ? float2(1.0, 1.0) : _DoubleSidedConstants.yz; // TOCHECK: GetOddNegativeScale() is not necessary here as it is applied during tangent space creation.
    input.worldToTangent[1] = flipSign.x * input.worldToTangent[1]; // bitangent
    input.worldToTangent[2] = flipSign.y * input.worldToTangent[2]; // normal

    #ifdef SURFACE_GRADIENT
    // TOCHECK: it seems that we don't need to invert anything in genBasisTB(); the signs cancel, which is expected as we deal with surface gradients.
    // TODO: For surface gradients we must invert or mirror the normal just after the interpolation. This will allow layered materials to work with all bases. Currently this is not the case.
    #endif
#endif
}
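
// Worked example, derived from the comments above: for a back face,
//   flip mode:   _DoubleSidedConstants = float3(-1, -1, -1) -> flipSign = (-1, -1), so both the bitangent and the normal are flipped
//   mirror mode: _DoubleSidedConstants = float3( 1,  1, -1) -> flipSign = ( 1, -1), so only the normal is flipped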

// This function converts the tangent-space normal/tangent to world space and orthonormalizes it + applies a correction to the normal if it is not pointing towards the near plane
void GetNormalWS(FragInputs input, float3 normalTS, out float3 normalWS)
{
#ifdef SURFACE_GRADIENT
    normalWS = SurfaceGradientResolveNormal(input.worldToTangent[2], normalTS);
#else
    // We need to normalize as we use the mikkt tangent space, and this is expected (the tangent space is not normalized)
    normalWS = normalize(TransformTangentToWorld(normalTS, input.worldToTangent));
#endif
}
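
// Usage sketch (the texture/sampler names are hypothetical, for illustration only): in the non-surface-gradient path,
// normalTS would typically be a decoded tangent-space normal from a normal map, e.g.:
//   float3 normalTS = UnpackNormalmapRGorAG(SAMPLE_TEXTURE2D(_NormalMap, sampler_NormalMap, uv));
//   float3 normalWS;
//   GetNormalWS(input, normalTS, normalWS);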