#endif
#endif
// In case we pack data into a uint16 buffer, we need to change the output render target format to uint16
// TODO: Is there a way to automate these output types based on the format declared in Lit.cs?
#if SHADEROPTIONS_PACK_GBUFFER_IN_U16
#define GBufferType0 uint4
#define GBufferType1 uint4
// TODO: How can we abstract this? We would like to avoid this PS4-specific test here
#ifdef SHADER_API_PSSL
// On PS4 we need to manually specify the format of the output render target; the output type alone is not enough
#pragma PSSL_target_output_format(target 0 FMT_UINT16_ABGR)
#pragma PSSL_target_output_format(target 1 FMT_UINT16_ABGR)
#endif
#else
#define GBufferType0 float4
#define GBufferType1 float4
#endif
// GBuffer texture declaration
TEXTURE2D(_GBufferTexture0);
TEXTURE2D(_GBufferTexture1);
#endif
// Use Lambert diffuse instead of Disney diffuse
// #define LIT_DIFFUSE_LAMBERT_BRDF
// Use the optimization of precomputing LambdaV
// TODO: Test if this is a win
// #define LIT_USE_BSDF_PRE_LAMBDAV
#define LIT_USE_GGX_ENERGY_COMPENSATION
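// Note (assumed behaviour): 'energy compensation' refers to the standard multiple-scattering correction
// for GGX (in the spirit of Kulla & Conty 2017); the single-scattering specular lobe is scaled by a factor
// derived from the directional albedo so that rough specular surfaces do not lose energy. See the
// 'energyCompensation' field of PreLightData further below.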
// Samplers used by area lights, the Gaussian pyramid, ambient occlusion, etc.
float4 _ThicknessRemaps[SSS_N_PROFILES]; // R: start, G = end - start, BA unused
float4 _ShapeParams[SSS_N_PROFILES]; // RGB = S = 1 / D, A = filter radius
float4 _TransmissionTints[SSS_N_PROFILES]; // RGB = 1/4 * color, A = unused
float4 _WorldScales[SSS_N_PROFILES]; // X = meters per world unit; Y = world units per meter
CBUFFER_END
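// Illustrative helper (hypothetical name, not part of the pipeline API): how the per-profile data above is
// typically consumed. _ThicknessRemaps stores the start of the thickness range in X and (end - start) in Y,
// so a normalized [0, 1] thickness is expanded back to millimeters with a single MAD
// (see FillTransmissionData below for the real use).
float RemapThicknessExample(int subsurfaceProfile, float normalizedThickness)
{
return _ThicknessRemaps[subsurfaceProfile].x + _ThicknessRemaps[subsurfaceProfile].y * normalizedThickness;
}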
//-----------------------------------------------------------------------------
// However, as we use material classification, it is hard to be fully separated:
// the dependency is defined in this include, which contains the shared defines for material and lighting in the deferred material case.
// If a user builds a lighting architecture without material classification, this include can be removed.
#include "../../Lighting/TilePass/TilePass .cs.hlsl"
#include "../../Lighting/LightLoop/LightLoop .cs.hlsl"
static uint g_FeatureFlags = UINT_MAX;
bsdfData.fresnel0 = lerp(val.xxx, baseColor, metallic);
}
void FillMaterialIdAnisoData(float roughness, float3 normalWS, float3 tangentWS, float anisotropy, inout BSDFData bsdfData)
{
bsdfData.tangentWS = tangentWS;
bsdfData.bitangentWS = cross(normalWS, tangentWS);
ConvertAnisotropyToRoughness(roughness, anisotropy, bsdfData.roughnessT, bsdfData.roughnessB);
bsdfData.anisotropy = anisotropy;
}

void FillMaterialIdSSSData(float3 baseColor, inout BSDFData bsdfData)
{
bsdfData.diffuseColor = baseColor;
bsdfData.fresnel0 = SKIN_SPECULAR_VALUE; // TODO: take this from the subsurface profile instead
}
void FillTransmissionData(int subsurfaceProfile, float subsurfaceRadius, float thickness, inout BSDFData bsdfData)
{
bsdfData.subsurfaceProfile = subsurfaceProfile;
bsdfData.subsurfaceRadius = subsurfaceRadius;
bsdfData.thickness = _ThicknessRemaps[subsurfaceProfile].x + _ThicknessRemaps[subsurfaceProfile].y * thickness;

// 'transmissionMode' comes from the per-profile transmission flags (set up elsewhere).
bsdfData.enableTransmission = _EnableSSSAndTransmission != 0 && transmissionMode != SSS_TRSM_MODE_NONE;

if (bsdfData.enableTransmission)
{
bsdfData.useThinObjectMode = transmissionMode == SSS_TRSM_MODE_THIN;
bsdfData.useThickObjectMode = transmissionMode != SSS_TRSM_MODE_THIN;

if (_UseDisneySSS != 0)
{
BSDFData bsdfData;
ZERO_INITIALIZE(BSDFData, bsdfData);
bsdfData.specularOcclusion = surfaceData.specularOcclusion;
bsdfData.normalWS = surfaceData.normalWS;
bsdfData.materialId = surfaceData.materialId;
bsdfData.anisotropy = surfaceData.anisotropy;
bsdfData.roughness = PerceptualRoughnessToRoughness(bsdfData.perceptualRoughness);
ConvertAnisotropyToRoughness(bsdfData.perceptualRoughness, bsdfData.anisotropy, bsdfData.roughnessT, bsdfData.roughnessB);
if (bsdfData.materialId == MATERIALID_LIT_SSS)
{
FillMaterialIdSSSData(surfaceData.baseColor, bsdfData);
FillTransmissionData(surfaceData.subsurfaceProfile, surfaceData.subsurfaceRadius, surfaceData.thickness, bsdfData);
}
else if (bsdfData.materialId == MATERIALID_LIT_ANISO)
{
FillMaterialIdAnisoData(bsdfData.roughness, surfaceData.normalWS, surfaceData.tangentWS, surfaceData.anisotropy, bsdfData);
}
else if (bsdfData.materialId == MATERIALID_LIT_CLEAR_COAT)
{
void EncodeIntoGBuffer( SurfaceData surfaceData,
float3 bakeDiffuseLighting,
uint2 positionSS,
#if SHADEROPTIONS_PACK_GBUFFER_IN_U16
out GBufferType0 outGBufferU0,
out GBufferType1 outGBufferU1
#else
out float4 outGBuffer0,
out float4 outGBuffer1,
out float4 outGBuffer2,
out float4 outGBuffer3
#endif
)
{
#if SHADEROPTIONS_PACK_GBUFFER_IN_U16
float4 outGBuffer0, outGBuffer1, outGBuffer2, outGBuffer3;
#endif
ApplyDebugToSurfaceData(surfaceData);
// RT0 - 8:8:8:8 sRGB
// Lighting
outGBuffer3 = float4(bakeDiffuseLighting, 0.0);
#if SHADEROPTIONS_PACK_GBUFFER_IN_U16
// Now pack everything into the two uint buffers
// We don't have hardware sRGB to store the base color when we pack into u16, so rather than perform a full sRGB encoding we just use a cheap gamma 2.0
// TODO: test alternative like FastLinearToSRGB to better match unpacked gbuffer
outGBuffer0.xyz = LinearToGamma20(outGBuffer0.xyz);
uint packedGBuffer1 = PackR10G10B10A2(outGBuffer1);
outGBufferU0 = uint4( PackFloatToUInt(outGBuffer0.x, 8, 0) | PackFloatToUInt(outGBuffer0.y, 8, 8),
PackFloatToUInt(outGBuffer0.z, 8, 0) | PackFloatToUInt(outGBuffer0.w, 8, 8),
(packedGBuffer1 & 0x0000FFFF),
(packedGBuffer1 & 0xFFFF0000) >> 16);
uint packedGBuffer3 = PackToR11G11B10f(outGBuffer3.xyz);
outGBufferU1 = uint4( PackFloatToUInt(outGBuffer2.x, 8, 0) | PackFloatToUInt(outGBuffer2.y, 8, 8),
PackFloatToUInt(outGBuffer2.z, 8, 0) | PackFloatToUInt(outGBuffer2.w, 8, 8),
(packedGBuffer3 & 0x0000FFFF),
(packedGBuffer3 & 0xFFFF0000) >> 16);
#endif
}
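// For reference, the packed layout produced by the block above (derived directly from the code):
// outGBufferU0.x : GBuffer0.r | GBuffer0.g << 8 (two 8-bit quantized values per 16-bit channel)
// outGBufferU0.y : GBuffer0.b | GBuffer0.a << 8
// outGBufferU0.z : low 16 bits of PackR10G10B10A2(GBuffer1)
// outGBufferU0.w : high 16 bits of PackR10G10B10A2(GBuffer1)
// outGBufferU1.x : GBuffer2.r | GBuffer2.g << 8
// outGBufferU1.y : GBuffer2.b | GBuffer2.a << 8
// outGBufferU1.z : low 16 bits of PackToR11G11B10f(bakeDiffuseLighting)
// outGBufferU1.w : high 16 bits of PackToR11G11B10f(bakeDiffuseLighting)
// The base color in GBuffer0.rgb is stored with a cheap gamma 2.0 transfer (LinearToGamma20) instead of
// sRGB and is linearized again in DecodeGBuffer0() below.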
float4 DecodeGBuffer0(GBufferType0 encodedGBuffer0)
{
float4 decodedGBuffer0;
#if SHADEROPTIONS_PACK_GBUFFER_IN_U16
decodedGBuffer0.x = UnpackUIntToFloat(encodedGBuffer0.x, 8, 0);
decodedGBuffer0.y = UnpackUIntToFloat(encodedGBuffer0.x, 8, 8);
decodedGBuffer0.z = UnpackUIntToFloat(encodedGBuffer0.y, 8, 0);
decodedGBuffer0.w = UnpackUIntToFloat(encodedGBuffer0.y, 8, 8);
decodedGBuffer0.xyz = Gamma20ToLinear(decodedGBuffer0.xyz);
#else
decodedGBuffer0 = encodedGBuffer0;
#endif
return decodedGBuffer0;
}
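// Sketch of the matching unpack for a single channel (hypothetical helper, for illustration only):
// two 8-bit values are recovered from one 16-bit channel using the same (8, 0) / (8, 8) argument
// pairs as DecodeGBuffer0() above.
float2 UnpackTwo8BitExample(uint packedChannel)
{
return float2(UnpackUIntToFloat(packedChannel, 8, 0), UnpackUIntToFloat(packedChannel, 8, 8));
}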
void DecodeFromGBuffer(
uint2 positionSS,
out BSDFData bsdfData,
out float3 bakeDiffuseLighting)
{
#if SHADEROPTIONS_PACK_GBUFFER_IN_U16
GBufferType0 inGBufferU0 = LOAD_TEXTURE2D(_GBufferTexture0, positionSS);
GBufferType1 inGBufferU1 = LOAD_TEXTURE2D(_GBufferTexture1, positionSS);
#else
#endif
#if SHADEROPTIONS_PACK_GBUFFER_IN_U16
float4 inGBuffer0, inGBuffer1, inGBuffer2, inGBuffer3;
inGBuffer0 = DecodeGBuffer0(inGBufferU0);
uint packedGBuffer1 = inGBufferU0.z | inGBufferU0.w << 16;
inGBuffer1 = UnpackR10G10B10A2(packedGBuffer1);
inGBuffer2.x = UnpackUIntToFloat(inGBufferU1.x, 8, 0);
inGBuffer2.y = UnpackUIntToFloat(inGBufferU1.x, 8, 8);
inGBuffer2.z = UnpackUIntToFloat(inGBufferU1.y, 8, 0);
inGBuffer2.w = UnpackUIntToFloat(inGBufferU1.y, 8, 8);
uint packedGBuffer3 = inGBufferU1.z | inGBufferU1.w << 16;
inGBuffer3.xyz = UnpackFromR11G11B10f(packedGBuffer3);
inGBuffer3.w = 0.0;
#endif
float3 baseColor = inGBuffer0.rgb;
bsdfData.specularOcclusion = inGBuffer0.a;
bsdfData.normalWS = UnpackNormalOctEncode(float2(inGBuffer1.r, inGBuffer1.g));
bsdfData.roughness = PerceptualRoughnessToRoughness(bsdfData.perceptualRoughness);
// The material features system for material classification must allow compile-time optimization (i.e. everything should be static)
// Note that as we store the materialId for Aniso based on the content of RT2, we need to add a few extra conditions.
// The code is also called from MaterialFeatureFlagsFromGBuffer, so it must also work fully dynamically when featureFlags is UINT_MAX.
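// With g_FeatureFlags left at UINT_MAX (see its declaration above), HasMaterialFeatureFlag() is expected
// to pass for every feature, so every branch below is taken and the decode stays fully dynamic; a real
// tile classification pass narrows the flags so the compiler can strip unused branches.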
// If the tile has anisotropy, all the pixels within the tile are evaluated as anisotropic.
if (HasMaterialFeatureFlag(MATERIALFEATUREFLAGS_LIT_ANISO))
{
float anisotropy = 0;
float3 tangentWS = GetLocalFrame(bsdfData.normalWS)[0];

if (bsdfData.materialId == MATERIALID_LIT_ANISO)
{
anisotropy = inGBuffer2.b * 2 - 1;
tangentWS = UnpackNormalOctEncode(inGBuffer2.rg);
}

// Sets tangentWS/bitangentWS and anisotropy, and converts anisotropy to roughnessT/roughnessB.
FillMaterialIdAnisoData(bsdfData.roughness, bsdfData.normalWS, tangentWS, anisotropy, bsdfData);
}
if (HasMaterialFeatureFlag(MATERIALFEATUREFLAGS_LIT_SSS))
{
float subsurfaceRadius = 0;
float thickness = 0;
int subsurfaceProfile = SSS_NEUTRAL_PROFILE_ID;

if (bsdfData.materialId == MATERIALID_LIT_SSS)
{
subsurfaceRadius = inGBuffer2.x;
thickness = inGBuffer2.y;
subsurfaceProfile = UnpackByte(inGBuffer2.w);
}

FillMaterialIdSSSData(baseColor, bsdfData);
FillTransmissionData(subsurfaceProfile, subsurfaceRadius, thickness, bsdfData);
}
if (bsdfData.materialId == MATERIALID_LIT_STANDARD && HasMaterialFeatureFlag(MATERIALFEATUREFLAGS_LIT_STANDARD))
{
float metallic;
int materialIdExtent;
UnpackFloatInt8bit(inGBuffer2.a, 4.0, metallic, materialIdExtent);

[flatten] if (materialIdExtent == GBUFFER_LIT_STANDARD_SPECULAR_COLOR_ID)
{
// Note: Specular is not a material id but just a way to parameterize the standard materialId, thus we reset materialId to MATERIALID_LIT_STANDARD.
// For material classification it will be considered as Standard as well, so there is no need to create a special case.
FillMaterialIdStandardData(baseColor, metallic, bsdfData);
}
}
else if (bsdfData.materialId == MATERIALID_LIT_SSS && HasMaterialFeatureFlag(MATERIALFEATUREFLAGS_LIT_SSS))
{
float subsurfaceRadius = inGBuffer2.x;
float thickness = inGBuffer2.y;
int subsurfaceProfile = UnpackByte(inGBuffer2.w);

FillMaterialIdSSSData(baseColor, bsdfData);
FillTransmissionData(subsurfaceProfile, subsurfaceRadius, thickness, bsdfData);
}
else if (bsdfData.materialId == MATERIALID_LIT_CLEAR_COAT && HasMaterialFeatureFlag(MATERIALFEATUREFLAGS_LIT_CLEAR_COAT))
{
// We have swapped the encoding of the normal to get more precision for the coat normal, as it is smoother
// GGX
float partLambdaV;
float energyCompensation;
float TdotV;
float BdotV;
float3 coatV;
float3 refractV; // The view vector refracted through clear coat interface
// IBL
NdotV = saturate(dot(N, V));
preLightData.NdotV = NdotV;
float3 iblN, iblR;

preLightData.TdotV = dot(bsdfData.tangentWS, V);
preLightData.BdotV = dot(bsdfData.bitangentWS, V);
preLightData.partLambdaV = GetSmithJointGGXAnisoPartLambdaV(preLightData.TdotV, preLightData.BdotV, NdotV, bsdfData.roughnessT, bsdfData.roughnessB);
// For GGX aniso and IBL we use an empirical (eyeballed) approximation compared to the reference:
// we use a single fetch and stretch the normal based on various criteria.
// NOTE: If we followed the theory we should use the modified normal for every calculation involving a normal (like NdotV) and pass 'iblN'
// to functions like GetSpecularDominantDir(). However the modified normal is just a hack; the goal is only to stretch a cubemap, no accuracy is required here.
// With this in mind, and for performance reasons, we chose to use the modified normal only to calculate R.
iblN = GetAnisotropicModifiedNormal(grainDirWS, N, V, stretch);
preLightData.TdotV = 0;
preLightData.BdotV = 0;
preLightData.partLambdaV = GetSmithJointGGXPartLambdaV(NdotV, bsdfData.roughnessT);
iblN = N;
iblR = reflect(-V, iblN);
float reflectivity;
// IBL
else
{
// Note: this is an ad-hoc tweak. TODO: we need a better hack.
float iblRoughness, iblPerceptualRoughness;
if (bsdfData.materialId == MATERIALID_LIT_ANISO && HasMaterialFeatureFlag(MATERIALFEATUREFLAGS_LIT_ANISO))
{
// Use the min roughness, and bias it for higher values of anisotropy and roughness.
float roughnessBias = 0.075 * bsdfData.anisotropy * bsdfData.roughness;
iblRoughness = saturate(min(bsdfData.roughnessT, bsdfData.roughnessB) + roughnessBias);
iblPerceptualRoughness = RoughnessToPerceptualRoughness(iblRoughness);
}
else
{
iblRoughness = bsdfData.roughness;
iblPerceptualRoughness = bsdfData.perceptualRoughness;
}
preLightData.iblDirWS = GetSpecularDominantDir(N, iblR, iblRoughness, NdotV);
preLightData.iblMipLevel = PerceptualRoughnessToMipmapLevel(iblPerceptualRoughness);
}
#ifdef LIT_USE_GGX_ENERGY_COMPENSATION
// But rough metals (black diffuse) still scatter quite a lot of light around, so
// we want to take some of that into account too.
float roughness = PerceptualRoughnessToRoughness(bsdfData.perceptualRoughness);
lightTransportData.diffuseColor = bsdfData.diffuseColor + bsdfData.fresnel0 * roughness * 0.5 * surfaceData.metallic;
lightTransportData.emissiveColor = builtinData.emissiveColor;
return lightTransportData;
float BdotH = dot(bsdfData.bitangentWS, H);
float BdotL = dot(bsdfData.bitangentWS, L);
bsdfData.roughnessT = ClampRoughnessForAnalyticalLights(bsdfData.roughnessT);
bsdfData.roughnessB = ClampRoughnessForAnalyticalLights(bsdfData.roughnessB);
DV = DV_SmithJointGGXAniso(TdotH, BdotH, NdotH, NdotV, TdotL, BdotL, NdotL,
bsdfData.roughnessT, bsdfData.roughnessB, preLightData.partLambdaV);
bsdfData.roughnessT = ClampRoughnessForAnalyticalLights(bsdfData.roughnessT);

DV = DV_SmithJointGGX(NdotH, NdotL, NdotV, bsdfData.roughnessT, preLightData.partLambdaV);
}
specularLighting += F * DV;
float roughness = PerceptualRoughnessToRoughness(bsdfData.perceptualRoughness);
float3 diffuseTerm = DiffuseGGX(bsdfData.diffuseColor, NdotV, NdotL, NdotH, LdotV, roughness);
#else
// A note on subsurface scattering: [SSS-NOTE-TRSM]
// The correct way to handle SSS is to transmit light inside the surface, perform SSS,
// We don't multiply by 'bsdfData.diffuseColor' here. It's done only once in PostEvaluateBSDF().
diffuseLighting = diffuseTerm;
}
// In the "thin object" mode (for cards), we assume that the geometry is very thin.
// We apply wrapped lighting to compensate for that, and do not modify the shading position.
// Otherwise, in the "thick object" mode, we can have EITHER reflected (front) lighting
// OR transmitted (back) lighting, never both at the same time. For transmitted lighting,
// we need to push the shading position back to avoid self-shadowing problems.
float3 ComputeThicknessDisplacement(BSDFData bsdfData, float3 L, float NdotL)
{
// Compute the thickness in world units along the normal.
float thicknessInMeters = bsdfData.thickness * METERS_PER_MILLIMETER;
float thicknessInUnits = thicknessInMeters * _WorldScales[bsdfData.subsurfaceProfile].y;
// Compute the thickness in world units along the light vector.
float unprojectedThickness = thicknessInUnits / -NdotL;
return unprojectedThickness * L;
}
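// Worked example (illustrative numbers): a profile thickness of 3 (mm) gives thicknessInMeters = 0.003;
// with _WorldScales[...].y = 1 (one world unit per meter) that is 0.003 units along the normal, and with
// NdotL = -0.5 the displacement applied along L is 0.003 / 0.5 = 0.006 world units.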
// Transmitted lighting is computed as follows:
// - we assume that the object is a thick plane (slab);
// - we reverse the front-facing normal for the back of the object;
// - we assume that the incoming radiance is constant along the entire back surface
//   (i.e. the back side behaves like a uniformly illuminated infinite plane);
// - we apply BSDF-specific diffuse transmission to transmit the light subsurface and back;
// - we integrate the diffuse reflectance profile w.r.t. the radius (while also accounting
//   for the thickness) to compute the transmittance;
// - we multiply the transmitted radiance by the transmittance.
float3 EvaluateTransmission(BSDFData bsdfData, float NdotL, float NdotV, float attenuation)
{
// Apply wrapped lighting to better handle thin objects (cards) at grazing angles.
float wrappedNdotL = ComputeWrappedDiffuseLighting(-NdotL, SSS_WRAP_LIGHT);
float negatedNdotL = saturate(-NdotL);
float backNdotL = bsdfData.useThickObjectMode ? negatedNdotL : wrappedNdotL;

// Apply BSDF-specific diffuse transmission to attenuation. See also: [SSS-NOTE-TRSM]
// We don't multiply by 'bsdfData.diffuseColor' here. It's done only once in PostEvaluateBSDF().
#ifdef LIT_DIFFUSE_LAMBERT_BRDF
attenuation *= Lambert();
#else
attenuation *= INV_PI * F_Transm_Schlick(0, 0.5, NdotV) * F_Transm_Schlick(0, 0.5, backNdotL);
#endif

float intensity = attenuation * backNdotL;
return intensity * bsdfData.transmittance;
}
//-----------------------------------------------------------------------------
float3 positionLS = mul(lighToSample, transpose(lightToWorld));
float2 positionCS = positionLS.xy;
bool isInBounds;

float2 positionNDC = positionCS * 0.5 + 0.5;

if (lightData.tileCookie)
{
// Tile the texture if the 'repeat' wrap mode is enabled.
positionNDC = frac(positionNDC);
isInBounds = true;
}
else
{
isInBounds = Max3(abs(positionCS.x), abs(positionCS.y), 1.0 - positionLS.z) <= 1.0;
}
float4 cookie = SampleCookie2D(lightLoopContext, positionNDC, lightData.cookieIndex);
cookie.a = isInBounds ? cookie.a : 0;
void EvaluateLight_Directional(LightLoopContext lightLoopContext, PositionInputs posInput,
DirectionalLightData lightData, BakeLightingData bakeLightingData,
float3 N, float3 L,
out float3 color, out float attenuation)
{
float shadow = 1.0;
#ifdef SHADOWS_SHADOWMASK
// shadowMaskSelector.x is -1 if there is no shadow mask
#endif
}
attenuation *= shadow;
[branch] if (lightData.cookieIndex >= 0)
{
float3 lightToSample = positionWS - lightData.positionWS;
DirectLighting lighting;
ZERO_INITIALIZE(DirectLighting, lighting);
[flatten] if (bsdfData.useThickObjectMode && NdotL < 0)
{
posInput.positionWS += ComputeThicknessDisplacement(bsdfData, L, NdotL);
}

float3 color; float attenuation;
EvaluateLight_Directional(lightLoopContext, posInput, lightData, bakeLightingData, N, L,
color, attenuation);

float intensity = attenuation * saturate(NdotL);

BSDF(V, L, posInput.positionWS, preLightData, bsdfData, lighting.diffuse, lighting.specular);
lighting.diffuse *= intensity * lightData.diffuseScale;
lighting.specular *= intensity * lightData.specularScale;
{
// We don't multiply by 'bsdfData.diffuseColor' here. It's done only once in PostEvaluateBSDF().
lighting.diffuse += EvaluateTransmission(bsdfData, NdotL, preLightData.NdotV, attenuation * lightData.diffuseScale);
}
// Save ALU by applying light and cookie colors only once.
void EvaluateLight_Punctual(LightLoopContext lightLoopContext, PositionInputs posInput,
LightData lightData, BakeLightingData bakeLightingData,
float3 N, float3 L, float distSq,
out float3 color, out float attenuation)
{
float shadow = 1.0;
#ifdef SHADOWS_SHADOWMASK
// shadowMaskSelector.x is -1 if there is no shadow mask
#endif
}
attenuation *= shadow;
// Projector lights always have cookies, so we can perform clipping inside the if().
[branch] if (lightData.cookieIndex >= 0)
{
DirectLighting lighting;
ZERO_INITIALIZE(DirectLighting, lighting);
float3 lightToSample = posInput.positionWS - lightData.positionWS;
int lightType = lightData.lightType;
float3 unL = (lightType != GPULIGHTTYPE_PROJECTOR_BOX) ? -lightToSample : -lightData.forward;
float NdotL = dot(N, L);
[flatten] if (bsdfData.useThickObjectMode && NdotL < 0)
{
posInput.positionWS += ComputeThicknessDisplacement(bsdfData, L, NdotL);
}

float3 color; float attenuation;
EvaluateLight_Punctual(lightLoopContext, posInput, lightData, bakeLightingData, N, L, distSq,
color, attenuation);

float intensity = attenuation * saturate(NdotL);
// Simulate a sphere light with this hack.
bsdfData.roughnessT = max(bsdfData.roughnessT, lightData.minRoughness);
bsdfData.roughnessB = max(bsdfData.roughnessB, lightData.minRoughness);
BSDF(V, L, posInput.positionWS, preLightData, bsdfData, lighting.diffuse, lighting.specular);
lighting.diffuse *= intensity * lightData.diffuseScale;
lighting.specular *= intensity * lightData.specularScale;
{
// We don't multiply by 'bsdfData.diffuseColor' here. It's done only once in PostEvaluateBSDF().
lighting.diffuse += EvaluateTransmission(bsdfData, NdotL, preLightData.NdotV, attenuation * lightData.diffuseScale);
}
// Save ALU by applying light and cookie colors only once.
// Use the Lambertian approximation for performance reasons.
// The matrix multiplication should not generate any extra ALU on GCN.
// TODO: double evaluation is very inefficient! This is a temporary solution.
lighting.diffuse += bsdfData.transmittance * ltcValue;
}
// Evaluate the coat part
float3x3 ltcTransform = mul(flipMatrix, k_identity3x3);
// Polygon irradiance in the transformed configuration.
// TODO: double evaluation is very inefficient! This is a temporary solution.
lighting.diffuse += bsdfData.transmittance * ltcValue;
}
// Evaluate the coat part
float3 refractedBackPointWS = EstimateRaycast(V, posInput, preLightData.transmissionPositionWS, preLightData.transmissionRefractV);
// Compute the normalized device coordinates of the refracted point on the back plane
float2 refractedBackPointNDC = ComputeNormalizedDeviceCoordinates(refractedBackPointWS, UNITY_MATRIX_VP);
float refractedBackPointDepth = LinearEyeDepth(LOAD_TEXTURE2D_LOD(_PyramidDepthTexture, refractedBackPointNDC * depthSize, 0).r, _ZBufferParams);
|| any(refractedBackPointNDC < 0.0)
|| any(refractedBackPointNDC > 1.0))
{
// Do nothing and don't update the hierarchy weight so we can fall back on refraction probe
return lighting;
lighting.specularTransmitted = SAMPLE_TEXTURE2D_LOD(_GaussianPyramidColorTexture, s_trilinear_clamp_sampler, refractedBackPointNDC, preLightData.transmissionSSMipLevel).rgb;
// Beer-Lambert law for absorption
lighting.specularTransmitted *= preLightData.transmissionTransmittance;
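// (Beer-Lambert: transmittance falls off exponentially with optical depth, T = exp(-extinction * distance);
// 'transmissionTransmittance' is assumed to be precomputed from the medium's absorption and the traversed thickness.)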
// TODO: factor this code into a common place so that other material authoring doesn't require rewriting everything
// TODO: test the stretch from Tomasz
// float roughness = PerceptualRoughnessToRoughness(bsdfData.perceptualRoughness);
// float shrunkRoughness = AnisotropicStrechAtGrazingAngle(roughness, roughness, NdotV);
// Guideline for reflection volumes: in HDRenderPipeline we separate the projection volume (the proxy of the scene) from the influence volume (which pixels on the screen are affected).
// However we add the constraint that the projection and influence volumes have the same shape (i.e. if we have a sphere-shaped projection volume, we have a sphere-shaped influence volume).
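// (Assumed rationale: the influence volume decides which pixels the probe affects and how it blends,
// while the projection volume is the proxy the reflection direction is reprojected against; keeping
// both shapes identical simplifies authoring and the per-pixel math.)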
bakeDiffuseLighting *= lerp(_AmbientOcclusionParam.rgb, float3(1.0, 1.0, 1.0), indirectAmbientOcclusion);
#endif
float roughness = PerceptualRoughnessToRoughness(bsdfData.perceptualRoughness);
float specularOcclusion = GetSpecularOcclusionFromAmbientOcclusion(preLightData.NdotV, indirectAmbientOcclusion, roughness);
// Try to mimic multibounce with specular color. This is not the intent of the original formula, but it gives an acceptable result.
// Take the min of screenspace specular occlusion and visibility cone specular occlusion
#if GTAO_MULTIBOUNCE_APPROX
}
else if (_DebugLightingMode == DEBUGLIGHTINGMODE_INDIRECT_SPECULAR_OCCLUSION_FROM_SSAO)
{
diffuseLighting = specularOcclusion;
specularLighting = float3(0.0, 0.0, 0.0); // Disable specular lighting
}
#if GTAO_MULTIBOUNCE_APPROX