
merge

/Yibing-Project-2
Sebastien Lagarde, 7 years ago
Current commit
44175da7
2 changed files with 141 additions and 78 deletions
  1. ScriptableRenderPipeline/HDRenderPipeline/Material/LightEvaluationShare1.hlsl (100 changes)
  2. ScriptableRenderPipeline/HDRenderPipeline/Material/LightEvaluationShare2.hlsl (119 changes)

ScriptableRenderPipeline/HDRenderPipeline/Material/LightEvaluationShare1.hlsl (100 changes)


}
#ifdef WANT_SSS_CODE
#define SKIN_SPECULAR_VALUE 0.028
bsdfData.fresnel0 = 0.028; // TODO take from subsurfaceProfile instead
bsdfData.fresnel0 = SKIN_SPECULAR_VALUE; // TODO take from subsurfaceProfile instead
bsdfData.subsurfaceProfile = subsurfaceProfile;
bsdfData.subsurfaceRadius = subsurfaceRadius;
bsdfData.thickness = _ThicknessRemaps[subsurfaceProfile].x + _ThicknessRemaps[subsurfaceProfile].y * thickness;

bsdfData.enableTransmission = transmissionMode != SSS_TRSM_MODE_NONE && (_EnableSSSAndTransmission > 0);
bsdfData.useThinObjectMode = transmissionMode == SSS_TRSM_MODE_THIN;
bool performPostScatterTexturing = IsBitSet(_TexturingModeFlags, subsurfaceProfile);
#if defined(SHADERPASS) && (SHADERPASS == SHADERPASS_LIGHT_TRANSPORT) // In case of GI pass don't modify the diffuseColor
bool enableSssAndTransmission = false;
#elif defined(SHADERPASS) && (SHADERPASS == SHADERPASS_SUBSURFACE_SCATTERING)
bool enableSssAndTransmission = true;
#else
bool enableSssAndTransmission = _EnableSSSAndTransmission != 0;
#endif
if (enableSssAndTransmission) // If we globally disable SSS effect, don't modify diffuseColor
{
// We modify the albedo here as this code is used by all lighting (including light maps and GI).
if (performPostScatterTexturing)
{
#if !defined(SHADERPASS) || (SHADERPASS != SHADERPASS_SUBSURFACE_SCATTERING)
bsdfData.diffuseColor = float3(1.0, 1.0, 1.0);
#endif
}
else
{
bsdfData.diffuseColor = sqrt(bsdfData.diffuseColor);
}
}
bsdfData.useThinObjectMode = transmissionMode == SSS_TRSM_MODE_THIN;
bsdfData.transmittance = ComputeTransmittance(_ShapeParams[subsurfaceProfile].rgb,
_TransmissionTints[subsurfaceProfile].rgb,
bsdfData.thickness, bsdfData.subsurfaceRadius);
bsdfData.transmittance = ComputeTransmittanceDisney(_ShapeParams[subsurfaceProfile].rgb,
_TransmissionTints[subsurfaceProfile].rgb,
bsdfData.thickness, bsdfData.subsurfaceRadius);
}
else
{

_TransmissionTints[subsurfaceProfile].rgb,
bsdfData.thickness, bsdfData.subsurfaceRadius);
}
}
}
bsdfData.transmittance *= bsdfData.diffuseColor; // Premultiply
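For reference, the Disney transmittance requested above is based on Burley's normalized diffusion profile. A minimal sketch of what ComputeTransmittanceDisney() presumably evaluates is shown below; the helper name, parameter set, and the omission of any thickness/radius rescale are assumptions for illustration, not taken from this commit.

// Sketch only: transmittance through a slab of the given thickness using the
// Burley normalized diffusion profile, tinted per subsurface profile.
float3 SketchTransmittanceDisney(float3 S, float3 transmissionTint, float thickness)
{
    // T(d) = 0.25 * (exp(-S * d) + 3 * exp(-S * d / 3))
    float3 expOneThird = exp(((-1.0 / 3.0) * thickness) * S);
    return 0.25 * (expOneThird * expOneThird * expOneThird + 3.0 * expOneThird) * transmissionTint;
}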
// Returns the modified albedo (diffuse color) for materials with subsurface scattering.
// Ref: Advanced Techniques for Realistic Real-Time Skin Rendering.
float3 ApplyDiffuseTexturingMode(BSDFData bsdfData)
{
float3 albedo = bsdfData.diffuseColor;
if (bsdfData.materialId == MATERIALID_LIT_SSS)
{
#if defined(SHADERPASS) && (SHADERPASS == SHADERPASS_SUBSURFACE_SCATTERING)
// If the SSS pass is executed, we know we have SSS enabled.
bool enableSssAndTransmission = true;
#else
bool enableSssAndTransmission = _EnableSSSAndTransmission != 0;
#endif
if (enableSssAndTransmission)
{
bool performPostScatterTexturing = IsBitSet(_TexturingModeFlags, bsdfData.subsurfaceProfile);
if (performPostScatterTexturing)
{
// Post-scatter texturing mode: the albedo is only applied during the SSS pass.
#if !defined(SHADERPASS) || (SHADERPASS != SHADERPASS_SUBSURFACE_SCATTERING)
albedo = float3(1, 1, 1);
#endif
}
else
{
// Pre- and post-scatter texturing mode.
albedo = sqrt(albedo);
}
}
return albedo;
#endif
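// Context note (illustrative, not part of the commit): in pre- and post-scatter texturing
// the albedo is split across both sides of the subsurface blur, so the two halves
// multiply back to the full albedo:
//   lighting pass : diffuse * sqrt(albedo)
//   SSS pass      : blur(diffuse * sqrt(albedo)) * sqrt(albedo)  ~=  albedo * blur(diffuse)
// Post-scatter mode applies the full albedo only after the blur, which is why the
// non-SSS passes output an untinted (white) diffuse color above.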
// For image based lighting, a part of the BSDF is pre-integrated.

{
PreLightData preLightData;
preLightData.NdotV = dot(bsdfData.normalWS, V); // Store the unaltered (geometric) version
float NdotV = preLightData.NdotV;
float3 N;
float NdotV;
// In the case of IBL we want to shift the normal slightly when it is not facing the viewer, to reduce artifacts
float3 iblNormalWS = GetViewShiftedNormal(bsdfData.normalWS, V, NdotV, MIN_N_DOT_V); // Use non clamped NdotV
float3 iblR = reflect(-V, iblNormalWS);
N = bsdfData.normalWS;
NdotV = saturate(dot(N, V));
preLightData.NdotV = NdotV;
NdotV = max(NdotV, MIN_N_DOT_V); // Use the modified (clamped) version
float3 iblR;
preLightData.TdotV = dot(bsdfData.tangentWS, V);
preLightData.BdotV = dot(bsdfData.bitangentWS, V);
preLightData.partLambdaV = GetSmithJointGGXAnisoPartLambdaV(preLightData.TdotV, preLightData.BdotV, NdotV, bsdfData.roughnessT, bsdfData.roughnessB);
// For GGX aniso and IBL we have done an empirical (eyeballed) approximation compared to the reference.

float3 grainDirWS = (bsdfData.anisotropy >= 0) ? bsdfData.bitangentWS : bsdfData.tangentWS;
// Reduce stretching for (perceptualRoughness < 0.2).
float stretch = abs(bsdfData.anisotropy) * saturate(5 * bsdfData.perceptualRoughness);
// NOTE: If we follow the theory we should use the modified normal for the different calculations involving a normal (like NdotV) and use iblNormalWS
// NOTE: If we follow the theory we should use the modified normal for the different calculations involving a normal (like NdotV) and use 'anisoIblNormalWS'
float3 anisoIblNormalWS = GetAnisotropicModifiedNormal(grainDirWS, iblNormalWS, V, stretch);
float3 anisoIblNormalWS = GetAnisotropicModifiedNormal(grainDirWS, N, V, stretch);
iblR = reflect(-V, anisoIblNormalWS);
}
else // GGX iso

preLightData.partLambdaV = GetSmithJointGGXPartLambdaV(NdotV, bsdfData.roughness);
iblR = reflect(-V, iblNormalWS);
iblR = reflect(-V, N);
}
float reflectivity;

iblPerceptualRoughness = bsdfData.perceptualRoughness;
}
preLightData.iblDirWS = GetSpecularDominantDir(iblNormalWS, iblR, iblRoughness, NdotV);
preLightData.iblDirWS = GetSpecularDominantDir(N, iblR, iblRoughness, NdotV);
preLightData.iblMipLevel = PerceptualRoughnessToMipmapLevel(iblPerceptualRoughness);
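// Note (assumption, for context): PerceptualRoughnessToMipmapLevel() typically remaps the
// perceptual roughness non-linearly before scaling by the cubemap mip count, e.g.
//   r' = r * (1.7 - 0.7 * r);  mip = r' * mipCount;
// so low-roughness materials sample the sharper mips. The exact helper may differ.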
#ifdef LIT_USE_GGX_ENERGY_COMPENSATION

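For context, the anisotropic IBL normal used in the hunk above comes from bending the normal toward a view-dependent "grain" normal before reflecting the view vector. A minimal sketch under that assumption follows; the helper below is illustrative and may differ in details from GetAnisotropicModifiedNormal() in the shader library.

// Sketch only: bend N toward the normal of the plane spanned by the grain direction and V,
// by the given stretch amount.
float3 SketchAnisotropicModifiedNormal(float3 grainDirWS, float3 N, float3 V, float stretch)
{
    float3 B           = cross(grainDirWS, V);
    float3 grainNormal = cross(B, grainDirWS);
    return normalize(lerp(N, grainNormal, stretch));
}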
ScriptableRenderPipeline/HDRenderPipeline/Material/LightEvaluationShare2.hlsl (119 changes)


DirectLighting EvaluateBSDF_Directional( LightLoopContext lightLoopContext,
float3 V, PositionInputs posInput, PreLightData preLightData,
DirectionalLightData lightData, BSDFData bsdfData)
DirectionalLightData lightData, BSDFData bsdfData, BakeLightingData bakeLightingData)
{
DirectLighting lighting;
ZERO_INITIALIZE(DirectLighting, lighting);

float NdotL = dot(bsdfData.normalWS, L);
float illuminance = saturate(NdotL);
float shadow = 1.0;
float shadowMask = 1.0;
#ifdef SHADOWS_SHADOWMASK
// shadowMaskSelector.x is -1 if there is no shadow mask
// Note that we override the shadow value (in case we don't have any dynamic shadow)
shadow = shadowMask = (lightData.shadowMaskSelector.x >= 0.0) ? dot(bakeLightingData.bakeShadowMask, lightData.shadowMaskSelector) : 1.0;
#endif
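// Worked example (hypothetical values): if bakeShadowMask = (0.8, 0.3, 1.0, 1.0) and this
// light's shadowMaskSelector = (0, 1, 0, 0), the dot product selects channel 1 and yields
// shadow = shadowMask = 0.3. With shadowMaskSelector.x < 0 the light has no baked mask and
// both values stay at 1.0.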
[branch] if (lightData.shadowIndex >= 0)
{

shadow = LOAD_TEXTURE2D(_DeferredShadowTexture, posInput.unPositionSS).x;
#endif
illuminance *= shadow;
#ifdef SHADOWS_SHADOWMASK
float fade = saturate(posInput.depthVS * lightData.fadeDistanceScaleAndBias.x + lightData.fadeDistanceScaleAndBias.y);
// See comment in EvaluateBSDF_Punctual
shadow = lightData.dynamicShadowCasterOnly ? min(shadowMask, shadow) : shadow;
shadow = lerp(shadow, shadowMask, fade); // Caution with the lerp parameters: fade is the reverse of shadowDimmer
// Note: There is no shadowDimmer when there is no shadow mask
#endif
illuminance *= shadow;
[branch] if (lightData.cookieIndex >= 0)
{

#ifdef WANT_SSS_CODE
[branch] if (bsdfData.enableTransmission)
{
// We apply wrapped lighting instead of the regular Lambertian diffuse
// to compensate for approximations within EvaluateTransmission().
float illuminance = Lambert() * ComputeWrappedDiffuseLighting(-NdotL, SSS_WRAP_LIGHT);
// Apply wrapped lighting to better handle thin objects (cards) at grazing angles.
float wrappedNdotL = ComputeWrappedDiffuseLighting(-NdotL, SSS_WRAP_LIGHT);
#ifdef LIT_DIFFUSE_LAMBERT_BRDF
illuminance = Lambert() * wrappedNdotL;
#else
float tNdotL = saturate(-NdotL);
float NdotV = preLightData.NdotV;
illuminance = INV_PI * F_Transm_Schlick(0, 0.5, NdotV) * F_Transm_Schlick(0, 0.5, tNdotL) * wrappedNdotL;
#endif
// We don't multiply by 'bsdfData.diffuseColor' here. It's done only once in PostEvaluateBSDF().
lighting.diffuse += EvaluateTransmission(bsdfData, illuminance * lightData.diffuseScale, shadow);
}
#endif
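For reference, the wrapped lighting term used above is typically the energy-conserving wrapped diffuse. A minimal sketch of what ComputeWrappedDiffuseLighting() likely evaluates is shown below, with an assumed name to avoid claiming the real helper's exact form.

// Sketch only: energy-conserving wrapped diffuse (Ref: Steve McAuley), with w = SSS_WRAP_LIGHT.
// In the transmission code above it is called with -NdotL so that back-facing light wraps
// around thin geometry.
float SketchWrappedDiffuseLighting(float NdotL, float w)
{
    return saturate((NdotL + w) / ((1.0 + w) * (1.0 + w)));
}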

DirectLighting EvaluateBSDF_Punctual( LightLoopContext lightLoopContext,
float3 V, PositionInputs posInput,
PreLightData preLightData, LightData lightData, BSDFData bsdfData, int GPULightType)
PreLightData preLightData, LightData lightData, BSDFData bsdfData, BakeLightingData bakeLightingData, int GPULightType)
int lightType = GPULightType;
// All punctual light types are handled by the same formula; attenuation terms that don't apply to a given light type are neutral.
// light.positionWS is the normalized light direction in the case of a directional light, and invSqrAttenuationRadius is 0
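// For context (assumption, not part of this commit): the attenuation referred to above is
// typically an inverse-square falloff multiplied by a smooth window, roughly
//   smoothFactor = saturate(1 - (distSq * invSqrAttenuationRadius)^2)
//   attenuation  = smoothFactor^2 / max(distSq, minDistSq)
// With invSqrAttenuationRadius = 0 the window evaluates to 1, i.e. that part of the
// attenuation becomes neutral, which is what the comment above relies on.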

float3 lightToSurface = positionWS - lightData.positionWS;
float3 unL = -lightToSurface;
float dist = sqrt(distSq);
float3 L = (lightType != GPULIGHTTYPE_PROJECTOR_BOX) ? unL * rsqrt(distSq) : -lightData.forward;
float NdotL = dot(bsdfData.normalWS, L);
lightData.diffuseScale *= attenuation;
float shadowMask = 1.0;
#ifdef SHADOWS_SHADOWMASK
// shadowMaskSelector.x is -1 if there is no shadow mask
// Note that we override the shadow value (in case we don't have any dynamic shadow)
shadow = shadowMask = (lightData.shadowMaskSelector.x >= 0.0) ? dot(bakeLightingData.bakeShadowMask, lightData.shadowMaskSelector) : 1.0;
#endif
[branch] if (lightData.shadowIndex >= 0)
{

shadow = GetPunctualShadowAttenuation(lightLoopContext.shadowContext, positionWS + offset, bsdfData.normalWS, lightData.shadowIndex, L_dist, posInput.unPositionSS);
#ifdef SHADOWS_SHADOWMASK
// Note: Legacy Unity has two shadow mask modes. ShadowMask (the ShadowMask contains static object shadows and the ShadowMap contains only dynamic object shadows; the final result is the minimum of both values)
// and ShadowMask_Distance (the ShadowMask contains static object shadows and the ShadowMap contains everything and is blended with the ShadowMask based on distance (global distance set up in QualitySettings)).
// HDRenderPipeline changes this behavior. Only ShadowMask mode is supported, but we support both the blend with distance AND the minimum of both values. The distance is controlled per light.
// The following code does this.
// The min handles the case of having only dynamic objects in the ShadowMap.
// The second case, the blend with distance, is handled with ShadowDimmer. ShadowDimmer is defined manually and by the per-light shadowDistance.
// With distance, ShadowDimmer becomes one and only the ShadowMask appears, so we get the blend-with-distance behavior.
shadow = lightData.dynamicShadowCasterOnly ? min(shadowMask, shadow) : shadow;
shadow = lerp(shadowMask, shadow, lightData.shadowDimmer);
#else
illuminance *= shadow;
#endif
#ifdef VOLUMETRIC_SHADOWING_ENABLED
float volumetricShadow = Transmittance(OpticalDepthHomogeneous(preLightData.globalFogExtinction, dist));
// Premultiply.
lightData.diffuseScale *= volumetricShadow;
lightData.specularScale *= volumetricShadow;
#endif
illuminance *= shadow;
// Projector lights always have cookies, so we can perform clipping inside the if().
[branch] if (lightData.cookieIndex >= 0)

// Premultiply.
lightData.color *= cookie.rgb;
lightData.diffuseScale *= cookie.a;
lightData.specularScale *= cookie.a;
}

#ifdef WANT_SSS_CODE
[branch] if (bsdfData.enableTransmission)
{
// We apply wrapped lighting instead of the regular Lambertian diffuse
// to compensate for approximations within EvaluateTransmission().
float illuminance = Lambert() * ComputeWrappedDiffuseLighting(-NdotL, SSS_WRAP_LIGHT);
// Apply wrapped lighting to better handle thin objects (cards) at grazing angles.
float wrappedNdotL = ComputeWrappedDiffuseLighting(-NdotL, SSS_WRAP_LIGHT);
#ifdef LIT_DIFFUSE_LAMBERT_BRDF
illuminance = Lambert() * wrappedNdotL;
#else
float tNdotL = saturate(-NdotL);
float NdotV = preLightData.NdotV;
illuminance = INV_PI * F_Transm_Schlick(0, 0.5, NdotV) * F_Transm_Schlick(0, 0.5, tNdotL) * wrappedNdotL;
#endif
// We don't multiply by 'bsdfData.diffuseColor' here. It's done only once in PostEvaluateBSDF().
lighting.diffuse += EvaluateTransmission(bsdfData, illuminance * lightData.diffuseScale, shadow);
}
#endif

DirectLighting EvaluateBSDF_Line( LightLoopContext lightLoopContext,
float3 V, PositionInputs posInput,
PreLightData preLightData, LightData lightData, BSDFData bsdfData)
PreLightData preLightData, LightData lightData, BSDFData bsdfData, BakeLightingData bakeLightingData)
{
DirectLighting lighting;
ZERO_INITIALIZE(DirectLighting, lighting);

DirectLighting EvaluateBSDF_Rect( LightLoopContext lightLoopContext,
float3 V, PositionInputs posInput,
PreLightData preLightData, LightData lightData, BSDFData bsdfData)
PreLightData preLightData, LightData lightData, BSDFData bsdfData, BakeLightingData bakeLightingData)
{
DirectLighting lighting;
ZERO_INITIALIZE(DirectLighting, lighting);

DirectLighting EvaluateBSDF_Area( LightLoopContext lightLoopContext,
float3 V, PositionInputs posInput,
PreLightData preLightData, LightData lightData, BSDFData bsdfData, int GPULightType)
PreLightData preLightData, LightData lightData, BSDFData bsdfData, BakeLightingData bakeLightingData, int GPULightType)
return EvaluateBSDF_Line(lightLoopContext, V, posInput, preLightData, lightData, bsdfData);
return EvaluateBSDF_Line(lightLoopContext, V, posInput, preLightData, lightData, bsdfData, bakeLightingData);
return EvaluateBSDF_Rect(lightLoopContext, V, posInput, preLightData, lightData, bsdfData);
return EvaluateBSDF_Rect(lightLoopContext, V, posInput, preLightData, lightData, bsdfData, bakeLightingData);
}
}

void PostEvaluateBSDF( LightLoopContext lightLoopContext,
float3 V, PositionInputs posInput,
PreLightData preLightData, BSDFData bsdfData, float3 bakeDiffuseLighting, AggregateLighting lighting,
PreLightData preLightData, BSDFData bsdfData, BakeLightingData bakeLightingData, AggregateLighting lighting,
float3 bakeDiffuseLighting = bakeLightingData.bakeDiffuseLighting;
// Use the GTAOMultiBounce approximation for ambient occlusion (allows the occlusion to pick up a tint from the baseColor)
#define GTAO_MULTIBOUNCE_APPROX 1

lighting.indirect.specularReflected *= lerp(_AmbientOcclusionParam.rgb, float3(1.0, 1.0, 1.0), min(bsdfData.specularOcclusion, specularOcclusion));
#endif
// TODO: we could call a function like PostBSDF that will apply albedo and divide by PI once for the loop
lighting.direct.diffuse *=
#if GTAO_MULTIBOUNCE_APPROX
GTAOMultiBounce(directAmbientOcclusion, bsdfData.diffuseColor);
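// For context (assumption, not part of this commit): GTAOMultiBounce() is commonly the cubic
// fit from "Practical Realtime Strategies for Accurate Indirect Occlusion" (Jimenez et al. 2016):
//   a =  2.0404 * albedo - 0.3324;
//   b = -4.7951 * albedo + 0.6417;
//   c =  2.7552 * albedo + 0.6903;
//   return max(visibility, ((visibility * a + b) * visibility + c) * visibility);
// which brightens occluded areas toward the surface color to mimic inter-reflections.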

diffuseLighting = lighting.direct.diffuse + bakeDiffuseLighting;
float3 modifiedDiffuseColor = ApplyDiffuseTexturingMode(bsdfData);
// Apply the albedo to the direct diffuse lighting (only once). The indirect (baked)
// diffuse lighting has already had the albedo applied in GetBakedDiffuseLigthing().
diffuseLighting = modifiedDiffuseColor * lighting.direct.diffuse + bakeDiffuseLighting;
// If refraction is enabled we use the transmittanceMask to lerp between the current diffuse lighting and the refraction value
// Physically speaking, transmittanceMask should be 1, but for artistic reasons we let the value vary
#if HAS_REFRACTION
