
Merge pull request #545 from EvgeniiG/Unity-2017.3

Improve subsurface scattering
/Yibing-Project-2
GitHub · 7 years ago
Current commit: 3f66af34
12 files changed, 170 insertions and 88 deletions
  1. ScriptableRenderPipeline/Core/ShaderLibrary/API/PSSL.hlsl (3 changed lines)
  2. ScriptableRenderPipeline/Core/ShaderLibrary/Common.hlsl (19 changed lines)
  3. ScriptableRenderPipeline/Core/ShaderLibrary/CommonMaterial.hlsl (16 changed lines)
  4. ScriptableRenderPipeline/Core/ShaderLibrary/EntityLighting.hlsl (2 changed lines)
  5. ScriptableRenderPipeline/Core/ShaderLibrary/Shadow/Resources/ShadowBlurMoments.compute (6 changed lines)
  6. ScriptableRenderPipeline/HDRenderPipeline/HDRenderPipeline.cs (62 changed lines)
  7. ScriptableRenderPipeline/HDRenderPipeline/Lighting/Deferred.shader (7 changed lines)
  8. ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/Deferred.compute (8 changed lines)
  9. ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/Lit.hlsl (47 changed lines)
  10. ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/Resources/SubsurfaceScattering.compute (80 changed lines)
  11. ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/Resources/SubsurfaceScattering.shader (6 changed lines)
  12. ScriptableRenderPipeline/HDRenderPipeline/ShaderPass/ShaderPassLightTransport.hlsl (2 changed lines)

ScriptableRenderPipeline/Core/ShaderLibrary/API/PSSL.hlsl (3 changed lines)


// This file assumes SHADER_API_D3D11 is defined
#define INTRINSIC_BITFIELD_EXTRACT
#define INTRINSIC_WAVEREADFIRSTLANE
#define WaveReadFirstLane ReadFirstLane
#define INTRINSIC_MED3
#define INTRINSIC_MINMAX3
#define Min3 min3
#define Max3 max3

ScriptableRenderPipeline/Core/ShaderLibrary/Common.hlsl (19 changed lines)


return BitFieldExtract(data, 1u, bitPos) != 0;
}
#ifndef INTRINSIC_CLAMP
// TODO: should we force all clamps to be intrinsic by default?
// Some platforms have a one-instruction clamp
#define Clamp clamp
#endif // INTRINSIC_CLAMP
#ifndef INTRINSIC_WAVEREADFIRSTLANE
// Warning: for correctness, the value you pass to the function must be constant across the wave!
uint WaveReadFirstLane(uint scalarValue)
{
return scalarValue;
}
#endif
#ifndef INTRINSIC_MUL24
int Mul24(int a, int b)

return a * b + c;
}
#endif // INTRINSIC_MAD24
#ifndef INTRINSIC_MED3
float Med3(float a, float b, float c)
{
return Clamp(a, b, c);
}
#endif // INTRINSIC_MED3
#ifndef INTRINSIC_MINMAX3
float Min3(float a, float b, float c)

ScriptableRenderPipeline/Core/ShaderLibrary/CommonMaterial.hlsl (16 changed lines)


return saturate((NdotL + w) / ((1 + w) * (1 + w)));
}
// In order to support subsurface scattering, we need to know which pixels have an SSS material.
// It can be accomplished by reading the stencil buffer.
// A faster solution (which avoids an extra texture fetch) is to simply make sure that
// all pixels which belong to an SSS material are not black (pixels which don't are always black).
float3 TagLightingForSSS(float3 subsurfaceLighting)
{
subsurfaceLighting.r = max(subsurfaceLighting.r, HFLT_MIN);
return subsurfaceLighting;
}
// See TagLightingForSSS() for details.
bool TestLightingForSSS(float3 subsurfaceLighting)
{
return subsurfaceLighting.r > 0;
}
// MACRO from legacy Unity
// Transforms 2D UV by scale/bias property
#define TRANSFORM_TEX(tex, name) ((tex.xy) * name##_ST.xy + name##_ST.zw)
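A hedged usage sketch (illustrative only, not part of the diff) of how the tag/test pair above is meant to be used: the deferred lighting pass tags its split diffuse output, and the screen-space SSS pass tests the tag instead of fetching the stencil buffer.

// Lighting pass (see Deferred.shader / TilePass Deferred.compute below): tag the split diffuse output.
diffuseLightingUAV[pixelCoord] = TagLightingForSSS(diffuseLighting);

// SSS pass (see SubsurfaceScattering.compute below): the tag stands in for a per-pixel stencil read.
float3 irradiance = LOAD_TEXTURE2D(_IrradianceSource, pixelCoord).rgb;
if (!TestLightingForSSS(irradiance))
{
    return; // not an SSS pixel, nothing to filter
}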

ScriptableRenderPipeline/Core/ShaderLibrary/EntityLighting.hlsl (2 changed lines)


// but last one is not used.
// Clamp to edge of the "internal" texture, as R is from half texel to size of R texture minus half texel.
// This avoids leaking
texCoord.x = Clamp(texCoord.x * 0.25, 0.5 * texelSizeX, 0.25 - 0.5 * texelSizeX);
texCoord.x = clamp(texCoord.x * 0.25, 0.5 * texelSizeX, 0.25 - 0.5 * texelSizeX);
float4 shAr = SAMPLE_TEXTURE3D(SHVolumeTexture, SHVolumeSampler, texCoord);
texCoord.x += 0.25;

ScriptableRenderPipeline/Core/ShaderLibrary/Shadow/Resources/ShadowBlurMoments.compute (6 changed lines)


#if MAX_MSAA > 1
uint width, height, sampleCnt;
depthTex.GetDimensions( width, height, sampleCnt );
sampleCnt = Clamp( sampleCnt, 2, MAX_MSAA );
sampleCnt = clamp( sampleCnt, 2, MAX_MSAA );
float sampleCntRcp = 1.0 / sampleCnt;
// calculate weights based on sample positions

// We're pancaking triangles to znear in the depth pass so depth and subsequently all moments can end up being zero.
// The solver ShadowMoments_SolveMSM then ends up calculating infinities and NaNs, which produces different results
// on different vendors' GPUs. So we're adding a small safety margin here.
depth = Clamp( depth, 0.001, 0.999 );
depth = clamp( depth, 0.001, 0.999 );
# endif
avgMoments += sampleWeights[is] * DepthToMoments( depth );
}

// We're pancaking triangles to znear in the depth pass so depth and subsequently all moments can end up being zero.
// The solver ShadowMoments_SolveMSM then ends up calculating infinities and NaNs, which produces different results
// on different vendors' GPUs. So we're adding a small safety margin here.
depth = Clamp( depth, 0.001, 0.999 );
depth = clamp( depth, 0.001, 0.999 );
# endif
writeToShared( DepthToMoments( depth ), int2( ldsIdx.x, groupThreadId.y ), LDS_STRIDE );
#endif

ScriptableRenderPipeline/HDRenderPipeline/HDRenderPipeline.cs (62 changed lines)


bool NeedDepthBufferCopy()
{
// For now we consider only PS4 to be able to read from a bound depth buffer. Need to test/implement for other platforms.
// For now we consider only PS4 to be able to read from a bound depth buffer.
// TODO: test/implement for other platforms.
return SystemInfo.graphicsDeviceType != GraphicsDeviceType.PlayStation4;
}

{
// Currently, Unity does not offer a way to access the GCN HTile even on PS4 and Xbox One.
// Therefore, it's computed in a pixel shader, and optimized to only contain the SSS bit.
return m_CurrentDebugDisplaySettings.renderingDebugSettings.enableSSSAndTransmission;
return m_CurrentDebugDisplaySettings.renderingDebugSettings.enableSSSAndTransmission && sssSettings.useDisneySSS;
}
bool NeedTemporarySubsurfaceBuffer()
{
// Typed UAV loads from FORMAT_R16G16B16A16_FLOAT are an optional feature of Direct3D 11.
// Most modern GPUs support it. We can avoid performing a costly copy in this case.
// TODO: test/implement for other platforms.
return m_CurrentDebugDisplaySettings.renderingDebugSettings.enableSSSAndTransmission && (!sssSettings.useDisneySSS || (
SystemInfo.graphicsDeviceType != GraphicsDeviceType.PlayStation4 &&
SystemInfo.graphicsDeviceType != GraphicsDeviceType.XboxOne &&
SystemInfo.graphicsDeviceType != GraphicsDeviceType.XboxOneD3D12));
}
RenderTargetIdentifier GetDepthTexture()

cmd.SetComputeVectorArrayParam(m_SubsurfaceScatteringCS, HDShaderIDs._FilterKernels, sssParameters.filterKernels);
cmd.SetComputeVectorArrayParam(m_SubsurfaceScatteringCS, HDShaderIDs._ShapeParams, sssParameters.shapeParams);
cmd.SetComputeTextureParam(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, HDShaderIDs._GBufferTexture0, m_GbufferManager.GetGBuffers()[0]);
cmd.SetComputeTextureParam(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, HDShaderIDs._GBufferTexture1, m_GbufferManager.GetGBuffers()[1]);
cmd.SetComputeTextureParam(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, HDShaderIDs._GBufferTexture2, m_GbufferManager.GetGBuffers()[2]);
cmd.SetComputeTextureParam(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, HDShaderIDs._GBufferTexture3, m_GbufferManager.GetGBuffers()[3]);
cmd.SetComputeTextureParam(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, HDShaderIDs._DepthTexture, GetDepthTexture());
cmd.SetComputeTextureParam(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, HDShaderIDs._StencilTexture, GetStencilTexture());
cmd.SetComputeTextureParam(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, HDShaderIDs._HTile, GetHTile());
cmd.SetComputeTextureParam(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, HDShaderIDs._IrradianceSource, m_CameraSssDiffuseLightingBufferRT);
cmd.SetComputeTextureParam(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, HDShaderIDs._CameraColorTexture, m_CameraColorBufferRT);
cmd.SetComputeTextureParam(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, HDShaderIDs._CameraFilteringBuffer, m_CameraFilteringBufferRT);
cmd.SetComputeTextureParam(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, HDShaderIDs._GBufferTexture0, m_GbufferManager.GetGBuffers()[0]);
cmd.SetComputeTextureParam(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, HDShaderIDs._GBufferTexture1, m_GbufferManager.GetGBuffers()[1]);
cmd.SetComputeTextureParam(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, HDShaderIDs._GBufferTexture2, m_GbufferManager.GetGBuffers()[2]);
cmd.SetComputeTextureParam(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, HDShaderIDs._GBufferTexture3, m_GbufferManager.GetGBuffers()[3]);
cmd.SetComputeTextureParam(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, HDShaderIDs._DepthTexture, GetDepthTexture());
cmd.SetComputeTextureParam(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, HDShaderIDs._HTile, GetHTile());
cmd.SetComputeTextureParam(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, HDShaderIDs._IrradianceSource, m_CameraSssDiffuseLightingBufferRT);
if (NeedTemporarySubsurfaceBuffer())
{
cmd.SetComputeTextureParam(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, HDShaderIDs._CameraFilteringBuffer, m_CameraFilteringBufferRT);
// Perform the SSS filtering pass which fills 'm_CameraFilteringBufferRT'.
cmd.DispatchCompute(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, ((int)hdCamera.screenSize.x + 15) / 16, ((int)hdCamera.screenSize.y + 15) / 16, 1);
// Perform the SSS filtering pass which fills 'm_CameraFilteringBufferRT'.
cmd.DispatchCompute(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, ((int)hdCamera.screenSize.x + 15) / 16, ((int)hdCamera.screenSize.y + 15) / 16, 1);
cmd.SetGlobalTexture(HDShaderIDs._IrradianceSource, m_CameraFilteringBufferRT); // Cannot set a RT on a material
cmd.SetGlobalTexture(HDShaderIDs._IrradianceSource, m_CameraFilteringBufferRT); // Cannot set a RT on a material
// Additively blend diffuse and specular lighting into 'm_CameraColorBufferRT'.
CoreUtils.DrawFullScreen(cmd, m_CombineLightingPass, m_CameraColorBufferRT, m_CameraDepthStencilBufferRT);
}
else
{
cmd.SetComputeTextureParam(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, HDShaderIDs._CameraColorTexture, m_CameraColorBufferRT);
// Combine diffuse and specular lighting into 'm_CameraColorBufferRT'.
CoreUtils.DrawFullScreen(cmd, m_CombineLightingPass, m_CameraColorBufferRT, m_CameraDepthStencilBufferRT);
// Perform the SSS filtering pass which performs an in-place update of 'm_CameraColorBufferRT'.
cmd.DispatchCompute(m_SubsurfaceScatteringCS, m_SubsurfaceScatteringKernel, ((int)hdCamera.screenSize.x + 15) / 16, ((int)hdCamera.screenSize.y + 15) / 16, 1);
}
}
else
{

cmd.GetTemporaryRT(m_CameraColorBuffer, w, h, 0, FilterMode.Point, RenderTextureFormat.ARGBHalf, RenderTextureReadWrite.Linear, 1, true); // Enable UAV
cmd.GetTemporaryRT(m_CameraSssDiffuseLightingBuffer, w, h, 0, FilterMode.Point, RenderTextureFormat.RGB111110Float, RenderTextureReadWrite.Linear, 1, true); // Enable UAV
cmd.GetTemporaryRT(m_CameraFilteringBuffer, w, h, 0, FilterMode.Point, RenderTextureFormat.RGB111110Float, RenderTextureReadWrite.Linear, 1, true); // Enable UAV
if (NeedTemporarySubsurfaceBuffer())
{
cmd.GetTemporaryRT(m_CameraFilteringBuffer, w, h, 0, FilterMode.Point, RenderTextureFormat.RGB111110Float, RenderTextureReadWrite.Linear, 1, true); // Enable UAV
}
// Color and depth pyramids
int s = CalculatePyramidSize(w, h);

}
// Old SSS Model >>>
if (!sssSettings.useDisneySSS)
if (NeedTemporarySubsurfaceBuffer())
{
// Clear the SSS filtering target
using (new ProfilingSample(cmd, "Clear SSS filtering target"))

ScriptableRenderPipeline/HDRenderPipeline/Lighting/Deferred.shader (7 changed lines)


Outputs outputs;
#ifdef OUTPUT_SPLIT_LIGHTING
outputs.specularLighting = float4(specularLighting, 1.0);
outputs.diffuseLighting = diffuseLighting;
// When SSSSS is enabled, we use split lighting.
// The SSSSS algorithm needs to know which pixels contribute to SSS and which don't. We could use the stencil for that, but it would increase the cost of SSSSS.
// A simpler solution is to add a slight contribution here that isn't visible (we chose the fp16 minimum, which is also the fp11 and fp10 minimum).
// The SSSSS algorithm then checks whether the diffuse lighting is black and discards the pixel if it is.
outputs.diffuseLighting.r = max(outputs.diffuseLighting.r, HFLT_MIN);
outputs.diffuseLighting = TagLightingForSSS(diffuseLighting);
#else
outputs.combinedLighting = float4(diffuseLighting + specularLighting, 1.0);
#endif

ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/Deferred.compute (8 changed lines)


if (_EnableSSSAndTransmission != 0 && bsdfData.materialId == MATERIALID_LIT_SSS && HasMaterialFeatureFlag(MATERIALFEATUREFLAGS_LIT_SSS))
{
// When SSSSS is enabled, we use split lighting.
// The SSSSS algorithm needs to know which pixels contribute to SSS and which don't. We could use the stencil for that, but it would increase the cost of SSSSS.
// A simpler solution is to add a slight contribution here that isn't visible (we chose the fp16 minimum, which is also the fp11 and fp10 minimum).
// The SSSSS algorithm then checks whether the diffuse lighting is black and discards the pixel if it is.
diffuseLighting.r = max(diffuseLighting.r, HFLT_MIN);
diffuseLightingUAV[pixelCoord] = diffuseLighting;
diffuseLightingUAV[pixelCoord] = TagLightingForSSS(diffuseLighting);
}
else
{

ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/Lit.hlsl (47 changed lines)


#define GBufferType1 uint4
// TODO: how can we abstract this? We would like to avoid this PS4 test here
#ifdef SHADER_API_PS4
#ifdef SHADER_API_PSSL
// On PS4, we need to manually specify the format of the output render target; the output type alone is not enough
#pragma PSSL_target_output_format(target 0 FMT_UINT16_ABGR)
#pragma PSSL_target_output_format(target 1 FMT_UINT16_ABGR)

#define LTC_LUT_OFFSET (0.5 * rcp(LTC_LUT_SIZE))
// Subsurface scattering constant
#define SSS_WRAP_ANGLE (PI/12) // Used for wrap lighting
#define SSS_WRAP_ANGLE (PI/12) // 15 degrees
#define SSS_WRAP_LIGHT cos(PI/2 - SSS_WRAP_ANGLE)
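A quick numeric check of these constants (my arithmetic, not part of the diff):

// SSS_WRAP_ANGLE = PI/12 rad = 15 degrees, so SSS_WRAP_LIGHT = cos(PI/2 - PI/12) = cos(75 deg) ~= 0.2588.
// Plugged into ComputeWrappedDiffuseLighting() from CommonMaterial.hlsl above,
// saturate((NdotL + w) / ((1 + w) * (1 + w))) with w ~= 0.2588 only reaches zero once NdotL drops below -0.2588,
// i.e. roughly 15 degrees past the geometric terminator, and the squared (1 + w) denominator keeps
// the wrapped diffuse term approximately energy-conserving.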
CBUFFER_START(UnitySSSParameters)

#elif LIT_DIFFUSE_GGX_BRDF
float3 diffuseTerm = DiffuseGGX(bsdfData.diffuseColor, NdotV, NdotL, NdotH, LdotV, bsdfData.roughness);
#else
float diffuseTerm = DisneyDiffuse(NdotV, NdotL, LdotV, bsdfData.perceptualRoughness);
// A note on subsurface scattering.
// The correct way to handle SSS is to transmit light inside the surface, perform SSS,
// and then transmit it outside towards the viewer.
// Transmit(X) = F_Transm_Schlick(F0, F90, NdotX), where F0 = 0, F90 = 1.
// Therefore, the diffuse BSDF should be decomposed as follows:
// f_d = A / Pi * F_Transm_Schlick(0, 1, NdotL) * F_Transm_Schlick(0, 1, NdotV) + f_d_reflection,
// with F_Transm_Schlick(0, 1, NdotV) applied after the SSS pass.
// The alternative (artistic) formulation of Disney is to set F90 = 0.5:
// f_d = A / Pi * F_Transm_Schlick(0, 0.5, NdotL) * F_Transm_Schlick(0, 0.5, NdotV) + f_retro_reflection.
// That way, darkening at grazing angles is reduced to 0.5.
// In practice, applying F_Transm_Schlick(F0, F90, NdotV) after the SSS pass is expensive,
// as it forces us to read the normal buffer at the end of the SSS pass.
// Separating f_retro_reflection also has a small cost (mostly due to energy compensation
// for multi-bounce GGX), and the visual difference is negligible.
// Therefore, we choose not to separate diffuse lighting into reflected and transmitted.
float diffuseTerm = DisneyDiffuse(NdotV, NdotL, LdotV, bsdfData.perceptualRoughness);
#endif
// We don't multiply by 'bsdfData.diffuseColor' here. It's done only once in PostEvaluateBSDF().
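A hedged pseudo-structure of the two decompositions discussed in the comment above (illustrative only; f_d_reflection and f_retro_reflection are the terms named in the comment, not new code, and neither split is what the shader ultimately does, since the comment concludes that diffuse lighting is not separated):

// 1) "Correct" split: transmit in, blur, transmit out (F0 = 0, F90 = 1).
//    sssInput  = albedo * INV_PI * F_Transm_Schlick(0.0, 1.0, NdotL);   // fed to the SSS blur
//    sssOutput = blurredInput * F_Transm_Schlick(0.0, 1.0, NdotV);      // needs the normal buffer after the blur
//    f_d       = sssOutput + f_d_reflection;
//
// 2) Disney's artistic formulation (F90 = 0.5), evaluated in a single pass:
//    f_d = albedo * INV_PI * F_Transm_Schlick(0.0, 0.5, NdotL) * F_Transm_Schlick(0.0, 0.5, NdotV)
//        + f_retro_reflection;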

[branch] if (bsdfData.enableTransmission)
{
// We apply wrapped lighting instead of the regular Lambertian diffuse
// to compensate for approximations within EvaluateTransmission().
float illuminance = Lambert() * ComputeWrappedDiffuseLighting(-NdotL, SSS_WRAP_LIGHT);
// Apply wrapped lighting to better handle thin objects (cards) at grazing angles.
float wrappedNdotL = ComputeWrappedDiffuseLighting(-NdotL, SSS_WRAP_LIGHT);
#ifdef LIT_DIFFUSE_LAMBERT_BRDF
illuminance = Lambert() * wrappedNdotL;
#else
float tNdotL = saturate(-NdotL);
float NdotV = max(preLightData.NdotV, MIN_N_DOT_V);
illuminance = INV_PI * F_Transm_Schlick(0, 0.5, NdotV) * F_Transm_Schlick(0, 0.5, tNdotL) * wrappedNdotL;
#endif
// We use diffuse lighting for accumulation since it is going to be blurred during the SSS pass.
// We don't multiply by 'bsdfData.diffuseColor' here. It's done only once in PostEvaluateBSDF().

[branch] if (bsdfData.enableTransmission)
{
// We apply wrapped lighting instead of the regular Lambertian diffuse
// to compensate for approximations within EvaluateTransmission().
float illuminance = Lambert() * ComputeWrappedDiffuseLighting(-NdotL, SSS_WRAP_LIGHT);
// Apply wrapped lighting to better handle thin objects (cards) at grazing angles.
float wrappedNdotL = ComputeWrappedDiffuseLighting(-NdotL, SSS_WRAP_LIGHT);
#ifdef LIT_DIFFUSE_LAMBERT_BRDF
illuminance = Lambert() * wrappedNdotL;
#else
float tNdotL = saturate(-NdotL);
float NdotV = max(preLightData.NdotV, MIN_N_DOT_V);
illuminance = INV_PI * F_Transm_Schlick(0, 0.5, NdotV) * F_Transm_Schlick(0, 0.5, tNdotL) * wrappedNdotL;
#endif
// We use diffuse lighting for accumulation since it is going to be blurred during the SSS pass.
// We don't multiply by 'bsdfData.diffuseColor' here. It's done only once in PostEvaluateBSDF().

ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/Resources/SubsurfaceScattering.compute (80 changed lines)


// Tweak parameters.
#define SSS_BILATERAL_FILTER 1
#define SSS_USE_LDS_CACHE 1
#define SSS_ENABLE_NEAR_FIELD 0
#define SSS_SAMPLE_TEST_HTILE 0
#define SSS_USE_TANGENT_PLANE 0
#define SSS_CLAMP_ARTIFACT 0
#define SSS_ENABLE_NEAR_FIELD 0 // Greatly increases the number of samples. Comes at a high cost.
#define SSS_SAMPLE_TEST_HTILE 0 // Potential optimization. YMMV.
#define SSS_USE_TANGENT_PLANE 0 // Improves the accuracy of the approximation (0 -> 1st order). High cost. Does not work with back-facing normals.
#define SSS_CLAMP_ARTIFACT 0 // Reduces bleeding. Use with SSS_USE_TANGENT_PLANE.
#define SSS_DEBUG_LOD 0
#define SSS_DEBUG_NORMAL_VS 0

#define TEXTURE_CACHE_BORDER 2
#define TEXTURE_CACHE_SIZE_1D (GROUP_SIZE_1D + 2 * TEXTURE_CACHE_BORDER)
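For scale (my arithmetic; the value of GROUP_SIZE_1D is not shown in this hunk, but the 16x16 dispatch in HDRenderPipeline.cs above suggests it is 16):

// Assuming GROUP_SIZE_1D == 16: TEXTURE_CACHE_SIZE_1D = 16 + 2 * 2 = 20,
// so the LDS cache holds a 20x20 texel neighborhood (400 samples) around each 16x16 pixel tile,
// presumably so that filter taps landing up to TEXTURE_CACHE_BORDER = 2 texels outside the tile
// can still be served from LDS instead of extra global memory reads.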
// Check for support of typed UAV loads from FORMAT_R16G16B16A16_FLOAT.
// TODO: query the format support more precisely.
#if !(defined(SHADER_API_PSSL) || defined(SHADER_API_XBOXONE))
#define USE_INTERMEDIATE_BUFFER
#endif
//--------------------------------------------------------------------------------------------------
// Included headers
//--------------------------------------------------------------------------------------------------

float4 _WorldScales[SSS_N_PROFILES]; // Size of the world unit in meters (only the X component is used)
float4 _FilterKernels[SSS_N_PROFILES][SSS_N_SAMPLES_NEAR_FIELD]; // XY = near field, ZW = far field; 0 = radius, 1 = reciprocal of the PDF
DECLARE_GBUFFER_TEXTURE(_GBufferTexture); // Contains the albedo and SSS parameters
TEXTURE2D(_DepthTexture); // Z-buffer
TEXTURE2D(_StencilTexture); // DXGI_FORMAT_R8_UINT is not supported by Unity
TEXTURE2D(_HTile); // DXGI_FORMAT_R8_UINT is not supported by Unity
TEXTURE2D(_IrradianceSource); // Includes transmitted light
RW_TEXTURE2D(float4, _CameraFilteringTexture); // Target texture
DECLARE_GBUFFER_TEXTURE(_GBufferTexture); // Contains the albedo and SSS parameters
TEXTURE2D(_DepthTexture); // Z-buffer
TEXTURE2D(_HTile); // DXGI_FORMAT_R8_UINT is not supported by Unity
TEXTURE2D(_IrradianceSource); // Includes transmitted light
#ifdef USE_INTERMEDIATE_BUFFER
RW_TEXTURE2D(float4, _CameraFilteringTexture); // Target texture
#else
RW_TEXTURE2D(float4, _CameraColorTexture); // Target texture
#endif
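A hypothetical accessor sketch (not in the diff) just to make the _FilterKernels packing described in the comment above explicit:

// XY = near field, ZW = far field; component 0 = radius, component 1 = rcp(pdf).
float2 LoadNearFieldSample(uint profileID, uint i) { return _FilterKernels[profileID][i].xy; } // x: radius, y: rcp(pdf)
float2 LoadFarFieldSample (uint profileID, uint i) { return _FilterKernels[profileID][i].zw; } // z: radius, w: rcp(pdf)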
//--------------------------------------------------------------------------------------------------
// Implementation

bool passedStencilTest;
#if SSS_SAMPLE_TEST_HTILE
int2 tileCoord = pixelCoord >> 3; // Divide by 8
int2 tileCoord = pixelCoord / 8;
// Perform the stencil test (reject at the tile rate).
passedStencilTest = stencilRef == LOAD_TEXTURE2D(_HTile, tileCoord).r;

{
// Unfortunately, our copy of HTile does not allow accepting at the tile rate.
// Therefore, we have to additionally perform the stencil test at the pixel rate.
passedStencilTest = (uint)stencilRef == UnpackByte(LOAD_TEXTURE2D(_StencilTexture, pixelCoord).r);
// We check the tagged irradiance buffer to avoid an extra stencil texture fetch.
passedStencilTest = TestLightingForSSS(LOAD_TEXTURE2D(_IrradianceSource, pixelCoord).rgb);
}
return passedStencilTest;

float4 textureSample = LoadSample(position, cacheAnchor);
float3 irradiance = textureSample.rgb;
float linearDepth = textureSample.a;
if (linearDepth > 0)
if (TestLightingForSSS(irradiance))
float linearDepth = textureSample.a;
float z = linearDepth - centerPosVS.z;
float p = _FilterKernels[profileID][i][iP];
float3 w = ComputeBilateralWeight(xy2, z, mmPerUnit, shapeParam, p);

}
else
{
// The irradiance is 0. This could happen for 2 reasons.
// Most likely, the surface fragment does not have an SSS material.
// Alternatively, our sample comes from a region without any geometry.
// Our blur is energy-preserving, so 'centerWeight' should be set to 0.
// We do not terminate the loop since we want to gather the contribution
// of the remaining samples (e.g. in case of hair covering skin).
}
}
void WriteResult(uint2 pixelCoord, float3 irradiance)
{
#ifdef USE_INTERMEDIATE_BUFFER
_CameraFilteringTexture[pixelCoord] = float4(irradiance, 1);
#else
_CameraColorTexture[pixelCoord] += float4(irradiance, 0);
#endif
}
#pragma kernel SubsurfaceScattering

uint groupThreadId : SV_GroupThreadID)
{
// Note: any factor of 64 is a suitable wave size for our algorithm.
uint waveIndex = groupThreadId / 64;
uint waveIndex = WaveReadFirstLane(groupThreadId / 64);
uint laneIndex = groupThreadId % 64;
uint quadIndex = laneIndex / 4;
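A worked example of the index math above (mine, not from the diff), assuming the 256-thread group implied by the 16x16 dispatch:

// For groupThreadId = 70: waveIndex = 70 / 64 = 1, laneIndex = 70 % 64 = 6, quadIndex = 6 / 4 = 1.
// Because every lane of a wave (for any divisor-of-64 wave size) computes the same 'groupThreadId / 64',
// wrapping it in WaveReadFirstLane() satisfies the "constant across the wave" requirement documented
// in Common.hlsl and lets the compiler keep waveIndex in a scalar register on platforms with the intrinsic.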

float3 centerIrradiance = 0;
float centerDepth = 0;
float4 cachedValue = float4(0, 0, 0, 0);
float4 cachedValue = 0;
bool passedStencilTest = StencilTest((int2)pixelCoord, stencilRef);

uint2 cacheCoord2 = 2 * (startQuad + quadCoord) + uint2(laneIndex & 1, (laneIndex >> 1) & 1);
int2 pixelCoord2 = (int2)(tileAnchor + cacheCoord2) - TEXTURE_CACHE_BORDER;
float4 cachedValue2 = float4(0, 0, 0, 0);
float4 cachedValue2 = 0;
[branch] if (StencilTest(pixelCoord2, stencilRef))
{

[branch] if (distScale == 0 || maxDistInPixels < 1)
{
#if SSS_DEBUG_LOD
_CameraFilteringTexture[pixelCoord] = float4(0, 0, 1, 1);
WriteResult(pixelCoord, float3(0, 0, 1));
_CameraFilteringTexture[pixelCoord] = float4(albedo * centerIrradiance, 1);
WriteResult(pixelCoord, albedo * centerIrradiance);
#endif
return;
}

float3 tangentY = GetLocalFrame(normalVS)[1] * unitsPerMm;
#if SSS_DEBUG_NORMAL_VS
// We expect the view-space normal to be front-facing.
if (normalVS.z >= 0)
// We expect the normal to be front-facing.
float3 viewDirVS = normalize(centerPosVS);
if (dot(normalVS, viewDirVS) >= 0)
_CameraFilteringTexture[pixelCoord] = float4(1, 1, 1, 1);
WriteResult(pixelCoord, float3(1, 1, 1));
return;
}
#endif

#if SSS_DEBUG_LOD
_CameraFilteringTexture[pixelCoord] = useNearFieldKernel ? float4(1, 0, 0, 1) : float4(0.5, 0.5, 0, 1);
WriteResult(pixelCoord, useNearFieldKernel ? float3(1, 0, 0) : float3(0.5, 0.5, 0));
return;
#endif

[branch] if (!useNearFieldKernel)
{
_CameraFilteringTexture[pixelCoord] = float4(albedo * totalIrradiance / totalWeight, 1);
WriteResult(pixelCoord, albedo * totalIrradiance / totalWeight);
return;
}

totalIrradiance, totalWeight);
}
_CameraFilteringTexture[pixelCoord] = float4(albedo * totalIrradiance / totalWeight, 1);
WriteResult(pixelCoord, albedo * totalIrradiance / totalWeight);
}

ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/Resources/SubsurfaceScattering.shader (6 changed lines)


sampleWeight = _FilterKernelsBasic[profileID][i].rgb;
sampleIrradiance = LOAD_TEXTURE2D(_IrradianceSource, samplePosition).rgb;
[flatten]
if (any(sampleIrradiance))
if (TestLightingForSSS(sampleIrradiance))
{
// Apply bilateral weighting.
// Ref #1: Skin Rendering by Pseudo-Separable Cross Bilateral Filtering.

}
else
{
// The irradiance is 0. This could happen for 3 reasons.
// The irradiance is 0. This could happen for 2 reasons.
// Finally, the surface fragment could be completely shadowed.
// Our blur is energy-preserving, so 'centerWeight' should be set to 0.
// We do not terminate the loop since we want to gather the contribution
// of the remaining samples (e.g. in case of hair covering skin).

ScriptableRenderPipeline/HDRenderPipeline/ShaderPass/ShaderPassLightTransport.hlsl (2 changed lines)


{
// Apply diffuseColor Boost from LightmapSettings.
// put abs here to silence a warning; no cost, no impact as the color is assumed to be positive.
res.rgb = Clamp(pow(abs(lightTransportData.diffuseColor), saturate(unity_OneOverOutputBoost)), 0, unity_MaxOutputValue);
res.rgb = clamp(pow(abs(lightTransportData.diffuseColor), saturate(unity_OneOverOutputBoost)), 0, unity_MaxOutputValue);
}
if (unity_MetaFragmentControl.y)
