
Clean up VBuffer sampling

/main
Evgenii Golubev, 7 years ago
Current commit
92ea5c34
3 changed files with 138 additions and 87 deletions
  1. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/Volumetrics/VBuffer.hlsl (187 lines changed)
  2. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/Volumetrics/VolumetricLighting.compute (24 lines changed)
  3. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Sky/AtmosphericScattering/AtmosphericScattering.hlsl (14 lines changed)

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/Volumetrics/VBuffer.hlsl (187 lines changed)


// Interpolation in the log space is non-linear.
// Therefore, given 'logEncodedDepth', we compute a new depth value
// which allows us to perform HW interpolation which is linear in the view space.
- float ComputeLerpPositionForLogEncoding(float linearDepth, float logEncodedDepth,
+ float ComputeLerpPositionForLogEncoding(float linearDepth,
+                                         float logEncodedDepth,
                                          float2 VBufferSliceCount,
                                          float4 VBufferDepthDecodingParams)
{
    float z = linearDepth;
    float d = logEncodedDepth;

    float numSlices    = VBufferSliceCount.x;
    float rcpNumSlices = VBufferSliceCount.y;

-     float s0 = floor(d * numSlices - 0.5);
-     float s1 = ceil(d * numSlices - 0.5);
+     float s  = d * numSlices - 0.5;
+     float s0 = floor(s);
+     float s1 = ceil(s);
    float d0 = saturate(s0 * rcpNumSlices + (0.5 * rcpNumSlices));
    float d1 = saturate(s1 * rcpNumSlices + (0.5 * rcpNumSlices));
    float z0 = DecodeLogarithmicDepthGeneralized(d0, VBufferDepthDecodingParams);
    float z1 = DecodeLogarithmicDepthGeneralized(d1, VBufferDepthDecodingParams);

    // Compute the linear interpolation weight in the view space.
    float t = saturate((z - z0) / (z1 - z0));

    return d0 + t * rcpNumSlices;
}
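To make the correction concrete, here is a small worked example (not part of the commit); the numbers are illustrative and the slice spacing is assumed rather than taken from the encoding parameters:

// Illustration only. Suppose the two slices bracketing the sample are a factor of 2 apart
// in view-space depth (z1 == 2 * z0). The view-space midpoint z = 1.5 * z0 should blend the
// two slices 50/50, but because the encoding is logarithmic, its raw encoded coordinate sits
// log2(1.5) ~= 0.585 of the way from d0 to d1, so HW filtering would give the far slice ~58%
// instead of 50%. ComputeLerpPositionForLogEncoding() rebuilds the coordinate from the
// view-space weight instead:
//     t = saturate((z - z0) / (z1 - z0)); // = 0.5 at the view-space midpoint
//     w = d0 + t * rcpNumSlices;          // -> an exact 50/50 HW blend of the two slices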
// Performs trilinear reconstruction of the V-Buffer.
- // If (clampToEdge == false), out-of-bounds loads return 0.
- float4 SampleVBuffer(TEXTURE3D_ARGS(VBuffer, trilinearSampler), bool clampToEdge,
-                      float2 positionNDC, float linearDepth,
-                      float2 VBufferSliceCount,
-                      float4 VBufferDepthEncodingParams,
-                      float4 VBufferDepthDecodingParams)
- {
-     float numSlices    = VBufferSliceCount.x;
-     float rcpNumSlices = VBufferSliceCount.y;
-     // Unity doesn't support samplers clamping to border, so we have to do it ourselves.
-     // TODO: add the proper sampler support.
-     bool isInBounds = Min3(uv.x, uv.y, d) > 0 && Max3(uv.x, uv.y, d) < 1;
-     UNITY_BRANCH if (clampToEdge || isInBounds)
-     {
- #if 1
-         float w = d;
- #else
-         // Adjust the texture coordinate for HW trilinear sampling.
-         float w = ComputeLerpPositionForLogEncoding(z, d, VBufferSliceCount, VBufferDepthDecodingParams);
- #endif
-         return SAMPLE_TEXTURE3D_LOD(VBuffer, trilinearSampler, float3(uv, w), 0);
-     }
-     else
-     {
-         return 0;
-     }
- }
+ // if (correctLinearInterpolation), we use ComputeLerpPositionForLogEncoding() to correct weighting
+ // of both slices at the cost of extra ALUs.
+ //
+ // if (quadraticFilterXY), we perform biquadratic (3x3) reconstruction for each slice to reduce
+ // aliasing at the cost of extra ALUs and bandwidth.
+ // Warning: you MUST pass a linear sampler in order for the quadratic filter to work.
+ //
+ // Note: for correct filtering, the data has to be stored in the perceptual space.
+ // This means storing tone mapped radiance and transmittance instead of optical depth.
+ // See "A Fresh Look at Generalized Sampling", p. 51.
+ //
+ // if (clampToBorder), samples outside of the buffer return 0 (we perform a smooth fade).
+ // Otherwise, the sampler simply clamps the texture coordinate to the edge of the texture.
+ // Warning: clamping to border may not work as expected with the quadratic filter due to its extent.
+ float4 SampleVBuffer(TEXTURE3D_ARGS(VBuffer, clampSampler),
+                      float2 positionNDC,
+                      float  linearDepth,
+                      float4 VBufferResolution,
+                      float2 VBufferSliceCount,
+                      float4 VBufferDepthEncodingParams,
+                      float4 VBufferDepthDecodingParams,
+                      bool   correctLinearInterpolation,
+                      bool   quadraticFilterXY,
+                      bool   clampToBorder)
+ {
+     float2 uv = positionNDC;
+
+     // The distance between slices is log-encoded.
+     float z = linearDepth;
+     float d = EncodeLogarithmicDepthGeneralized(z, VBufferDepthEncodingParams);
+
+     float w;
+
+     if (correctLinearInterpolation)
+     {
+         // Adjust the texture coordinate for HW linear filtering.
+         w = ComputeLerpPositionForLogEncoding(z, d, VBufferSliceCount, VBufferDepthDecodingParams);
+     }
+     else
+     {
+         w = d;
+     }
+
+     float fadeWeight = 1;
+
+     if (clampToBorder)
+     {
+         // Compute the distance to the edge, and remap it to the [0, 1] range.
+         // TODO: add support for the HW border clamp sampler.
+         float weightU = saturate((1 - 2 * abs(uv.x - 0.5)) * VBufferResolution.x);
+         float weightV = saturate((1 - 2 * abs(uv.y - 0.5)) * VBufferResolution.y);
+         float weightW = saturate((1 - 2 * abs(w - 0.5)) * VBufferSliceCount.x);
+
+         fadeWeight = weightU * weightV * weightW;
+     }
+
+     float4 result = 0;
+
+     if (fadeWeight > 0)
+     {
+         if (quadraticFilterXY)
+         {
+             float2 xy = uv * VBufferResolution.xy;
+             float2 ic = floor(xy);
+             float2 fc = frac(xy);
+
+             float2 weights[2], offsets[2];
+             BiquadraticFilter(1 - fc, weights, offsets); // Inverse-translate the filter centered around 0.5
+
+             result = (weights[0].x * weights[0].y) * SAMPLE_TEXTURE3D_LOD(VBuffer, clampSampler, float3((ic + float2(offsets[0].x, offsets[0].y)) * VBufferResolution.zw, w), 0)  // Top left
+                    + (weights[1].x * weights[0].y) * SAMPLE_TEXTURE3D_LOD(VBuffer, clampSampler, float3((ic + float2(offsets[1].x, offsets[0].y)) * VBufferResolution.zw, w), 0)  // Top right
+                    + (weights[0].x * weights[1].y) * SAMPLE_TEXTURE3D_LOD(VBuffer, clampSampler, float3((ic + float2(offsets[0].x, offsets[1].y)) * VBufferResolution.zw, w), 0)  // Bottom left
+                    + (weights[1].x * weights[1].y) * SAMPLE_TEXTURE3D_LOD(VBuffer, clampSampler, float3((ic + float2(offsets[1].x, offsets[1].y)) * VBufferResolution.zw, w), 0); // Bottom right
+         }
+         else
+         {
+             result = SAMPLE_TEXTURE3D_LOD(VBuffer, clampSampler, float3(uv, w), 0);
+         }
+
+         result *= fadeWeight;
+     }
+
+     return result;
+ }
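For reference, the 4-tap path above is an optimization of a straightforward 3x3 quadratic reconstruction. The sketch below spells that out; it assumes a quadratic B-spline kernel and a hypothetical point-clamp sampler ('pointSampler'), and is illustrative rather than a copy of BiquadraticFilter() from Filtering.hlsl:

// Reference (unoptimized) version of the quadraticFilterXY path, for clarity only.
// Assumes a quadratic B-spline kernel; the shipped code folds the same 3x3 footprint
// into 4 bilinear taps via BiquadraticFilter().
float4 SampleVBufferQuadraticReference(TEXTURE3D_ARGS(VBuffer, pointSampler), // 'pointSampler' is assumed
                                       float2 uv, float w, float4 VBufferResolution)
{
    float2 xy = uv * VBufferResolution.xy;
    float2 ic = floor(xy);
    float2 fc = frac(xy);

    // Per-axis quadratic B-spline weights of texels {ic - 1, ic, ic + 1}; they sum to 1.
    float2 w0 = 0.5 * (1 - fc) * (1 - fc);
    float2 w1 = 0.5 + fc - fc * fc;
    float2 w2 = 0.5 * fc * fc;

    float4 result = 0;

    for (int j = -1; j <= 1; j++)
    {
        for (int i = -1; i <= 1; i++)
        {
            float  wx = (i < 0) ? w0.x : ((i > 0) ? w2.x : w1.x);
            float  wy = (j < 0) ? w0.y : ((j > 0) ? w2.y : w1.y);
            float2 st = (ic + float2(i, j) + 0.5) * VBufferResolution.zw; // texel center in UV space

            result += (wx * wy) * SAMPLE_TEXTURE3D_LOD(VBuffer, pointSampler, float3(st, w), 0);
        }
    }

    return result;
}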
+ float4 SampleVBuffer(TEXTURE3D_ARGS(VBuffer, clampSampler),
+                      float3   positionWS,
+                      float4x4 viewProjMatrix,
+                      float4   VBufferResolution,
+                      float2   VBufferSliceCount,
+                      float4   VBufferDepthEncodingParams,
+                      float4   VBufferDepthDecodingParams,
+                      bool     correctLinearInterpolation,
+                      bool     quadraticFilterXY,
+                      bool     clampToBorder)
+ {
+     float2 positionNDC = ComputeNormalizedDeviceCoordinates(positionWS, viewProjMatrix);
+     float  linearDepth = mul(viewProjMatrix, float4(positionWS, 1)).w;
+
+     return SampleVBuffer(TEXTURE3D_PARAM(VBuffer, clampSampler),
+                          positionNDC,
+                          linearDepth,
+                          VBufferResolution,
+                          VBufferSliceCount,
+                          VBufferDepthEncodingParams,
+                          VBufferDepthDecodingParams,
+                          correctLinearInterpolation,
+                          quadraticFilterXY,
+                          clampToBorder);
+ }
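A note on the linearDepth line in the overload above: for a standard perspective projection, the clip-space w component produced by the view-projection matrix equals the view-space (eye) depth of the point, so no separate view matrix is needed. A sketch of the equivalent two-step computation follows, where 'viewMatrix' is an assumed extra input and the sign follows Unity's -Z-forward view space:

// Sketch only (not part of the file): viewProjMatrix == projMatrix * viewMatrix, and the last
// row of a perspective projMatrix copies the (negated) view-space Z into clip-space w.
float3 positionVS     = mul(viewMatrix, float4(positionWS, 1)).xyz; // 'viewMatrix' is an assumption
float  linearDepthAlt = -positionVS.z;                              // matches mul(viewProjMatrix, float4(positionWS, 1)).w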
- // Returns interpolated {volumetric radiance, transmittance}. The sampler clamps to edge.
- float4 SampleInScatteredRadianceAndTransmittance(TEXTURE3D_ARGS(VBufferLighting, trilinearSampler),
-                                                  float2 positionNDC, float linearDepth,
-                                                  float4 VBufferResolution,
-                                                  float2 VBufferSliceCount,
-                                                  float4 VBufferDepthEncodingParams,
-                                                  float4 VBufferDepthDecodingParams)
- {
- #ifdef RECONSTRUCTION_FILTER_TRILINEAR
-     float4 L = SampleVBuffer(TEXTURE3D_PARAM(VBufferLighting, trilinearSampler), true,
-                              positionNDC, linearDepth,
-                              VBufferSliceCount,
-                              VBufferDepthEncodingParams,
-                              VBufferDepthDecodingParams);
- #else // Perform biquadratic reconstruction in XY, linear in Z, using 4x trilinear taps (3x3x2 texels in total).
-     float2 xy = uv * VBufferResolution.xy;
-     float2 ic = floor(xy);
-     float2 fc = frac(xy);
- #if 0
-     // Ignore non-linearity (for performance reasons) at the cost of accuracy.
-     // The results are exact for a stationary camera, but can potentially cause some judder in motion.
-     float w = d;
- #else
-     // Adjust the texture coordinate for HW trilinear sampling.
-     float w = ComputeLerpPositionForLogEncoding(z, d, VBufferSliceCount, VBufferDepthDecodingParams);
- #endif
-     float2 weights[2], offsets[2];
-     BiquadraticFilter(1 - fc, weights, offsets); // Inverse-translate the filter centered around 0.5
-     float2 rcpRes = VBufferResolution.zw;
-     // Note: for correct filtering, the data has to be stored in the perceptual space.
-     // This means storing the tone mapped radiance and transmittance instead of optical depth.
-     // See "A Fresh Look at Generalized Sampling", p. 51.
-     float4 L = (weights[0].x * weights[0].y) * SAMPLE_TEXTURE3D_LOD(VBufferLighting, trilinearSampler, float3((ic + float2(offsets[0].x, offsets[0].y)) * rcpRes, w), 0)  // Top left
-              + (weights[1].x * weights[0].y) * SAMPLE_TEXTURE3D_LOD(VBufferLighting, trilinearSampler, float3((ic + float2(offsets[1].x, offsets[0].y)) * rcpRes, w), 0)  // Top right
-              + (weights[0].x * weights[1].y) * SAMPLE_TEXTURE3D_LOD(VBufferLighting, trilinearSampler, float3((ic + float2(offsets[0].x, offsets[1].y)) * rcpRes, w), 0)  // Bottom left
-              + (weights[1].x * weights[1].y) * SAMPLE_TEXTURE3D_LOD(VBufferLighting, trilinearSampler, float3((ic + float2(offsets[1].x, offsets[1].y)) * rcpRes, w), 0); // Bottom right
- #endif
-     // TODO: add some animated noise to the reconstructed radiance.
-     return float4(FastTonemapInvert(L.rgb), L.a);
- }
+ // Returns interpolated {volumetric radiance, transmittance}.
+ float4 SampleVolumetricLighting(TEXTURE3D_ARGS(VBufferLighting, clampSampler),
+                                 float2 positionNDC,
+                                 float  linearDepth,
+                                 float4 VBufferResolution,
+                                 float2 VBufferSliceCount,
+                                 float4 VBufferDepthEncodingParams,
+                                 float4 VBufferDepthDecodingParams,
+                                 bool   correctLinearInterpolation,
+                                 bool   quadraticFilterXY)
+ {
+     // TODO: add some slowly animated noise to the reconstructed value.
+     return FastTonemapInvert(SampleVBuffer(TEXTURE3D_PARAM(VBufferLighting, clampSampler),
+                                            positionNDC,
+                                            linearDepth,
+                                            VBufferResolution,
+                                            VBufferSliceCount,
+                                            VBufferDepthEncodingParams,
+                                            VBufferDepthDecodingParams,
+                                            correctLinearInterpolation,
+                                            quadraticFilterXY,
+                                            false));
+ }
#endif // UNITY_VBUFFER_INCLUDED
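The "perceptual space" note above is the reason SampleVolumetricLighting() ends with FastTonemapInvert(). The write side is not part of this diff; the sketch below shows the intended round trip, assuming the usual fast Reinhard pair (the exact FastTonemap()/FastTonemapInvert() definitions live in Color.hlsl):

// Illustrative sketch of the storage round trip (not from this commit).
float3 StoreRadiance  (float3 L) { return L * rcp(1 + Max3(L.r, L.g, L.b)); } // compress into [0, 1) before filtering
float3 RecoverRadiance(float3 t) { return t * rcp(1 - Max3(t.r, t.g, t.b)); } // exact inverse of the line above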

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/Volumetrics/VolumetricLighting.compute (24 lines changed)


//--------------------------------------------------------------------------------------------------
#include "CoreRP/ShaderLibrary/Common.hlsl"
#include "CoreRP/ShaderLibrary/Color.hlsl"
#include "CoreRP/ShaderLibrary/Filtering.hlsl"
#include "CoreRP/ShaderLibrary/VolumeRendering.hlsl"

float3 L = -lightToSample * distRcp;
float3 color; float attenuation;
- EvaluateLight_Punctual(context, posInput, light, unused, 0, L, lightToSample,
-                        distances, color, attenuation);
+ EvaluateLight_Punctual(context, posInput, light, unused, 1,
+                        0, L, lightToSample, distances, color, attenuation);
// Important:
// Ideally, all scattering calculations should use the stratified versions

float4 distances = float4(1, 1, 1, distProj);
float3 color; float attenuation;
- EvaluateLight_Punctual(context, posInput, light, unused, 0, L, lightToSample,
-                        distances, color, attenuation);
+ EvaluateLight_Punctual(context, posInput, light, unused, extinction,
+                        0, L, lightToSample, distances, color, attenuation);
// Important:
// Ideally, all scattering calculations should use the stratified versions

#endif
#if ENABLE_REPROJECTION
// Reproject the history at 'centerWS'.
- float2 reprojPosNDC = ComputeNormalizedDeviceCoordinates(centerWS, _PrevViewProjMatrix);
- float  reprojZ      = mul(_PrevViewProjMatrix, float4(centerWS, 1)).w;
- float4 reprojValue  = SampleVBuffer(TEXTURE3D_PARAM(_VBufferLightingHistory, s_trilinear_clamp_sampler), false,
-                                     reprojPosNDC, reprojZ,
-                                     _VBufferSliceCount.xy,
-                                     _VBufferDepthEncodingParams,
-                                     _VBufferDepthDecodingParams);
+ float4 reprojValue = SampleVBuffer(TEXTURE3D_PARAM(_VBufferLightingHistory, s_linear_clamp_sampler),
+                                    centerWS,
+                                    _PrevViewProjMatrix,
+                                    _VBufferResolution,
+                                    _VBufferSliceCount.xy,
+                                    _VBufferDepthEncodingParams,
+                                    _VBufferDepthDecodingParams,
+                                    false, false, true);
// Compute the exponential moving average over 'n' frames:
// X = (1 - a) * ValueAtFrame[n] + a * AverageOverPreviousFrames.
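A minimal sketch of the blend this comment describes; 'blendFactor', 'reprojSuccess' and 'currentValue' are illustrative names, not identifiers from the file:

// With a constant feedback 'a', X_n = (1 - a) * x_n + a * X_{n-1} weights the sample from
// k frames ago by (1 - a) * a^k, i.e. a = 0.9 averages over roughly 1 / (1 - a) = 10 frames.
float  blendFactor  = reprojSuccess ? 0.9 : 0;                       // drop the history if reprojection failed
float4 blendedValue = lerp(currentValue, reprojValue, blendFactor);  // lerp(x, y, a) == (1 - a) * x + a * y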

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Sky/AtmosphericScattering/AtmosphericScattering.hlsl (14 lines changed)


float fogFactor = 0;
#if (SHADEROPTIONS_VOLUMETRIC_LIGHTING_PRESET != 0)
- float4 volFog = SampleInScatteredRadianceAndTransmittance(TEXTURE3D_PARAM(_VBufferLighting, s_trilinear_clamp_sampler),
-                                                           posInput.positionNDC, posInput.linearDepth,
-                                                           _VBufferResolution,
-                                                           _VBufferSliceCount.xy,
-                                                           _VBufferDepthEncodingParams,
-                                                           _VBufferDepthDecodingParams);
+ float4 volFog = SampleVolumetricLighting(TEXTURE3D_PARAM(_VBufferLighting, s_linear_clamp_sampler),
+                                          posInput.positionNDC,
+                                          posInput.linearDepth,
+                                          _VBufferResolution,
+                                          _VBufferSliceCount.xy,
+                                          _VBufferDepthEncodingParams,
+                                          _VBufferDepthDecodingParams,
+                                          true, true);
fogColor = volFog.rgb;
fogFactor = 1 - volFog.a;
#else
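For context (not part of this hunk): volFog.a is the transmittance along the view ray and volFog.rgb the pre-integrated in-scattered radiance, so a physically based composite of an opaque surface behind the fog looks like the sketch below; the helper name is illustrative:

// Illustrative sketch of how the two outputs combine for a surface behind the fog volume.
float3 ApplyVolumetricFog(float3 surfaceRadiance, float4 volFog)
{
    return surfaceRadiance * volFog.a + volFog.rgb; // attenuate by transmittance, then add in-scattering
}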
