
Fix the edge darkening issue with volumetric lighting

/main
Evgenii Golubev · 7 years ago
Current commit: 8ebbdf07
3 changed files with 13 additions and 9 deletions
1. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/Volumetrics/VBuffer.hlsl (12 changes)
2. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/Volumetrics/VolumetricLighting.compute (9 changes)
3. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Sky/OpaqueAtmosphericScattering.shader (1 change)

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/Volumetrics/VBuffer.hlsl (12 changes)


  // Performs trilinear reconstruction of the V-Buffer.
  // If (clampToEdge == false), out-of-bounds loads return 0.
- float4 SampleVBuffer(TEXTURE3D_ARGS(VBufferLighting, trilinearSampler), bool clampToEdge,
+ float4 SampleVBuffer(TEXTURE3D_ARGS(VBuffer, trilinearSampler), bool clampToEdge,
                       float2 positionNDC, float linearDepth,
                       float2 VBufferSliceCount,
                       float4 VBufferDepthEncodingParams,

          float w = ComputeLerpPositionForLogEncoding(z, d, VBufferSliceCount, VBufferDepthDecodingParams);
  #endif
-         return SAMPLE_TEXTURE3D_LOD(VBufferLighting, trilinearSampler, float3(uv, w), 0);
+         return SAMPLE_TEXTURE3D_LOD(VBuffer, trilinearSampler, float3(uv, w), 0);
      }
      else
      {

                           VBufferSliceCount,
                           VBufferDepthEncodingParams,
                           VBufferDepthDecodingParams);
- #else // Perform biquadratic reconstruction in XY, linear in Z, using 4x trilinear taps.
+ #else // Perform biquadratic reconstruction in XY, linear in Z, using 4x trilinear taps (3x3x2 texels in total).
      float2 uv = positionNDC;
      float2 xy = uv * VBufferResolution.xy;
      float2 ic = floor(xy);

      float2 rcpRes = VBufferResolution.zw;
      // TODO: reconstruction should be performed in the perceptual space (e.i., after tone mapping).
      // But our VBuffer is linear. How to achieve that?
+     // Note: for correct filtering, the data has to be stored in the perceptual space.
+     // This means storing the tone mapped radiance and transmittance instead of optical depth.
+     // See "A Fresh Look at Generalized Sampling", p. 51.
      float4 L = (weights[0].x * weights[0].y) * SAMPLE_TEXTURE3D_LOD(VBufferLighting, trilinearSampler, float3((ic + float2(offsets[0].x, offsets[0].y)) * rcpRes, w), 0)  // Top left
               + (weights[1].x * weights[0].y) * SAMPLE_TEXTURE3D_LOD(VBufferLighting, trilinearSampler, float3((ic + float2(offsets[1].x, offsets[0].y)) * rcpRes, w), 0)  // Top right

      // TODO: add some animated noise to the reconstructed radiance.
-     return float4(L.rgb, Transmittance(L.a));
+     return float4(FastTonemapInvert(L.rgb), L.a);
  }
  #endif // UNITY_VBUFFER_INCLUDED
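The updated comment above describes the reconstruction filter: a quadratic B-spline across XY, evaluated with hardware filtering as 2x2 taps over a 3x3 texel footprint, plus linear filtering across two depth slices in Z, hence 4 trilinear fetches covering 3x3x2 texels. As a reference for how such a filter folds into two linear taps per axis, here is a hedged one-dimensional sketch; it illustrates the idea only, and the texel-centre convention and the actual weights/offsets helper used by the pipeline may differ.

// Sketch: decompose a quadratic B-spline (3-texel footprint) into 2 linear taps.
// 'x' is the sample position in texel units, with texel centres at integers.
// Illustration only; not the exact filtering helper shipped with HDRP.
void QuadraticBSplineTaps(float x, out float2 tapWeights, out float2 tapCoords)
{
    float i = floor(x + 0.5);               // nearest texel centre
    float f = x - i;                        // fractional offset in [-0.5, 0.5)

    float wm = 0.5 * (0.5 - f) * (0.5 - f); // B-spline weight of texel i - 1
    float wp = 0.5 * (0.5 + f) * (0.5 + f); // B-spline weight of texel i + 1
                                            // texel i itself gets 0.75 - f * f

    // Split the centre weight evenly between the two taps.
    tapWeights = float2(0.5 * (1.0 - f),    // = wm + half of the centre weight
                        0.5 * (1.0 + f));   // = wp + half of the centre weight

    // Position each tap so the hardware's linear blend reproduces wm and wp exactly.
    tapCoords = float2(i - wm / tapWeights.x,
                       i + wp / tapWeights.y);
}

Applying the same decomposition independently to X and Y yields 2x2 weighted taps of the kind summed in the SAMPLE_TEXTURE3D_LOD expression above, while the trilinear Z component supplies the second depth slice.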

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Lighting/Volumetrics/VolumetricLighting.compute (9 changes)


  // Reproject the history at 'centerWS'.
  float2 reprojPosNDC = ComputeNormalizedDeviceCoordinates(centerWS, _PrevViewProjMatrix);
  float reprojZ = mul(_PrevViewProjMatrix, float4(centerWS, 1)).w;
- float4 reprojValue = SampleVBuffer(TEXTURE3D_PARAM(_VBufferLightingHistory, s_trilinear_clamp_sampler),
-                                    false, reprojPosNDC, reprojZ,
+ float4 reprojValue = SampleVBuffer(TEXTURE3D_PARAM(_VBufferLightingHistory, s_trilinear_clamp_sampler), false,
+                                    reprojPosNDC, reprojZ,
                                     _VBufferSliceCount.xy,
                                     _VBufferDepthEncodingParams,
                                     _VBufferDepthDecodingParams);

  opticalDepth += 0.5 * extinction * dt;

  // Store the voxel data.
- _VBufferLightingIntegral[voxelCoord] = float4(totalRadiance, opticalDepth);
+ // Note: for correct filtering, the data has to be stored in the perceptual space.
+ // This means storing the tone mapped radiance and transmittance instead of optical depth.
+ // See "A Fresh Look at Generalized Sampling", p. 51.
+ _VBufferLightingIntegral[voxelCoord] = float4(FastTonemap(totalRadiance), Transmittance(opticalDepth));

  // Compute the optical depth up to the end of the interval.
  opticalDepth += 0.5 * extinction * dt;
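With this change each voxel stores radiance after a reversible tonemap and opacity as transmittance instead of optical depth, so that the texture filtering, biquadratic reconstruction, and history reprojection above all blend quantities that average sensibly. Averaging optical depth and exponentiating afterwards systematically underestimates transmittance across sharp density edges (for example, averaging tau = 0 and tau = 10 gives exp(-5) ≈ 0.007, whereas averaging the two transmittances gives ≈ 0.5), which is consistent with the edge darkening this commit fixes. Below is a minimal sketch of the encode/decode round trip, assuming the usual reversible "fast" tonemapper and Beer-Lambert transmittance; the exact definitions in CoreRP/ShaderLibrary/Color.hlsl and the HDRP volume-rendering headers may differ slightly.

// Sketch only: assumed stand-ins for the library helpers referenced in the diff.
float3 FastTonemap(float3 c)            // compress radiance into [0, 1) before filtering
{
    return c * rcp(1.0 + max(c.r, max(c.g, c.b)));
}

float3 FastTonemapInvert(float3 c)      // exact inverse, applied after the filtered fetch
{
    return c * rcp(1.0 - max(c.r, max(c.g, c.b)));
}

float Transmittance(float opticalDepth) // Beer-Lambert law
{
    return exp(-opticalDepth);
}

// What the integration pass writes per voxel ...
float4 EncodeVBufferVoxel(float3 totalRadiance, float opticalDepth)
{
    return float4(FastTonemap(totalRadiance), Transmittance(opticalDepth));
}

// ... and what SampleVBuffer() hands back after undoing the tonemap;
// the alpha channel is already transmittance and is passed through.
float4 DecodeVBufferSample(float4 filteredTexel)
{
    return float4(FastTonemapInvert(filteredTexel.rgb), filteredTexel.a);
}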

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Sky/OpaqueAtmosphericScattering.shader (1 change)


  // #pragma enable_d3d11_debug_symbols
  #include "CoreRP/ShaderLibrary/Common.hlsl"
+ #include "CoreRP/ShaderLibrary/Color.hlsl"
  #include "../ShaderVariables.hlsl"
  #include "AtmosphericScattering/AtmosphericScattering.hlsl"
