
Merge pull request #170 from EvgeniiG/master

Fix depth offset for POM
/Branch_Batching2
GitHub · 7 years ago
Current commit: b7cd7bb8
11 changed files with 132 additions and 70 deletions
  1. Assets/ScriptableRenderPipeline/HDRenderPipeline/HDRenderPipeline.cs (31 changes)
  2. Assets/ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/Resources/shadeopaque.compute (1 change)
  3. Assets/ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/LitData.hlsl (50 changes)
  4. Assets/ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/Resources/CombineSubsurfaceScattering.shader (1 change)
  5. Assets/ScriptableRenderPipeline/HDRenderPipeline/ShaderVariables.hlsl (8 changes)
  6. Assets/ScriptableRenderPipeline/HDRenderPipeline/Utilities.cs (15 changes)
  7. Assets/ScriptableRenderPipeline/ShaderLibrary/Common.hlsl (65 changes)
  8. Assets/ScriptableRenderPipeline/ShaderLibrary/PerPixelDisplacement.hlsl (18 changes)
  9. Assets/TestScenes/HDTest/GraphicTest/Parallax Occlusion Mapping/Material/POM - Rock.mat (2 changes)
  10. Assets/TestScenes/HDTest/GraphicTest/Parallax Occlusion Mapping/Material/POM - Wood.mat (2 changes)
  11. Assets/TestScenes/HDTest/GlobalIlluminationTest/LightingData.asset.meta (9 changes)

Assets/ScriptableRenderPipeline/HDRenderPipeline/HDRenderPipeline.cs (31 changes)


public struct HDCamera
{
public Camera camera;
public Vector4 screenSize;
+ public Vector4 invProjectionParam;
}
public class GBufferManager

public void PushGlobalParams(HDCamera hdCamera, ScriptableRenderContext renderContext, SubsurfaceScatteringSettings sssParameters)
{
+ var cmd = new CommandBuffer {name = "Push Global Parameters"};
+ cmd.SetGlobalVector("_ScreenSize", hdCamera.screenSize);
+ cmd.SetGlobalMatrix("_ViewProjMatrix", hdCamera.viewProjectionMatrix);
+ cmd.SetGlobalMatrix("_InvViewProjMatrix", hdCamera.invViewProjectionMatrix);
+ cmd.SetGlobalMatrix("_InvProjMatrix", hdCamera.invProjectionMatrix);
+ cmd.SetGlobalVector("_InvProjParam", hdCamera.invProjectionParam);
+ // TODO: cmd.SetGlobalInt() does not exist, so we are forced to use Shader.SetGlobalInt() instead.
if (m_SkyManager.IsSkyValid())
{
m_SkyManager.SetGlobalSkyTexture();

// Broadcast SSS parameters to all shaders.
Shader.SetGlobalInt("_TransmissionFlags", sssParameters.transmissionFlags);
- Shader.SetGlobalFloatArray("_ThicknessRemaps", sssParameters.thicknessRemaps);
- Shader.SetGlobalVectorArray("_HalfRcpVariancesAndLerpWeights", sssParameters.halfRcpVariancesAndLerpWeights);
+ cmd.SetGlobalFloatArray("_ThicknessRemaps", sssParameters.thicknessRemaps);
+ cmd.SetGlobalVectorArray("_HalfRcpVariancesAndLerpWeights", sssParameters.halfRcpVariancesAndLerpWeights);
- Shader.EnableKeyword("_SUBSURFACE_SCATTERING");
+ cmd.EnableShaderKeyword("_SUBSURFACE_SCATTERING");
- Shader.DisableKeyword("_SUBSURFACE_SCATTERING");
+ cmd.DisableShaderKeyword("_SUBSURFACE_SCATTERING");
- var cmd = new CommandBuffer {name = "Push Global Parameters"};
- cmd.SetGlobalVector("_ScreenSize", hdCamera.screenSize);
- cmd.SetGlobalMatrix("_ViewProjMatrix", hdCamera.viewProjectionMatrix);
- cmd.SetGlobalMatrix("_InvViewProjMatrix", hdCamera.invViewProjectionMatrix);
renderContext.ExecuteCommandBuffer(cmd);
cmd.Dispose();

var cmd = new CommandBuffer() { name = "Subsurface Scattering Pass" };
// Perform the vertical SSS filtering pass.
+ m_FilterSubsurfaceScattering.SetMatrix("_InvProjMatrix", hdCamera.invProjectionMatrix);
m_FilterSubsurfaceScattering.SetVectorArray("_FilterKernels", sssParameters.filterKernels);
m_FilterSubsurfaceScattering.SetVectorArray("_HalfRcpWeightedVariances", sssParameters.halfRcpWeightedVariances);
cmd.SetGlobalTexture("_IrradianceSource", m_CameraSubsurfaceBufferRT);

// Perform the horizontal SSS filtering pass, and combine diffuse and specular lighting.
+ m_FilterAndCombineSubsurfaceScattering.SetMatrix("_InvProjMatrix", hdCamera.invProjectionMatrix);
m_FilterAndCombineSubsurfaceScattering.SetVectorArray("_FilterKernels", sssParameters.filterKernels);
m_FilterAndCombineSubsurfaceScattering.SetVectorArray("_HalfRcpWeightedVariances", sssParameters.halfRcpWeightedVariances);
cmd.SetGlobalTexture("_IrradianceSource", m_CameraFilteringBufferRT);

Assets/ScriptableRenderPipeline/HDRenderPipeline/Lighting/TilePass/Resources/shadeopaque.compute (1 change)


[numthreads(TILE_SIZE, TILE_SIZE, 1)]
void SHADE_OPAQUE_ENTRY(uint2 dispatchThreadId : SV_DispatchThreadID, uint2 groupId : SV_GroupID)
{
// input.positionCS is SV_Position
uint2 pixelCoord = dispatchThreadId;
PositionInputs posInput = GetPositionInput(pixelCoord.xy, _ScreenSize.zw);
float depth = LOAD_TEXTURE2D(_CameraDepthTexture, posInput.unPositionSS).x;

Assets/ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/LitData.hlsl (50 changes)


#include "ShaderLibrary/PerPixelDisplacement.hlsl"
- void ApplyPerPixelDisplacement(FragInputs input, float3 V, inout LayerTexCoord layerTexCoord)
+ float ApplyPerPixelDisplacement(FragInputs input, float3 V, inout LayerTexCoord layerTexCoord)
{
bool ppdEnable = false;
bool isPlanar = false;

float lod = ComputeTextureLOD(minUvSize);
PerPixelHeightDisplacementParam ppdParam;
+ float height; // final height processed
// planar/triplanar
float2 uvXZ;

if (isTriplanar)
{
float3 viewDirTS;
+ float planeHeight;
int numSteps;
// Perform a POM in each direction and modify appropriate texture coordinate

- float2 offsetZY = ParallaxOcclusionMapping(lod, _PPDLodThreshold, numSteps, viewDirTS, maxHeight, ppdParam);
+ float2 offsetZY = ParallaxOcclusionMapping(lod, _PPDLodThreshold, numSteps, viewDirTS, maxHeight, ppdParam, planeHeight);
+ height = layerTexCoord.triplanarWeights.x * planeHeight;
- float2 offsetXZ = ParallaxOcclusionMapping(lod, _PPDLodThreshold, numSteps, viewDirTS, maxHeight, ppdParam);
+ float2 offsetXZ = ParallaxOcclusionMapping(lod, _PPDLodThreshold, numSteps, viewDirTS, maxHeight, ppdParam, planeHeight);
+ height += layerTexCoord.triplanarWeights.y * planeHeight;
- float2 offsetXY = ParallaxOcclusionMapping(lod, _PPDLodThreshold, numSteps, viewDirTS, maxHeight, ppdParam);
+ float2 offsetXY = ParallaxOcclusionMapping(lod, _PPDLodThreshold, numSteps, viewDirTS, maxHeight, ppdParam, planeHeight);
+ height += layerTexCoord.triplanarWeights.z * planeHeight;
}
else
{

float3 viewDirTS = isPlanar ? float3(uvXZ, V.y) : TransformWorldToTangent(V, worldToTangent);
int numSteps = (int)lerp(_PPDMaxSamples, _PPDMinSamples, viewDirTS.z);
- float2 offset = ParallaxOcclusionMapping(lod, _PPDLodThreshold, numSteps, viewDirTS, maxHeight, ppdParam);
+ float2 offset = ParallaxOcclusionMapping(lod, _PPDLodThreshold, numSteps, viewDirTS, maxHeight, ppdParam, height);
+ // Since POM "pushes" geometry inwards (rather than extrude it), { height = height - 1 }.
+ // Since the result is used as a 'depthOffsetVS', it needs to be positive, so we flip the sign.
+ return maxHeight - height * maxHeight;
+ return 0.0;
}
// Calculate displacement for per vertex displacement mapping

GetLayerTexCoord(input, layerTexCoord);
- ApplyPerPixelDisplacement(input, V, layerTexCoord);
- float depthOffset = 0.0;
+ float depthOffset = ApplyPerPixelDisplacement(input, V, layerTexCoord);
- ApplyDepthOffsetPositionInput(V, depthOffset, posInput);
+ ApplyDepthOffsetPositionInput(V, depthOffset, _ViewProjMatrix, posInput);
#endif
// We perform the conversion to world of the normalTS outside of the GetSurfaceData
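
As a quick sanity check of the value returned above (a sketch using the code's own names: height ∈ [0, 1] is the POM intersection height, with 1.0 at the top of the height field per the convention in PerPixelDisplacement.hlsl, and maxHeight is the height-map amplitude):

$$\text{depthOffsetVS} = -\,\text{maxHeight}\,(\text{height} - 1) = \text{maxHeight}\,(1 - \text{height}) = \text{maxHeight} - \text{height}\cdot\text{maxHeight} \;\ge\; 0,$$

so the offset is zero when the ray intersects the top of the height field and approaches maxHeight as POM pushes the hit point towards the bottom, matching the comment that the result must be a positive view-space depth offset.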

// - Blend Mask uses the same mapping as the main layer (UV0, Planar, Triplanar)
// From these rules it means that PPD is enabled only if the user 1) asks for it, 2) there is a heightmap enabled on the active layer, 3) the mapping is the same for all layers respecting 2), 4) the mapping is UV0, planar or triplanar
// Most constraints are handled by the inspector (i.e. the UI), like the mapping constraint, and are assumed in the shader.
- void ApplyPerPixelDisplacement(FragInputs input, float3 V, inout LayerTexCoord layerTexCoord)
+ float ApplyPerPixelDisplacement(FragInputs input, float3 V, inout LayerTexCoord layerTexCoord)
{
bool ppdEnable = false;
bool isPlanar = false;

ppdParam.mainHeightInfluence = 0.0;
#endif
+ float height; // final height processed
// We need to calculate the texture space direction. It depends on the mapping.
if (isTriplanar)
{

// TODO: do we support object triplanar ? See ComputeLayerTexCoord
float3 viewDirTS = isPlanar ? float3(-V.xz, V.y) : TransformWorldToTangent(V, worldToTangent);
int numSteps = (int)lerp(_PPDMaxSamples, _PPDMinSamples, viewDirTS.z);
- float2 offset = ParallaxOcclusionMapping(lod, _PPDLodThreshold, numSteps, viewDirTS, maxHeight, ppdParam);
+ float2 offset = ParallaxOcclusionMapping(lod, _PPDLodThreshold, numSteps, viewDirTS, maxHeight, ppdParam, height);
float4 planarWeight = float4( layerTexCoord.base0.mappingType == UV_MAPPING_PLANAR ? 1.0 : 0.0,
layerTexCoord.base1.mappingType == UV_MAPPING_PLANAR ? 1.0 : 0.0,
layerTexCoord.base2.mappingType == UV_MAPPING_PLANAR ? 1.0 : 0.0,
layerTexCoord.base3.mappingType == UV_MAPPING_PLANAR ? 1.0 : 0.0);

layerTexCoord.base0.uv += offsetWeights.x * offset;
layerTexCoord.base1.uv += offsetWeights.y * offset;
layerTexCoord.base2.uv += offsetWeights.z * offset;

layerTexCoord.details2.uv += offsetWeights.z * offset;
layerTexCoord.details3.uv += offsetWeights.w * offset;
}
+ // Since POM "pushes" geometry inwards (rather than extrude it), { height = height - 1 }.
+ // Since the result is used as a 'depthOffsetVS', it needs to be positive, so we flip the sign.
+ return maxHeight - height * maxHeight;
+ return 0.0;
}
// Calculate displacement for per vertex displacement mapping

ZERO_INITIALIZE(LayerTexCoord, layerTexCoord);
GetLayerTexCoord(input, layerTexCoord);
- ApplyPerPixelDisplacement(input, V, layerTexCoord);
+ float depthOffset = ApplyPerPixelDisplacement(input, V, layerTexCoord);
- float depthOffset = 0.0;
- ApplyDepthOffsetPositionInput(V, depthOffset, posInput);
+ ApplyDepthOffsetPositionInput(V, depthOffset, _ViewProjMatrix, posInput);
#endif
SurfaceData surfaceData0, surfaceData1, surfaceData2, surfaceData3;

Assets/ScriptableRenderPipeline/HDRenderPipeline/Material/Lit/Resources/CombineSubsurfaceScattering.shader (1 change)


float4 _FilterKernels[N_PROFILES][N_SAMPLES]; // RGB = weights, A = radial distance
float4 _HalfRcpWeightedVariances[N_PROFILES]; // RGB for chromatic, A for achromatic
+ float4x4 _InvProjMatrix;
TEXTURE2D_FLOAT(_CameraDepthTexture);
TEXTURE2D(_GBufferTexture2);

Assets/ScriptableRenderPipeline/HDRenderPipeline/ShaderVariables.hlsl (8 changes)


// ----------------------------------------------------------------------------
// TODO: move this to constant buffer by Pass
- float4x4 _InvViewProjMatrix;
- float4 _ScreenSize;
+ float4 _ScreenSize;
+ float4x4 _InvViewProjMatrix;
+ float4x4 _InvProjMatrix;
+ float4 _InvProjParam;
float4x4 GetWorldToViewMatrix()
{

return mul(GetWorldToHClipMatrix(), float4(positionWS, 1.0));
}
- // Computes world space view direction, from object space position
+ // Computes the world space view direction (pointing towards the camera).
float3 GetWorldSpaceNormalizeViewDir(float3 positionWS)
{
float3 V = _WorldSpaceCameraPos.xyz - positionWS;

Assets/ScriptableRenderPipeline/HDRenderPipeline/Utilities.cs (15 changes)


var gpuProj = GL.GetGPUProjectionMatrix(camera.projectionMatrix, false);
var gpuVP = gpuProj * camera.worldToCameraMatrix;
+ // Ref: An Efficient Depth Linearization Method for Oblique View Frustums, Eq. 6.
+ Vector4 invProjectionParam = new Vector4(gpuProj.m20 / (gpuProj.m00 * gpuProj.m23),
+                                          gpuProj.m21 / (gpuProj.m11 * gpuProj.m23),
+                                          -1.0f / gpuProj.m23,
+                                          (-gpuProj.m22
+                                           + gpuProj.m20 * gpuProj.m02 / gpuProj.m00
+                                           + gpuProj.m21 * gpuProj.m12 / gpuProj.m11) / gpuProj.m23);
+ hdCamera.invProjectionParam = invProjectionParam;
return hdCamera;
}
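
A sketch of where these coefficients come from, assuming the usual Unity convention that the last row of gpuProj is (0, 0, -1, 0), i.e. w_clip = -z_view (an assumption about the elided context, not something shown in the hunk): writing the NDC coordinates of a view-space point (x_v, y_v, z_v) as x_n = x_clip / w_clip, y_n = y_clip / w_clip, z_n = z_clip / w_clip and eliminating x_v and y_v from the third row of the projection gives

$$\frac{1}{z_v} = \frac{m_{20}}{m_{00}\,m_{23}}\,x_n + \frac{m_{21}}{m_{11}\,m_{23}}\,y_n - \frac{1}{m_{23}}\,z_n + \frac{-m_{22} + m_{20} m_{02}/m_{00} + m_{21} m_{12}/m_{11}}{m_{23}},$$

i.e. 1/z_v = dot((x_n, y_n, z_n, 1), invProjectionParam), with the four terms appearing in exactly the order of the Vector4 above. This is the quantity the new LinearEyeDepth(positionSS, depthRaw, invProjParam) in Common.hlsl inverts with rcp(); it then negates the result because the right-handed view space has z_v < 0 in front of the camera.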

material.SetVector("_ScreenSize", hdCamera.screenSize);
material.SetMatrix("_ViewProjMatrix", hdCamera.viewProjectionMatrix);
material.SetVector("_ScreenSize", hdCamera.screenSize);
material.SetMatrix("_ViewProjMatrix", hdCamera.viewProjectionMatrix);
material.SetMatrix("_InvProjMatrix", hdCamera.invProjectionMatrix);
material.SetVector("_InvProjParam", hdCamera.invProjectionParam);
}
// TEMP: These functions should be implemented C++ side, for now do it in C#

Assets/ScriptableRenderPipeline/ShaderLibrary/Common.hlsl (65 changes)


// World position reconstruction / transformation
// ----------------------------------------------------------------------------
- // Z buffer to linear 0..1 depth (0 at near plane, 1 at far plane)
+ // Z buffer to linear 0..1 depth (0 at near plane, 1 at far plane).
+ // Does not correctly handle oblique view frustums.
- // Z buffer to linear 0..1 depth (0 at camera position, 1 at far plane)
+ // Z buffer to linear 0..1 depth (0 at camera position, 1 at far plane).
+ // Does not correctly handle oblique view frustums.
- // Z buffer to linear depth
+ // Z buffer to linear depth.
+ // Does not correctly handle oblique view frustums.
+ // Z buffer to linear depth.
+ // Correctly handles oblique view frustums. Only valid for projection matrices!
+ // Ref: An Efficient Depth Linearization Method for Oblique View Frustums, Eq. 6.
+ float LinearEyeDepth(float2 positionSS, float depthRaw, float4 invProjParam)
+ {
+ float4 positionCS = float4(positionSS * 2.0 - 1.0, depthRaw, 1.0);
+ float viewSpaceZ = rcp(dot(positionCS, invProjParam));
+ // The view space uses a right-handed coordinate system.
+ return -viewSpaceZ;
+ }
struct PositionInputs
{
// Normalize screen position (offset by 0.5)

float depthRaw; // raw depth from depth buffer
float depthVS;
- float4 positionCS;
float3 positionWS;
};

// depthRaw and depthVS come directly from .zw of SV_Position
void UpdatePositionInput(float depthRaw, float depthVS, float3 positionWS, inout PositionInputs posInput)
{
- posInput.depthRaw = depthRaw;
- posInput.depthVS = depthVS;
- // TODO: We revert for DX but maybe it is not the case of OGL ? Test the define ?
- posInput.positionCS = float4((posInput.positionSS - 0.5) * float2(2.0, -2.0), depthRaw, 1.0) * depthVS; // depthVS is SV_Position.w
+ posInput.depthRaw = depthRaw;
+ posInput.depthVS = depthVS;
posInput.positionWS = positionWS;
}

// It may be necessary to flip the Y axis as the origin of the screen-space coordinate system
// of Direct3D is at the top left corner of the screen, with the Y axis pointing downwards.
- void UpdatePositionInput(float depth, float4x4 invViewProjectionMatrix, float4x4 ViewProjectionMatrix,
+ void UpdatePositionInput(float depthRaw, float4x4 invViewProjMatrix, float4x4 ViewProjMatrix,
- posInput.depthRaw = depth;
+ posInput.depthRaw = depthRaw;
- posInput.positionCS = float4(screenSpacePos * 2.0 - 1.0, depth, 1.0);
- float4 hpositionWS = mul(invViewProjectionMatrix, posInput.positionCS);
+ float4 positionCS = float4(screenSpacePos * 2.0 - 1.0, depthRaw, 1.0);
+ float4 hpositionWS = mul(invViewProjMatrix, positionCS);
- posInput.depthVS = mul(ViewProjectionMatrix, float4(posInput.positionWS, 1.0)).w;
- posInput.positionCS *= posInput.depthVS;
+ posInput.depthVS = mul(ViewProjMatrix, float4(posInput.positionWS, 1.0)).w;
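
In equation form, the reconstruction this overload performs (with M_VP the view-projection matrix and the screen position remapped to [-1, 1]; the perspective divide of hpositionWS by its w component sits in the elided context lines) is

$$\text{positionWS} = \frac{\bigl(M_{VP}^{-1}\,p_{CS}\bigr)_{xyz}}{\bigl(M_{VP}^{-1}\,p_{CS}\bigr)_{w}}, \qquad p_{CS} = (x_n,\, y_n,\, \text{depthRaw},\, 1), \qquad \text{depthVS} = \bigl(M_{VP}\,(\text{positionWS},\, 1)\bigr)_{w}.$$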
- float3 ComputeViewSpacePosition(float2 positionSS, float rawDepth, float4x4 invProjMatrix)
+ // It may be necessary to flip the Y axis as the origin of the screen-space coordinate system
+ // of Direct3D is at the top left corner of the screen, with the Y axis pointing downwards.
+ float3 ComputeViewSpacePosition(float2 positionSS, float depthRaw, float4x4 invProjMatrix, bool flipY = false)
- float4 positionCS = float4(positionSS * 2.0 - 1.0, rawDepth, 1.0);
+ float2 screenSpacePos;
+ screenSpacePos.x = positionSS.x;
+ screenSpacePos.y = flipY ? 1.0 - positionSS.y : positionSS.y;
+ float4 positionCS = float4(screenSpacePos * 2.0 - 1.0, depthRaw, 1.0);
float4 positionVS = mul(invProjMatrix, positionCS);
// The view space uses a right-handed coordinate system.
positionVS.z = -positionVS.z;

- // depthOffsetVS is always in the direction of the view vector (V)
- void ApplyDepthOffsetPositionInput(float3 V, float depthOffsetVS, inout PositionInputs posInput)
+ // 'depthOffsetVS' is in the direction opposite to the view vector 'V', i.e. away from the camera.
+ void ApplyDepthOffsetPositionInput(float3 V, float depthOffsetVS, float4x4 viewProjMatrix, inout PositionInputs posInput)
- posInput.depthVS += depthOffsetVS;
- // TODO: it is an approx, need a correct value where we use projection matrix to reproject the depth from VS
- posInput.depthRaw = posInput.positionCS.z / posInput.depthVS;
- posInput.positionWS -= depthOffsetVS * V;
- // TODO: Do we need to flip Y axis here on OGL ?
- posInput.positionCS = float4(posInput.positionSS.xy * 2.0 - 1.0, posInput.depthRaw, 1.0) * posInput.depthVS;
+ posInput.depthVS += depthOffsetVS;
+ // Just adding the offset along the view vector is sufficient for the world position
+ posInput.positionWS += V * depthOffsetVS;
+ float4 positionCS = mul(viewProjMatrix, float4(posInput.positionWS, 1.0));
+ posInput.depthRaw = positionCS.z / positionCS.w;
}
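
The rewritten body resolves the old TODO ("need a correct value where we use projection matrix to reproject the depth"): rather than approximating the new raw depth from a cached positionCS, the offset world-space position is reprojected exactly,

$$p_{CS}' = M_{VP}\,\bigl(\text{positionWS} + \text{depthOffsetVS}\cdot V,\; 1\bigr), \qquad \text{depthRaw}' = \frac{p'_{CS,z}}{p'_{CS,w}},$$

at the cost of one extra matrix multiply per pixel that uses a depth offset.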
// Generates a triangle in homogeneous clip space, s.t.

Assets/ScriptableRenderPipeline/ShaderLibrary/PerPixelDisplacement.hlsl (18 changes)


// it returns the offset to apply to the UVSet provided in PerPixelHeightDisplacementParam
// viewDirTS is view vector in texture space matching the UVSet
// ref: https://www.gamedev.net/resources/_/technical/graphics-programming-and-theory/a-closer-look-at-parallax-occlusion-mapping-r3262
- float2 ParallaxOcclusionMapping(float lod, float lodThreshold, int numSteps, float3 viewDirTS, float maxHeight, PerPixelHeightDisplacementParam ppdParam)
+ float2 ParallaxOcclusionMapping(float lod, float lodThreshold, int numSteps, float3 viewDirTS, float maxHeight, PerPixelHeightDisplacementParam ppdParam, out float outHeight)
// TEMP: 0.1 to achieve parity between tessellation and POM w.r.t. height for a single tile.
// TODO: for tiling, the max. height of the tessellated height map should be reduced by NumTiles.
maxHeight *= 0.1;
// Convention: 1.0 is top, 0.0 is bottom - POM is always inward, no extrusion
float stepSize = 1.0 / (float)numSteps;

float pt1 = rayHeight;
float delta0 = pt0 - prevHeight;
float delta1 = pt1 - currHeight;
// Secant method to affine the search
// Ref: Faster Relief Mapping Using the Secant Method - Eric Risser
for (int i = 0; i < 5; ++i)

}
#else // regular POM intersection
//float pt1 = rayHeight;
//float delta0 = pt0 - prevHeight;
//float delta1 = pt1 - currHeight;
//float intersectionHeight = (pt0 * delta1 - pt1 * delta0) / (delta1 - delta0);

float ratio = delta0 / (delta0 + delta1);
float2 offset = texOffsetCurrent - ratio * texOffsetPerStep;
+ currHeight = ComputePerPixelHeightDisplacement(offset, lod, ppdParam);
+ outHeight = currHeight;
// Fade the effect with lod (allow to avoid pop when switching to a discrete LOD mesh)
offset *= (1.0 - saturate(lod - lodThreshold));
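
For reference, the commented-out intersectionHeight line above is the standard secant / linear-intersection step: with delta0 = pt0 - prevHeight and delta1 = pt1 - currHeight (the signed gaps between the ray and the height field at the previous and current samples), and assuming both vary linearly over the last step, the crossing occurs at

$$s^{*} = \frac{\delta_0}{\delta_0 - \delta_1}, \qquad h^{*} = \text{pt0} + s^{*}\,(\text{pt1} - \text{pt0}) = \frac{\text{pt0}\,\delta_1 - \text{pt1}\,\delta_0}{\delta_1 - \delta_0}.$$

Instead of using this closed form for the height, the new code re-evaluates the height map at the refined texture offset and exports that value through the new outHeight parameter, which LitData.hlsl then converts into the view-space depth offset.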

Assets/TestScenes/HDTest/GraphicTest/Parallax Occlusion Mapping/Material/POM - Rock.mat (2 changes)


- _GlossMapScale: 1
- _Glossiness: 0.5
- _GlossyReflections: 1
- - _HeightAmplitude: 0.09
+ - _HeightAmplitude: 0.8
- _HeightCenter: 0.5
- _HorizonFade: 1
- _MaterialID: 0

Assets/TestScenes/HDTest/GraphicTest/Parallax Occlusion Mapping/Material/POM - Wood.mat (2 changes)


- _GlossMapScale: 1
- _Glossiness: 0.5
- _GlossyReflections: 1
- - _HeightAmplitude: 0.17
+ - _HeightAmplitude: 1.27
- _HeightCenter: 0.5
- _HorizonFade: 1
- _MaterialID: 0

Assets/TestScenes/HDTest/GlobalIlluminationTest/LightingData.asset.meta (9 changes)


fileFormatVersion: 2
guid: 3d51fc2c60f333c44b613049001dfba8
timeCreated: 1485441498
licenseType: Pro
NativeFormatImporter:
mainObjectFileID: 25800000
userData:
assetBundleName:
assetBundleVariant: