
Use current frame motion vector

main
Frédéric Vauchelles, 7 years ago
Current commit: e98eb92e
3 changed files with 56 additions and 98 deletions
  1. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Camera/HDCameraFrameHistoryType.cs (1 change)
  2. ScriptableRenderPipeline/HDRenderPipeline/HDRP/HDRenderPipeline.cs (91 changes)
  3. ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Lit/Lit.hlsl (62 changes)

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Camera/HDCameraFrameHistoryType.cs (1 change)


{
    DepthPyramid,
    ColorPyramid,
-   MotionVectors,
    VolumetricLighting,
    Count
}
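The removed MotionVectors entry indexed a per-camera history render target. Before this change the pipeline fetched it roughly as below (a sketch assembled from the deleted lines later in this diff; AllocMotionVectorRT is the allocator delegate that is also deleted below):

// Hedged sketch of the pre-change lookup: history RTs are indexed by this
// enum and lazily allocated through an allocator delegate on first use.
var velocityBufferRT = hdCamera.GetCurrentFrameRT((int)HDCameraFrameHistoryType.MotionVectors)
    ?? hdCamera.AllocHistoryFrameRT((int)HDCameraFrameHistoryType.MotionVectors, AllocMotionVectorRT);
// The previous frame's RT can be null (e.g. on the first frame), so callers
// guard it and fall back, as the deleted binding code does with blackTexture.
var previousMotionVectorsRT = hdCamera.GetPreviousFrameRT((int)HDCameraFrameHistoryType.MotionVectors);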

ScriptableRenderPipeline/HDRenderPipeline/HDRP/HDRenderPipeline.cs (91 changes)


RTHandleSystem.RTHandle m_CameraDepthBufferCopy;
RTHandleSystem.RTHandle m_CameraStencilBufferCopy;
+ RTHandleSystem.RTHandle m_VelocityBuffer;
RTHandleSystem.RTHandle m_DeferredShadowBuffer;
RTHandleSystem.RTHandle m_AmbientOcclusionBuffer;
RTHandleSystem.RTHandle m_DistortionBuffer;

m_AmbientOcclusionBuffer = RTHandles.Alloc(Vector2.one, filterMode: FilterMode.Bilinear, colorFormat: RenderTextureFormat.R8, sRGB: false, enableRandomWrite: true, name: "AmbientOcclusion");
}
+ if (m_Asset.renderPipelineSettings.supportMotionVectors)
+ {
+     m_VelocityBuffer = RTHandles.Alloc(Vector2.one, filterMode: FilterMode.Point, colorFormat: Builtin.GetVelocityBufferFormat(), sRGB: Builtin.GetVelocityBufferSRGBFlag(), enableMSAA: true, name: "Velocity");
+ }
m_DistortionBuffer = RTHandles.Alloc(Vector2.one, filterMode: FilterMode.Point, colorFormat: Builtin.GetDistortionBufferFormat(), sRGB: Builtin.GetDistortionBufferSRGBFlag(), name: "Distortion");
// TODO: For MSAA, we'll need to add a Draw path in order to support MSAA properly

RTHandles.Release(m_CameraStencilBufferCopy);
RTHandles.Release(m_AmbientOcclusionBuffer);
+ RTHandles.Release(m_VelocityBuffer);
RTHandles.Release(m_DistortionBuffer);
RTHandles.Release(m_DeferredShadowBuffer);

cmd.SetGlobalVector(HDShaderIDs._ColorPyramidSize, Vector4.one);
cmd.SetGlobalVector(HDShaderIDs._ColorPyramidScale, Vector4.one);
}
- var previousMotionVectorsRT = hdCamera.GetPreviousFrameRT((int)HDCameraFrameHistoryType.MotionVectors);
- if (previousMotionVectorsRT != null)
- {
-     cmd.SetGlobalTexture(HDShaderIDs._CameraMotionVectorsTexture, previousMotionVectorsRT);
-     cmd.SetGlobalVector(HDShaderIDs._CameraMotionVectorsSize, new Vector4(
-         previousMotionVectorsRT.referenceSize.x,
-         previousMotionVectorsRT.referenceSize.y,
-         1f / previousMotionVectorsRT.referenceSize.x,
-         1f / previousMotionVectorsRT.referenceSize.y
-     ));
-     cmd.SetGlobalVector(HDShaderIDs._CameraMotionVectorsScale, new Vector4(
-         previousMotionVectorsRT.referenceSize.x / (float)previousMotionVectorsRT.rt.width,
-         previousMotionVectorsRT.referenceSize.y / (float)previousMotionVectorsRT.rt.height,
-         1, 0.0f
-     ));
- }
- else
- {
-     cmd.SetGlobalTexture(HDShaderIDs._CameraMotionVectorsTexture, Texture2D.blackTexture);
-     cmd.SetGlobalVector(HDShaderIDs._CameraMotionVectorsSize, Vector4.one);
-     cmd.SetGlobalVector(HDShaderIDs._CameraMotionVectorsScale, Vector4.one);
- }
}
}
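Both the deleted block above and its current-frame replacement later in this file pack the same two uniforms. A minimal sketch of the convention, assuming only the RTHandle fields this diff already uses (referenceSize and rt); the helper name is hypothetical:

// Hedged sketch: _CameraMotionVectorsSize carries the logical pixel size plus
// its reciprocal; _CameraMotionVectorsScale maps full-texture UVs onto the
// used region, since an RTHandle's physical texture can exceed referenceSize.
static void SetMotionVectorGlobals(CommandBuffer cmd, RTHandleSystem.RTHandle h)
{
    cmd.SetGlobalVector(HDShaderIDs._CameraMotionVectorsSize, new Vector4(
        h.referenceSize.x, h.referenceSize.y,
        1f / h.referenceSize.x, 1f / h.referenceSize.y));
    cmd.SetGlobalVector(HDShaderIDs._CameraMotionVectorsScale, new Vector4(
        h.referenceSize.x / (float)h.rt.width,
        h.referenceSize.y / (float)h.rt.height,
        1f, 0f));
}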

// Planar probes rendering is not currently supported for orthographic camera
// Avoid rendering to prevent error log spamming
&& !camera.orthographic)
{
// Frame settings state was updated by previous render, we must recalculate it
FrameSettings.InitializeFrameSettings(camera, m_Asset.GetRenderPipelineSettings(), srcFrameSettings, ref currentFrameSettings);
}
// Init material if needed
// TODO: this should be moved outside of the camera loop, but we have no command buffer; ask Tim or Julien for details on how to do this

// Caution: We require sun light here as some skies use the sun light to render, it means that UpdateSkyEnvironment must be called after PrepareLightsForGPU.
// TODO: Try to arrange code so we can trigger this call earlier and use async compute here to run sky convolution during other passes (once we move convolution shader to compute).
UpdateSkyEnvironment(hdCamera, cmd);
RenderDepthPyramid(hdCamera, cmd, renderContext, FullScreenDebugMode.DepthPyramid);
StopStereoRendering(renderContext, hdCamera);

// If the flag hasn't been set yet on this camera, motion vectors will skip a frame.
hdCamera.camera.depthTextureMode |= DepthTextureMode.MotionVectors | DepthTextureMode.Depth;
- var velocityBufferRT = hdCamera.GetCurrentFrameRT((int)HDCameraFrameHistoryType.MotionVectors)
-     ?? hdCamera.AllocHistoryFrameRT((int)HDCameraFrameHistoryType.MotionVectors, AllocMotionVectorRT);
- HDUtils.SetRenderTarget(cmd, hdCamera, velocityBufferRT, m_CameraDepthStencilBuffer);
+ HDUtils.SetRenderTarget(cmd, hdCamera, m_VelocityBuffer, m_CameraDepthStencilBuffer);
+ cmd.SetGlobalTexture(HDShaderIDs._CameraMotionVectorsTexture, m_VelocityBuffer);
+ cmd.SetGlobalVector(HDShaderIDs._CameraMotionVectorsSize, new Vector4(
+     m_VelocityBuffer.referenceSize.x,
+     m_VelocityBuffer.referenceSize.y,
+     1f / m_VelocityBuffer.referenceSize.x,
+     1f / m_VelocityBuffer.referenceSize.y
+ ));
+ cmd.SetGlobalVector(HDShaderIDs._CameraMotionVectorsScale, new Vector4(
+     m_VelocityBuffer.referenceSize.x / (float)m_VelocityBuffer.rt.width,
+     m_VelocityBuffer.referenceSize.y / (float)m_VelocityBuffer.rt.height,
+     1, 0.0f
+ ));
}
}

// If the flag hasn't been set yet on this camera, motion vectors will skip a frame.
hdCamera.camera.depthTextureMode |= DepthTextureMode.MotionVectors | DepthTextureMode.Depth;
- var velocityBufferRT = hdCamera.GetCurrentFrameRT((int)HDCameraFrameHistoryType.MotionVectors)
-     ?? hdCamera.AllocHistoryFrameRT((int)HDCameraFrameHistoryType.MotionVectors, AllocMotionVectorRT);
- HDUtils.DrawFullScreen(cmd, hdCamera, m_CameraMotionVectorsMaterial, velocityBufferRT, m_CameraDepthStencilBuffer, null, 0);
- PushFullScreenDebugTexture(hdCamera, cmd, velocityBufferRT, FullScreenDebugMode.MotionVectors);
- var scale = new Vector2((float)hdCamera.actualWidth / velocityBufferRT.rt.width, (float)hdCamera.actualHeight / velocityBufferRT.rt.height);
- cmd.SetGlobalTexture(HDShaderIDs._CameraMotionVectorsTexture, velocityBufferRT);
- cmd.SetGlobalVector(HDShaderIDs._CameraMotionVectorsSize, new Vector4(
-     hdCamera.actualWidth, hdCamera.actualHeight,
-     1f / hdCamera.actualWidth, 1f / hdCamera.actualHeight));
- cmd.SetGlobalVector(HDShaderIDs._CameraMotionVectorsScale, new Vector4(scale.x, scale.y, 1, 0.0f));
}
}

PushFullScreenDebugTextureMip(hdCamera, cmd, cameraRT, m_BufferPyramid.GetPyramidLodCount(new Vector2Int(hdCamera.actualWidth, hdCamera.actualHeight)), new Vector4(pyramidScale.x, pyramidScale.y, 0.0f, 0.0f), debugMode);
}
- static readonly int _CameraMotionVectorsTexture_PostHack = Shader.PropertyToID("_CameraMotionVectorsTexture_PostHack");
void RenderPostProcess(HDCamera hdcamera, CommandBuffer cmd, PostProcessLayer layer)
{
using (new ProfilingSample(cmd, "Post-processing", CustomSamplerId.PostProcessing.GetSampler()))

// Since we don't render to the full render textures, we need to feed the post processing stack with the right scale/bias.
// This feature not being implemented yet, we'll just copy the relevant buffers into an appropriately sized RT.
cmd.ReleaseTemporaryRT(HDShaderIDs._CameraDepthTexture);
- cmd.ReleaseTemporaryRT(_CameraMotionVectorsTexture_PostHack);
+ cmd.ReleaseTemporaryRT(HDShaderIDs._CameraMotionVectorsTexture);
- var velocityBufferRT = hdcamera.GetCurrentFrameRT((int)HDCameraFrameHistoryType.MotionVectors);
- if (velocityBufferRT != null)
+ if (m_VelocityBuffer != null)
- cmd.GetTemporaryRT(_CameraMotionVectorsTexture_PostHack, hdcamera.actualWidth, hdcamera.actualHeight, 0, FilterMode.Point, velocityBufferRT.rt.format);
- HDUtils.BlitCameraTexture(cmd, hdcamera, velocityBufferRT, _CameraMotionVectorsTexture_PostHack);
- cmd.SetGlobalTexture(HDShaderIDs._CameraMotionVectorsTexture, _CameraMotionVectorsTexture_PostHack);
+ cmd.GetTemporaryRT(HDShaderIDs._CameraMotionVectorsTexture, hdcamera.actualWidth, hdcamera.actualHeight, 0, FilterMode.Point, m_VelocityBuffer.rt.format);
+ HDUtils.BlitCameraTexture(cmd, hdcamera, m_VelocityBuffer, HDShaderIDs._CameraMotionVectorsTexture);
}
cmd.GetTemporaryRT(HDShaderIDs._CameraColorTexture, hdcamera.actualWidth, hdcamera.actualHeight, 0, FilterMode.Point, m_CameraColorBuffer.rt.format);
HDUtils.BlitCameraTexture(cmd, hdcamera, m_CameraColorBuffer, HDShaderIDs._CameraColorTexture);
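The pattern above (release, re-allocate a viewport-sized temporary, blit) is how the oversized RTHandles are fed to the PostProcessLayer, which expects textures whose full extent matches the camera. A sketch of the pattern in isolation; CopyForPostProcess is a hypothetical helper, and the calls it wraps are the ones this hunk uses:

// Hedged sketch: copy a (possibly oversized) RTHandle into a temporary RT
// sized exactly to the camera's viewport so the post-process stack can
// sample it with unscaled UVs. Allocating the temporary under the shader
// property ID also exposes it to shaders under that name, which is why the
// new lines above no longer need an explicit SetGlobalTexture.
void CopyForPostProcess(CommandBuffer cmd, HDCamera hdcamera, RTHandleSystem.RTHandle source, int shaderPropertyId)
{
    cmd.ReleaseTemporaryRT(shaderPropertyId);
    cmd.GetTemporaryRT(shaderPropertyId, hdcamera.actualWidth, hdcamera.actualHeight, 0, FilterMode.Point, source.rt.format);
    HDUtils.BlitCameraTexture(cmd, hdcamera, source, shaderPropertyId);
}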

{
// Note: Here we don't use GetDepthTexture() to get the depth texture but m_CameraDepthStencilBuffer as the Forward transparent pass can
// write extra data to deal with DOF/MB
- var velocityBufferRT = hdcamera.GetCurrentFrameRT((int)HDCameraFrameHistoryType.MotionVectors);
- cmd.SetGlobalTexture(HDShaderIDs._CameraMotionVectorsTexture, velocityBufferRT);
+ cmd.SetGlobalTexture(HDShaderIDs._CameraMotionVectorsTexture, m_VelocityBuffer);
}
var context = hdcamera.postprocessRenderContext;

{
if (hdCamera.frameSettings.enableStereo)
renderContext.StopMultiEye(hdCamera.camera);
}
- RTHandleSystem.RTHandle AllocMotionVectorRT(string id, int frameIndex, RTHandleSystem rtHandleSystem)
- {
-     return rtHandleSystem.Alloc(
-         Vector2.one,
-         filterMode: FilterMode.Point,
-         colorFormat: Builtin.GetVelocityBufferFormat(),
-         sRGB: Builtin.GetVelocityBufferSRGBFlag(),
-         enableMSAA: true,
-         name: string.Format("Velocity-{0}-{1}", id, frameIndex)
-     );
- }
}
}

ScriptableRenderPipeline/HDRenderPipeline/HDRP/Material/Lit/Lit.hlsl (62 changes)


int projectionModel = PROJECTIONMODEL_NONE;
#if HAS_REFRACTION
if (GPUImageBasedLightingType == GPUIMAGEBASEDLIGHTINGTYPE_REFRACTION)
{
#if defined(_REFRACTION_SSRAY_HIZ)
projectionModel = PROJECTIONMODEL_HI_Z;
#elif defined(_REFRACTION_SSRAY_PROXY)

int debugMode = 0;
#endif
float invScreenWeightDistance = 0;
- float temporalFilteringWeight = 0.2;
+ float temporalFilteringWeight = 0.1;
// Refraction process:
// 1. Depending on the shape model, we calculate the refracted point in world space and the optical depth
// 2. We calculate the screen space position of the refracted point
// 3. If this point is available (ie: in color buffer and point is not in front of the object)
//    a. Get the corresponding color depending on the roughness from the gaussian pyramid of the color buffer
//    b. Multiply by the transmittance for absorption (depends on the optical depth)
rayOriginWS = preLightData.transparentPositionWS;
rayDirWS = preLightData.transparentRefractV;
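A compact sketch of those three steps, using the two assignments above as step 1. EstimateOpticalDepth, ProjectRefractedPoint, mipFromRoughness and absorptionCoefficient are hypothetical stand-ins for code this diff elides:

// Hedged sketch of the refraction process described above, not the shipped code.
float3 rayOriginWS = preLightData.transparentPositionWS;       // 1. refracted point (shape model)
float3 rayDirWS    = preLightData.transparentRefractV;         //    refracted view direction
float  opticalDepth = EstimateOpticalDepth(bsdfData);          //    hypothetical helper
float2 hitPositionNDC = ProjectRefractedPoint(rayOriginWS, rayDirWS); // 2. screen-space position
// 3.a: roughness picks how blurred a mip of the gaussian color pyramid we read
float3 preLD = SAMPLE_TEXTURE2D_LOD(_ColorPyramidTexture, s_trilinear_clamp_sampler,
    hitPositionNDC * _ColorPyramidScale.xy, mipFromRoughness).rgb;
// 3.b: attenuate by the transmittance for absorption along the optical depth
float3 refracted = preLD * exp(-bsdfData.absorptionCoefficient * opticalDepth);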

// Debug screen space tracing
#ifdef DEBUG_DISPLAY
if (_DebugLightingMode == debugMode
{
    float weight = 1.0;
    UpdateLightingHierarchyWeights(hierarchyWeight, weight);
    lighting.specularTransmitted = hit.debugOutput;
    return lighting;
}
if (!hitSuccessful)
    return lighting;
// -------------------------------
// Resolve weight and color

// TODO: Fade pixels marked as foreground in stencil
float weight = weightNDC.x * weightNDC.y * hitWeight;
float hitDeviceDepth = LOAD_TEXTURE2D_LOD(_DepthPyramidTexture, hit.positionSS, 0).r;
float hitLinearDepth = LinearEyeDepth(hitDeviceDepth, _ZBufferParams);
{
    // Do nothing and don't update the hierarchy weight so we can fall back on refraction probe
    return lighting;
}
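The braces above have lost their guarding condition in this view. A hedged guess at its shape, assuming hit.linearDepth carries the marched ray's own depth at the hit point (the field name is not visible in this diff): reject the hit and, by leaving hierarchyWeight untouched, let the refraction probe take over.

// Assumed form of the rejection test; the actual condition is elided above.
if (hitLinearDepth < hit.linearDepth)
{
    // Do nothing and don't update the hierarchy weight so we can fall back on refraction probe
    return lighting;
}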
UpdateLightingHierarchyWeights(hierarchyWeight, weight); // Shouldn't be needed, but safer in case we decide to change hierarchy priority
// Reproject color pyramid
float4 hitVelocityBuffer = LOAD_TEXTURE2D_LOD(

float2 hitVelocityNDC;
DecodeVelocity(hitVelocityBuffer, hitVelocityNDC);
float3 preLD = SAMPLE_TEXTURE2D_LOD(
    _ColorPyramidTexture,
    s_trilinear_clamp_sampler,
).rgb;
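Per the commit title, the velocity fetched at the hit pixel is the current frame's. Assuming the usual encoding (velocity = current NDC minus previous NDC), rewinding the hit position by that motion yields the UV at which the hit surface appears in the color pyramid; hit.positionNDC and mipLevel are assumed names for values elided above:

// Hedged sketch of the reprojection; names partly assumed, see above.
float4 hitVelocityBuffer = LOAD_TEXTURE2D_LOD(_CameraMotionVectorsTexture, hit.positionSS, 0);
float2 hitVelocityNDC;
DecodeVelocity(hitVelocityBuffer, hitVelocityNDC);
float2 reprojectedNDC = hit.positionNDC - hitVelocityNDC; // rewind by one frame of motion
float3 preLD = SAMPLE_TEXTURE2D_LOD(
    _ColorPyramidTexture,
    s_trilinear_clamp_sampler,
    reprojectedNDC * _ColorPyramidScale.xy, // map into the pyramid's used region
    mipLevel                                // roughness-based mip, elided above
).rgb;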
// With HiZ, we use temporal filtering to reduce the noise from the ray origin jittering
if (projectionModel == PROJECTIONMODEL_HI_Z)

}
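A sketch of the exponential blend that weight implies; historyColor stands for the previous frame's accumulated result (an assumed name):

// Hedged sketch: HiZ jitters the ray origin per frame, so the result is
// blended against history; with temporalFilteringWeight = 0.1, 90% of the
// accumulated value survives each frame.
if (projectionModel == PROJECTIONMODEL_HI_Z)
    preLD = lerp(historyColor, preLD, temporalFilteringWeight);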
// We use specularFGD as an approximation of the fresnel effect (which also handles smoothness)
float3 F = preLightData.specularFGD;
// -------------------------------
// Assign color

debug.lightingSpecularFGD = F;
debug.lightingWeight = weight;
_DebugScreenSpaceTracingData[0] = debug;
}
}
{
{
float weight = 1.0;
UpdateLightingHierarchyWeights(hierarchyWeight, weight);
if (GPUImageBasedLightingType == GPUIMAGEBASEDLIGHTINGTYPE_REFRACTION)

return lighting;
}
}
#endif
return lighting;
