
HDRenderLoop: Fix depthVS issue + introduce HDCamera

Branch: main
Sebastien Lagarde, 8 years ago
Commit 5d839c48
12 files changed, 160 insertions(+), 150 deletions(-)
  1. Assets/ScriptableRenderLoop/HDRenderLoop/Debug/Resources/DebugViewMaterialGBuffer.shader (4 changes)
  2. Assets/ScriptableRenderLoop/HDRenderLoop/Debug/Resources/DebugViewTiles.shader (6 changes)
  3. Assets/ScriptableRenderLoop/HDRenderLoop/HDRenderLoop.cs (49 changes)
  4. Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/LightLoop.cs (2 changes)
  5. Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/Resources/Deferred.shader (4 changes)
  6. Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/TilePass/TilePass.cs (22 changes)
  7. Assets/ScriptableRenderLoop/HDRenderLoop/ShaderVariables.hlsl (166 changes)
  8. Assets/ScriptableRenderLoop/HDRenderLoop/Sky/Resources/SkyHDRI.shader (1 change)
  9. Assets/ScriptableRenderLoop/HDRenderLoop/Sky/Resources/SkyProcedural.shader (3 changes)
  10. Assets/ScriptableRenderLoop/HDRenderLoop/Sky/SkyRenderer.cs (18 changes)
  11. Assets/ScriptableRenderLoop/HDRenderLoop/Utilities.cs (28 changes)
  12. Assets/ScriptableRenderLoop/ShaderLibrary/Common.hlsl (7 changes)

Assets/ScriptableRenderLoop/HDRenderLoop/Debug/Resources/DebugViewMaterialGBuffer.shader (4 changes)


#include "Assets/ScriptableRenderLoop/HDRenderLoop/ShaderVariables.hlsl"
#include "Assets/ScriptableRenderLoop/HDRenderLoop/Debug/DebugViewMaterial.cs.hlsl"
#include "Assets/ScriptableRenderLoop/HDRenderLoop/Material/Material.hlsl"
float4x4 _InvViewProjMatrix;
DECLARE_GBUFFER_TEXTURE(_GBufferTexture);

// input.positionCS is SV_Position
PositionInputs posInput = GetPositionInput(input.positionCS.xy, _ScreenSize.zw);
float depth = LOAD_TEXTURE2D(_CameraDepthTexture, posInput.unPositionSS).x;
UpdatePositionInput(depth, _InvViewProjMatrix, GetWorldToViewMatrix(), posInput);
UpdatePositionInput(depth, _InvViewProjMatrix, _ViewProjMatrix, posInput);
FETCH_GBUFFER(gbuffer, _GBufferTexture, posInput.unPositionSS);
BSDFData bsdfData;

Assets/ScriptableRenderLoop/HDRenderLoop/Debug/Resources/DebugViewTiles.shader (6 changes)


TEXTURE2D(_CameraDepthTexture);
SAMPLER2D(sampler_CameraDepthTexture);
float4x4 _InvViewProjMatrix;
uint _ViewTilesFlags;
float2 _MousePixelCoord;

// positionCS is SV_Position
PositionInputs posInput = GetPositionInput(positionCS.xy, _ScreenSize.zw);
float depth = LOAD_TEXTURE2D(_CameraDepthTexture, posInput.unPositionSS).x;
UpdatePositionInput(depth, _InvViewProjMatrix, GetWorldToViewMatrix(), posInput);
UpdatePositionInput(depth, _InvViewProjMatrix, _ViewProjMatrix, posInput);
int2 pixelCoord = posInput.unPositionSS.xy;
int2 tileCoord = pixelCoord / TILE_SIZE;

{
PositionInputs mousePosInput = GetPositionInput(_MousePixelCoord, _ScreenSize.zw);
float depthMouse = LOAD_TEXTURE2D(_CameraDepthTexture, mousePosInput.unPositionSS).x;
UpdatePositionInput(depthMouse, _InvViewProjMatrix, GetWorldToViewMatrix(), mousePosInput);
UpdatePositionInput(depthMouse, _InvViewProjMatrix, _ViewProjMatrix, mousePosInput);
int category = (LIGHTCATEGORY_COUNT - 1) - tileCoord.y;
int start;

Assets/ScriptableRenderLoop/HDRenderLoop/HDRenderLoop.cs (49 changes)


// TODO TO CHECK: SebL I move allocation from Build() to here, but there was a comment "// Our object can be garbage collected, so need to be allocate here", it is still true ?
Lit.RenderLoop m_LitRenderLoop = new Lit.RenderLoop();
public struct HDCamera
{
public Camera camera;
public Vector4 screenSize;
public Matrix4x4 viewProjectionMatrix;
public Matrix4x4 invViewProjectionMatrix;
}
public override void Build()
{
#if UNITY_EDITOR

}
}
void RenderDebugViewMaterial(CullResults cull, Camera camera, RenderLoop renderLoop)
void RenderDebugViewMaterial(CullResults cull, HDCamera hdCamera, RenderLoop renderLoop)
{
using (new Utilities.ProfilingSample("DebugView Material Mode Pass", renderLoop))
// Render Opaque forward

Shader.SetGlobalInt("_DebugViewMaterial", (int)debugParameters.debugViewMaterial);
RenderOpaqueRenderList(cull, camera, renderLoop, "DebugViewMaterial");
RenderOpaqueRenderList(cull, hdCamera.camera, renderLoop, "DebugViewMaterial");
var invViewProj = Utilities.GetViewProjectionMatrix(camera).inverse;
var screenSize = Utilities.ComputeScreenSize(camera);
m_DebugViewMaterialGBuffer.SetVector("_ScreenSize", screenSize);
Utilities.SetupMaterialHDCamera(hdCamera, m_DebugViewMaterialGBuffer);
m_DebugViewMaterialGBuffer.SetMatrix("_InvViewProjMatrix", invViewProj);
// m_gbufferManager.BindBuffers(m_DebugViewMaterialGBuffer);
// TODO: Bind depth textures

// Render forward transparent
{
RenderTransparentRenderList(cull, camera, renderLoop, "DebugViewMaterial");
RenderTransparentRenderList(cull, hdCamera.camera, renderLoop, "DebugViewMaterial");
}
// Last blit

}
}
void RenderDeferredLighting(Camera camera, RenderLoop renderLoop)
void RenderDeferredLighting(HDCamera hdCamera, RenderLoop renderLoop)
{
if (debugParameters.useForwardRenderingOnly)
{

// Bind material data
m_LitRenderLoop.Bind();
m_lightLoop.RenderDeferredLighting(camera, renderLoop, m_CameraColorBuffer);
m_lightLoop.RenderDeferredLighting(hdCamera, renderLoop, m_CameraColorBuffer);
void RenderSky(Camera camera, RenderLoop renderLoop)
void RenderSky(HDCamera hdCamera, RenderLoop renderLoop)
m_SkyRenderer.RenderSky(camera, m_SkyParameters, m_CameraColorBufferRT, m_CameraDepthBufferRT, renderLoop);
m_SkyRenderer.RenderSky(hdCamera, m_SkyParameters, m_CameraColorBufferRT, m_CameraDepthBufferRT, renderLoop);
}
void RenderForward(CullResults cullResults, Camera camera, RenderLoop renderLoop, bool renderOpaque)

}
}
public void PushGlobalParams(Camera camera, RenderLoop renderLoop)
public void PushGlobalParams(HDCamera hdCamera, RenderLoop renderLoop)
{
if (m_SkyRenderer.IsSkyValid(m_SkyParameters))
{

Shader.SetGlobalInt("_EnvLightSkyEnabled", 0);
}
var invViewProj = Utilities.GetViewProjectionMatrix(camera).inverse;
var screenSize = Utilities.ComputeScreenSize(camera);
cmd.SetGlobalVector("_ScreenSize", screenSize);
cmd.SetGlobalMatrix("_InvViewProjMatrix", invViewProj);
cmd.SetGlobalVector("_ScreenSize", hdCamera.screenSize);
cmd.SetGlobalMatrix("_ViewProjMatrix", hdCamera.viewProjectionMatrix);
cmd.SetGlobalMatrix("_InvViewProjMatrix", hdCamera.invViewProjectionMatrix);
m_lightLoop.PushGlobalParams(camera, renderLoop);
m_lightLoop.PushGlobalParams(hdCamera.camera, renderLoop);
}
public override void Render(Camera[] cameras, RenderLoop renderLoop)

Resize(camera);
renderLoop.SetupCameraProperties(camera);
HDCamera hdCamera = Utilities.GetHDCamera(camera);
InitAndClearBuffer(camera, renderLoop);

if (debugParameters.debugViewMaterial != 0)
{
RenderDebugViewMaterial(cullResults, camera, renderLoop);
RenderDebugViewMaterial(cullResults, hdCamera, renderLoop);
}
else
{

m_lightLoop.PrepareLightsForGPU(cullResults, camera, ref shadows);
m_lightLoop.BuildGPULightLists(camera, renderLoop, m_CameraDepthBufferRT);
PushGlobalParams(camera, renderLoop);
PushGlobalParams(hdCamera, renderLoop);
RenderDeferredLighting(camera, renderLoop);
RenderDeferredLighting(hdCamera, renderLoop);
RenderSky(camera, renderLoop);
RenderSky(hdCamera, renderLoop);
RenderForward(cullResults, camera, renderLoop, false);
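
Pieced together from the HDRenderLoop.cs fragments above, the per-camera flow after this commit looks roughly like the sketch below. It is illustrative only: the RenderSingleCamera wrapper name is mine (the real Render() iterates over the cameras array), and the remaining passes are elided.

// Illustrative sketch, not code from the commit: how the new HDCamera is threaded
// through one camera's frame.
void RenderSingleCamera(CullResults cullResults, Camera camera, RenderLoop renderLoop)
{
    Resize(camera);
    renderLoop.SetupCameraProperties(camera);

    // Built once per camera per frame: caches pixel size, the GPU view-projection
    // matrix and its inverse (see Utilities.GetHDCamera later in this diff).
    HDCamera hdCamera = Utilities.GetHDCamera(camera);

    InitAndClearBuffer(camera, renderLoop);

    if (debugParameters.debugViewMaterial != 0)
    {
        RenderDebugViewMaterial(cullResults, hdCamera, renderLoop);
    }
    else
    {
        m_lightLoop.PrepareLightsForGPU(cullResults, camera, ref shadows);
        m_lightLoop.BuildGPULightLists(camera, renderLoop, m_CameraDepthBufferRT);

        PushGlobalParams(hdCamera, renderLoop);      // publishes _ScreenSize, _ViewProjMatrix, _InvViewProjMatrix
        RenderDeferredLighting(hdCamera, renderLoop);
        RenderSky(hdCamera, renderLoop);
        RenderForward(cullResults, camera, renderLoop, false);
    }
}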

Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/LightLoop.cs (2 changes)


public virtual void PushGlobalParams(Camera camera, RenderLoop loop) {}
public virtual void RenderDeferredLighting(Camera camera, RenderLoop renderLoop, RenderTargetIdentifier cameraColorBufferRT) {}
public virtual void RenderDeferredLighting(HDRenderLoop.HDCamera hdCamera, RenderLoop renderLoop, RenderTargetIdentifier cameraColorBufferRT) {}
public virtual void RenderForward(Camera camera, RenderLoop renderLoop, bool renderOpaque) {}
}

Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/Resources/Deferred.shader (4 changes)


TEXTURE2D(_CameraDepthTexture);
SAMPLER2D(sampler_CameraDepthTexture);
float4x4 _InvViewProjMatrix;
struct Attributes
{
float3 positionOS : POSITION;

// input.positionCS is SV_Position
PositionInputs posInput = GetPositionInput(input.positionCS.xy, _ScreenSize.zw);
float depth = LOAD_TEXTURE2D(_CameraDepthTexture, posInput.unPositionSS).x;
UpdatePositionInput(depth, _InvViewProjMatrix, GetWorldToViewMatrix(), posInput);
UpdatePositionInput(depth, _InvViewProjMatrix, _ViewProjMatrix, posInput);
float3 V = GetWorldSpaceNormalizeViewDir(posInput.positionWS);
FETCH_GBUFFER(gbuffer, _GBufferTexture, posInput.unPositionSS);

Assets/ScriptableRenderLoop/HDRenderLoop/Lighting/TilePass/TilePass.cs (22 changes)


}
#endif
public override void RenderDeferredLighting(Camera camera, RenderLoop renderLoop, RenderTargetIdentifier cameraColorBufferRT)
public override void RenderDeferredLighting(HDRenderLoop.HDCamera hdCamera, RenderLoop renderLoop, RenderTargetIdentifier cameraColorBufferRT)
var invViewProj = Utilities.GetViewProjectionMatrix(camera).inverse;
var screenSize = Utilities.ComputeScreenSize(camera);
Vector2 mousePixelCoord = Input.mousePosition;
#if UNITY_EDITOR

mousePixelCoord.y = (screenSize.y - 1.0f) - mousePixelCoord.y;
mousePixelCoord.y = (hdCamera.screenSize.y - 1.0f) - mousePixelCoord.y;
m_DeferredDirectMaterial.SetMatrix("_InvViewProjMatrix", invViewProj);
m_DeferredDirectMaterial.SetVector("_ScreenSize", screenSize);
Utilities.SetupMaterialHDCamera(hdCamera, m_DeferredDirectMaterial);
m_DeferredIndirectMaterial.SetMatrix("_InvViewProjMatrix", invViewProj);
m_DeferredIndirectMaterial.SetVector("_ScreenSize", screenSize);
Utilities.SetupMaterialHDCamera(hdCamera, m_DeferredIndirectMaterial);
m_DeferredAllMaterial.SetMatrix("_InvViewProjMatrix", invViewProj);
m_DeferredAllMaterial.SetVector("_ScreenSize", screenSize);
Utilities.SetupMaterialHDCamera(hdCamera, m_DeferredAllMaterial);
m_DebugViewTilesMaterial.SetMatrix("_InvViewProjMatrix", invViewProj);
m_DebugViewTilesMaterial.SetVector("_ScreenSize", screenSize);
Utilities.SetupMaterialHDCamera(hdCamera, m_DebugViewTilesMaterial);
m_SingleDeferredMaterial.SetMatrix("_InvViewProjMatrix", invViewProj);
m_SingleDeferredMaterial.SetVector("_ScreenSize", screenSize);
Utilities.SetupMaterialHDCamera(hdCamera, m_SingleDeferredMaterial);
m_SingleDeferredMaterial.SetInt("_SrcBlend", (int)UnityEngine.Rendering.BlendMode.One);
m_SingleDeferredMaterial.SetInt("_DstBlend", (int)UnityEngine.Rendering.BlendMode.Zero);

Assets/ScriptableRenderLoop/HDRenderLoop/ShaderVariables.hlsl (166 changes)


CBUFFER_START(UnityPerCamera)
// Time (t = time since current level load) values from Unity
float4 _Time; // (t/20, t, t*2, t*3)
float4 _SinTime; // sin(t/8), sin(t/4), sin(t/2), sin(t)
float4 _CosTime; // cos(t/8), cos(t/4), cos(t/2), cos(t)
float4 unity_DeltaTime; // dt, 1/dt, smoothdt, 1/smoothdt
// Time (t = time since current level load) values from Unity
float4 _Time; // (t/20, t, t*2, t*3)
float4 _SinTime; // sin(t/8), sin(t/4), sin(t/2), sin(t)
float4 _CosTime; // cos(t/8), cos(t/4), cos(t/2), cos(t)
float4 unity_DeltaTime; // dt, 1/dt, smoothdt, 1/smoothdt
#ifndef UNITY_SINGLE_PASS_STEREO
float3 _WorldSpaceCameraPos;
#if !defined(USING_STEREO_MATRICES)
float3 _WorldSpaceCameraPos;
// x = 1 or -1 (-1 if projection is flipped)
// y = near plane
// z = far plane
// w = 1/far plane
float4 _ProjectionParams;
// x = 1 or -1 (-1 if projection is flipped)
// y = near plane
// z = far plane
// w = 1/far plane
float4 _ProjectionParams;
// x = width
// y = height
// z = 1 + 1.0/width
// w = 1 + 1.0/height
float4 _ScreenParams;
// x = width
// y = height
// z = 1 + 1.0/width
// w = 1 + 1.0/height
float4 _ScreenParams;
// Values used to linearize the Z buffer (http://www.humus.name/temp/Linearize%20depth.txt)
// x = 1-far/near
// y = far/near
// z = x/far
// w = y/far
float4 _ZBufferParams;
// Values used to linearize the Z buffer (http://www.humus.name/temp/Linearize%20depth.txt)
// x = 1-far/near
// y = far/near
// z = x/far
// w = y/far
float4 _ZBufferParams;
// x = orthographic camera's width
// y = orthographic camera's height
// z = unused
// w = 1.0 if camera is ortho, 0.0 if perspective
float4 unity_OrthoParams;
// x = orthographic camera's width
// y = orthographic camera's height
// z = unused
// w = 1.0 if camera is ortho, 0.0 if perspective
float4 unity_OrthoParams;
float4 unity_CameraWorldClipPlanes[6];
float4 unity_CameraWorldClipPlanes[6];
// Projection matrices of the camera. Note that this might be different from projection matrix
// that is set right now, e.g. while rendering shadows the matrices below are still the projection
// of original camera.
float4x4 unity_CameraProjection;
float4x4 unity_CameraInvProjection;
#ifndef UNITY_SINGLE_PASS_STEREO
float4x4 unity_WorldToCamera;
float4x4 unity_CameraToWorld;
#if !defined(USING_STEREO_MATRICES)
// Projection matrices of the camera. Note that this might be different from projection matrix
// that is set right now, e.g. while rendering shadows the matrices below are still the projection
// of original camera.
float4x4 unity_CameraProjection;
float4x4 unity_CameraInvProjection;
float4x4 unity_WorldToCamera;
float4x4 unity_CameraToWorld;
#endif
CBUFFER_END

#ifndef UNITY_SINGLE_PASS_STEREO
float4x4 glstate_matrix_mvp;
#ifdef UNITY_USE_PREMULTIPLIED_MATRICES
float4x4 glstate_matrix_mvp;
float4x4 glstate_matrix_modelview0;
float4x4 glstate_matrix_invtrans_modelview0;
// Use center position for stereo rendering
float4x4 glstate_matrix_modelview0;
float4x4 glstate_matrix_invtrans_modelview0;
float4x4 unity_ObjectToWorld;
float4x4 unity_WorldToObject;
float4 unity_LODFade; // x is the fade value ranging within [0,1]. y is x quantized into 16 levels
float4 unity_WorldTransformParams; // w is usually 1.0, or -1.0 for odd-negative scale transforms
float4x4 unity_ObjectToWorld;
float4x4 unity_WorldToObject;
float4 unity_LODFade; // x is the fade value ranging within [0,1]. y is x quantized into 16 levels
float4 unity_WorldTransformParams; // w is usually 1.0, or -1.0 for odd-negative scale transforms
#ifdef UNITY_SINGLE_PASS_STEREO
CBUFFER_START(UnityPerEye)
float3 _WorldSpaceCameraPos;
float4x4 glstate_matrix_projection;
#if defined(USING_STEREO_MATRICES)
CBUFFER_START(UnityStereoGlobals)
float4x4 unity_StereoMatrixP[2];
float4x4 unity_StereoMatrixV[2];
float4x4 unity_StereoMatrixInvV[2];
float4x4 unity_StereoMatrixVP[2];
float4x4 unity_MatrixV;
float4x4 unity_MatrixVP;
float4x4 unity_StereoCameraProjection[2];
float4x4 unity_StereoCameraInvProjection[2];
float4x4 unity_StereoWorldToCamera[2];
float4x4 unity_StereoCameraToWorld[2];
float3 unity_StereoWorldSpaceCameraPos[2];
float4 unity_StereoScaleOffset[2];
CBUFFER_END
float4x4 unity_WorldToCamera;
float4x4 unity_CameraToWorld;
#ifdef UNITY_SUPPORT_MULTIVIEW
#define unity_StereoEyeIndex UNITY_VIEWID
UNITY_DECLARE_MULTIVIEW(2);
#else
CBUFFER_START(UnityStereoEyeIndex)
int unity_StereoEyeIndex;
#endif
float4x4 glstate_matrix_transpose_modelview0;
#ifdef UNITY_SINGLE_PASS_STEREO
float4x4 glstate_matrix_mvp;
#endif
float4x4 glstate_matrix_transpose_modelview0;
CBUFFER_END

#ifndef UNITY_SINGLE_PASS_STEREO
float4x4 glstate_matrix_projection;
float4x4 unity_MatrixV;
float4x4 unity_MatrixVP;
float4 glstate_lightmodel_ambient;
float4 unity_AmbientSky;
float4 unity_AmbientEquator;
float4 unity_AmbientGround;
float4 unity_IndirectSpecColor;
#if !defined(USING_STEREO_MATRICES)
float4x4 glstate_matrix_projection;
float4x4 unity_MatrixV;
float4x4 unity_MatrixInvV;
float4x4 unity_MatrixVP;
int unity_StereoEyeIndex;
float4 glstate_lightmodel_ambient;
float4 unity_AmbientSky;
float4 unity_AmbientEquator;
float4 unity_AmbientGround;
float4 unity_IndirectSpecColor;
float4 unity_ShadowColor;
CBUFFER_END

// ----------------------------------------------------------------------------
// TODO: move this to constant buffer by Pass
float4x4 _InvViewProjMatrix;
float4x4 _ViewProjMatrix; // Looks like using UNITY_MATRIX_VP in pixel shader doesn't work ??? need to setup my own...
return unity_MatrixV;
return UNITY_MATRIX_V;
}
float4x4 GetObjectToWorldMatrix()

// Transform to homogenous clip space
float4x4 GetWorldToHClipMatrix()
{
return unity_MatrixVP;
return UNITY_MATRIX_VP;
}
// Transform from clip space to homogenous world space

return unity_WorldTransformParams.w;
}
float4x4 GetObjectToWorldViewMatrix()
{
return glstate_matrix_modelview0;
}
float3 TransformWorldToView(float3 positionWS)
{
return mul(GetWorldToViewMatrix(), float4(positionWS, 1.0)).xyz;

float3 TransformWorldToObject(float3 positionWS)
{
return mul(GetWorldToObjectMatrix(), float4(positionWS, 1.0)).xyz;
}
float3 TransformObjectToView(float3 positionOS)
{
return mul(GetObjectToWorldViewMatrix(), float4(positionOS, 1.0)).xyz;
}
float3 TransformObjectToWorldDir(float3 dirOS)
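
The _ViewProjMatrix and _InvViewProjMatrix globals declared above (together with _ScreenSize) are fed entirely from script in this commit. A consolidated sketch of that C# side, based on the PushGlobalParams and SkyRenderer fragments elsewhere in this diff (cmd is the command buffer used there):

// Frame-wide values, set once per camera in PushGlobalParams:
cmd.SetGlobalVector("_ScreenSize",        hdCamera.screenSize);
cmd.SetGlobalMatrix("_ViewProjMatrix",    hdCamera.viewProjectionMatrix);
cmd.SetGlobalMatrix("_InvViewProjMatrix", hdCamera.invViewProjectionMatrix);

// Per-draw values for the sky, via the SkyRenderer material property block:
m_RenderSkyPropertyBlock.SetVector("_ScreenSize",        screenSize);
m_RenderSkyPropertyBlock.SetMatrix("_ViewProjMatrix",    viewProjectionMatrix);
m_RenderSkyPropertyBlock.SetMatrix("_InvViewProjMatrix", invViewProjectionMatrix);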

Assets/ScriptableRenderLoop/HDRenderLoop/Sky/Resources/SkyHDRI.shader (1 change)


TEXTURECUBE(_Cubemap);
SAMPLERCUBE(sampler_Cubemap);
float4x4 _InvViewProjMatrix;
float4 _SkyParam; // x exposure, y multiplier, z rotation
struct Attributes

Assets/ScriptableRenderLoop/HDRenderLoop/Sky/Resources/SkyProcedural.shader (3 changes)


TEXTURECUBE(_Cubemap);
SAMPLERCUBE(sampler_Cubemap);
float4x4 _InvViewProjMatrix;
float4 _SkyParam; // x exposure, y multiplier, z rotation
struct Attributes

float skyTexWeight = 1.0;
#endif
UpdatePositionInput(rawDepth, _InvViewProjMatrix, GetWorldToViewMatrix(), posInput);
UpdatePositionInput(rawDepth, _InvViewProjMatrix, _ViewProjMatrix, posInput);
float4 c1, c2, c3;
VolundTransferScatter(posInput.positionWS, c1, c2, c3);

Assets/ScriptableRenderLoop/HDRenderLoop/Sky/SkyRenderer.cs (18 changes)


MaterialPropertyBlock m_RenderSkyPropertyBlock = null;
Vector4 m_screenSize;
Matrix4x4[] m_faceCameraViewProjectionMatrix = new Matrix4x4[6];
Matrix4x4[] m_faceCameraInvViewProjectionMatrix = new Matrix4x4[6];
Mesh[] m_CubemapFaceMesh = new Mesh[6];

m_SkyboxGGXCubemapRT.filterMode = FilterMode.Trilinear;
m_SkyboxGGXCubemapRT.Create();
}
m_screenSize = new Vector4((float)skyParameters.skyResolution, (float)skyParameters.skyResolution, 1.0f / (float)skyParameters.skyResolution, 1.0f / (float)skyParameters.skyResolution);
}
// Sets the global MIP-mapped cubemap '_SkyTexture' in the shader.

for (int i = 0; i < 6; ++i)
{
Matrix4x4 lookAt = Matrix4x4.LookAt(Vector3.zero, lookAtList[i], UpVectorList[i]);
m_faceCameraInvViewProjectionMatrix[i] = Utilities.GetViewProjectionMatrix(lookAt, cubeProj).inverse;
m_faceCameraViewProjectionMatrix[i] = Utilities.GetViewProjectionMatrix(lookAt, cubeProj);
m_faceCameraInvViewProjectionMatrix[i] = m_faceCameraViewProjectionMatrix[i].inverse;
// When rendering into a texture the render will be flip (due to legacy unity openGL behavior), so we need to flip UV here...
m_CubemapFaceMesh[i] = BuildSkyMesh(Vector3.zero, m_faceCameraInvViewProjectionMatrix[i], true);

return parameters.skyHDRI != null;
}
private void RenderSky(Matrix4x4 invViewProjectionMatrix, SkyParameters skyParameters, Mesh skyMesh, RenderLoop renderLoop)
private void RenderSky(Vector4 screenSize, Matrix4x4 viewProjectionMatrix, Matrix4x4 invViewProjectionMatrix, SkyParameters skyParameters, Mesh skyMesh, RenderLoop renderLoop)
m_RenderSkyPropertyBlock.SetVector("_ScreenSize", screenSize);
m_RenderSkyPropertyBlock.SetMatrix("_ViewProjMatrix", viewProjectionMatrix);
m_RenderSkyPropertyBlock.SetMatrix("_InvViewProjMatrix", invViewProjectionMatrix);
var cmd = new CommandBuffer { name = "" };

for (int i = 0; i < 6; ++i)
{
Utilities.SetRenderTarget(renderLoop, target, 0, (CubemapFace)i);
RenderSky(m_faceCameraInvViewProjectionMatrix[i], skyParameters, m_CubemapFaceMesh[i], renderLoop);
RenderSky(m_screenSize, m_faceCameraViewProjectionMatrix[i], m_faceCameraInvViewProjectionMatrix[i], skyParameters, m_CubemapFaceMesh[i], renderLoop);
}
}

}
}
public void RenderSky(Camera camera, SkyParameters skyParameters, RenderTargetIdentifier colorBuffer, RenderTargetIdentifier depthBuffer, RenderLoop renderLoop)
public void RenderSky(HDRenderLoop.HDCamera hdCamera, SkyParameters skyParameters, RenderTargetIdentifier colorBuffer, RenderTargetIdentifier depthBuffer, RenderLoop renderLoop)
{
using (new Utilities.ProfilingSample("Sky Pass", renderLoop))
{

// Render the sky itself
Utilities.SetRenderTarget(renderLoop, colorBuffer, depthBuffer);
Matrix4x4 invViewProjectionMatrix = Utilities.GetViewProjectionMatrix(camera).inverse;
RenderSky(invViewProjectionMatrix, skyParameters, BuildSkyMesh(camera.GetComponent<Transform>().position, invViewProjectionMatrix, false), renderLoop);
RenderSky(hdCamera.screenSize, hdCamera.viewProjectionMatrix, hdCamera.invViewProjectionMatrix, skyParameters, BuildSkyMesh(hdCamera.camera.GetComponent<Transform>().position, hdCamera.invViewProjectionMatrix, false), renderLoop);
}
}
}
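
A sketch of the new per-face precompute in SkyRenderer (lookAtList, UpVectorList and cubeProj come from the surrounding SkyRenderer code and are not shown in this diff):

// Cache the face view-projection matrix and derive its inverse from it, instead of
// computing only the inverse as before: the private RenderSky overload now needs both.
for (int i = 0; i < 6; ++i)
{
    Matrix4x4 lookAt = Matrix4x4.LookAt(Vector3.zero, lookAtList[i], UpVectorList[i]);

    m_faceCameraViewProjectionMatrix[i]    = Utilities.GetViewProjectionMatrix(lookAt, cubeProj);
    m_faceCameraInvViewProjectionMatrix[i] = m_faceCameraViewProjectionMatrix[i].inverse;

    // When rendering into a texture the result is flipped (legacy Unity OpenGL behavior),
    // so the UVs are flipped when building the face mesh.
    m_CubemapFaceMesh[i] = BuildSkyMesh(Vector3.zero, m_faceCameraInvViewProjectionMatrix[i], true);
}

Each face render then receives the cached pair plus m_screenSize (derived from skyParameters.skyResolution), so the cubemap path and the main-camera path both go through the same screenSize / viewProjectionMatrix / invViewProjectionMatrix parameter set.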

Assets/ScriptableRenderLoop/HDRenderLoop/Utilities.cs (28 changes)


disposed = true;
}
}
public static Matrix4x4 GetViewProjectionMatrix(Matrix4x4 worldToViewMatrix, Matrix4x4 projectionMatrix)
{
// The actual projection matrix used in shaders is actually massaged a bit to work across all platforms

return gpuVP;
}
public static Matrix4x4 GetViewProjectionMatrix(Camera camera)
public static HDRenderLoop.HDCamera GetHDCamera(Camera camera)
// The actual projection matrix used in shaders is actually massaged a bit to work across all platforms
// (different Z value ranges etc.)
var gpuProj = GL.GetGPUProjectionMatrix(camera.projectionMatrix, false);
var gpuVP = gpuProj * camera.worldToCameraMatrix;
HDRenderLoop.HDCamera hdCamera = new HDRenderLoop.HDCamera();
hdCamera.camera = camera;
hdCamera.screenSize = new Vector4(camera.pixelWidth, camera.pixelHeight, 1.0f / camera.pixelWidth, 1.0f / camera.pixelHeight);
return gpuVP;
}
// The actual projection matrix used in shaders is actually massaged a bit to work across all platforms
// (different Z value ranges etc.)
var gpuProj = GL.GetGPUProjectionMatrix(camera.projectionMatrix, false);
var gpuVP = gpuProj * camera.worldToCameraMatrix;
public static Vector4 ComputeScreenSize(Camera camera)
hdCamera.viewProjectionMatrix = gpuVP;
hdCamera.invViewProjectionMatrix = gpuVP.inverse;
return hdCamera;
}
public static void SetupMaterialHDCamera(HDRenderLoop.HDCamera hdCamera, Material material)
return new Vector4(camera.pixelWidth, camera.pixelHeight, 1.0f / camera.pixelWidth, 1.0f / camera.pixelHeight);
material.SetVector("_ScreenSize", hdCamera.screenSize);
material.SetMatrix("_ViewProjMatrix", hdCamera.viewProjectionMatrix);
material.SetMatrix("_InvViewProjMatrix", hdCamera.invViewProjectionMatrix);
}
// TEMP: These functions should be implemented C++ side, for now do it in C#
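
Usage sketch for the two new helpers (illustrative; the material name is one of those used in TilePass.cs above). GetHDCamera replaces the separate GetViewProjectionMatrix(camera).inverse and ComputeScreenSize(camera) calls each call site previously made, and SetupMaterialHDCamera replaces the repeated per-material SetMatrix/SetVector pairs:

// Before this commit (per call site): recompute and push the constants by hand.
//   var invViewProj = Utilities.GetViewProjectionMatrix(camera).inverse;
//   var screenSize  = Utilities.ComputeScreenSize(camera);
//   m_DeferredDirectMaterial.SetMatrix("_InvViewProjMatrix", invViewProj);
//   m_DeferredDirectMaterial.SetVector("_ScreenSize",        screenSize);

// After: compute once per camera, then bind per material (also sets the new _ViewProjMatrix).
HDRenderLoop.HDCamera hdCamera = Utilities.GetHDCamera(camera);
Utilities.SetupMaterialHDCamera(hdCamera, m_DeferredDirectMaterial);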

Assets/ScriptableRenderLoop/ShaderLibrary/Common.hlsl (7 changes)


// From deferred or compute shader
// depth must be the depth from the raw depth buffer. This allow to handle all kind of depth automatically with the inverse view projection matrix.
// For information. In Unity Depth is always in range 0..1 (even on OpenGL) but can be reversed.
void UpdatePositionInput(float depth, float4x4 invViewProjectionMatrix, float4x4 worlToViewProjectionMatrix, inout PositionInputs posInput)
void UpdatePositionInput(float depth, float4x4 invViewProjectionMatrix, float4x4 ViewProjectionMatrix, inout PositionInputs posInput)
{
posInput.depthRaw = depth;

posInput.positionWS = hpositionWS.xyz / hpositionWS.w;
// The compiler should optimize this (less expensive than reconstruct depth VS from depth buffer)
posInput.depthVS = mul(worlToViewProjectionMatrix, float4(posInput.positionWS, 1.0)).z;
posInput.depthVS = mul(ViewProjectionMatrix, float4(posInput.positionWS, 1.0)).w;
posInput.positionCS *= posInput.depthVS;
}

// TODO: it is an approx, need a correct value where we use projection matrix to reproject the depth from VS
posInput.depthRaw = posInput.positionCS.z / posInput.depthVS;
posInput.positionCS = float4((posInput.positionSS - 0.5) * float2(2.0, -2.0), posInput.depthRaw, 1.0) * posInput.depthVS;
// TODO: Do we need to flip Y axis here on OGL ?
posInput.positionCS = float4(posInput.positionSS.xy * 2.0 - 1.0, posInput.depthRaw, 1.0) * posInput.depthVS;
// Just add the offset along the view vector is sufficiant for world position
posInput.positionWS += V * depthOffsetVS;
}
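
For reference, a short note on the fix itself (my summary, not text from the commit): in Unity's view space the camera looks down -Z, so the old code's mul(worldToView, positionWS).z is negative in front of the camera, while the w component of the clip-space position is the positive linear view depth. For a world-space point p:

\[
\begin{pmatrix} x_c \\ y_c \\ z_c \\ w_c \end{pmatrix}
= M_{proj}\, M_{view} \begin{pmatrix} p \\ 1 \end{pmatrix},
\qquad
w_c = -z_{view} = \mathrm{depthVS} > 0,
\]

because the last row of a perspective projection matrix is (0, 0, -1, 0), and the platform fix-up applied by GL.GetGPUProjectionMatrix only remaps z (and possibly flips y), leaving that row intact. That is what the corrected line mul(ViewProjectionMatrix, float4(posInput.positionWS, 1.0)).w relies on, and it is the depthVS issue named in the commit title.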
