比较提交

...
此合并请求的变更与目标分支存在冲突。
/Assets/ScriptableRenderPipeline/fptl/UnityStandardForwardNew.cginc
/Assets/ScriptableRenderPipeline/fptl/LightingUtils.hlsl
/Assets/ScriptableRenderPipeline/fptl/lightlistbuild-bigtile.compute
/Assets/ScriptableRenderPipeline/fptl/FptlLighting.cs
/Assets/ScriptableRenderPipeline/fptl/Internal-DeferredComputeShading.compute
/Assets/ScriptableRenderPipeline/fptl/Internal-DeferredReflections.shader
/Assets/ScriptableRenderPipeline/fptl/Internal-DeferredShading.shader
/Assets/ScriptableRenderPipeline/fptl/LightingTemplate.hlsl
/Assets/ScriptableRenderPipeline/fptl/ReflectionTemplate.hlsl
/Assets/ScriptableRenderPipeline/fptl/TiledLightingTemplate.hlsl
/Assets/ScriptableRenderPipeline/fptl/TiledLightingUtils.hlsl
/Assets/ScriptableRenderPipeline/fptl/lightlistbuild.compute

5 次代码提交

作者 SHA1 备注 提交日期
robbiesri f7a1cea9 Final bits to support FPTL in VR 8 年前
Robert Srinivasiah 16072312 More progress to VR FPTL 8 年前
robbiesri 43c6af23 Partial commit of compute updating 8 年前
Robert Srinivasiah a269a18b Get rendertargets working with VR 8 年前
robbiesri 0e031b45 Starting FPTL VR work 8 年前
共有 12 个文件被更改,包括 587 次插入和 148 次删除
  1. 12
      Assets/ScriptableRenderPipeline/fptl/UnityStandardForwardNew.cginc
  2. 34
      Assets/ScriptableRenderPipeline/fptl/LightingUtils.hlsl
  3. 9
      Assets/ScriptableRenderPipeline/fptl/lightlistbuild-bigtile.compute
  4. 557
      Assets/ScriptableRenderPipeline/fptl/FptlLighting.cs
  5. 7
      Assets/ScriptableRenderPipeline/fptl/Internal-DeferredComputeShading.compute
  6. 15
      Assets/ScriptableRenderPipeline/fptl/Internal-DeferredReflections.shader
  7. 19
      Assets/ScriptableRenderPipeline/fptl/Internal-DeferredShading.shader
  8. 20
      Assets/ScriptableRenderPipeline/fptl/LightingTemplate.hlsl
  9. 7
      Assets/ScriptableRenderPipeline/fptl/ReflectionTemplate.hlsl
  10. 6
      Assets/ScriptableRenderPipeline/fptl/TiledLightingTemplate.hlsl
  11. 27
      Assets/ScriptableRenderPipeline/fptl/TiledLightingUtils.hlsl
  12. 22
      Assets/ScriptableRenderPipeline/fptl/lightlistbuild.compute

12
Assets/ScriptableRenderPipeline/fptl/UnityStandardForwardNew.cginc


{
float linZ = GetLinearZFromSVPosW(i.pos.w); // matching script side where camera space is right handed.
float3 vP = GetViewPosFromLinDepth(i.pos.xy, linZ);
float3 vPw = mul(g_mViewToWorld, float4(vP,1.0)).xyz;
float3 Vworld = normalize(mul((float3x3) g_mViewToWorld, -vP).xyz); // not same as unity_CameraToWorld
//float3 vPw = mul(g_mViewToWorld, float4(vP,1.0)).xyz;
//float3 Vworld = normalize(mul((float3x3) g_mViewToWorld, -vP).xyz); // not same as unity_CameraToWorld
float3 vPw = mul(g_mViewToWorldArr[unity_StereoEyeIndex], float4(vP, 1.0)).xyz;
float3 Vworld = normalize(mul((float3x3) g_mViewToWorldArr[unity_StereoEyeIndex], -vP).xyz); // not same as unity_CameraToWorld
#ifdef _PARALLAXMAP
half3 tangent = i.tangentToWorldAndParallax[0].xyz;

{
float linZ = GetLinearZFromSVPosW(i.pos.w); // matching script side where camera space is right handed.
float3 vP = GetViewPosFromLinDepth(i.pos.xy, linZ);
float3 vPw = mul(g_mViewToWorld, float4(vP,1.0)).xyz;
float3 Vworld = normalize(mul((float3x3) g_mViewToWorld, -vP).xyz); // not same as unity_CameraToWorld
//float3 vPw = mul(g_mViewToWorld, float4(vP,1.0)).xyz;
//float3 Vworld = normalize(mul((float3x3) g_mViewToWorld, -vP).xyz); // not same as unity_CameraToWorld
float3 vPw = mul(g_mViewToWorldArr[unity_StereoEyeIndex], float4(vP, 1.0)).xyz;
float3 Vworld = normalize(mul((float3x3) g_mViewToWorldArr[unity_StereoEyeIndex], -vP).xyz); // not same as unity_CameraToWorld
#ifdef _PARALLAXMAP
half3 tangent = i.tangentToWorldAndParallax[0].xyz;

34
Assets/ScriptableRenderPipeline/fptl/LightingUtils.hlsl


#include "LightDefinitions.cs.hlsl"
uniform float4x4 g_mViewToWorld;
uniform float4x4 g_mWorldToView; // used for reflection only
uniform float4x4 g_mScrProjection;
uniform float4x4 g_mInvScrProjection;
//uniform float4x4 g_mViewToWorld;
//uniform float4x4 g_mWorldToView; // used for reflection only
//uniform float4x4 g_mScrProjection;
//uniform float4x4 g_mInvScrProjection;
uniform float4x4 g_mViewToWorldArr[2];
uniform float4x4 g_mWorldToViewArr[2]; // used for reflection only
uniform float4x4 g_mScrProjectionArr[2];
uniform float4x4 g_mInvScrProjectionArr[2];
uniform uint g_widthRT;
uniform uint g_heightRT;

{
float fSx = g_mScrProjection[0].x;
//float fCx = g_mScrProjection[2].x;
float fCx = g_mScrProjection[0].z;
float fSy = g_mScrProjection[1].y;
//float fCy = g_mScrProjection[2].y;
float fCy = g_mScrProjection[1].z;
//float fSx = g_mScrProjection[0].x;
////float fCx = g_mScrProjection[2].x;
//float fCx = g_mScrProjection[0].z;
//float fSy = g_mScrProjection[1].y;
////float fCy = g_mScrProjection[2].y;
//float fCy = g_mScrProjection[1].z;
float fSx = g_mScrProjectionArr[unity_StereoEyeIndex][0].x;
float fCx = g_mScrProjectionArr[unity_StereoEyeIndex][0].z;
float fSy = g_mScrProjectionArr[unity_StereoEyeIndex][1].y;
float fCy = g_mScrProjectionArr[unity_StereoEyeIndex][1].z;
#if USE_LEFTHAND_CAMERASPACE
return fLinDepth*float3( ((v2ScrPos.x-fCx)/fSx), ((v2ScrPos.y-fCy)/fSy), 1.0 );

float GetLinearDepth(float zDptBufSpace) // 0 is near 1 is far
{
// todo (simplify): m22 is zero and m23 is +1/-1 (depends on left/right hand proj)
float m22 = g_mInvScrProjection[2].z, m23 = g_mInvScrProjection[2].w;
float m32 = g_mInvScrProjection[3].z, m33 = g_mInvScrProjection[3].w;
//float m22 = g_mInvScrProjection[2].z, m23 = g_mInvScrProjection[2].w;
//float m32 = g_mInvScrProjection[3].z, m33 = g_mInvScrProjection[3].w;
float m22 = g_mInvScrProjectionArr[unity_StereoEyeIndex][2].z, m23 = g_mInvScrProjectionArr[unity_StereoEyeIndex][2].w;
float m32 = g_mInvScrProjectionArr[unity_StereoEyeIndex][3].z, m33 = g_mInvScrProjectionArr[unity_StereoEyeIndex][3].w;
return (m22*zDptBufSpace+m23) / (m32*zDptBufSpace+m33);

9
Assets/ScriptableRenderPipeline/fptl/lightlistbuild-bigtile.compute


uniform int g_iNrVisibLights;
uniform uint2 g_viDimensions;
uniform float4x4 g_mInvScrProjection;
uniform float4x4 g_mScrProjection;
uniform float4x4 g_mScrProjection; // uh, I hope this doesn't conflict with LightingUtils.hlsl
uniform int g_iEye;
StructuredBuffer<float3> g_vBoundsBuffer : register( t1 );
StructuredBuffer<SFiniteLightData> g_vLightData : register( t2 );

GroupMemoryBarrierWithGroupSync();
iNrCoarseLights = lightOffs;
int offs = tileIDX.y*nrBigTilesX + tileIDX.x;
// VR - adjust this to offset into the correct portion of the light list
// right eye will get second half
// might not actually need to do this due to auto-sync between dispatches
//int offs = tileIDX.y*nrBigTilesX + tileIDX.x;
int offs = tileIDX.y*nrBigTilesX + tileIDX.x + (g_iEye * nrBigTilesX * nrBigTilesY);
for(i=t; i<(iNrCoarseLights+1); i+=NR_THREADS)
g_vLightList[MAX_NR_BIGTILE_LIGHTS_PLUSONE*offs + i] = i==0 ? iNrCoarseLights : lightsListLDS[i-1];

557
Assets/ScriptableRenderPipeline/fptl/FptlLighting.cs


using System;
using System.Collections.Generic;
using UnityEngine.VR;
namespace UnityEngine.Experimental.Rendering.Fptl
{
public class FptlLightingInstance : RenderPipeline

private static ComputeBuffer s_ConvexBoundsBuffer;
private static ComputeBuffer s_AABBBoundsBuffer;
private static ComputeBuffer s_LightList;
private static ComputeBuffer s_DirLightList;
private static ComputeBuffer s_DirLightList;
//private static int s_UnifiedDirLightListEyeOffset; // VR
private static ComputeBuffer s_UnifiedLightDataBuffer; // VR
private static int s_UnifiedLightDataEyeOffset; // VR
private static ComputeBuffer s_UnifiedDirLightList; // VR
private static float[] s_UnifiedDirLightListBaseCount; //VR
private static ComputeBuffer s_BigTileLightList; // used for pre-pass coarse culling on 64x64 tiles
private static int s_GenListPerBigTileKernel;

public bool enableReflectionProbeDebug = false;
public bool enableComputeLightEvaluation = false;
const bool k_UseDepthBuffer = true;// // only has an impact when EnableClustered is true (requires a depth-prepass)
const bool k_UseAsyncCompute = true; // should not use on mobile
//const bool k_UseAsyncCompute = true; // should not use on mobile
const bool k_UseAsyncCompute = false; // Easier for ordering for Stereo prototype
const int k_Log2NumClusters = 6; // accepted range is from 0 to 6. NumClusters is 1<<g_iLog2NumClusters
const int k_Log2NumClusters = 6; // accepted range is from 0 to 6. NumClusters is 1<<g_iLog2NumClusters
const float k_ClustLogBase = 1.02f; // each slice 2% bigger than the previous
float m_ClustScale;
private static ComputeBuffer s_PerVoxelLightLists;

private static int s_WidthOnRecord;
private static int s_HeightOnRecord;
private static bool s_stereoDoublewideOnRecord = false;
Matrix4x4[] m_MatWorldToShadow = new Matrix4x4[k_MaxLights * k_MaxShadowmapPerLights];
Vector4[] m_DirShadowSplitSpheres = new Vector4[k_MaxDirectionalSplit];

private Texture2D m_LightAttentuationTexture;
private int m_shadowBufferID;
// VR state bits
private bool stereoActive;
private bool stereoSinglePass;
private bool stereoDoublewide;
private RenderTextureDesc cachedStereoDesc;
public void Cleanup()
{
if (m_DeferredMaterial) DestroyImmediate(m_DeferredMaterial);

s_ConvexBoundsBuffer.Release();
s_LightDataBuffer.Release();
ReleaseResolutionDependentBuffers();
s_DirLightList.Release();
if (enableClustered)
s_DirLightList.Release();
s_UnifiedLightDataBuffer.Release();
s_UnifiedDirLightList.Release();
if (enableClustered)
{
if (s_GlobalLightListAtomic != null)
s_GlobalLightListAtomic.Release();

if (s_DirLightList != null)
s_DirLightList.Release();
if (enableClustered)
if (s_UnifiedLightDataBuffer != null)
s_UnifiedLightDataBuffer.Release();
if (s_UnifiedDirLightList != null)
s_UnifiedDirLightList.Release();
if (enableClustered)
{
if (s_GlobalLightListAtomic != null)
s_GlobalLightListAtomic.Release();

s_LightDataBuffer = new ComputeBuffer(MaxNumLights, System.Runtime.InteropServices.Marshal.SizeOf(typeof(SFiniteLightData)));
s_DirLightList = new ComputeBuffer(MaxNumDirLights, System.Runtime.InteropServices.Marshal.SizeOf(typeof(DirectionalLight)));
buildScreenAABBShader.SetBuffer(s_GenAABBKernel, "g_data", s_ConvexBoundsBuffer);
s_UnifiedLightDataBuffer = new ComputeBuffer(MaxNumLights*2, System.Runtime.InteropServices.Marshal.SizeOf(typeof(SFiniteLightData)));
s_UnifiedDirLightList = new ComputeBuffer(MaxNumDirLights*2, System.Runtime.InteropServices.Marshal.SizeOf(typeof(DirectionalLight)));
buildScreenAABBShader.SetBuffer(s_GenAABBKernel, "g_data", s_ConvexBoundsBuffer);
//m_BuildScreenAABBShader.SetBuffer(kGenAABBKernel, "g_vBoundsBuffer", m_aabbBoundsBuffer);
m_DeferredMaterial.SetBuffer("g_vLightData", s_LightDataBuffer);
m_DeferredMaterial.SetBuffer("g_dirLightData", s_DirLightList);

s_BigTileLightList = null;
m_shadowBufferID = Shader.PropertyToID("g_tShadowBuffer");
s_UnifiedDirLightListBaseCount = new float[4];
static void SetupGBuffer(int width, int height, CommandBuffer cmd)
{
//static void SetupGBuffer(int width, int height, CommandBuffer cmd)
static void SetupGBuffer(RenderTextureDesc baseDesc, CommandBuffer cmd)
{
var format10 = RenderTextureFormat.ARGB32;
if (SystemInfo.SupportsRenderTextureFormat(RenderTextureFormat.ARGB2101010))
format10 = RenderTextureFormat.ARGB2101010;

// so we make it think we always render in HDR
cmd.EnableShaderKeyword ("UNITY_HDR_ON");
//@TODO: GetGraphicsCaps().buggyMRTSRGBWriteFlag
cmd.GetTemporaryRT(s_GBufferAlbedo, width, height, 0, FilterMode.Point, RenderTextureFormat.ARGB32, RenderTextureReadWrite.Default);
cmd.GetTemporaryRT(s_GBufferSpecRough, width, height, 0, FilterMode.Point, RenderTextureFormat.ARGB32, RenderTextureReadWrite.Default);
cmd.GetTemporaryRT(s_GBufferNormal, width, height, 0, FilterMode.Point, format10, RenderTextureReadWrite.Linear);
cmd.GetTemporaryRT(s_GBufferEmission, width, height, 0, FilterMode.Point, formatHDR, RenderTextureReadWrite.Linear);
cmd.GetTemporaryRT(s_GBufferZ, width, height, 24, FilterMode.Point, RenderTextureFormat.Depth);
cmd.GetTemporaryRT(s_CameraDepthTexture, width, height, 24, FilterMode.Point, RenderTextureFormat.Depth);
cmd.GetTemporaryRT(s_CameraTarget, width, height, 0, FilterMode.Point, formatHDR, RenderTextureReadWrite.Default, 1, true); // rtv/uav
//@TODO: GetGraphicsCaps().buggyMRTSRGBWriteFlag
//cmd.GetTemporaryRT(s_GBufferAlbedo, width, height, 0, FilterMode.Point, RenderTextureFormat.ARGB32, RenderTextureReadWrite.Default);
//cmd.GetTemporaryRT(s_GBufferSpecRough, width, height, 0, FilterMode.Point, RenderTextureFormat.ARGB32, RenderTextureReadWrite.Default);
//cmd.GetTemporaryRT(s_GBufferNormal, width, height, 0, FilterMode.Point, format10, RenderTextureReadWrite.Linear);
//cmd.GetTemporaryRT(s_GBufferEmission, width, height, 0, FilterMode.Point, formatHDR, RenderTextureReadWrite.Linear);
//cmd.GetTemporaryRT(s_GBufferZ, width, height, 24, FilterMode.Point, RenderTextureFormat.Depth);
//cmd.GetTemporaryRT(s_CameraDepthTexture, width, height, 24, FilterMode.Point, RenderTextureFormat.Depth);
//cmd.GetTemporaryRT(s_CameraTarget, width, height, 0, FilterMode.Point, formatHDR, RenderTextureReadWrite.Default, 1, true); // rtv/uav
// finish this...
RenderTextureDesc modDesc = baseDesc;
// VR - do i need to set the AA value?
modDesc.depthBufferBits = 0;
modDesc.colorFormat = RenderTextureFormat.ARGB32;
modDesc.flags &= ~(RenderTextureCreationFlags.EnableRandomWrite);
modDesc.flags |= RenderTextureCreationFlags.SRGB;
cmd.GetTemporaryRT(s_GBufferAlbedo, modDesc, FilterMode.Point);
cmd.GetTemporaryRT(s_GBufferSpecRough, modDesc, FilterMode.Point);
modDesc.colorFormat = format10;
modDesc.flags &= ~(RenderTextureCreationFlags.SRGB);
cmd.GetTemporaryRT(s_GBufferNormal, modDesc, FilterMode.Point);
modDesc.colorFormat = formatHDR;
cmd.GetTemporaryRT(s_GBufferEmission, modDesc, FilterMode.Point);
modDesc.colorFormat = RenderTextureFormat.Depth;
modDesc.depthBufferBits = 24;
cmd.GetTemporaryRT(s_GBufferZ, modDesc, FilterMode.Point);
cmd.GetTemporaryRT(s_CameraDepthTexture, modDesc, FilterMode.Point);
modDesc.colorFormat = formatHDR;
modDesc.depthBufferBits = 0;
modDesc.flags |= RenderTextureCreationFlags.SRGB;
modDesc.flags |= RenderTextureCreationFlags.EnableRandomWrite;
cmd.GetTemporaryRT(s_CameraTarget, modDesc, FilterMode.Point);
var colorMRTs = new RenderTargetIdentifier[4] { s_GBufferAlbedo, s_GBufferSpecRough, s_GBufferNormal, s_GBufferEmission };
cmd.SetRenderTarget(colorMRTs, new RenderTargetIdentifier(s_GBufferZ));
cmd.ClearRenderTarget(true, true, new Color(0, 0, 0, 0));

static void RenderGBuffer(CullResults cull, Camera camera, ScriptableRenderContext loop)
static void RenderGBuffer(CullResults cull, Camera camera, ScriptableRenderContext loop, bool stereoDw, RenderTextureDesc stereoDesc)
SetupGBuffer(camera.pixelWidth, camera.pixelHeight, cmd);
if (stereoDw)
{
//SetupGBuffer(stereoDesc.width, stereoDesc.height, cmd);
SetupGBuffer(stereoDesc, cmd);
}
else
{
//SetupGBuffer(camera.pixelWidth, camera.pixelHeight, cmd);
RenderTextureDesc baseDesc = new RenderTextureDesc(camera.pixelWidth, camera.pixelHeight);
SetupGBuffer(baseDesc, cmd);
}
loop.ExecuteCommandBuffer(cmd);
cmd.Dispose();

cmd.SetGlobalFloat("g_isOpaquesOnlyEnabled", 0);
}
cmd.name = "DoTiledDeferredLighting";
// VR - There is a problem here with the deferred material overriding the light data list!
// we push the global param, but the material smooshes it!
if (stereoDoublewide)
{
m_DeferredMaterial.SetBuffer("g_vLightData", s_UnifiedLightDataBuffer);
m_DeferredMaterial.SetBuffer("g_dirLightData", s_UnifiedDirLightList);
m_DeferredReflectionMaterial.SetBuffer("g_vLightData", s_UnifiedLightDataBuffer);
}
else
{
m_DeferredMaterial.SetBuffer("g_vLightData", s_LightDataBuffer);
m_DeferredMaterial.SetBuffer("g_dirLightData", s_DirLightList);
m_DeferredReflectionMaterial.SetBuffer("g_vLightData", s_LightDataBuffer);
}
cmd.name = "DoTiledDeferredLighting";
//cmd.SetRenderTarget(new RenderTargetIdentifier(kGBufferEmission), new RenderTargetIdentifier(kGBufferZ));
//cmd.Blit (kGBufferNormal, (RenderTexture)null); // debug: display normals

// VR - none of this will work in VR...
var w = camera.pixelWidth;
var h = camera.pixelHeight;
var numTilesX = (w + 7) / 8;

}
else
{
cmd.Blit(BuiltinRenderTextureType.CameraTarget, s_CameraTarget, m_DeferredMaterial, 0);
cmd.Blit(BuiltinRenderTextureType.CameraTarget, s_CameraTarget, m_DeferredReflectionMaterial, 0);
}
cmd.Blit(BuiltinRenderTextureType.CameraTarget, s_CameraTarget, m_DeferredMaterial, 0);
cmd.Blit(BuiltinRenderTextureType.CameraTarget, s_CameraTarget, m_DeferredReflectionMaterial, 0);
}
// Set the intermediate target for compositing (skybox, etc)

static Matrix4x4 WorldToCamera(Camera camera)
{
return GetFlipMatrix() * camera.worldToCameraMatrix;
}
static Matrix4x4 StereoWorldToCamera(Camera camera, Camera.StereoscopicEye eye)
{
//return GetFlipMatrix() * camera.worldToCameraMatrix;
return GetFlipMatrix() * camera.GetStereoViewMatrix(eye);
}
static Matrix4x4 CameraToWorld(Camera camera)
{
return camera.cameraToWorldMatrix * GetFlipMatrix();
static Matrix4x4 CameraToWorld(Camera camera)
static Matrix4x4 StereoCameraToWorld(Camera camera, Camera.StereoscopicEye eye)
return camera.cameraToWorldMatrix * GetFlipMatrix();
return camera.GetStereoViewMatrix(eye).inverse * GetFlipMatrix();
}
static Matrix4x4 CameraProjection(Camera camera)

static int UpdateDirectionalLights(Camera camera, IList<VisibleLight> visibleLights)
static Matrix4x4 CameraStereoProjection(Camera camera, Camera.StereoscopicEye eye)
{
return (camera.GetStereoProjectionMatrix(eye) * GetFlipMatrix());
}
static int UpdateDirectionalLights(Camera camera, IList<VisibleLight> visibleLights)
{
var dirLightCount = 0;
var lights = new List<DirectionalLight>();

m_Shadow3X3PCFTerms[3] = new Vector4(-flTexelEpsilonX, -flTexelEpsilonY, 0.0f, 0.0f);
}
int GenerateSourceLightBuffers(Camera camera, CullResults inputs)
// VR - since the final deferred pass needs to access the light lists per eye
// We need to output a combined two eye light list to source from
// along with an offset index into the right eye portion of the light list
int GenerateSourceLightBuffers(Camera camera, CullResults inputs, Camera.StereoscopicEye eye)
{
var probes = inputs.visibleReflectionProbes;
//ReflectionProbe[] probes = Object.FindObjectsOfType<ReflectionProbe>();

var lightData = new SFiniteLightData[numVolumes];
var boundData = new SFiniteLightBound[numVolumes];
var worldToView = WorldToCamera(camera);
var boundData = new SFiniteLightBound[numVolumes];
//var worldToView = WorldToCamera(camera);
Matrix4x4 worldToView;
if (stereoDoublewide)
{
worldToView = StereoWorldToCamera(camera, eye);
}
else
{
worldToView = WorldToCamera(camera);
}
bool isNegDeterminant = Vector3.Dot(worldToView.GetColumn(0), Vector3.Cross(worldToView.GetColumn(1), worldToView.GetColumn(2)))<0.0f; // 3x3 Determinant.
uint shadowLightIndex = 0;

cmd.Dispose();
}
void CheckVRState()
{
stereoActive = VRSettings.isDeviceActive;
if (stereoActive)
{
cachedStereoDesc = VRDevice.GetVREyeTextureDesc();
stereoDoublewide = (cachedStereoDesc.dimension == TextureDimension.Tex2D);
}
else
{
stereoDoublewide = false;
}
}
void GetRTWH(Camera camera, out int width, out int height)
{
if (stereoDoublewide)
{
width = cachedStereoDesc.width;
height = cachedStereoDesc.height;
}
else
{
width = camera.pixelWidth;
height = camera.pixelHeight;
}
}
void GetEyeRTWH(Camera camera, out int width, out int height)
{
GetRTWH(camera, out width, out height);
if (stereoDoublewide)
{
width = width / 2;
}
}
var w = camera.pixelWidth;
var h = camera.pixelHeight;
CheckVRState();
ResizeIfNecessary(w, h);
int w, h;
// might be able to use GetEyeRTWH
GetRTWH(camera, out w, out h);
//if (stereoDoublewide)
//{
// w = cachedStereoDesc.width;
// h = cachedStereoDesc.height;
//}
//else
//{
// w = camera.pixelWidth;
// h = camera.pixelHeight;
//}
if (stereoDoublewide)
{
// VR - with double wide, our texture is double eye width
// That is the width we want to use for the light list generation...
ResizeIfNecessary(w/2, h, true);
}
else
{
ResizeIfNecessary(w, h, false);
}
// do anything we need to do upon a new frame.
NewFrame ();

#pragma warning restore 162
// generate g-buffer before shadows to leverage async compute
// forward opaques just write to depth.
loop.SetupCameraProperties(camera);
RenderGBuffer(cullResults, camera, loop);
if (stereoActive)
{
loop.StereoSetupCameraProperties(camera);
loop.StartMultiEye(camera);
}
else
{
loop.SetupCameraProperties(camera);
}
RenderGBuffer(cullResults, camera, loop, stereoDoublewide, cachedStereoDesc);
// camera to screen matrix (and it's inverse)
var proj = CameraProjection(camera);
var temp = new Matrix4x4();
temp.SetRow(0, new Vector4(0.5f * w, 0.0f, 0.0f, 0.5f * w));
temp.SetRow(1, new Vector4(0.0f, 0.5f * h, 0.0f, 0.5f * h));
temp.SetRow(2, new Vector4(0.0f, 0.0f, 0.5f, 0.5f));
temp.SetRow(3, new Vector4(0.0f, 0.0f, 0.0f, 1.0f));
var projscr = temp * proj;
var invProjscr = projscr.inverse;
if (stereoActive)
{
loop.StopMultiEye(camera);
}
var projScrArr = new Matrix4x4[2];
var invProjScrArr = new Matrix4x4[2];
var viewToWorldArr = new Matrix4x4[2];
int numLights = 0, numDirLights = 0;
if (stereoDoublewide)
{
var unifiedLightData = new List<SFiniteLightData>();
var unifiedDirLightData = new List<DirectionalLight>();
for (int eye = 0; eye < 2; eye++)
{
// camera to screen matrix (and it's inverse)
var proj = CameraStereoProjection(camera, (Camera.StereoscopicEye)eye);
int eyeWidth = w / 2;
var temp = new Matrix4x4();
// VR - I don't think I need to offset this because we are just trying to get
// working with an eye texture sized projection here, not the eye texture itself
temp.SetRow(0, new Vector4(0.5f * eyeWidth, 0.0f, 0.0f, 0.5f * eyeWidth));
temp.SetRow(1, new Vector4(0.0f, 0.5f * h, 0.0f, 0.5f * h));
temp.SetRow(2, new Vector4(0.0f, 0.0f, 0.5f, 0.5f));
temp.SetRow(3, new Vector4(0.0f, 0.0f, 0.0f, 1.0f));
var projscr = temp * proj;
var invProjscr = projscr.inverse;
projScrArr[eye] = projscr;
invProjScrArr[eye] = invProjscr;
viewToWorldArr[eye] = StereoCameraToWorld(camera, (Camera.StereoscopicEye)eye);
// build per tile light lists
// VR - we gotta stash out each eye's light list!
numLights = GenerateSourceLightBuffers(camera, cullResults, (Camera.StereoscopicEye)eye);
var generatedLightData = new SFiniteLightData[numLights];
s_LightDataBuffer.GetData(generatedLightData);
unifiedLightData.AddRange(generatedLightData);
if (0 == eye)
{
// this is the offset point in the final combined light data buffer
s_UnifiedLightDataEyeOffset = numLights;
}
BuildPerTileLightLists(camera, loop, numLights, projscr, invProjscr, (Camera.StereoscopicEye)eye);
// VR - same here, we need to double up the directional light lists
numDirLights = UpdateDirectionalLights(camera, cullResults.visibleLights);
var generatedDirLightData = new DirectionalLight[numDirLights];
s_DirLightList.GetData(generatedDirLightData);
unifiedDirLightData.AddRange(generatedDirLightData);
//if (0 == eye)
//{
// s_UnifiedDirLightListEyeOffset = numDirLights;
//}
s_UnifiedDirLightListBaseCount[eye * 2 + 0] = eye * s_UnifiedDirLightListBaseCount[1]; // 0 for eye=0, previous numdir for eye=1
s_UnifiedDirLightListBaseCount[eye * 2 + 1] = numDirLights;
}
// these are not allocated!
s_UnifiedLightDataBuffer.SetData(unifiedLightData.ToArray());
s_UnifiedDirLightList.SetData(unifiedDirLightData.ToArray());
// VR - make this VR aware of course!
// Push all global params
//PushGlobalParams(camera, loop, CameraToWorld(camera), projscr, invProjscr, numDirLights);
PushGlobalParams(camera, loop, viewToWorldArr, projScrArr, invProjScrArr, numDirLights);
// pulled out async shadow maps (for now)
}
else
{
// camera to screen matrix (and it's inverse)
var proj = CameraProjection(camera);
var temp = new Matrix4x4();
temp.SetRow(0, new Vector4(0.5f * w, 0.0f, 0.0f, 0.5f * w));
temp.SetRow(1, new Vector4(0.0f, 0.5f * h, 0.0f, 0.5f * h));
temp.SetRow(2, new Vector4(0.0f, 0.0f, 0.5f, 0.5f));
temp.SetRow(3, new Vector4(0.0f, 0.0f, 0.0f, 1.0f));
var projscr = temp * proj;
var invProjscr = projscr.inverse;
// build per tile light lists
numLights = GenerateSourceLightBuffers(camera, cullResults, Camera.StereoscopicEye.Left);
BuildPerTileLightLists(camera, loop, numLights, projscr, invProjscr, Camera.StereoscopicEye.Left);
// render shadow maps (for mobile shadow map rendering should happen before we render g-buffer).
// on GCN it needs to be after to leverage async compute since we need the depth-buffer for optimal light list building.
if (k_UseAsyncCompute)
{
RenderShadowMaps(cullResults, loop);
loop.SetupCameraProperties(camera);
}
// Push all global params
numDirLights = UpdateDirectionalLights(camera, cullResults.visibleLights);
s_UnifiedDirLightListBaseCount[0] = s_UnifiedDirLightListBaseCount[2] = 0;
s_UnifiedDirLightListBaseCount[1] = s_UnifiedDirLightListBaseCount[3] = numDirLights;
projScrArr[0] = projScrArr[1] = projscr;
invProjScrArr[0] = invProjScrArr[1]= invProjscr;
viewToWorldArr[0] = viewToWorldArr[1] = CameraToWorld(camera);
//PushGlobalParams(camera, loop, CameraToWorld(camera), projscr, invProjscr, numDirLights);
PushGlobalParams(camera, loop, viewToWorldArr, projScrArr, invProjScrArr, numDirLights);
}
if (stereoActive)
{
loop.StartMultiEye(camera);
}
// build per tile light lists
var numLights = GenerateSourceLightBuffers(camera, cullResults);
BuildPerTileLightLists(camera, loop, numLights, projscr, invProjscr);
// render shadow maps (for mobile shadow map rendering should happen before we render g-buffer).
// on GCN it needs to be after to leverage async compute since we need the depth-buffer for optimal light list building.
if(k_UseAsyncCompute)
{
RenderShadowMaps(cullResults, loop);
loop.SetupCameraProperties(camera);
}
// Push all global params
var numDirLights = UpdateDirectionalLights(camera, cullResults.visibleLights);
PushGlobalParams(camera, loop, CameraToWorld(camera), projscr, invProjscr, numDirLights);
// do deferred lighting
DoTiledDeferredLighting(camera, loop, numLights, numDirLights);
// do deferred lighting
DoTiledDeferredLighting(camera, loop, numLights, numDirLights);
// VR - no forward in the test scene
RenderForward(cullResults, camera, loop, true); // opaques only (requires a depth pre-pass)
//RenderForward(cullResults, camera, loop, true); // opaques only (requires a depth pre-pass)
m_SkyboxHelper.Draw(loop, camera);
//m_SkyboxHelper.Draw(loop, camera);
if(enableClustered) RenderForward(cullResults, camera, loop, false);
//if(enableClustered) RenderForward(cullResults, camera, loop, false);
// debug views.
if (enableDrawLightBoundsDebug) DrawLightBoundsDebug(loop, cullResults.visibleLights.Length);

// bind depth surface for editor grid/gizmo/selection rendering
if (camera.cameraType == CameraType.SceneView)
{
var cmd = new CommandBuffer();
cmd.SetRenderTarget(BuiltinRenderTextureType.CameraTarget, new RenderTargetIdentifier(s_CameraDepthTexture));
loop.ExecuteCommandBuffer(cmd);
cmd.Dispose();
}
loop.Submit();
//// bind depth surface for editor grid/gizmo/selection rendering
//if (camera.cameraType == CameraType.SceneView)
//{
// var cmd = new CommandBuffer();
// cmd.SetRenderTarget(BuiltinRenderTextureType.CameraTarget, new RenderTargetIdentifier(s_CameraDepthTexture));
// loop.ExecuteCommandBuffer(cmd);
// cmd.Dispose();
//}
if (stereoActive)
{
loop.StopMultiEye(camera);
loop.StereoEndRender(camera);
}
loop.Submit();
}

UpdateShadowConstants (cullResults.visibleLights, ref shadows);
}
void ResizeIfNecessary(int curWidth, int curHeight)
void ResizeIfNecessary(int curWidth, int curHeight, bool stereoDW)
if (curWidth != s_WidthOnRecord || curHeight != s_HeightOnRecord || s_LightList == null ||
if (curWidth != s_WidthOnRecord || curHeight != s_HeightOnRecord || stereoDW != s_stereoDoublewideOnRecord || s_LightList == null ||
AllocResolutionDependentBuffers(curWidth, curHeight);
if (stereoDW)
{
AllocResolutionDependentBuffers(curWidth, curHeight, 2);
}
else
{
AllocResolutionDependentBuffers(curWidth, curHeight, 1);
}
}
s_stereoDoublewideOnRecord = stereoDW;
}
}
void ReleaseResolutionDependentBuffers()

return 8 * (1 << k_Log2NumClusters); // total footprint for all layers of the tile (measured in light index entries)
}
void AllocResolutionDependentBuffers(int width, int height)
void AllocResolutionDependentBuffers(int width, int height, int stereoMultiplier)
{
var nrTilesX = (width + 15) / 16;
var nrTilesY = (height + 15) / 16;

s_LightList = new ComputeBuffer(LightDefinitions.NR_LIGHT_MODELS * dwordsPerTile * nrTiles, sizeof(uint)); // enough list memory for a 4k x 4k display
//s_LightList = new ComputeBuffer(LightDefinitions.NR_LIGHT_MODELS * dwordsPerTile * nrTiles, sizeof(uint)); // enough list memory for a 4k x 4k display
s_LightList = new ComputeBuffer(LightDefinitions.NR_LIGHT_MODELS * dwordsPerTile * nrTiles * stereoMultiplier, sizeof(uint)); // enough list memory for a 4k x 4k display
if (enableClustered)
if (enableClustered)
// VR TODO - No stereo support yet...
var tileSizeClust = LightDefinitions.TILE_SIZE_CLUSTERED;
var nrTilesClustX = (width + (tileSizeClust-1)) / tileSizeClust;
var nrTilesClustY = (height + (tileSizeClust-1)) / tileSizeClust;

var nrBigTilesX = (width + 63) / 64;
var nrBigTilesY = (height + 63) / 64;
var nrBigTiles = nrBigTilesX * nrBigTilesY;
s_BigTileLightList = new ComputeBuffer(LightDefinitions.MAX_NR_BIGTILE_LIGHTS_PLUSONE * nrBigTiles, sizeof(uint));
}
//s_BigTileLightList = new ComputeBuffer(LightDefinitions.MAX_NR_BIGTILE_LIGHTS_PLUSONE * nrBigTiles, sizeof(uint));
s_BigTileLightList = new ComputeBuffer(LightDefinitions.MAX_NR_BIGTILE_LIGHTS_PLUSONE * nrBigTiles * stereoMultiplier, sizeof(uint));
}
}
void VoxelLightListGeneration(CommandBuffer cmd, Camera camera, int numLights, Matrix4x4 projscr, Matrix4x4 invProjscr)

cmd.DispatchCompute(buildPerVoxelLightListShader, s_GenListPerVoxelKernel, nrTilesClustX, nrTilesClustY, 1);
}
void BuildPerTileLightLists(Camera camera, ScriptableRenderContext loop, int numLights, Matrix4x4 projscr, Matrix4x4 invProjscr)
{
var w = camera.pixelWidth;
var h = camera.pixelHeight;
void BuildPerTileLightLists(Camera camera, ScriptableRenderContext loop, int numLights, Matrix4x4 projscr, Matrix4x4 invProjscr, Camera.StereoscopicEye eye)
{
//var w = camera.pixelWidth;
//var h = camera.pixelHeight;
int w, h;
GetEyeRTWH(camera, out w, out h);
var numTilesX = (w + 15) / 16;
var numTilesY = (h + 15) / 16;
var numBigTilesX = (w + 63) / 64;

// generate screen-space AABBs (used for both fptl and clustered).
if (numLights != 0)
{
var proj = CameraProjection(camera);
{
//var proj = CameraProjection(camera);
Matrix4x4 proj;
if (stereoDoublewide)
{
proj = CameraStereoProjection(camera, eye);
}
else
{
proj = CameraProjection(camera);
}
var temp = new Matrix4x4();
temp.SetRow(0, new Vector4(1.0f, 0.0f, 0.0f, 0.0f));
temp.SetRow(1, new Vector4(0.0f, 1.0f, 0.0f, 0.0f));

cmd.DispatchCompute(buildScreenAABBShader, s_GenAABBKernel, (numLights + 7) / 8, 1, 1);
}
// VR - Ok, for VR, we can do this work just fine, but we need to make sure we offset into the correct spot in the light list!
// enable coarse 2D pass on 64x64 tiles (used for both fptl and clustered).
if(enableBigTilePrepass)
{

SetMatrixCS(cmd, buildPerBigTileLightListShader, "g_mInvScrProjection", invProjscr);
cmd.SetComputeFloatParam(buildPerBigTileLightListShader, "g_fNearPlane", camera.nearClipPlane);
cmd.SetComputeFloatParam(buildPerBigTileLightListShader, "g_fFarPlane", camera.farClipPlane);
cmd.SetComputeBufferParam(buildPerBigTileLightListShader, s_GenListPerBigTileKernel, "g_vLightList", s_BigTileLightList);
cmd.SetComputeIntParam(buildPerBigTileLightListShader, "g_iEye", (int)eye);
cmd.SetComputeBufferParam(buildPerBigTileLightListShader, s_GenListPerBigTileKernel, "g_vLightList", s_BigTileLightList);
// VR - Here, we need to do a couple other things
// We need to make sure we land in the right spot for the light list gen
// and we also need to sample from the depth texture correctly
if( usingFptl ) // optimized for opaques only
{
cmd.SetComputeIntParams(buildPerTileLightListShader, "g_viDimensions", new int[2] { w, h });

cmd.SetComputeTextureParam(buildPerTileLightListShader, s_GenListPerTileKernel, "g_depth_tex", new RenderTargetIdentifier(s_CameraDepthTexture));
cmd.SetComputeIntParam(buildPerTileLightListShader, "g_iEye", (int)eye);
cmd.SetComputeTextureParam(buildPerTileLightListShader, s_GenListPerTileKernel, "g_depth_tex", new RenderTargetIdentifier(s_CameraDepthTexture));
cmd.SetComputeBufferParam(buildPerTileLightListShader, s_GenListPerTileKernel, "g_vLightList", s_LightList);
if(enableBigTilePrepass) cmd.SetComputeBufferParam(buildPerTileLightListShader, s_GenListPerTileKernel, "g_vBigTileLightList", s_BigTileLightList);
cmd.DispatchCompute(buildPerTileLightListShader, s_GenListPerTileKernel, numTilesX, numTilesY, 1);

cmd.Dispose();
}
void PushGlobalParams(Camera camera, ScriptableRenderContext loop, Matrix4x4 viewToWorld, Matrix4x4 scrProj, Matrix4x4 incScrProj, int numDirLights)
//void PushGlobalParams(Camera camera, ScriptableRenderContext loop, Matrix4x4 viewToWorld, Matrix4x4 scrProj, Matrix4x4 incScrProj, int numDirLights)
void PushGlobalParams(Camera camera, ScriptableRenderContext loop, Matrix4x4[] viewToWorld, Matrix4x4[] scrProj, Matrix4x4[] incScrProj, int numDirLights)
// VR - this is fine, but I should make this 'correct'
cmd.SetGlobalFloat("g_heightRT", (float)camera.pixelHeight);
cmd.SetGlobalMatrix("g_mViewToWorld", viewToWorld);
cmd.SetGlobalMatrix("g_mWorldToView", viewToWorld.inverse);
cmd.SetGlobalMatrix("g_mScrProjection", scrProj);
cmd.SetGlobalMatrix("g_mInvScrProjection", incScrProj);
cmd.SetGlobalBuffer("g_vLightData", s_LightDataBuffer);
cmd.SetGlobalFloat("g_heightRT", (float)camera.pixelHeight);
// VR - turn these into VR enabled arrays
//cmd.SetGlobalMatrix("g_mViewToWorld", viewToWorld);
//cmd.SetGlobalMatrix("g_mWorldToView", viewToWorld.inverse);
//cmd.SetGlobalMatrix("g_mScrProjection", scrProj);
//cmd.SetGlobalMatrix("g_mInvScrProjection", incScrProj);
var worldToView = new Matrix4x4[2];
worldToView[0] = viewToWorld[0].inverse;
worldToView[1] = viewToWorld[1].inverse;
cmd.SetGlobalMatrixArray("g_mViewToWorldArr", viewToWorld);
cmd.SetGlobalMatrixArray("g_mWorldToViewArr", worldToView);
cmd.SetGlobalMatrixArray("g_mScrProjectionArr", scrProj);
cmd.SetGlobalMatrixArray("g_mInvScrProjectionArr", incScrProj);
// VR - make sure this list is the unified light data list
//cmd.SetGlobalBuffer("g_vLightData", s_LightDataBuffer);
if (stereoDoublewide)
{
cmd.SetGlobalBuffer("g_vLightData", s_UnifiedLightDataBuffer);
cmd.SetGlobalFloat("g_lightDataEyeOffset", (float)s_UnifiedLightDataEyeOffset);
}
else
{
cmd.SetGlobalBuffer("g_vLightData", s_LightDataBuffer);
cmd.SetGlobalFloat("g_lightDataEyeOffset", (float)0);
}
cmd.SetGlobalTexture("_spotCookieTextures", m_CookieTexArray.GetTexCache());
cmd.SetGlobalTexture("_pointCookieTextures", m_CubeCookieTexArray.GetTexCache());

cmd.SetGlobalFloat("_reflRootHdrDecodeMult", defdecode.x);
cmd.SetGlobalFloat("_reflRootHdrDecodeExp", defdecode.y);
// VR- is this even needed anymore?
if(enableBigTilePrepass)
cmd.SetGlobalBuffer("g_vBigTileLightList", s_BigTileLightList);

}
}
cmd.SetGlobalFloat("g_nNumDirLights", numDirLights);
cmd.SetGlobalBuffer("g_dirLightData", s_DirLightList);
// VR - these needs to be VR adjusted
//cmd.SetGlobalFloat("g_nNumDirLights", numDirLights);
//cmd.SetGlobalBuffer("g_dirLightData", s_DirLightList);
cmd.SetGlobalFloat("g_nNumDirLights", numDirLights); // not needed anymore
if (stereoDoublewide)
{
cmd.SetGlobalBuffer("g_dirLightData", s_UnifiedDirLightList);
}
else
{
cmd.SetGlobalBuffer("g_dirLightData", s_DirLightList);
}
cmd.SetGlobalFloatArray("g_UnifiedDirLightListBaseCount", s_UnifiedDirLightListBaseCount); // new dir light bounds
// Shadow constants
cmd.SetGlobalMatrixArray("g_matWorldToShadow", m_MatWorldToShadow);

7
Assets/ScriptableRenderPipeline/fptl/Internal-DeferredComputeShading.compute


float linDepth = GetLinearDepth(zbufDpth);
float3 vP = GetViewPosFromLinDepth(pixCoord, linDepth);
float3 vPw = mul(g_mViewToWorld, float4(vP, 1)).xyz;
float3 Vworld = normalize(mul((float3x3) g_mViewToWorld, -vP).xyz); //unity_CameraToWorld
//float3 vPw = mul(g_mViewToWorld, float4(vP, 1)).xyz;
//float3 Vworld = normalize(mul((float3x3) g_mViewToWorld, -vP).xyz); //unity_CameraToWorld
// VR - hacks to get this to compile since it is now an array
float3 vPw = mul(g_mViewToWorldArr[0], float4(vP, 1)).xyz;
float3 Vworld = normalize(mul((float3x3) g_mViewToWorldArr[0], -vP).xyz); //unity_CameraToWorld
float4 gbuffer0 = _CameraGBufferTexture0.Load(uint3(pixCoord.xy, 0));
float4 gbuffer1 = _CameraGBufferTexture1.Load(uint3(pixCoord.xy, 0));

15
Assets/ScriptableRenderPipeline/fptl/Internal-DeferredReflections.shader


#pragma multi_compile USE_FPTL_LIGHTLIST USE_CLUSTERED_LIGHTLIST
#pragma multi_compile __ ENABLE_DEBUG
#pragma enable_d3d11_debug_symbols
#include "UnityLightingCommon.cginc"
float3 EvalIndirectSpecular(UnityLight light, UnityIndirect ind);

float zbufDpth = FetchDepth(_CameraDepthTexture, pixCoord.xy).x;
float linDepth = GetLinearDepth(zbufDpth);
float3 vP = GetViewPosFromLinDepth(i.vertex.xy, linDepth);
float3 vPw = mul(g_mViewToWorld, float4(vP, 1)).xyz;
float3 Vworld = normalize(mul((float3x3) g_mViewToWorld, -vP).xyz); //unity_CameraToWorld
float2 eyeScrPos = i.vertex.xy;
eyeScrPos.x -= unity_StereoEyeIndex * g_widthRT;
//float3 vP = GetViewPosFromLinDepth(i.vertex.xy, linDepth);
float3 vP = GetViewPosFromLinDepth(eyeScrPos, linDepth);
//float3 vPw = mul(g_mViewToWorld, float4(vP, 1)).xyz;
//float3 Vworld = normalize(mul((float3x3) g_mViewToWorld, -vP).xyz); //unity_CameraToWorld
float3 vPw = mul(g_mViewToWorldArr[unity_StereoEyeIndex], float4(vP, 1)).xyz;
float3 Vworld = normalize(mul((float3x3) g_mViewToWorldArr[unity_StereoEyeIndex], -vP).xyz); //unity_CameraToWorld
float4 gbuffer0 = _CameraGBufferTexture0.Load( uint3(pixCoord.xy, 0) );
float4 gbuffer1 = _CameraGBufferTexture1.Load( uint3(pixCoord.xy, 0) );

19
Assets/ScriptableRenderPipeline/fptl/Internal-DeferredShading.shader


#pragma multi_compile USE_FPTL_LIGHTLIST USE_CLUSTERED_LIGHTLIST
#pragma multi_compile __ ENABLE_DEBUG
#pragma enable_d3d11_debug_symbols
#include "UnityLightingCommon.cginc"
float3 EvalMaterial(UnityLight light, UnityIndirect ind);

v2f vert (float4 vertex : POSITION, float2 texcoord : TEXCOORD0)
{
v2f o;
o.vertex = UnityObjectToClipPos(vertex);
o.vertex = UnityObjectToClipPos(vertex); // this does the double wide correction
o.texcoord = texcoord.xy;
return o;
}

half4 frag (v2f i) : SV_Target
{
uint2 pixCoord = ((uint2) i.vertex.xy);
uint2 pixCoord = ((uint2) i.vertex.xy); // this is corrected for VR
float3 vP = GetViewPosFromLinDepth(i.vertex.xy, linDepth);
float3 vPw = mul(g_mViewToWorld, float4(vP, 1)).xyz;
float3 Vworld = normalize(mul((float3x3) g_mViewToWorld, -vP).xyz); //unity_CameraToWorld
float2 eyeScrPos = i.vertex.xy;
eyeScrPos.x -= unity_StereoEyeIndex * g_widthRT;
//float3 vP = GetViewPosFromLinDepth(i.vertex.xy, linDepth);
float3 vP = GetViewPosFromLinDepth(eyeScrPos, linDepth);
//float3 vPw = mul(g_mViewToWorld, float4(vP, 1)).xyz;
//float3 Vworld = normalize(mul((float3x3) g_mViewToWorld, -vP).xyz); //unity_CameraToWorld
float3 vPw = mul(g_mViewToWorldArr[unity_StereoEyeIndex], float4(vP, 1)).xyz;
float3 Vworld = normalize(mul((float3x3) g_mViewToWorldArr[unity_StereoEyeIndex], -vP).xyz); //unity_CameraToWorld
float4 gbuffer0 = _CameraGBufferTexture0.Load( uint3(pixCoord.xy, 0) );
float4 gbuffer1 = _CameraGBufferTexture1.Load( uint3(pixCoord.xy, 0) );

uint numLightsProcessed = 0;
float3 c = data.emission + ExecuteLightList(numLightsProcessed, pixCoord, vP, vPw, Vworld);
//c = (1-unity_StereoEyeIndex)*c + (unity_StereoEyeIndex * data.specularColor);
#if ENABLE_DEBUG
c = OverlayHeatMap(pixCoord & 15, numLightsProcessed, c);

20
Assets/ScriptableRenderPipeline/fptl/LightingTemplate.hlsl


#include "UnityStandardBRDF.cginc"
#include "UnityStandardUtils.cginc"
#include "UnityPBSLighting.cginc"
//#include "..\common\ShaderBase.h"
uniform int g_UnifiedDirLightListBaseCount[4];
//---------------------------------------------------------------------------------------------------------------------------------------------------------
// TODO: clean up.. -va
#define MAX_SHADOW_LIGHTS 10

float3 ints = 0;
// VR adjustment for two dir light lists
int dirBase = g_UnifiedDirLightListBaseCount[unity_StereoEyeIndex * 2 + 0];
int dirCount = g_UnifiedDirLightListBaseCount[unity_StereoEyeIndex * 2 + 1];
//for (int i = dirBase; i < dirBase + dirCount; i++)
{
DirectionalLight lightData = g_dirLightData[i];
float atten = 1;

UnityLight light;
light.color.xyz = lightData.color.xyz * atten;
light.dir.xyz = mul((float3x3) g_mViewToWorld, -lightData.lightAxisZ).xyz;
//light.dir.xyz = mul((float3x3) g_mViewToWorld, -lightData.lightAxisZ).xyz;
light.dir.xyz = mul((float3x3) g_mViewToWorldArr[unity_StereoEyeIndex], -lightData.lightAxisZ).xyz;
//return ints;
uint l=0;
// don't need the outer loop since the lights are sorted by volume type
//while(l<numLights)

UnityLight light;
light.color.xyz = lgtDat.color.xyz*atten*angularAtt.xyz;
light.dir.xyz = mul((float3x3) g_mViewToWorld, vL).xyz; //unity_CameraToWorld
//light.dir.xyz = mul((float3x3) g_mViewToWorld, vL).xyz; //unity_CameraToWorld
light.dir.xyz = mul((float3x3) g_mViewToWorldArr[unity_StereoEyeIndex], vL).xyz; //unity_CameraToWorld
ints += EvalMaterial(light, ind);

float3 toLight = vLp - vP;
float dist = length(toLight);
float3 vL = toLight / dist;
float3 vLw = mul((float3x3) g_mViewToWorld, vL).xyz; //unity_CameraToWorld
//float3 vLw = mul((float3x3) g_mViewToWorld, vL).xyz; //unity_CameraToWorld
float3 vLw = mul((float3x3) g_mViewToWorldArr[unity_StereoEyeIndex], vL).xyz; //unity_CameraToWorld
float attLookUp = dist*lgtDat.recipRange; attLookUp *= attLookUp;
float atten = tex2Dlod(_LightTextureB0, float4(attLookUp.rr, 0.0, 0.0)).UNITY_ATTEN_CHANNEL;

7
Assets/ScriptableRenderPipeline/fptl/ReflectionTemplate.hlsl


#include "UnityStandardBRDF.cginc"
#include "UnityStandardUtils.cginc"
#include "UnityPBSLighting.cginc"
//#include "..\common\ShaderBase.h"
UNITY_DECLARE_ABSTRACT_CUBE_ARRAY(_reflCubeTextures);

{
float3 worldNormalRefl = reflect(-Vworld, vNw);
float3 vspaceRefl = mul((float3x3) g_mWorldToView, worldNormalRefl).xyz;
//float3 vspaceRefl = mul((float3x3) g_mWorldToView, worldNormalRefl).xyz;
float3 vspaceRefl = mul((float3x3) g_mWorldToViewArr[unity_StereoEyeIndex], worldNormalRefl).xyz;
float percRoughness = SmoothnessToPerceptualRoughness(smoothness);

#else
float3 volumeSpaceRefl = float3( dot(vspaceRefl, lgtDat.lightAxisX), dot(vspaceRefl, lgtDat.lightAxisY), dot(vspaceRefl, lgtDat.lightAxisZ) );
float3 vPR = BoxProjectedCubemapDirection(volumeSpaceRefl, posInReflVolumeSpace, float4(lgtDat.localCubeCapturePoint, 1.0), -boxOuterDistance, boxOuterDistance); // Volume space corrected reflection vector
sampleDir = mul( (float3x3) g_mViewToWorld, vPR.x*lgtDat.lightAxisX + vPR.y*lgtDat.lightAxisY + vPR.z*lgtDat.lightAxisZ );
//sampleDir = mul( (float3x3) g_mViewToWorld, vPR.x*lgtDat.lightAxisX + vPR.y*lgtDat.lightAxisY + vPR.z*lgtDat.lightAxisZ );
sampleDir = mul((float3x3) g_mViewToWorldArr[unity_StereoEyeIndex], vPR.x*lgtDat.lightAxisX + vPR.y*lgtDat.lightAxisY + vPR.z*lgtDat.lightAxisZ);
#endif
}
else

6
Assets/ScriptableRenderPipeline/fptl/TiledLightingTemplate.hlsl


#ifndef __TILEDLIGHTINGTEMPLATE_H__
#define __TILEDLIGHTINGTEMPLATE_H__
#include "UnityCG.cginc"
#include "LightingTemplate.hlsl"
#include "LightingTemplate.hlsl" // change order to get access to unity_StereoEyeIndex
//#include "TiledLightingUtils.hlsl"

27
Assets/ScriptableRenderPipeline/fptl/TiledLightingUtils.hlsl


StructuredBuffer<SFiniteLightData> g_vLightData;
StructuredBuffer<uint> g_vLightListGlobal; // don't support Buffer yet in unity
uniform float g_lightDataEyeOffset;
uint nrTilesX = ((uint) (g_widthRT+(tileSize-1)))/tileSize; uint nrTilesY = ((uint) (g_heightRT+(tileSize-1)))/tileSize;
uint2 tileIDX = pixCoord / tileSize;
const int tileOffs = (tileIDX.y+model*nrTilesY)*nrTilesX+tileIDX.x;
uint nrTilesX = ((uint) (g_widthRT+(tileSize-1)))/tileSize;
uint nrTilesY = ((uint) (g_heightRT+(tileSize-1)))/tileSize;
uint2 modPixCoord = pixCoord;
// g_widthRT should be the 'eye' width, so we need to generate the tile index based
// on the eye texture info, then offset into the appropriate half of the tile list
modPixCoord.x = modPixCoord.x - (unity_StereoEyeIndex * g_widthRT);
//uint2 tileIDX = pixCoord / tileSize;
uint2 tileIDX = modPixCoord / tileSize;
const int tileOffs = (tileIDX.y+model*nrTilesY)*nrTilesX+tileIDX.x;
const int modTileOffs = tileOffs + (unity_StereoEyeIndex * nrTilesY * nrTilesX * NR_LIGHT_MODELS);
uNrLights = g_vLightListGlobal[ 16*tileOffs + 0]&0xffff;
uStart = tileOffs;
// uNrLights = g_vLightListGlobal[ 16*tileOffs + 0]&0xffff;
//uStart = tileOffs;
uNrLights = g_vLightListGlobal[16 * modTileOffs + 0] & 0xffff;
uStart = modTileOffs;
return (g_vLightListGlobal[ 16*tileOffs + (l1>>1)]>>((l1&1)*16))&0xffff;
//return (g_vLightListGlobal[ 16*tileOffs + (l1>>1)]>>((l1&1)*16))&0xffff;
// VR FIX - revert this once we have the light list stuff sorted
return ((g_vLightListGlobal[16 * tileOffs + (l1 >> 1)] >> ((l1 & 1) * 16)) & 0xffff) + (unity_StereoEyeIndex * g_lightDataEyeOffset);
}
#ifdef OPAQUES_ONLY

22
Assets/ScriptableRenderPipeline/fptl/lightlistbuild.compute


uniform uint2 g_viDimensions;
uniform float4x4 g_mInvScrProjection;
uniform float4x4 g_mScrProjection;
uniform int g_iEye;
Texture2D g_depth_tex : register( t0 );

{
int idx = i * NR_THREADS + t;
uint2 uCrd = min( uint2(viTilLL.x+(idx&0xf), viTilLL.y+(idx>>4)), uint2(iWidth-1, iHeight-1) );
// VR - we need to fetch from the correct half of the depth texture
// for a double wide VR texture, the real width is actually double iWidth, so we can just add that
// This doesn't need to be done in a VS/PS if they use UnityObjectToClipPos, as that auto-corrects for stereo
uCrd.x += (uint)(g_iEye * iWidth);
const float fDepth = FetchDepth(g_depth_tex, uCrd);
vLinDepths[i] = GetLinearDepth(fDepth);
if(fDepth<VIEWPORT_SCALE_Z) // if not skydome

// build coarse list using AABB
#ifdef USE_TWO_PASS_TILED_LIGHTING
int NrBigTilesX = (nrTilesX+3)>>2;
const int bigTileIdx = (tileIDX.y>>2)*NrBigTilesX + (tileIDX.x>>2); // map the idx to 64x64 tiles
int NrBigTilesY = (nrTilesY + 3) >> 2;
// since we are fetching a value from the big tile list, and it's double long
// in VR, we need to use eye to index into the correct half
//const int bigTileIdx = (tileIDX.y>>2)*NrBigTilesX + (tileIDX.x>>2); // map the idx to 64x64 tiles
const int bigTileIdx = (tileIDX.y >> 2)*NrBigTilesX + (tileIDX.x >> 2)
+ (g_iEye * NrBigTilesX * NrBigTilesY); // map the idx to 64x64 tiles
int nrBigTileLights = g_vBigTileLightList[MAX_NR_BIGTILE_LIGHTS_PLUSONE*bigTileIdx+0];
for(int l0=(int) t; l0<(int) nrBigTileLights; l0 += NR_THREADS)
{

// write lights to global buffers
int localOffs=0;
int offs = tileIDX.y*nrTilesX + tileIDX.x;
// make sure the base index here corrects in the appropriate eye half
//int offs = tileIDX.y*nrTilesX + tileIDX.x;
int offs = tileIDX.y*nrTilesX + tileIDX.x + (g_iEye * nrTilesX * nrTilesY * NR_LIGHT_MODELS);
// VR TODO: It could be interesting to just generate the list list indices to point
// directly into the shared light data. For eye=1, the entirety of g_vLightData
// will be copied into the second part of the shared list.
// so to generate new indices, we'd simply just add that offset...
for(int category=0; category<NR_LIGHT_MODELS; category++)
{

正在加载...
取消
保存