
Merge remote-tracking branch 'refs/remotes/origin/master' into Add-tesselation

/main
sebastienlagarde, 8 years ago
Current commit
dbf04c5b
18 files changed: 272 insertions, 227 deletions
  1. Assets/ScriptableRenderLoop/HDRenderPipeline/Material/LayeredLit/LayeredLit.shader (2 changes)
  2. Assets/ScriptableRenderLoop/HDRenderPipeline/Material/Lit/Lit.hlsl (74 changes)
  3. Assets/ScriptableRenderLoop/HDRenderPipeline/Material/Lit/Lit.shader (2 changes)
  4. Assets/ScriptableRenderLoop/HDRenderPipeline/Material/Lit/LitData.hlsl (21 changes)
  5. Assets/ScriptableRenderLoop/HDRenderPipeline/Material/Lit/Resources/PreIntegratedFGD.shader (4 changes)
  6. Assets/ScriptableRenderLoop/HDRenderPipeline/Material/MaterialUtilities.hlsl (8 changes)
  7. Assets/ScriptableRenderLoop/HDRenderPipeline/ShaderPass/ShaderPassForward.hlsl (9 changes)
  8. Assets/ScriptableRenderLoop/HDRenderPipeline/ShaderPass/ShaderPassGBuffer.hlsl (4 changes)
  9. Assets/ScriptableRenderLoop/HDRenderPipeline/ShaderPass/ShaderPassLightTransport.hlsl (6 changes)
  10. Assets/ScriptableRenderLoop/HDRenderPipeline/ShaderVariables.hlsl (11 changes)
  11. Assets/ScriptableRenderLoop/ShaderLibrary/AreaLighting.hlsl (9 changes)
  12. Assets/ScriptableRenderLoop/ShaderLibrary/BSDF.hlsl (16 changes)
  13. Assets/ScriptableRenderLoop/ShaderLibrary/Common.hlsl (23 changes)
  14. Assets/ScriptableRenderLoop/ShaderLibrary/CommonLighting.hlsl (24 changes)
  15. Assets/ScriptableRenderLoop/ShaderLibrary/EntityLighting.hlsl (62 changes)
  16. Assets/ScriptableRenderLoop/ShaderLibrary/Fibonacci.hlsl (4 changes)
  17. Assets/ScriptableRenderLoop/ShaderLibrary/ImageBasedLighting.hlsl (203 changes)
  18. Assets/ScriptableRenderLoop/ShaderLibrary/Packing.hlsl (17 changes)

Assets/ScriptableRenderLoop/HDRenderPipeline/Material/LayeredLit/LayeredLit.shader (2 changes)


}
}
CustomEditor "Experimental.ScriptableRenderLoop.LayeredLitGUI"
CustomEditor "Experimental.Rendering.HDPipeline.LayeredLitGUI"
}

Assets/ScriptableRenderLoop/HDRenderPipeline/Material/Lit/Lit.hlsl (74 changes)


bsdfData.diffuseColor = surfaceData.baseColor * (1.0 - surfaceData.metallic);
bsdfData.fresnel0 = lerp(float3(surfaceData.specular, surfaceData.specular, surfaceData.specular), surfaceData.baseColor, surfaceData.metallic);
bsdfData.tangentWS = surfaceData.tangentWS;
bsdfData.bitangentWS = cross(surfaceData.normalWS, surfaceData.tangentWS);
bsdfData.tangentWS = surfaceData.tangentWS;
bsdfData.bitangentWS = cross(surfaceData.normalWS, surfaceData.tangentWS);
ConvertAnisotropyToRoughness(bsdfData.roughness, surfaceData.anisotropy, bsdfData.roughnessT, bsdfData.roughnessB);
bsdfData.anisotropy = surfaceData.anisotropy;

bsdfData.fresnel0 = lerp(float3(specular, specular, specular), baseColor, metallic);
bsdfData.tangentWS = UnpackNormalOctEncode(float2(inGBuffer2.rg * 2.0 - 1.0));
// TODO: Do we need to orthonormalize here? IIRC Eric said that we should
bsdfData.bitangentWS = cross(bsdfData.normalWS, bsdfData.tangentWS);
ConvertAnisotropyToRoughness(bsdfData.roughness, anisotropy, bsdfData.roughnessT, bsdfData.roughnessB);
bsdfData.anisotropy = anisotropy;

{
PreLightData preLightData;
// TODO: check Eric's idea about doing this when writing into the GBuffer (with our forward decals)
preLightData.NdotV = GetShiftedNdotV(bsdfData.normalWS, V, false);
// We do not saturate to correctly handle double-sided lighting.
preLightData.NdotV = dot(bsdfData.normalWS, V);
preLightData.ggxLambdaV = GetSmithJointGGXLambdaV(preLightData.NdotV, bsdfData.roughness);

// NOTE: If we followed the theory, we should use the modified normal for every calculation involving a normal (like NdotV) and pass iblNormalWS
// into functions like GetSpecularDominantDir(). However, the modified normal is just a hack: the goal is only to stretch a cubemap, so accuracy is not critical.
// With this in mind, and for performance reasons, we chose to only use the modified normal to calculate R.
// iblNdotV = GetShiftedNdotV(iblNormalWS, V, false);
}
GetPreIntegratedFGD(iblNdotV, bsdfData.perceptualRoughness, bsdfData.fresnel0, preLightData.specularFGD, preLightData.diffuseFGD);

// Area light specific
// UVs for sampling the LUTs
float theta = FastACos(dot(bsdfData.normalWS, V));
float theta = FastACos(preLightData.NdotV);
// Scale and bias for the current precomputed table - the constants used here are the ones that were used when the tables in LtcData.DisneyDiffuse.cs and LtcData.GGX.cs were generated
float2 uv = 0.0078125 + 0.984375 * float2(bsdfData.perceptualRoughness, theta * INV_HALF_PI);
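As a side note, those scale/bias constants are consistent with a 64x64 LUT sampled at texel centers (bias = 0.5/64 = 0.0078125, scale = 63/64 = 0.984375); a minimal sketch of that remap, with the table size as an assumption not stated in this diff:
// Sketch only - the 64x64 table size is an assumption.
float2 RemapToTexelCenters(float2 coord, float tableSize) // tableSize = 64.0
{
    float bias  = 0.5 / tableSize;               // 0.0078125 for 64
    float scale = (tableSize - 1.0) / tableSize; // 0.984375  for 64
    return bias + scale * coord;
}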

float BdotH = dot(bsdfData.bitangentWS, H);
float BdotL = dot(bsdfData.bitangentWS, L);
bsdfData.roughnessT = ClampRoughnessForAnalyticalLights(bsdfData.roughnessT);
bsdfData.roughnessB = ClampRoughnessForAnalyticalLights(bsdfData.roughnessB);
#ifdef LIT_USE_BSDF_PRE_LAMBDAV
Vis = V_SmithJointGGXAnisoLambdaV( preLightData.TdotV, preLightData.BdotV, preLightData.NdotV, TdotL, BdotL, NdotL,
bsdfData.roughnessT, bsdfData.roughnessB, preLightData.anisoGGXLambdaV);

}
else
{
bsdfData.roughness = ClampRoughnessForAnalyticalLights(bsdfData.roughness);
#ifdef LIT_USE_BSDF_PRE_LAMBDAV
Vis = V_SmithJointGGX(NdotL, preLightData.NdotV, bsdfData.roughness, preLightData.ggxLambdaV);
#else

P1 -= positionWS;
P2 -= positionWS;
// Construct an orthonormal basis (local coordinate system) around N.
// TODO: it could be stored in PreLightData. All LTC lights compute it more than once!
// Also consider using 'bsdfData.tangentWS', 'bsdfData.bitangentWS', 'bsdfData.normalWS'.
// Construct a view-dependent orthonormal basis around N.
// TODO: it could be stored in PreLightData, since all LTC lights compute it more than once.
float3x3 basis;
basis[0] = normalize(V - bsdfData.normalWS * preLightData.NdotV);
basis[1] = normalize(cross(bsdfData.normalWS, basis[0]));

// ----------------------------------------------------------------------------
// Ref: Moving Frostbite to PBR (Appendix A)
float3 IntegrateLambertIBLRef( LightLoopContext lightLoopContext,
EnvLightData lightData, BSDFData bsdfData,
uint sampleCount = 2048)
float3 IntegrateLambertIBLRef(LightLoopContext lightLoopContext,
float3 V, EnvLightData lightData, BSDFData bsdfData,
uint sampleCount = 4096)
float3 N = bsdfData.normalWS;
float3 tangentX = bsdfData.tangentWS;
float3x3 localToWorld = GetLocalFrame(N, tangentX);
float3x3 localToWorld = float3x3(bsdfData.tangentWS, bsdfData.bitangentWS, bsdfData.normalWS);
float2 randNum = InitRandom(N.xy * 0.5 + 0.5);
float2 randNum = InitRandom(V.xy * 0.5 + 0.5);
for (uint i = 0; i < sampleCount; ++i)
{

}
float3 IntegrateDisneyDiffuseIBLRef(LightLoopContext lightLoopContext,
float3 V, EnvLightData lightData, BSDFData bsdfData,
uint sampleCount = 2048)
float3 V, PreLightData preLightData, EnvLightData lightData, BSDFData bsdfData,
uint sampleCount = 4096)
float3 N = bsdfData.normalWS;
float3 tangentX = bsdfData.tangentWS;
float3x3 localToWorld = GetLocalFrame(N, tangentX);
float NdotV = GetShiftedNdotV(N, V, false);
float3x3 localToWorld = float3x3(bsdfData.tangentWS, bsdfData.bitangentWS, bsdfData.normalWS);
float2 randNum = InitRandom(N.xy * 0.5 + 0.5);
float2 randNum = InitRandom(V.xy * 0.5 + 0.5);
for (uint i = 0; i < sampleCount; ++i)
{

float LdotH = dot(L, H);
// Note: we call DisneyDiffuse, which requires a multiplication by Albedo / PI. The divide by PI is already taken into account
// in the weightOverPdf of the ImportanceSampleLambert call.
float disneyDiffuse = DisneyDiffuse(NdotV, NdotL, LdotH, bsdfData.perceptualRoughness);
float disneyDiffuse = DisneyDiffuse(preLightData.NdotV, NdotL, LdotH, bsdfData.perceptualRoughness);
// diffuse albedo is applied here as described in the ImportanceSampleLambert function
float4 val = SampleEnv(lightLoopContext, lightData.envIndex, L, 0);

}
// Ref: Moving Frostbite to PBR (Appendix A)
float3 IntegrateSpecularGGXIBLRef( LightLoopContext lightLoopContext,
float3 V, EnvLightData lightData, BSDFData bsdfData,
uint sampleCount = 2048)
float3 IntegrateSpecularGGXIBLRef(LightLoopContext lightLoopContext,
float3 V, PreLightData preLightData, EnvLightData lightData, BSDFData bsdfData,
uint sampleCount = 4096)
float3 N = bsdfData.normalWS;
float3 tangentX = bsdfData.tangentWS;
float3 tangentY = bsdfData.bitangentWS;
float3x3 localToWorld = GetLocalFrame(N, tangentX);
float NdotV = GetShiftedNdotV(N, V, false);
float3x3 localToWorld = float3x3(bsdfData.tangentWS, bsdfData.bitangentWS, bsdfData.normalWS);
// Add some jittering on Hammersley2d
float2 randNum = InitRandom(V.xy * 0.5 + 0.5);

// GGX BRDF
if (bsdfData.materialId == MATERIALID_LIT_ANISO)
{
ImportanceSampleAnisoGGX(u, V, N, tangentX, tangentY, bsdfData.roughnessT, bsdfData.roughnessB, NdotV, L, VdotH, NdotL, weightOverPdf);
ImportanceSampleAnisoGGX(u, V, localToWorld, bsdfData.roughnessT, bsdfData.roughnessB, preLightData.NdotV, L, VdotH, NdotL, weightOverPdf);
ImportanceSampleGGX(u, V, localToWorld, bsdfData.roughness, NdotV, L, VdotH, NdotL, weightOverPdf);
ImportanceSampleGGX(u, V, localToWorld, bsdfData.roughness, preLightData.NdotV, L, VdotH, NdotL, weightOverPdf);
}

#ifdef LIT_DISPLAY_REFERENCE_IBL
specularLighting = IntegrateSpecularGGXIBLRef(lightLoopContext, V, lightData, bsdfData);
specularLighting = IntegrateSpecularGGXIBLRef(lightLoopContext, V, preLightData, lightData, bsdfData);
diffuseLighting = IntegrateLambertIBLRef(lightData, bsdfData);
diffuseLighting = IntegrateLambertIBLRef(lightData, V, bsdfData);
diffuseLighting = IntegrateDisneyDiffuseIBLRef(lightLoopContext, V, lightData, bsdfData);
diffuseLighting = IntegrateDisneyDiffuseIBLRef(lightLoopContext, V, preLightData, lightData, bsdfData);
#endif
*/
diffuseLighting = float3(0.0, 0.0, 0.0);

specularLighting *= bsdfData.specularOcclusion;
diffuseLighting = float3(0.0, 0.0, 0.0);
#endif
#endif
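For readers unfamiliar with the *IBLRef functions in this file, they all follow the same jittered quasi-Monte Carlo pattern; a minimal sketch of the Lambert case, assembled from identifiers visible in this diff (the loop body below is an assumption, not the verbatim implementation):
// Sketch only: reference integration of diffuse IBL by importance sampling.
// Assumes Hammersley2d, ImportanceSampleLambert and SampleEnv behave as referenced above.
float3 acc = float3(0.0, 0.0, 0.0);
for (uint i = 0; i < sampleCount; ++i)
{
    float2 u = frac(Hammersley2d(i, sampleCount) + randNum); // jittered low-discrepancy sample

    float3 L;
    float NdotL;
    float weightOverPdf;
    ImportanceSampleLambert(u, localToWorld, L, NdotL, weightOverPdf); // pdf = N.L / PI

    if (NdotL > 0.0)
    {
        float4 val = SampleEnv(lightLoopContext, lightData.envIndex, L, 0);
        acc += bsdfData.diffuseColor * weightOverPdf * val.rgb; // Monte Carlo estimator: f / pdf
    }
}
diffuseLighting = acc / sampleCount;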

Assets/ScriptableRenderLoop/HDRenderPipeline/Material/Lit/Lit.shader (2 changes)


}
}
CustomEditor "Experimental.ScriptableRenderLoop.LitGUI"
CustomEditor "Experimental.Rendering.HDPipeline.LitGUI"
}

Assets/ScriptableRenderLoop/HDRenderPipeline/Material/Lit/LitData.hlsl (21 changes)


float3 normalTS;
float alpha = GetSurfaceData(input, layerTexCoord, surfaceData, normalTS);
surfaceData.normalWS = TransformTangentToWorld(normalTS, input.tangentToWorld);
surfaceData.tangentWS = input.tangentToWorld[0].xyz;
bool twoSided = false;
// This will always produce the correct 'NdotV' value, but potentially
// reduce the length of the normal at edges of geometry.
GetShiftedNdotV(surfaceData.normalWS, V, twoSided);
// Orthonormalize the basis vectors using the Gram-Schmidt process.
// We assume that the length of the surface normal is sufficiently close to 1.
surfaceData.tangentWS = normalize(surfaceData.tangentWS - dot(surfaceData.tangentWS, surfaceData.normalWS) * surfaceData.normalWS);
// Caution: surfaceData must be fully initialized before calling GetBuiltinData
GetBuiltinData(input, surfaceData, alpha, depthOffset, builtinData);
}

normalTS = BlendLayeredFloat3(normalTS0, normalTS1, normalTS2, normalTS3, weights);
#endif
surfaceData.normalWS = TransformTangentToWorld(normalTS, input.tangentToWorld);
surfaceData.tangentWS = input.tangentToWorld[0].xyz;
bool twoSided = false;
// This will potentially reduce the length of the normal at edges of geometry.
GetShiftedNdotV(surfaceData.normalWS, V, twoSided);
// Orthonormalize the basis vectors using the Gram-Schmidt process.
// We assume that the length of the surface normal is sufficiently close to 1.
surfaceData.tangentWS = normalize(surfaceData.tangentWS - dot(surfaceData.tangentWS, surfaceData.normalWS) * surfaceData.normalWS);
surfaceData.tangentWS = input.tangentToWorld[0].xyz;
surfaceData.anisotropy = 0;
surfaceData.specular = 0.04;
surfaceData.subSurfaceRadius = 1.0;
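For reference, the Gram-Schmidt step above leaves the tangent orthogonal to the (assumed unit-length) normal; a one-line check:
// dot(t - dot(t, n) * n, n) = dot(t, n) - dot(t, n) * dot(n, n) = 0   when |n| = 1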

Assets/ScriptableRenderLoop/HDRenderPipeline/Material/Lit/Resources/PreIntegratedFGD.shader (4 changes)


float3 V = float3(sqrt(1 - NdotV * NdotV), 0, NdotV);
float3 N = float3(0.0, 0.0, 1.0);
const int numSamples = 2048;
float4 preFGD = IntegrateGGXAndDisneyFGD(V, N, PerceptualRoughnessToRoughness(perceptualRoughness), numSamples);
float4 preFGD = IntegrateGGXAndDisneyFGD(V, N, PerceptualRoughnessToRoughness(perceptualRoughness));
return float4(preFGD.xyz, 1.0);
}
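Note that with N fixed to +Z, the view-vector parameterization above is unit length and yields dot(N, V) = NdotV by construction:
// dot(N, V) = NdotV
// dot(V, V) = (1 - NdotV * NdotV) + NdotV * NdotV = 1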

Assets/ScriptableRenderLoop/HDRenderPipeline/Material/MaterialUtilities.hlsl (8 changes)


#ifdef DIRLIGHTMAP_COMBINED
bakeDiffuseLighting += SampleDirectionalLightmap(TEXTURE2D_PARAM(unity_Lightmap, samplerunity_Lightmap),
TEXTURE2D_PARAM(unity_LightmapInd, samplerunity_Lightmap),
uvStaticLightmap, unity_LightmapST, normalWS);
uvStaticLightmap, unity_LightmapST, normalWS, true);
bakeDiffuseLighting += SampleSingleLightmap(TEXTURE2D_PARAM(unity_Lightmap, samplerunity_Lightmap), uvStaticLightmap, unity_LightmapST);
bakeDiffuseLighting += SampleSingleLightmap(TEXTURE2D_PARAM(unity_Lightmap, samplerunity_Lightmap), uvStaticLightmap, unity_LightmapST, true);
#endif
#endif

TEXTURE2D_PARAM(unity_DynamicDirectionality, samplerunity_DynamicLightmap),
uvDynamicLightmap, unity_DynamicLightmapST, normalWS);
uvDynamicLightmap, unity_DynamicLightmapST, normalWS, false);
bakeDiffuseLighting += SampleSingleLightmap(TEXTURE2D_PARAM(unity_DynamicLightmap, samplerunity_DynamicLightmap), uvDynamicLightmap, unity_DynamicLightmapST);
bakeDiffuseLighting += SampleSingleLightmap(TEXTURE2D_PARAM(unity_DynamicLightmap, samplerunity_DynamicLightmap), uvDynamicLightmap, unity_DynamicLightmapST, false);
#endif
#endif

Assets/ScriptableRenderLoop/HDRenderPipeline/ShaderPass/ShaderPassForward.hlsl (9 changes)


UpdatePositionInput(input.unPositionSS.z, input.unPositionSS.w, input.positionWS, posInput);
float3 V = GetWorldSpaceNormalizeViewDir(input.positionWS);
SurfaceData surfaceData;
BuiltinData builtinData;
GetSurfaceAndBuiltinData(input, V, posInput, surfaceData, builtinData);
SurfaceData surfaceData;
BuiltinData builtinData;
GetSurfaceAndBuiltinData(input, V, posInput, surfaceData, builtinData);
BSDFData bsdfData = ConvertSurfaceDataToBSDFData(surfaceData);
BSDFData bsdfData = ConvertSurfaceDataToBSDFData(surfaceData);
PreLightData preLightData = GetPreLightData(V, posInput, bsdfData);
float3 diffuseLighting;

Assets/ScriptableRenderLoop/HDRenderPipeline/ShaderPass/ShaderPassGBuffer.hlsl (4 changes)


BuiltinData builtinData;
GetSurfaceAndBuiltinData(input, V, posInput, surfaceData, builtinData);
BSDFData bsdfData = ConvertSurfaceDataToBSDFData(surfaceData);
BSDFData bsdfData = ConvertSurfaceDataToBSDFData(surfaceData);
float3 bakeDiffuseLighting = GetBakedDiffuseLigthing(surfaceData, builtinData, bsdfData, preLightData);
ENCODE_INTO_GBUFFER(surfaceData, bakeDiffuseLighting, outGBuffer);

Assets/ScriptableRenderLoop/HDRenderPipeline/ShaderPass/ShaderPassLightTransport.hlsl (6 changes)


#include "Color.hlsl"
// TODO: This is the max value allowed for emissive (bad name - but keep it for now so it can be retrieved). It is 8^2.2 (gamma), and 8 is the limit of the punctual light slider; it comes from UnityCG.cginc. Fix it!
// Ask Jesper if this can be changed for HDRenderPipeline
#define EMISSIVE_RGBM_SCALE 97.0
float4 Frag(PackedVaryings packedInput) : SV_Target
{
FragInputs input = UnpackVaryings(packedInput);

{
// TODO: THIS LIMIT MUST BE REMOVED, IT IS NOT HDR; change this when RGB9E5 is here.
// Do we assume here that emission is [0..1] ?
res = PackRGBM(lightTransportData.emissiveColor, EMISSIVE_RGBM_SCALE);
res = PackEmissiveRGBM(lightTransportData.emissiveColor);
}
return res;

Assets/ScriptableRenderLoop/HDRenderPipeline/ShaderVariables.hlsl (11 changes)


// Computes the normalized world space view direction from a world space position
float3 GetWorldSpaceNormalizeViewDir(float3 positionWS)
{
return normalize(_WorldSpaceCameraPos.xyz - positionWS);
float3 V = _WorldSpaceCameraPos.xyz - positionWS;
// Uncomment this once the compiler bug is fixed.
// if (unity_OrthoParams.w == 1.0)
// {
// float4x4 M = GetWorldToViewMatrix();
// V = M[1].xyz;
// }
return normalize(V);
}
float3 TransformTangentToWorld(float3 dirTS, float3 tangentToWorld[3])

Assets/ScriptableRenderLoop/ShaderLibrary/AreaLighting.hlsl (9 changes)


{
// 1. ClipQuadToHorizon
// detect clipping config
// detect clipping config
uint config = 0;
if (L[0].z > 0) config += 1;
if (L[1].z > 0) config += 2;

// For polygonal lights.
float LTCEvaluate(float4x3 L, float3 V, float3 N, float NdotV, bool twoSided, float3x3 invM)
{
// Construct local orthonormal basis around N, aligned with N
// TODO: it could be stored in PreLightData. All LTC lights compute it more than once!
// Also consider using 'bsdfData.tangentWS', 'bsdfData.bitangentWS', 'bsdfData.normalWS'.
// Construct a view-dependent orthonormal basis around N.
// TODO: it could be stored in PreLightData, since all LTC lights compute it more than once.
float3x3 basis;
basis[0] = normalize(V - N * NdotV);
basis[1] = normalize(cross(N, basis[0]));

// 'normal' is the direction orthogonal to the tangent. It is the shortest vector between
// the shaded point and the line, pointing away from the shaded point.
float LineIrradiance(float l1, float l2, float3 normal, float3 tangent)
{
{
float d = length(normal);
float l1rcpD = l1 * rcp(d);
float l2rcpD = l2 * rcp(d);

Assets/ScriptableRenderLoop/ShaderLibrary/BSDF.hlsl (16 changes)


// With analytical lights (not image based lights) we clamp the minimum roughness in the NDF to avoid numerical instability.
#define UNITY_MIN_ROUGHNESS 0.002
float ClampRoughnessForAnalyticalLights(float roughness)
{
return max(roughness, UNITY_MIN_ROUGHNESS);
}
roughness = max(roughness, UNITY_MIN_ROUGHNESS);
float a2 = roughness * roughness;
float f = (NdotH * a2 - NdotH) * NdotH + 1.0;
return a2 / (f * f);

float D_GGX_Inverse(float NdotH, float roughness)
{
roughness = max(roughness, UNITY_MIN_ROUGHNESS);
float a2 = roughness * roughness;
float f = (NdotH * a2 - NdotH) * NdotH + 1.0;
float g = (f * f) / a2;

// Ref: Understanding the Masking-Shadowing Function in Microfacet-Based BRDFs, p. 19, 29.
float G_MaskingSmithGGX(float NdotV, float VdotH, float roughness)
{
roughness = max(roughness, UNITY_MIN_ROUGHNESS);
// G1(V, H) = HeavisideStep(VdotH) / (1 + Λ(V)).
// Λ(V) = -0.5 + 0.5 * sqrt(1 + 1 / a²).
// a = 1 / (roughness * tan(theta)).

// lambda_l = (-1 + sqrt(a2 * (1 - NdotV2) / NdotV2 + 1)) * 0.5f;
// G = 1 / (1 + lambda_v + lambda_l);
float a = roughness;
float a = roughness;
float a2 = a * a;
// Reorder code to be more optimal
float lambdaV = NdotL * sqrt((-NdotV * a2 + NdotV) * NdotV + a2);

// roughnessB -> roughness in bitangent direction
float D_GGXAnisoNoPI(float TdotH, float BdotH, float NdotH, float roughnessT, float roughnessB)
{
roughnessT = max(roughnessT, UNITY_MIN_ROUGHNESS);
roughnessB = max(roughnessB, UNITY_MIN_ROUGHNESS);
float f = TdotH * TdotH / (roughnessT * roughnessT) + BdotH * BdotH / (roughnessB * roughnessB) + NdotH * NdotH;
return 1.0 / (roughnessT * roughnessB * f * f);
}
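A quick consistency check between the two NDFs in this file: with roughnessT = roughnessB = a, and TdotH^2 + BdotH^2 = 1 - NdotH^2 for an orthonormal frame, the anisotropic form above reduces to the isotropic GGX body shown earlier:
// f = (1 - NdotH^2) / a^2 + NdotH^2
// 1 / (a * a * f * f) = a^2 / (NdotH^2 * (a^2 - 1) + 1)^2   // same as the isotropic a2 / (f * f)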

Assets/ScriptableRenderLoop/ShaderLibrary/Common.hlsl (23 changes)


posInput.positionWS += V * depthOffsetVS;
}
//-----------------------------------------------------------------------------
// various helper
//-----------------------------------------------------------------------------
// NdotV should not be negative for visible pixels, but it can happen due to the
// perspective projection and the normal mapping + decals. In that case, the normal
// should be modified to become valid (i.e. facing the camera) to avoid weird artifacts.
// Note: certain applications (e.g. SpeedTree) make use of two-sided lighting.
float GetShiftedNdotV(inout float3 N, float3 V, bool twoSided)
{
float NdotV = dot(N, V);
float limit = 1e-4;
if (!twoSided && NdotV < limit)
{
// We do not renormalize the normal because { abs(length(N) - 1.0) < limit }.
N += (-NdotV + limit) * V;
NdotV = limit;
}
return NdotV;
}
#endif // UNITY_COMMON_INCLUDED
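A small check on the shift performed above, assuming V is normalized: after N += (limit - NdotV) * V, the new dot product equals exactly the limit.
// dot(N + (limit - NdotV) * V, V) = dot(N, V) + (limit - NdotV) * dot(V, V)
//                                 = NdotV + (limit - NdotV) * 1.0
//                                 = limit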

Assets/ScriptableRenderLoop/ShaderLibrary/CommonLighting.hlsl (24 changes)


}
//-----------------------------------------------------------------------------
// Get local frame
// Helper functions
// Generates an orthonormal basis from two orthogonal unit vectors.
float3x3 GetLocalFrame(float3 localZ, float3 localX)
float3 localY = cross(localZ, localX);
return float3x3(localX, localY, localZ);
// NdotV should not be negative for visible pixels, but it can happen due to the
// perspective projection and the normal mapping + decals. In that case, the normal
// should be modified to become valid (i.e. facing the camera) to avoid weird artifacts.
// Note: certain applications (e.g. SpeedTree) make use of double-sided lighting.
float GetShiftedNdotV(inout float3 N, float3 V, bool twoSided)
float NdotV = dot(N, V);
float limit = rcp(256.0); // Determined mostly by the quality of the G-buffer normal encoding
if (!twoSided && NdotV < limit)
{
// We do not renormalize the normal because { abs(length(N) - 1.0) < limit }.
N += (-NdotV + limit) * V;
NdotV = limit;
}
return NdotV;
}
// Generates an orthonormal basis from a unit vector.

float3 localX = normalize(cross(upVector, localZ));
float3 localY = cross(localZ, localX);
return GetLocalFrame(localZ, localX);
return float3x3(localX, localY, localZ);
}
// TODO: test

Assets/ScriptableRenderLoop/ShaderLibrary/EntityLighting.hlsl (62 changes)


}
// The following functions sample Enlighten lightmaps (or lightmaps encoded the same way as our
// Enlighten implementation). They assume use of RGB9E5 for the illuminance map.
// Enlighten implementation). They assume use of RGB9E5 for the dynamic illuminance map and RGBM for baked ones.
float3 SampleSingleLightmap(TEXTURE2D_ARGS(lightmapTex, lightmapSampler), float2 uv, float4 transform)
#define LIGHTMAP_RGBM_RANGE 5.0
// TODO: This is the max value allowed for emissive (bad name - but keep it for now so it can be retrieved). It is 8^2.2 (gamma), and 8 is the limit of the punctual light slider; it comes from UnityCG.cginc. Fix it!
// Ask Jesper if this can be changed for HDRenderPipeline
#define EMISSIVE_RGBM_SCALE 97.0
// The RGBM handling is temporary. For now baked lightmaps are in RGBM, and the RGBM range for lightmaps is specific, so we can't use the generic method.
// In the end baked lightmaps are going to be BC6H, so the code will be the same as for dynamic lightmaps.
// The same goes for emissive packed as an input for Enlighten, with another hard-coded multiplier.
// TODO: This function is used with the LightTransport pass to encode lightmaps or emissive
float4 PackEmissiveRGBM(float3 rgb)
{
float kOneOverRGBMMaxRange = 1.0 / EMISSIVE_RGBM_SCALE;
const float kMinMultiplier = 2.0 * 1e-2;
float4 rgbm = float4(rgb * kOneOverRGBMMaxRange, 1.0);
rgbm.a = max(max(rgbm.r, rgbm.g), max(rgbm.b, kMinMultiplier));
rgbm.a = ceil(rgbm.a * 255.0) / 255.0;
// Division-by-zero warning from d3d9, so make compiler happy.
rgbm.a = max(rgbm.a, kMinMultiplier);
rgbm.rgb /= rgbm.a;
return rgbm;
}
float3 UnpackLightmapRGBM(float4 rgbmInput)
{
return rgbmInput.rgb * rgbmInput.a * LIGHTMAP_RGBM_RANGE;
}
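For reference, the pack/unpack pair above is meant to round-trip; a minimal usage sketch with an illustrative value, noting that the decode constant must match the encode (EMISSIVE_RGBM_SCALE for PackEmissiveRGBM, LIGHTMAP_RGBM_RANGE for lightmap data):
// Illustration only: encode an emissive color and decode it again.
float3 emissive = float3(3.0, 1.5, 0.25);                   // arbitrary example value
float4 rgbm     = PackEmissiveRGBM(emissive);
float3 decoded  = rgbm.rgb * rgbm.a * EMISSIVE_RGBM_SCALE;  // ~= emissive (before 8-bit texture storage)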
float3 SampleSingleLightmap(TEXTURE2D_ARGS(lightmapTex, lightmapSampler), float2 uv, float4 transform, bool lightmapRGBM)
// Remark: Lightmap is RGB9E5
float3 illuminance = float3(0.0, 0.0, 0.0);
// Remark: baked lightmap is RGBM for now, dynamic lightmap is RGB9E5
if (lightmapRGBM)
{
illuminance = UnpackLightmapRGBM(SAMPLE_TEXTURE2D(lightmapTex, lightmapSampler, uv).rgba);
}
else
{
illuminance = SAMPLE_TEXTURE2D(lightmapTex, lightmapSampler, uv).rgb;
}
float3 SampleDirectionalLightmap(TEXTURE2D_ARGS(lightmapTex, lightmapSampler), TEXTURE2D_ARGS(lightmapDirTex, lightmapDirSampler), float2 uv, float4 transform, float3 normalWS)
float3 SampleDirectionalLightmap(TEXTURE2D_ARGS(lightmapTex, lightmapSampler), TEXTURE2D_ARGS(lightmapDirTex, lightmapDirSampler), float2 uv, float4 transform, float3 normalWS, bool lightmapRGBM)
{
// In directional mode Enlighten bakes dominant light direction
// in a way, that using it for half Lambert and then dividing by a "rebalancing coefficient"

uv = uv * transform.xy + transform.zw;
float4 direction = SAMPLE_TEXTURE2D(lightmapDirTex, lightmapDirSampler, uv);
// Remark: Lightmap is RGB9E5
float3 illuminance = SAMPLE_TEXTURE2D(lightmapTex, lightmapSampler, uv).rgb;
// Remark: baked lightmap is RGBM for now, dynamic lightmap is RGB9E5
float3 illuminance = float3(0.0, 0.0, 0.0);
if (lightmapRGBM)
{
illuminance = UnpackLightmapRGBM(SAMPLE_TEXTURE2D(lightmapTex, lightmapSampler, uv).rgba);
}
else
{
illuminance = SAMPLE_TEXTURE2D(lightmapTex, lightmapSampler, uv).rgb;
}
#endif // UNITY_ENTITY_LIGHTING_INCLUDED
#endif // UNITY_ENTITY_LIGHTING_INCLUDED

Assets/ScriptableRenderLoop/ShaderLibrary/Fibonacci.hlsl (4 changes)


}
static const int k_FibonacciSeq[] = {
0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610
0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584, 4181
};
static const float2 k_Fibonacci2dSeq21[] = {

int fibN2 = sampleCount;
// These are all constants, so this loop will be optimized away.
for (int j = 1; j < 16; j++)
for (int j = 1; j < 20; j++)
{
if (k_FibonacciSeq[j] == fibN1)
{

Assets/ScriptableRenderLoop/ShaderLibrary/ImageBasedLighting.hlsl (203 changes)


// Util image based lighting
//-----------------------------------------------------------------------------
// Performs a *non-linear* remapping which improves the perceptual roughness distribution
// and adds reflection (contact) hardening. The *approximated* version.
// TODO: Clean a bit this code
// CAUTION: remap from Morten may work only with offline convolution, see impact with runtime convolution!
perceptualRoughness = perceptualRoughness * (1.7 - 0.7 * perceptualRoughness);
// For now disabled
#if 0
float m = PerceptualRoughnessToRoughness(perceptualRoughness); // m is the real roughness parameter
float n = (2.0 / max(FLT_EPSILON, m*m)) - 2.0; // remap to spec power. See eq. 21 in --> https://dl.dropboxusercontent.com/u/55891920/papers/mm_brdf.pdf
return perceptualRoughness * UNITY_SPECCUBE_LOD_STEPS;
}
n /= 4.0; // remap from n_dot_h formulation to n_dot_r. See section "Pre-convolved Cube Maps vs Path Tracers" --> https://s3.amazonaws.com/docs.knaldtech.com/knald/1.0.0/lys_power_drops.html
// Performs a *non-linear* remapping which improves the perceptual roughness distribution
// and adds reflection (contact) hardening. The *accurate* version.
// TODO: optimize!
float perceptualRoughnessToMipmapLevel(float perceptualRoughness, float NdotR)
{
float m = PerceptualRoughnessToRoughness(perceptualRoughness);
perceptualRoughness = pow(2.0 / (n + 2.0), 0.25); // remap back to square root of real roughness (0.25 include both the sqrt root of the conversion and sqrt for going from roughness to perceptualRoughness)
#else
// MM: came up with a surprisingly close approximation to what the #if 0'ed out code above does.
perceptualRoughness = perceptualRoughness * (1.7 - 0.7 * perceptualRoughness);
#endif
// Remap to spec power. See eq. 21 in --> https://dl.dropboxusercontent.com/u/55891920/papers/mm_brdf.pdf
float n = (2.0 / max(FLT_EPSILON, m * m)) - 2.0;
// Remap from n_dot_h formulation to n_dot_r. See section "Pre-convolved Cube Maps vs Path Tracers" --> https://s3.amazonaws.com/docs.knaldtech.com/knald/1.0.0/lys_power_drops.html
n /= (4.0 * max(NdotR, FLT_EPSILON));
// remap back to square root of real roughness (0.25 include both the sqrt root of the conversion and sqrt for going from roughness to perceptualRoughness)
perceptualRoughness = pow(2.0 / (n + 2.0), 0.25);
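A worked example of the accurate path, assuming PerceptualRoughnessToRoughness squares its input (the usual convention in this library): for perceptualRoughness = 0.5 and NdotR = 1.0 the result lands very close to the approximated 1.7 - 0.7x remap above.
// m = 0.5^2 = 0.25                       -> n = 2.0 / 0.0625 - 2.0 = 30.0
// n /= 4.0 * 1.0                         -> n = 7.5
// pow(2.0 / 9.5, 0.25)                   ~= 0.677
// approximated: 0.5 * (1.7 - 0.7 * 0.5)   = 0.675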
// Performs *linear* remapping for runtime EnvMap filtering.
// Ref: See "Moving Frostbite to PBR" Listing 22
// This formulation is for GGX only (with smith joint visibility or regular)
float3 GetSpecularDominantDir(float3 N, float3 R, float roughness)
{
float a = 1.0 - roughness;
float lerpFactor = a * (sqrt(a) + roughness);
// The result is not normalized as we fetch in a cubemap
return lerp(N, R, lerpFactor);
}
//-----------------------------------------------------------------------------
// Anisotropic image based lighting
//-----------------------------------------------------------------------------
// To simulate the stretching of the highlight at grazing angles for IBL we shrink the roughness,
// which allows us to fake an anisotropic specular lobe.
// Ref: http://www.frostbite.com/2015/08/stochastic-screen-space-reflections/ - slide 84
float AnisotropicStrechAtGrazingAngle(float roughness, float perceptualRoughness, float NdotV)
{
return roughness * lerp(saturate(NdotV * 2.0), 1.0, perceptualRoughness);
}
float3 SphericalToCartesian(float phi, float sinTheta, float cosTheta)
float3 SphericalToCartesian(float phi, float cosTheta)
float sinTheta = sqrt(saturate(1.0 - cosTheta * cosTheta));
return float3(sinTheta * cosPhi, sinTheta * sinPhi, cosTheta);
}

float3 TransformGLtoDX(float x, float y, float z)
{
return float3(x, z, y);
}
float3 TransformGLtoDX(float3 v)
{
return v.xzy;

float3 ConvertEquiarealToCubemap(float u, float v)
{
// The equiareal mapping is defined as follows:
// phi = TWO_PI * (1.0 - u)
// cos(theta) = 1.0 - 2.0 * v
// sin(theta) = sqrt(1.0 - cos^2(theta)) = 2.0 * sqrt(v - v * v)
float sinTheta = 2.0 * sqrt(v - v * v);
return TransformGLtoDX(SphericalToCartesian(phi, sinTheta, cosTheta));
return TransformGLtoDX(SphericalToCartesian(phi, cosTheta));
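A quick check that the trigonometric identities quoted in the comment above are consistent:
// sin^2(theta) + cos^2(theta) = 4.0 * (v - v * v) + (1.0 - 2.0 * v)^2
//                             = 4v - 4v^2 + 1 - 4v + 4v^2 = 1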
// Ref: See "Moving Frostbite to PBR" Listing 22
// This formulation is for GGX only (with smith joint visibility or regular)
float3 GetSpecularDominantDir(float3 N, float3 R, float roughness)
// ----------------------------------------------------------------------------
// Importance sampling BSDF functions
// ----------------------------------------------------------------------------
// Performs uniform sampling of the unit disk.
// Ref: PBRT v3, p. 777.
float2 SampleDiskUniform(float2 u)
float a = 1.0 - roughness;
float lerpFactor = a * (sqrt(a) + roughness);
// The result is not normalized as we fetch in a cubemap
return lerp(N, R, lerpFactor);
}
float r = sqrt(u.x);
float phi = TWO_PI * u.y;
float sinPhi, cosPhi;
sincos(phi, sinPhi, cosPhi);
//-----------------------------------------------------------------------------
// Anisotropic image based lighting
//-----------------------------------------------------------------------------
// To simulate the stretching of the highlight at grazing angles for IBL we shrink the roughness,
// which allows us to fake an anisotropic specular lobe.
// Ref: http://www.frostbite.com/2015/08/stochastic-screen-space-reflections/ - slide 84
float AnisotropicStrechAtGrazingAngle(float roughness, float perceptualRoughness, float NdotV)
{
return roughness * lerp(saturate(NdotV * 2.0), 1.0, perceptualRoughness);
return r * float2(cosPhi, sinPhi);
// ----------------------------------------------------------------------------
// Importance sampling BSDF functions
// ----------------------------------------------------------------------------
void ImportanceSampleCosDir(float2 u,
// Performs cosine-weighted sampling of the hemisphere.
// Ref: PBRT v3, p. 780.
void SampleHemisphereCosine(float2 u,
// Cosine sampling - ref: http://www.rorydriscoll.com/2009/01/07/better-sampling/
float cosTheta = sqrt(1.0 - u.x);
float sinTheta = sqrt(u.x);
float phi = TWO_PI * u.y;
float3 localL;
float3 localL = SphericalToCartesian(phi, sinTheta, cosTheta);
// Since we don't really care about the area distortion,
// we substitute uniform disk sampling for the concentric one.
localL.xy = SampleDiskUniform(u);
// Project the point from the disk onto the hemisphere.
localL.z = sqrt(1.0 - u.x);
NdotL = localL.z;
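A small consistency check on the disk projection above: since SampleDiskUniform returns a point at radius sqrt(u.x), the resulting direction is unit length, as required for NdotL = localL.z.
// dot(localL, localL) = dot(localL.xy, localL.xy) + localL.z * localL.z
//                     = u.x + (1.0 - u.x) = 1.0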

void ImportanceSampleGGXDir(float2 u,
float3 V,
float3x3 localToWorld,
float roughness,
out float3 L,
out float NdotL,
out float NdotH,
out float VdotH,
bool VeqN = false)
void SampleGGXDir(float2 u,
float3 V,
float3x3 localToWorld,
float roughness,
out float3 L,
out float NdotL,
out float NdotH,
out float VdotH,
bool VeqN = false)
float sinTheta = sqrt(1.0 - cosTheta * cosTheta);
float3 localH = SphericalToCartesian(phi, sinTheta, cosTheta);
float3 localH = SphericalToCartesian(phi, cosTheta);
NdotH = cosTheta;

}
// ref: http://blog.selfshadow.com/publications/s2012-shading-course/burley/s2012_pbs_disney_brdf_notes_v3.pdf p26
void ImportanceSampleAnisoGGXDir( float2 u,
float3 V,
float3 N,
float3 tangentX,
float3 tangentY,
float roughnessT,
float roughnessB,
out float3 H,
out float3 L)
void SampleAnisoGGXDir(float2 u,
float3 V,
float3 N,
float3 tangentX,
float3 tangentY,
float roughnessT,
float roughnessB,
out float3 H,
out float3 L)
// Local to world
// H = tangentX * H.x + tangentY * H.y + N * H.z;
// Convert sample from half angle to incident angle
L = 2.0 * saturate(dot(V, H)) * H - V;
}

out float NdotL,
out float weightOverPdf)
{
ImportanceSampleCosDir(u, localToWorld, L, NdotL);
SampleHemisphereCosine(u, localToWorld, L, NdotL);
// Importance sampling weight for each sample
// pdf = N.L / PI

out float weightOverPdf)
{
float NdotH;
ImportanceSampleGGXDir(u, V, localToWorld, roughness, L, NdotL, NdotH, VdotH);
SampleGGXDir(u, V, localToWorld, roughness, L, NdotL, NdotH, VdotH);
// Importance sampling weight for each sample
// pdf = D(H) * (N.H) / (4 * (L.H))

}
// weightOverPdf returns the weight (without the Fresnel term) over the pdf. The Fresnel term must be applied by the caller.
void ImportanceSampleAnisoGGX(
float2 u,
float3 V,
float3 N,
float3 tangentX,
float3 tangentY,
float roughnessT,
float roughnessB,
float NdotV,
out float3 L,
out float VdotH,
out float NdotL,
out float weightOverPdf)
void ImportanceSampleAnisoGGX(float2 u,
float3 V,
float3x3 localToWorld,
float roughnessT,
float roughnessB,
float NdotV,
out float3 L,
out float VdotH,
out float NdotL,
out float weightOverPdf)
float3 tangentX = localToWorld[0];
float3 tangentY = localToWorld[1];
float3 N = localToWorld[2];
ImportanceSampleAnisoGGXDir(u, V, N, tangentX, tangentY, roughnessT, roughnessB, H, L);
SampleAnisoGGXDir(u, V, N, tangentX, tangentY, roughnessT, roughnessB, H, L);
float NdotH = saturate(dot(N, H));
// Note: since L and V are symmetric around H, LdotH == VdotH

// ----------------------------------------------------------------------------
// Ref: Listing 18 in "Moving Frostbite to PBR" + https://knarkowicz.wordpress.com/2014/12/27/analytical-dfg-term-for-ibl/
float4 IntegrateGGXAndDisneyFGD(float3 V, float3 N, float roughness, uint sampleCount)
float4 IntegrateGGXAndDisneyFGD(float3 V, float3 N, float roughness, uint sampleCount = 4096)
{
float NdotV = saturate(dot(N, V));
float4 acc = float4(0.0, 0.0, 0.0, 0.0);

float3 L;
float NdotL, NdotH, VdotH;
ImportanceSampleGGXDir(u, V, localToWorld, roughness, L, NdotL, NdotH, VdotH, true);
SampleGGXDir(u, V, localToWorld, roughness, L, NdotL, NdotH, VdotH, true);
float mipLevel;

// This will blur the reflection.
// TODO: find a more accurate MIP bias function.
mipLevel = lerp(mipLevel, maxMipLevel, bias);
// All MIP map levels beyond UNITY_SPECCUBE_LOD_STEPS contain invalid data.
mipLevel = min(mipLevel, UNITY_SPECCUBE_LOD_STEPS);
}
if (NdotL > 0.0)

Assets/ScriptableRenderLoop/ShaderLibrary/Packing.hlsl (17 changes)


return max(vRGB, float3(0.0, 0.0, 0.0));
}
// TODO: This function is used with the LightTransport pass to encode lightmap or emissive
float4 PackRGBM(float3 rgb, float maxRGBM)
{
float kOneOverRGBMMaxRange = 1.0 / maxRGBM;
const float kMinMultiplier = 2.0 * 1e-2;
float4 rgbm = float4(rgb * kOneOverRGBMMaxRange, 1.0);
rgbm.a = max(max(rgbm.r, rgbm.g), max(rgbm.b, kMinMultiplier));
rgbm.a = ceil(rgbm.a * 255.0) / 255.0;
// Division-by-zero warning from d3d9, so make compiler happy.
rgbm.a = max(rgbm.a, kMinMultiplier);
rgbm.rgb /= rgbm.a;
return rgbm;
}
// Alternative...
#define RGBMRANGE (8.0)
float4 PackRGBM(float3 color)
