
Merge pull request #82 from EvgeniiG/master

Improve consistency
GitHub, 8 years ago
Current commit: b2c7555b
12 changed files, with 210 additions and 192 deletions
  1. Assets/ScriptableRenderLoop/HDRenderPipeline/Material/Lit/Lit.hlsl (74 changes)
  2. Assets/ScriptableRenderLoop/HDRenderPipeline/Material/Lit/LitData.hlsl (21 changes)
  3. Assets/ScriptableRenderLoop/HDRenderPipeline/Material/Lit/Resources/PreIntegratedFGD.shader (4 changes)
  4. Assets/ScriptableRenderLoop/HDRenderPipeline/ShaderPass/ShaderPassForward.hlsl (9 changes)
  5. Assets/ScriptableRenderLoop/HDRenderPipeline/ShaderPass/ShaderPassGBuffer.hlsl (4 changes)
  6. Assets/ScriptableRenderLoop/HDRenderPipeline/ShaderVariables.hlsl (11 changes)
  7. Assets/ScriptableRenderLoop/ShaderLibrary/AreaLighting.hlsl (9 changes)
  8. Assets/ScriptableRenderLoop/ShaderLibrary/BSDF.hlsl (16 changes)
  9. Assets/ScriptableRenderLoop/ShaderLibrary/Common.hlsl (23 changes)
  10. Assets/ScriptableRenderLoop/ShaderLibrary/CommonLighting.hlsl (24 changes)
  11. Assets/ScriptableRenderLoop/ShaderLibrary/Fibonacci.hlsl (4 changes)
  12. Assets/ScriptableRenderLoop/ShaderLibrary/ImageBasedLighting.hlsl (203 changes)

Assets/ScriptableRenderLoop/HDRenderPipeline/Material/Lit/Lit.hlsl (74 changes)


bsdfData.diffuseColor = surfaceData.baseColor * (1.0 - surfaceData.metallic);
bsdfData.fresnel0 = lerp(float3(surfaceData.specular, surfaceData.specular, surfaceData.specular), surfaceData.baseColor, surfaceData.metallic);
bsdfData.tangentWS = surfaceData.tangentWS;
bsdfData.bitangentWS = cross(surfaceData.normalWS, surfaceData.tangentWS);
ConvertAnisotropyToRoughness(bsdfData.roughness, surfaceData.anisotropy, bsdfData.roughnessT, bsdfData.roughnessB);
bsdfData.anisotropy = surfaceData.anisotropy;
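For context, a minimal sketch of what a conversion like ConvertAnisotropyToRoughness typically does (an assumption for illustration; the real helper is defined elsewhere in Lit.hlsl and may differ):

void ConvertAnisotropyToRoughnessSketch(float roughness, float anisotropy,
                                        out float roughnessT, out float roughnessB)
{
    // Stretch the isotropic roughness along the tangent and shrink it along the bitangent.
    roughnessT = roughness * (1.0 + anisotropy);
    roughnessB = roughness * (1.0 - anisotropy);
}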

bsdfData.fresnel0 = lerp(float3(specular, specular, specular), baseColor, metallic);
bsdfData.tangentWS = UnpackNormalOctEncode(float2(inGBuffer2.rg * 2.0 - 1.0));
// TODO: Do we need to orthonormalize here? IIRC Eric said that we should.
bsdfData.bitangentWS = cross(bsdfData.normalWS, bsdfData.tangentWS);
ConvertAnisotropyToRoughness(bsdfData.roughness, anisotropy, bsdfData.roughnessT, bsdfData.roughnessB);
bsdfData.anisotropy = anisotropy;
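For context, a minimal sketch of the octahedral decode that a function like UnpackNormalOctEncode performs (an assumption for illustration; the shipped helper may differ in layout and precision):

float3 UnpackNormalOctEncodeSketch(float2 f) // f in [-1, 1]^2
{
    float3 n = float3(f.x, f.y, 1.0 - abs(f.x) - abs(f.y));
    // Fold the lower hemisphere back over the diamond.
    float t = max(-n.z, 0.0);
    n.x += (n.x >= 0.0) ? -t : t;
    n.y += (n.y >= 0.0) ? -t : t;
    return normalize(n);
}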

{
PreLightData preLightData;
// TODO: check Eric's idea about doing that when writing into the GBuffer (with our forward decal)
preLightData.NdotV = GetShiftedNdotV(bsdfData.normalWS, V, false);
// We do not saturate to correctly handle double-sided lighting.
preLightData.NdotV = dot(bsdfData.normalWS, V);
preLightData.ggxLambdaV = GetSmithJointGGXLambdaV(preLightData.NdotV, bsdfData.roughness);

// NOTE: If we followed the theory, we should use the modified normal for every calculation involving a normal (like NdotV) and pass iblNormalWS
// into functions like GetSpecularDominantDir(). However, the modified normal is just a hack: the goal is only to stretch a cubemap lookup, so accuracy is not critical here.
// With this in mind, and for performance reasons, we chose to use the modified normal only to calculate R.
// iblNdotV = GetShiftedNdotV(iblNormalWS, V), false);
}
GetPreIntegratedFGD(iblNdotV, bsdfData.perceptualRoughness, bsdfData.fresnel0, preLightData.specularFGD, preLightData.diffuseFGD);
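A sketch of how the stretched normal described in the NOTE above would typically feed the reflection lookup (illustrative only; iblNormalWS and iblR follow the naming in the comments, and GetSpecularDominantDir appears further down in ImageBasedLighting.hlsl):

float3 iblR = reflect(-V, iblNormalWS); // R is the only term computed from the modified normal
// Bias R toward N for rough surfaces before the cubemap fetch.
float3 fetchDir = GetSpecularDominantDir(bsdfData.normalWS, iblR, bsdfData.roughness);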

// Area light specific
// UVs for sampling the LUTs
float theta = FastACos(dot(bsdfData.normalWS, V));
float theta = FastACos(preLightData.NdotV);
// Scale and bias for the current precomputed table - the constants used here are the ones that were used when the tables in LtcData.DisneyDiffuse.cs and LtcData.GGX.cs were generated
float2 uv = 0.0078125 + 0.984375 * float2(bsdfData.perceptualRoughness, theta * INV_HALF_PI);
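The scale/bias pair corresponds to half-texel sampling of what is presumably a 64x64 LUT: 0.0078125 = 0.5/64 and 0.984375 = 63/64, so uv = 0.5/64 + (63/64) * float2(perceptualRoughness, theta / (PI/2)), which keeps samples centered on texels from 0.5/64 up to 63.5/64.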

float BdotH = dot(bsdfData.bitangentWS, H);
float BdotL = dot(bsdfData.bitangentWS, L);
bsdfData.roughnessT = ClampRoughnessForAnalyticalLights(bsdfData.roughnessT);
bsdfData.roughnessB = ClampRoughnessForAnalyticalLights(bsdfData.roughnessB);
#ifdef LIT_USE_BSDF_PRE_LAMBDAV
Vis = V_SmithJointGGXAnisoLambdaV( preLightData.TdotV, preLightData.BdotV, preLightData.NdotV, TdotL, BdotL, NdotL,
bsdfData.roughnessT, bsdfData.roughnessB, preLightData.anisoGGXLambdaV);
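For reference, a minimal sketch of the height-correlated Smith visibility for anisotropic GGX that a helper like V_SmithJointGGXAniso computes (hypothetical name and form; the shipped code in BSDF.hlsl may differ, and the LIT_USE_BSDF_PRE_LAMBDAV path above additionally reuses a precomputed lambdaV term):

float V_SmithJointGGXAnisoSketch(float TdotV, float BdotV, float NdotV,
                                 float TdotL, float BdotL, float NdotL,
                                 float roughnessT, float roughnessB)
{
    // Lambda terms of the height-correlated Smith masking-shadowing function.
    float lambdaV = NdotL * length(float3(roughnessT * TdotV, roughnessB * BdotV, NdotV));
    float lambdaL = NdotV * length(float3(roughnessT * TdotL, roughnessB * BdotL, NdotL));
    return 0.5 / (lambdaV + lambdaL);
}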

}
else
{
bsdfData.roughness = ClampRoughnessForAnalyticalLights(bsdfData.roughness);
#ifdef LIT_USE_BSDF_PRE_LAMBDAV
Vis = V_SmithJointGGX(NdotL, preLightData.NdotV, bsdfData.roughness, preLightData.ggxLambdaV);
#else

P1 -= positionWS;
P2 -= positionWS;
// Construct an orthonormal basis (local coordinate system) around N.
// TODO: it could be stored in PreLightData. All LTC lights compute it more than once!
// Also consider using 'bsdfData.tangentWS', 'bsdfData.bitangentWS', 'bsdfData.normalWS'.
// Construct a view-dependent orthonormal basis around N.
// TODO: it could be stored in PreLightData, since all LTC lights compute it more than once.
float3x3 basis;
basis[0] = normalize(V - bsdfData.normalWS * preLightData.NdotV);
basis[1] = normalize(cross(bsdfData.normalWS, basis[0]));

// ----------------------------------------------------------------------------
// Ref: Moving Frostbite to PBR (Appendix A)
float3 IntegrateLambertIBLRef( LightLoopContext lightLoopContext,
EnvLightData lightData, BSDFData bsdfData,
uint sampleCount = 2048)
float3 IntegrateLambertIBLRef(LightLoopContext lightLoopContext,
float3 V, EnvLightData lightData, BSDFData bsdfData,
uint sampleCount = 4096)
float3 N = bsdfData.normalWS;
float3 tangentX = bsdfData.tangentWS;
float3x3 localToWorld = GetLocalFrame(N, tangentX);
float3x3 localToWorld = float3x3(bsdfData.tangentWS, bsdfData.bitangentWS, bsdfData.normalWS);
float2 randNum = InitRandom(N.xy * 0.5 + 0.5);
float2 randNum = InitRandom(V.xy * 0.5 + 0.5);
for (uint i = 0; i < sampleCount; ++i)
{

}
float3 IntegrateDisneyDiffuseIBLRef(LightLoopContext lightLoopContext,
float3 V, EnvLightData lightData, BSDFData bsdfData,
uint sampleCount = 2048)
float3 V, PreLightData preLightData, EnvLightData lightData, BSDFData bsdfData,
uint sampleCount = 4096)
float3 N = bsdfData.normalWS;
float3 tangentX = bsdfData.tangentWS;
float3x3 localToWorld = GetLocalFrame(N, tangentX);
float NdotV = GetShiftedNdotV(N, V, false);
float3x3 localToWorld = float3x3(bsdfData.tangentWS, bsdfData.bitangentWS, bsdfData.normalWS);
float2 randNum = InitRandom(N.xy * 0.5 + 0.5);
float2 randNum = InitRandom(V.xy * 0.5 + 0.5);
for (uint i = 0; i < sampleCount; ++i)
{

float LdotH = dot(L, H);
// Note: we call DisneyDiffuse, which requires a multiplication by Albedo / PI. The division by PI is already taken into account
// in the weightOverPdf of the ImportanceSampleLambert call.
float disneyDiffuse = DisneyDiffuse(NdotV, NdotL, LdotH, bsdfData.perceptualRoughness);
float disneyDiffuse = DisneyDiffuse(preLightData.NdotV, NdotL, LdotH, bsdfData.perceptualRoughness);
// The diffuse albedo is applied here, as described in the ImportanceSampleLambert function
float4 val = SampleEnv(lightLoopContext, lightData.envIndex, L, 0);

}
// Ref: Moving Frostbite to PBR (Appendix A)
float3 IntegrateSpecularGGXIBLRef( LightLoopContext lightLoopContext,
float3 V, EnvLightData lightData, BSDFData bsdfData,
uint sampleCount = 2048)
float3 IntegrateSpecularGGXIBLRef(LightLoopContext lightLoopContext,
float3 V, PreLightData preLightData, EnvLightData lightData, BSDFData bsdfData,
uint sampleCount = 4096)
float3 N = bsdfData.normalWS;
float3 tangentX = bsdfData.tangentWS;
float3 tangentY = bsdfData.bitangentWS;
float3x3 localToWorld = GetLocalFrame(N, tangentX);
float NdotV = GetShiftedNdotV(N, V, false);
float3x3 localToWorld = float3x3(bsdfData.tangentWS, bsdfData.bitangentWS, bsdfData.normalWS);
// Add some jittering on Hammersley2d
float2 randNum = InitRandom(V.xy * 0.5 + 0.5);

// GGX BRDF
if (bsdfData.materialId == MATERIALID_LIT_ANISO)
{
ImportanceSampleAnisoGGX(u, V, N, tangentX, tangentY, bsdfData.roughnessT, bsdfData.roughnessB, NdotV, L, VdotH, NdotL, weightOverPdf);
ImportanceSampleAnisoGGX(u, V, localToWorld, bsdfData.roughnessT, bsdfData.roughnessB, preLightData.NdotV, L, VdotH, NdotL, weightOverPdf);
ImportanceSampleGGX(u, V, localToWorld, bsdfData.roughness, NdotV, L, VdotH, NdotL, weightOverPdf);
ImportanceSampleGGX(u, V, localToWorld, bsdfData.roughness, preLightData.NdotV, L, VdotH, NdotL, weightOverPdf);
}

#ifdef LIT_DISPLAY_REFERENCE_IBL
specularLighting = IntegrateSpecularGGXIBLRef(lightLoopContext, V, lightData, bsdfData);
specularLighting = IntegrateSpecularGGXIBLRef(lightLoopContext, V, preLightData, lightData, bsdfData);
diffuseLighting = IntegrateLambertIBLRef(lightData, bsdfData);
diffuseLighting = IntegrateLambertIBLRef(lightData, V, bsdfData);
diffuseLighting = IntegrateDisneyDiffuseIBLRef(lightLoopContext, V, lightData, bsdfData);
diffuseLighting = IntegrateDisneyDiffuseIBLRef(lightLoopContext, V, preLightData, lightData, bsdfData);
#endif
*/
diffuseLighting = float3(0.0, 0.0, 0.0);

specularLighting *= bsdfData.specularOcclusion;
diffuseLighting = float3(0.0, 0.0, 0.0);
#endif
#endif

Assets/ScriptableRenderLoop/HDRenderPipeline/Material/Lit/LitData.hlsl (21 changes)


float3 normalTS;
float alpha = GetSurfaceData(input, layerTexCoord, surfaceData, normalTS);
surfaceData.normalWS = TransformTangentToWorld(normalTS, input.tangentToWorld);
surfaceData.tangentWS = input.tangentToWorld[0].xyz;
bool twoSided = false;
// This will always produce the correct 'NdotV' value, but potentially
// reduce the length of the normal at edges of geometry.
GetShiftedNdotV(surfaceData.normalWS, V, twoSided);
// Orthonormalize the basis vectors using the Gram-Schmidt process.
// We assume that the length of the surface normal is sufficiently close to 1.
surfaceData.tangentWS = normalize(surfaceData.tangentWS - dot(surfaceData.tangentWS, surfaceData.normalWS));
// Caution: surfaceData must be fully initialized before calling GetBuiltinData
GetBuiltinData(input, surfaceData, alpha, depthOffset, builtinData);
}
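For reference, a standalone sketch of the Gram-Schmidt step the comments describe (hypothetical helper; note that the projection term is the dot product multiplied by the normal):

float3 OrthonormalizeTangentSketch(float3 tangentWS, float3 normalWS)
{
    // t' = normalize(t - (t . n) * n), assuming n is (approximately) unit length.
    return normalize(tangentWS - dot(tangentWS, normalWS) * normalWS);
}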

normalTS = BlendLayeredFloat3(normalTS0, normalTS1, normalTS2, normalTS3, weights);
#endif
surfaceData.normalWS = TransformTangentToWorld(normalTS, input.tangentToWorld);
surfaceData.tangentWS = input.tangentToWorld[0].xyz;
bool twoSided = false;
// This will potentially reduce the length of the normal at edges of geometry.
GetShiftedNdotV(surfaceData.normalWS, V, twoSided);
// Orthonormalize the basis vectors using the Gram-Schmidt process.
// We assume that the length of the surface normal is sufficiently close to 1.
surfaceData.tangentWS = normalize(surfaceData.tangentWS - dot(surfaceData.tangentWS, surfaceData.normalWS));
surfaceData.tangentWS = input.tangentToWorld[0].xyz;
surfaceData.anisotropy = 0;
surfaceData.specular = 0.04;
surfaceData.subSurfaceRadius = 1.0;

Assets/ScriptableRenderLoop/HDRenderPipeline/Material/Lit/Resources/PreIntegratedFGD.shader (4 changes)


float3 V = float3(sqrt(1 - NdotV * NdotV), 0, NdotV);
float3 N = float3(0.0, 0.0, 1.0);
const int numSamples = 2048;
float4 preFGD = IntegrateGGXAndDisneyFGD(V, N, PerceptualRoughnessToRoughness(perceptualRoughness), numSamples);
float4 preFGD = IntegrateGGXAndDisneyFGD(V, N, PerceptualRoughnessToRoughness(perceptualRoughness));
return float4(preFGD.xyz, 1.0);
}
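For context, a sketch of how such a preintegrated FGD texture is commonly applied at runtime with the split-sum approximation (an assumption for illustration; the channel layout used by the actual GetPreIntegratedFGD in Lit.hlsl may differ):

void ApplyPreIntegratedFGDSketch(float3 preFGD, float3 fresnel0,
                                 out float3 specularFGD, out float diffuseFGD)
{
    // Split-sum: one channel scales F0, one is an additive bias, and the third would hold
    // the preintegrated Disney diffuse response.
    specularFGD = fresnel0 * preFGD.x + preFGD.y;
    diffuseFGD  = preFGD.z;
}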

Assets/ScriptableRenderLoop/HDRenderPipeline/ShaderPass/ShaderPassForward.hlsl (9 changes)


UpdatePositionInput(input.unPositionSS.z, input.unPositionSS.w, input.positionWS, posInput);
float3 V = GetWorldSpaceNormalizeViewDir(input.positionWS);
SurfaceData surfaceData;
BuiltinData builtinData;
GetSurfaceAndBuiltinData(input, V, posInput, surfaceData, builtinData);
BSDFData bsdfData = ConvertSurfaceDataToBSDFData(surfaceData);
PreLightData preLightData = GetPreLightData(V, posInput, bsdfData);
float3 diffuseLighting;

Assets/ScriptableRenderLoop/HDRenderPipeline/ShaderPass/ShaderPassGBuffer.hlsl (4 changes)


BuiltinData builtinData;
GetSurfaceAndBuiltinData(input, V, posInput, surfaceData, builtinData);
BSDFData bsdfData = ConvertSurfaceDataToBSDFData(surfaceData);
float3 bakeDiffuseLighting = GetBakedDiffuseLigthing(surfaceData, builtinData, bsdfData, preLightData);
ENCODE_INTO_GBUFFER(surfaceData, bakeDiffuseLighting, outGBuffer);

Assets/ScriptableRenderLoop/HDRenderPipeline/ShaderVariables.hlsl (11 changes)


// Computes world space view direction, from object space position
float3 GetWorldSpaceNormalizeViewDir(float3 positionWS)
{
return normalize(_WorldSpaceCameraPos.xyz - positionWS);
float3 V = _WorldSpaceCameraPos.xyz - positionWS;
// Uncomment this once the compiler bug is fixed.
// if (unity_OrthoParams.w == 1.0)
// {
// float4x4 M = GetWorldToViewMatrix();
// V = M[1].xyz;
// }
return normalize(V);
}
float3 TransformTangentToWorld(float3 dirTS, float3 tangentToWorld[3])

Assets/ScriptableRenderLoop/ShaderLibrary/AreaLighting.hlsl (9 changes)


{
// 1. ClipQuadToHorizon
// detect clipping config
uint config = 0;
if (L[0].z > 0) config += 1;
if (L[1].z > 0) config += 2;

// For polygonal lights.
float LTCEvaluate(float4x3 L, float3 V, float3 N, float NdotV, bool twoSided, float3x3 invM)
{
// Construct local orthonormal basis around N, aligned with N
// TODO: it could be stored in PreLightData. All LTC lights compute it more than once!
// Also consider using 'bsdfData.tangentWS', 'bsdfData.bitangentWS', 'bsdfData.normalWS'.
// Construct a view-dependent orthonormal basis around N.
// TODO: it could be stored in PreLightData, since all LTC lights compute it more than once.
float3x3 basis;
basis[0] = normalize(V - N * NdotV);
basis[1] = normalize(cross(N, basis[0]));

// 'normal' is the direction orthogonal to the tangent. It is the shortest vector between
// the shaded point and the line, pointing away from the shaded point.
float LineIrradiance(float l1, float l2, float3 normal, float3 tangent)
{
{
float d = length(normal);
float l1rcpD = l1 * rcp(d);
float l2rcpD = l2 * rcp(d);
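A sketch of how such a 'normal' is typically derived from the endpoints (illustrative only; it assumes P1 and P2 are the line endpoints already expressed relative to the shaded point):

float3 tangent = normalize(P2 - P1);
float3 normal  = P1 - dot(P1, tangent) * tangent; // vector rejection: shortest vector from the shaded point to the line
float  l1      = dot(P1, tangent);                // signed endpoint offsets along the line direction
float  l2      = dot(P2, tangent);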

Assets/ScriptableRenderLoop/ShaderLibrary/BSDF.hlsl (16 changes)


// With analytical lights (not image based lights) we clamp the minimum roughness in the NDF to avoid numerical instability.
#define UNITY_MIN_ROUGHNESS 0.002
float ClampRoughnessForAnalyticalLights(float roughness)
{
return max(roughness, UNITY_MIN_ROUGHNESS);
}
roughness = max(roughness, UNITY_MIN_ROUGHNESS);
float a2 = roughness * roughness;
float f = (NdotH * a2 - NdotH) * NdotH + 1.0;
return a2 / (f * f);

float D_GGX_Inverse(float NdotH, float roughness)
{
roughness = max(roughness, UNITY_MIN_ROUGHNESS);
float a2 = roughness * roughness;
float f = (NdotH * a2 - NdotH) * NdotH + 1.0;
float g = (f * f) / a2;

// Ref: Understanding the Masking-Shadowing Function in Microfacet-Based BRDFs, p. 19, 29.
float G_MaskingSmithGGX(float NdotV, float VdotH, float roughness)
{
roughness = max(roughness, UNITY_MIN_ROUGHNESS);
// G1(V, H) = HeavisideStep(VdotH) / (1 + Λ(V)).
// Λ(V) = -0.5 + 0.5 * sqrt(1 + 1 / a²).
// a = 1 / (roughness * tan(theta)).
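Working the comment's Lambda into closed form gives the usual GGX masking term; a sketch (not necessarily the shipped implementation):

// 1/a^2 = roughness^2 * tan^2(theta) = roughness^2 * (1 - NdotV^2) / NdotV^2, so (ignoring the Heaviside term)
// G1 = 1 / (1 + Lambda(V)) = 2 * NdotV / (NdotV + sqrt(a2 + (1 - a2) * NdotV^2)).
float G_MaskingSmithGGXSketch(float NdotV, float roughness)
{
    float a2 = roughness * roughness;
    return 2.0 * NdotV / (NdotV + sqrt(a2 + (1.0 - a2) * NdotV * NdotV));
}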

// lambda_l = (-1 + sqrt(a2 * (1 - NdotV2) / NdotV2 + 1)) * 0.5f;
// G = 1 / (1 + lambda_v + lambda_l);
float a = roughness;
float a2 = a * a;
// Reorder code to be more optimal
float lambdaV = NdotL * sqrt((-NdotV * a2 + NdotV) * NdotV + a2);

// roughnessB -> roughness in bitangent direction
float D_GGXAnisoNoPI(float TdotH, float BdotH, float NdotH, float roughnessT, float roughnessB)
{
roughnessT = max(roughnessT, UNITY_MIN_ROUGHNESS);
roughnessB = max(roughnessB, UNITY_MIN_ROUGHNESS);
float f = TdotH * TdotH / (roughnessT * roughnessT) + BdotH * BdotH / (roughnessB * roughnessB) + NdotH * NdotH;
return 1.0 / (roughnessT * roughnessB * f * f);
}

Assets/ScriptableRenderLoop/ShaderLibrary/Common.hlsl (23 changes)


posInput.positionWS += V * depthOffsetVS;
}
//-----------------------------------------------------------------------------
// various helper
//-----------------------------------------------------------------------------
// NdotV should not be negative for visible pixels, but it can happen due to the
// perspective projection and the normal mapping + decals. In that case, the normal
// should be modified to become valid (i.e facing the camera) to avoid weird artifacts.
// Note: certain applications (e.g. SpeedTree) make use of two-sided lighting.
float GetShiftedNdotV(inout float3 N, float3 V, bool twoSided)
{
float NdotV = dot(N, V);
float limit = 1e-4;
if (!twoSided && NdotV < limit)
{
// We do not renormalize the normal because { abs(length(N) - 1.0) < limit }.
N += (-NdotV + limit) * V;
NdotV = limit;
}
return NdotV;
}
#endif // UNITY_COMMON_INCLUDED

Assets/ScriptableRenderLoop/ShaderLibrary/CommonLighting.hlsl (24 changes)


}
//-----------------------------------------------------------------------------
// Get local frame
// Helper functions

// NdotV should not be negative for visible pixels, but it can happen due to the
// perspective projection and the normal mapping + decals. In that case, the normal
// should be modified to become valid (i.e facing the camera) to avoid weird artifacts.
// Note: certain applications (e.g. SpeedTree) make use of double-sided lighting.
float GetShiftedNdotV(inout float3 N, float3 V, bool twoSided)
float NdotV = dot(N, V);
float limit = rcp(256.0); // Determined mostly by the quality of the G-buffer normal encoding
if (!twoSided && NdotV < limit)
{
// We do not renormalize the normal because { abs(length(N) - 1.0) < limit }.
N += (-NdotV + limit) * V;
NdotV = limit;
}
return NdotV;
}

// Generates an orthonormal basis from two orthogonal unit vectors.
float3x3 GetLocalFrame(float3 localZ, float3 localX)
float3 localY = cross(localZ, localX);
return float3x3(localX, localY, localZ);
// Generates an orthonormal basis from a unit vector.

float3 localX = normalize(cross(upVector, localZ));
float3 localY = cross(localZ, localX);
return GetLocalFrame(localZ, localX);
return float3x3(localX, localY, localZ);
}
// TODO: test

Assets/ScriptableRenderLoop/ShaderLibrary/Fibonacci.hlsl (4 changes)


}
static const int k_FibonacciSeq[] = {
0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610
0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584, 4181
};
static const float2 k_Fibonacci2dSeq21[] = {

int fibN2 = sampleCount;
// These are all constants, so this loop will be optimized away.
for (int j = 1; j < 16; j++)
for (int j = 1; j < 20; j++)
{
if (k_FibonacciSeq[j] == fibN1)
{
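For context, such a sequence typically drives a Fibonacci (golden-ratio) lattice of 2D sample points; a generic sketch (hypothetical helper, the actual code in Fibonacci.hlsl may parameterize this differently):

float2 FibonacciLatticeSketch(uint i, uint N, uint P) // N: sample count (a Fibonacci number), P: its predecessor
{
    // P / N approximates the golden ratio conjugate, which spaces the samples evenly on [0,1)^2.
    return float2((float(i) + 0.5) / float(N),
                  frac(float(i) * (float(P) / float(N))));
}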

Assets/ScriptableRenderLoop/ShaderLibrary/ImageBasedLighting.hlsl (203 changes)


// Util image based lighting
//-----------------------------------------------------------------------------
// TODO: Clean a bit this code
// CAUTION: remap from Morten may work only with offline convolution, see impact with runtime convolution!
// For now disabled
#if 0
float m = PerceptualRoughnessToRoughness(perceptualRoughness); // m is the real roughness parameter
float n = (2.0 / max(FLT_EPSILON, m*m)) - 2.0; // remap to spec power. See eq. 21 in --> https://dl.dropboxusercontent.com/u/55891920/papers/mm_brdf.pdf
n /= 4.0; // remap from n_dot_h formulation to n_dot_r. See section "Pre-convolved Cube Maps vs Path Tracers" --> https://s3.amazonaws.com/docs.knaldtech.com/knald/1.0.0/lys_power_drops.html
perceptualRoughness = pow(2.0 / (n + 2.0), 0.25); // remap back to square root of real roughness (0.25 include both the sqrt root of the conversion and sqrt for going from roughness to perceptualRoughness)
#else
// MM: came up with a surprisingly close approximation to what the #if 0'ed out code above does.
perceptualRoughness = perceptualRoughness * (1.7 - 0.7 * perceptualRoughness);
#endif

// Performs a *non-linear* remapping which improves the perceptual roughness distribution
// and adds reflection (contact) hardening. The *approximated* version.
perceptualRoughness = perceptualRoughness * (1.7 - 0.7 * perceptualRoughness);
return perceptualRoughness * UNITY_SPECCUBE_LOD_STEPS;
}

// Performs a *non-linear* remapping which improves the perceptual roughness distribution
// and adds reflection (contact) hardening. The *accurate* version.
// TODO: optimize!
float perceptualRoughnessToMipmapLevel(float perceptualRoughness, float NdotR)
{
float m = PerceptualRoughnessToRoughness(perceptualRoughness);
// Remap to spec power. See eq. 21 in --> https://dl.dropboxusercontent.com/u/55891920/papers/mm_brdf.pdf
float n = (2.0 / max(FLT_EPSILON, m * m)) - 2.0;
// Remap from n_dot_h formulation to n_dot_r. See section "Pre-convolved Cube Maps vs Path Tracers" --> https://s3.amazonaws.com/docs.knaldtech.com/knald/1.0.0/lys_power_drops.html
n /= (4.0 * max(NdotR, FLT_EPSILON));
// remap back to square root of real roughness (0.25 include both the sqrt root of the conversion and sqrt for going from roughness to perceptualRoughness)
perceptualRoughness = pow(2.0 / (n + 2.0), 0.25);
// Performs *linear* remapping for runtime EnvMap filtering.
// Ref: See "Moving Frostbite to PBR" Listing 22
// This formulation is for GGX only (with smith joint visibility or regular)
float3 GetSpecularDominantDir(float3 N, float3 R, float roughness)
{
float a = 1.0 - roughness;
float lerpFactor = a * (sqrt(a) + roughness);
// The result is not normalized as we fetch in a cubemap
return lerp(N, R, lerpFactor);
}
//-----------------------------------------------------------------------------
// Anisotropic image based lighting
//-----------------------------------------------------------------------------
// To simulate the stretching of the highlight at grazing angles for IBL, we shrink the roughness,
// which allows us to fake an anisotropic specular lobe.
// Ref: http://www.frostbite.com/2015/08/stochastic-screen-space-reflections/ - slide 84
float AnisotropicStrechAtGrazingAngle(float roughness, float perceptualRoughness, float NdotV)
{
return roughness * lerp(saturate(NdotV * 2.0), 1.0, perceptualRoughness);
}
float3 SphericalToCartesian(float phi, float sinTheta, float cosTheta)
float3 SphericalToCartesian(float phi, float cosTheta)
float sinTheta = sqrt(saturate(1.0 - cosTheta * cosTheta));
return float3(sinTheta * cosPhi, sinTheta * sinPhi, cosTheta);
}

float3 TransformGLtoDX(float x, float y, float z)
{
return float3(x, z, y);
}
float3 TransformGLtoDX(float3 v)
{
return v.xzy;

float3 ConvertEquiarealToCubemap(float u, float v)
{
// The equiareal mapping is defined as follows:
// phi = TWO_PI * (1.0 - u)
// cos(theta) = 1.0 - 2.0 * v
// sin(theta) = sqrt(1.0 - cos^2(theta)) = 2.0 * sqrt(v - v * v)
float sinTheta = 2.0 * sqrt(v - v * v);
return TransformGLtoDX(SphericalToCartesian(phi, sinTheta, cosTheta));
return TransformGLtoDX(SphericalToCartesian(phi, cosTheta));
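As a quick sanity check of the mapping above: for (u, v) = (0.5, 0.5) we get phi = PI and cosTheta = 0 (so sinTheta = 1); SphericalToCartesian(PI, 0) yields (-1, 0, 0) in the GL convention, and TransformGLtoDX swaps y/z, leaving (-1, 0, 0), i.e. the center of the equiareal map looks down the -X axis.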
// Ref: See "Moving Frostbite to PBR" Listing 22
// This formulation is for GGX only (with smith joint visibility or regular)
float3 GetSpecularDominantDir(float3 N, float3 R, float roughness)
// ----------------------------------------------------------------------------
// Importance sampling BSDF functions
// ----------------------------------------------------------------------------
// Performs uniform sampling of the unit disk.
// Ref: PBRT v3, p. 777.
float2 SampleDiskUniform(float2 u)
float a = 1.0 - roughness;
float lerpFactor = a * (sqrt(a) + roughness);
// The result is not normalized as we fetch in a cubemap
return lerp(N, R, lerpFactor);
}
float r = sqrt(u.x);
float phi = TWO_PI * u.y;
float sinPhi, cosPhi;
sincos(phi, sinPhi, cosPhi);
//-----------------------------------------------------------------------------
// Anisotropic image based lighting
//-----------------------------------------------------------------------------
// To simulate the streching of highlight at grazing angle for IBL we shrink the roughness
// which allow to fake an anisotropic specular lobe.
// Ref: http://www.frostbite.com/2015/08/stochastic-screen-space-reflections/ - slide 84
float AnisotropicStrechAtGrazingAngle(float roughness, float perceptualRoughness, float NdotV)
{
return roughness * lerp(saturate(NdotV * 2.0), 1.0, perceptualRoughness);
return r * float2(cosPhi, sinPhi);
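For comparison, the 'concentric' disk mapping that the cosine-sampling code further down refers to is commonly the Shirley-Chiu one; a minimal sketch (hypothetical helper, not part of this diff; PI is assumed to come from the shader library's math constants):

float2 SampleDiskConcentricSketch(float2 u)
{
    float2 s = 2.0 * u - 1.0; // map [0,1]^2 to [-1,1]^2
    if (s.x == 0.0 && s.y == 0.0)
        return float2(0.0, 0.0);

    float r, phi;
    if (abs(s.x) > abs(s.y))
    {
        r   = s.x;
        phi = (PI / 4.0) * (s.y / s.x);
    }
    else
    {
        r   = s.y;
        phi = (PI / 2.0) - (PI / 4.0) * (s.x / s.y);
    }
    return r * float2(cos(phi), sin(phi));
}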
// ----------------------------------------------------------------------------
// Importance sampling BSDF functions
// ----------------------------------------------------------------------------
void ImportanceSampleCosDir(float2 u,
// Performs cosine-weighted sampling of the hemisphere.
// Ref: PBRT v3, p. 780.
void SampleHemisphereCosine(float2 u,
// Cosine sampling - ref: http://www.rorydriscoll.com/2009/01/07/better-sampling/
float cosTheta = sqrt(1.0 - u.x);
float sinTheta = sqrt(u.x);
float phi = TWO_PI * u.y;
float3 localL = SphericalToCartesian(phi, sinTheta, cosTheta);

float3 localL;
// Since we don't really care about the area distortion,
// we substitute uniform disk sampling for the concentric one.
localL.xy = SampleDiskUniform(u);
// Project the point from the disk onto the hemisphere.
localL.z = sqrt(1.0 - u.x);
NdotL = localL.z;
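A self-contained sketch tying the two pieces together (hypothetical helper; TWO_PI and INV_PI are assumed to come from the shader library's math constants). With r = sqrt(u.x) on the disk and z = sqrt(1 - u.x), r^2 + z^2 = 1, so the projected point lies on the unit hemisphere and cos(theta) = localL.z, which is exactly Malley's method for cosine-weighted sampling:

float3 SampleHemisphereCosineSketch(float2 u, out float pdf)
{
    float r   = sqrt(u.x);        // uniform disk sampling
    float phi = TWO_PI * u.y;
    float sinPhi, cosPhi;
    sincos(phi, sinPhi, cosPhi);

    float3 localL;
    localL.xy = r * float2(cosPhi, sinPhi);
    localL.z  = sqrt(1.0 - u.x);  // project the disk point up onto the hemisphere

    pdf = localL.z * INV_PI;      // pdf = cos(theta) / PI
    return localL;
}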

void ImportanceSampleGGXDir(float2 u,
float3 V,
float3x3 localToWorld,
float roughness,
out float3 L,
out float NdotL,
out float NdotH,
out float VdotH,
bool VeqN = false)
void SampleGGXDir(float2 u,
float3 V,
float3x3 localToWorld,
float roughness,
out float3 L,
out float NdotL,
out float NdotH,
out float VdotH,
bool VeqN = false)
float sinTheta = sqrt(1.0 - cosTheta * cosTheta);
float3 localH = SphericalToCartesian(phi, sinTheta, cosTheta);
float3 localH = SphericalToCartesian(phi, cosTheta);
NdotH = cosTheta;

}
// ref: http://blog.selfshadow.com/publications/s2012-shading-course/burley/s2012_pbs_disney_brdf_notes_v3.pdf p26
void ImportanceSampleAnisoGGXDir( float2 u,
float3 V,
float3 N,
float3 tangentX,
float3 tangentY,
float roughnessT,
float roughnessB,
out float3 H,
out float3 L)
void SampleAnisoGGXDir(float2 u,
float3 V,
float3 N,
float3 tangentX,
float3 tangentY,
float roughnessT,
float roughnessB,
out float3 H,
out float3 L)
// Local to world
// H = tangentX * H.x + tangentY * H.y + N * H.z;
// Convert sample from half angle to incident angle
L = 2.0 * saturate(dot(V, H)) * H - V;
}

out float NdotL,
out float weightOverPdf)
{
ImportanceSampleCosDir(u, localToWorld, L, NdotL);
SampleHemisphereCosine(u, localToWorld, L, NdotL);
// Importance sampling weight for each sample
// pdf = N.L / PI
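Spelling the weight out (a sketch of the reasoning behind the comment above):
    pdf(L)        = NdotL / PI
    Lambert BRDF  = albedo / PI        (the albedo is applied by the caller)
    weightOverPdf = (1 / PI) * NdotL / pdf = 1
so each sample simply contributes albedo * SampleEnv(L).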

out float weightOverPdf)
{
float NdotH;
ImportanceSampleGGXDir(u, V, localToWorld, roughness, L, NdotL, NdotH, VdotH);
SampleGGXDir(u, V, localToWorld, roughness, L, NdotL, NdotH, VdotH);
// Importance sampling weight for each sample
// pdf = D(H) * (N.H) / (4 * (L.H))
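Spelling the weight out (a sketch of the reasoning behind the comment above, with the Fresnel term left to the caller and LdotH == VdotH):
    pdf(L)        = D(H) * NdotH / (4 * LdotH)
    GGX BRDF      = D(H) * G(V, L) * F(VdotH) / (4 * NdotL * NdotV)
    weightOverPdf = BRDF * NdotL / (F * pdf) = G(V, L) * VdotH / (NdotV * NdotH)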

}
// weightOverPdf returns the weight (without the Fresnel term) over the pdf. The Fresnel term must be applied by the caller.
void ImportanceSampleAnisoGGX(
float2 u,
float3 V,
float3 N,
float3 tangentX,
float3 tangentY,
float roughnessT,
float roughnessB,
float NdotV,
out float3 L,
out float VdotH,
out float NdotL,
out float weightOverPdf)
void ImportanceSampleAnisoGGX(float2 u,
float3 V,
float3x3 localToWorld,
float roughnessT,
float roughnessB,
float NdotV,
out float3 L,
out float VdotH,
out float NdotL,
out float weightOverPdf)
float3 tangentX = localToWorld[0];
float3 tangentY = localToWorld[1];
float3 N = localToWorld[2];
ImportanceSampleAnisoGGXDir(u, V, N, tangentX, tangentY, roughnessT, roughnessB, H, L);
SampleAnisoGGXDir(u, V, N, tangentX, tangentY, roughnessT, roughnessB, H, L);
float NdotH = saturate(dot(N, H));
// Note: since L and V are symmetric around H, LdotH == VdotH

// ----------------------------------------------------------------------------
// Ref: Listing 18 in "Moving Frostbite to PBR" + https://knarkowicz.wordpress.com/2014/12/27/analytical-dfg-term-for-ibl/
float4 IntegrateGGXAndDisneyFGD(float3 V, float3 N, float roughness, uint sampleCount)
float4 IntegrateGGXAndDisneyFGD(float3 V, float3 N, float roughness, uint sampleCount = 4096)
{
float NdotV = saturate(dot(N, V));
float4 acc = float4(0.0, 0.0, 0.0, 0.0);

float3 L;
float NdotL, NdotH, VdotH;
ImportanceSampleGGXDir(u, V, localToWorld, roughness, L, NdotL, NdotH, VdotH, true);
SampleGGXDir(u, V, localToWorld, roughness, L, NdotL, NdotH, VdotH, true);
float mipLevel;

// This will blur the reflection.
// TODO: find a more accurate MIP bias function.
mipLevel = lerp(mipLevel, maxMipLevel, bias);
// All MIP map levels beyond UNITY_SPECCUBE_LOD_STEPS contain invalid data.
mipLevel = min(mipLevel, UNITY_SPECCUBE_LOD_STEPS);
}
if (NdotL > 0.0)
