
Merge pull request #1853 from Unity-Technologies/HDRP/staging

Merging Hdrp/staging
Branch: main
GitHub committed 6 years ago
Current commit 9a93b084
Showing 14 changed files with 383 additions and 187 deletions
  1. com.unity.render-pipelines.core/CoreRP/ShaderLibrary/CommonLighting.hlsl (34 changes)
  2. com.unity.render-pipelines.core/CoreRP/ShaderLibrary/CommonMaterial.hlsl (18 changes)
  3. com.unity.render-pipelines.high-definition/CHANGELOG.md (12 changes)
  4. com.unity.render-pipelines.high-definition/HDRP/Editor/AssetProcessors/NormalMapFilteringTexturePostprocessor.cs.meta (2 changes)
  5. com.unity.render-pipelines.high-definition/HDRP/Editor/Lighting/HDLightExplorerExtension.cs (2 changes)
  6. com.unity.render-pipelines.high-definition/HDRP/Material/LayeredLit/LayeredLitData.hlsl (6 changes)
  7. com.unity.render-pipelines.high-definition/HDRP/Material/Lit/Lit.hlsl (8 changes)
  8. com.unity.render-pipelines.high-definition/HDRP/Material/Lit/LitData.hlsl (20 changes)
  9. com.unity.render-pipelines.high-definition/HDRP/Material/Lit/LitDataIndividualLayer.hlsl (16 changes)
  10. com.unity.render-pipelines.high-definition/HDRP/Material/MaterialEvaluation.hlsl (40 changes)
  11. com.unity.render-pipelines.high-definition/HDRP/RenderPipeline/HDRenderPipeline.cs (16 changes)
  12. com.unity.render-pipelines.high-definition/HDRP/Editor/AssetProcessors/NormalMapFilteringTexturePostprocessor.cs (272 changes)
  13. com.unity.render-pipelines.high-definition/HDRP/Editor/AssetProcessors/NormalMapAverageLengthTexturePostprocessor.cs (124 changes)
  14. /com.unity.render-pipelines.high-definition/HDRP/Editor/AssetProcessors/NormalMapFilteringTexturePostprocessor.cs.meta (renamed, 0 changes)

com.unity.render-pipelines.core/CoreRP/ShaderLibrary/CommonLighting.hlsl (34 changes)


return area;
}
// ref: Practical Realtime Strategies for Accurate Indirect Occlusion
// http://blog.selfshadow.com/publications/s2016-shading-course/#course_content
// Original Cone-Cone method with cosine weighted assumption (p129 s2016_pbs_activision_occlusion)
real GetSpecularOcclusionFromBentAO(real3 V, real3 bentNormalWS, real3 normalWS, real ambientOcclusion, real roughness)
{
// Retrieve cone angle
// Ambient occlusion is cosine weighted, thus use the following equation. See slide 129
real cosAv = sqrt(1.0 - ambientOcclusion);
roughness = max(roughness, 0.01); // Clamp to 0.01 to avoid edge cases
real cosAs = exp2((-log(10.0) / log(2.0)) * Sq(roughness));
real cosB = dot(bentNormalWS, reflect(-V, normalWS));
return SphericalCapIntersectionSolidArea(cosAv, cosAs, cosB) / (TWO_PI * (1.0 - cosAs));
}
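
For reference, a minimal C# transcription of this cone-cone estimator (illustration only; the smoothstep-based cap-intersection helper below is an assumed stand-in for the HLSL helper SphericalCapIntersectionSolidArea, whose body is not part of this hunk):

using UnityEngine;

public static class SpecularOcclusionSketch
{
    // Assumed smoothstep-based approximation of the spherical-cap intersection.
    static float CapIntersectionArea(float cosC1, float cosC2, float cosB)
    {
        float r1 = Mathf.Acos(cosC1);                 // aperture of the visibility cone
        float r2 = Mathf.Acos(cosC2);                 // aperture of the specular cone
        float d  = Mathf.Acos(cosB);                  // angle between the cone axes
        float smallCap = 2.0f * Mathf.PI * (1.0f - Mathf.Max(cosC1, cosC2));
        if (d <= Mathf.Abs(r1 - r2)) return smallCap; // one cap fully inside the other
        if (d >= r1 + r2) return 0.0f;                // caps do not overlap
        float t = 1.0f - (d - Mathf.Abs(r1 - r2)) / (r1 + r2 - Mathf.Abs(r1 - r2));
        return smallCap * Mathf.SmoothStep(0.0f, 1.0f, t);
    }

    public static float FromBentAO(Vector3 V, Vector3 bentNormalWS, Vector3 normalWS,
                                   float ambientOcclusion, float roughness)
    {
        float cosAv = Mathf.Sqrt(1.0f - ambientOcclusion);   // visibility cone, slide 129
        roughness = Mathf.Max(roughness, 0.01f);             // same edge-case clamp as above
        float cosAs = Mathf.Pow(2.0f, (-Mathf.Log(10.0f) / Mathf.Log(2.0f)) * roughness * roughness);
        float cosB = Vector3.Dot(bentNormalWS, Vector3.Reflect(-V, normalWS));
        return CapIntersectionArea(cosAv, cosAs, cosB) / (2.0f * Mathf.PI * (1.0f - cosAs));
    }
}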
// Ref: Steve McAuley - Energy-Conserving Wrapped Diffuse
real ComputeWrappedDiffuseLighting(real NdotL, real w)
{

real3 localY = cross(localZ, localX);
return real3x3(localX, localY, localZ);
}
// Construct a right-handed view-dependent orthogonal basis around the normal:
// b0 and b2 span the view-normal plane, aka the reflection plane.
real3x3 GetOrthoBasisViewNormal(real3 V, real3 N, real unclampedNdotV, bool testSingularity = false)
{
real3x3 orthoBasisViewNormal;
if (testSingularity && (abs(1.0 - unclampedNdotV) <= FLT_EPS))
{
// In this case N == V, and azimuth orientation around N shouldn't matter for the caller,
// we can use any quaternion-based method, like Frisvad or Reynold's (Pixar):
orthoBasisViewNormal = GetLocalFrame(N);
}
else
{
orthoBasisViewNormal[0] = normalize(V - N * unclampedNdotV);
orthoBasisViewNormal[2] = N;
orthoBasisViewNormal[1] = cross(orthoBasisViewNormal[2], orthoBasisViewNormal[0]);
}
return orthoBasisViewNormal;
}
#endif // UNITY_COMMON_LIGHTING_INCLUDED
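
A minimal C# sketch of the same basis construction, returning the rows b0..b2 as three vectors; Unity's quaternion utilities stand in for GetLocalFrame in the N == V fallback, and the epsilon is an assumed stand-in for FLT_EPS:

using UnityEngine;

public static class OrthoBasisSketch
{
    // b0 and b2 span the view-normal plane; the frame is right-handed.
    public static void GetOrthoBasisViewNormal(Vector3 V, Vector3 N, float unclampedNdotV,
                                               out Vector3 b0, out Vector3 b1, out Vector3 b2,
                                               bool testSingularity = false)
    {
        if (testSingularity && Mathf.Abs(1.0f - unclampedNdotV) <= 1e-7f) // stands in for FLT_EPS
        {
            // N == V: azimuth around N doesn't matter, so any stable frame will do
            // (the HLSL version calls GetLocalFrame(N) here).
            Quaternion q = Quaternion.FromToRotation(Vector3.forward, N);
            b0 = q * Vector3.right;
            b2 = N;
            b1 = Vector3.Cross(b2, b0);
        }
        else
        {
            b0 = (V - N * unclampedNdotV).normalized; // tangent lying in the view-normal plane
            b2 = N;
            b1 = Vector3.Cross(b2, b0);               // completes the right-handed frame
        }
    }
}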

com.unity.render-pipelines.core/CoreRP/ShaderLibrary/CommonMaterial.hlsl (18 changes)


void ConvertRoughnessToAnisotropy(real roughnessT, real roughnessB, out real anisotropy)
{
anisotropy = ((roughnessT - roughnessB) / (roughnessT + roughnessB + 0.0001));
anisotropy = ((roughnessT - roughnessB) / max(roughnessT + roughnessB, 0.0001));
}
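
A quick standalone check of this fix (a C# sketch; the names are ours): adding an epsilon to the denominator biases every result slightly, whereas max() only engages in the degenerate zero-roughness case.

using UnityEngine;

public static class AnisotropyFixCheck
{
    // Returns (biasedResult, guardedResult) for the same inputs.
    public static Vector2 Compare(float roughnessT, float roughnessB)
    {
        float biased  = (roughnessT - roughnessB) / (roughnessT + roughnessB + 0.0001f);
        float guarded = (roughnessT - roughnessB) / Mathf.Max(roughnessT + roughnessB, 0.0001f);
        return new Vector2(biased, guarded); // e.g. (0.5, 0.1) -> (0.66656, 0.66667)
    }
}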
// Same as ConvertAnisotropyToRoughness but

return sqrt(2.0 / (variance + 2.0));
}
// Normal Map Filtering - This must match HDRP\Editor\AssetProcessors\NormalMapFilteringTexturePostprocessor.cs - highestVarianceAllowed (TODO: Move in core)
#define NORMALMAP_HIGHEST_VARIANCE 0.03125
float DecodeVariance(float gradientW)
{
return gradientW * NORMALMAP_HIGHEST_VARIANCE;
}
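
A round-trip sketch of the encoding in C#, assuming the importer-side clamp/remap shown later in NormalMapFilteringTexturePostprocessor.cs:

using UnityEngine;

public static class VarianceCodec
{
    const float HighestVarianceAllowed = 0.03125f; // must match NORMALMAP_HIGHEST_VARIANCE

    // Importer side: clamp to the allowed range, remap to [0,1] for storage.
    public static float Encode(float variance)
        => Mathf.Min(variance, HighestVarianceAllowed) / HighestVarianceAllowed;

    // Shader side: mirrors DecodeVariance() above.
    public static float Decode(float encoded)
        => encoded * HighestVarianceAllowed;

    // Decode(Encode(0.01f)) == 0.01f; anything above 0.03125 clamps to 0.03125.
}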
// Return modified perceptualSmoothness based on provided variance (obtained from GeometricNormalVariance + TextureNormalVariance)
float NormalFiltering(float perceptualSmoothness, float variance, float threshold)
{

return 1.0 - RoughnessToPerceptualRoughness(sqrt(squaredRoughness));
return RoughnessToPerceptualSmoothness(sqrt(squaredRoughness));
}
// Reference: Error Reduction and Simplification for Shading Anti-Aliasing

// like Toksvig.
float TextureNormalVariance(float avgNormalLength)
{
float variance = 0.0;
if (avgNormalLength < 1.0)
{
float avgNormLen2 = avgNormalLength * avgNormalLength;

// Relationship between gaussian lobe and vMF lobe is 2 * variance = 1 / (2 * kappa) = roughness^2
// (Equation 36 of Normal map filtering based on The Order : 1886 SIGGRAPH course notes implementation).
// So to get variance we must use variance = 1 / (4 * kappa)
return 0.25 * kappa;
variance = 0.25 / kappa;
return 0.0;
return variance;
}
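
A worked example of the vMF fit above as a standalone C# sketch (constants and control flow mirror the hunk; the class name is ours):

using UnityEngine;

public static class VmfVarianceExample
{
    // variance = 1 / (4 * kappa), with kappa = len * (3 - len^2) / (1 - len^2)
    public static float TextureNormalVariance(float avgNormalLength)
    {
        if (avgNormalLength >= 1.0f)
            return 0.0f; // unit-length average normal: nothing to filter
        float len2  = avgNormalLength * avgNormalLength;
        float kappa = avgNormalLength * (3.0f - len2) / (1.0f - len2);
        return 0.25f / kappa; // e.g. len = 0.95 -> kappa ~= 20.4, variance ~= 0.0122
    }
}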
float TextureNormalFiltering(float perceptualSmoothness, float avgNormalLength, float threshold)

com.unity.render-pipelines.high-definition/CHANGELOG.md (12 changes)


## [3.3.0-preview]
### Fixed
- Fix an issue where the screen was darkened when rendering the camera preview
- Fix the message informing the user that a platform is not supported to display the correct target platform
- Remove workaround for metal and vulkan in normal buffer encoding/decoding
- Fixed an issue with color picker not working in forward
### Changed
- Changed default reflection probe to be 256x256x6 and array size to be 64
## [3.2.0-preview]
### Added

- Fix planar reflections that were not working with tile/cluster (oblique matrix)
- Fix debug menu at runtime not working after the nested prefab PR came to trunk
- Fix scrolling issue in density volume
- Fix an issue where the screen was darkened when rendering the camera preview
- Fix the message informing the user that a platform is not supported to display the correct target platform
- Remove workaround for metal and vulkan in normal buffer encoding/decoding
### Changed
- Shader code refactor: Split the MaterialUtilities file into two parts: BuiltinUtilities (independent of FragInputs) and MaterialUtilities (dependent on FragInputs)

com.unity.render-pipelines.high-definition/HDRP/Editor/AssetProcessors/NormalMapFilteringTexturePostprocessor.cs.meta (2 changes)


fileFormatVersion: 2
guid: 3ceae5765b6bbee4fb4f76acdaae9130
guid: db58d0243609fae48a1a8f3460bdbd4c
MonoImporter:
externalObjects: {}
serializedVersion: 2

com.unity.render-pipelines.high-definition/HDRP/Editor/Lighting/HDLightExplorerExtension.cs (2 changes)


lightDataPairing[light].hdAdditionalLightData.intensity = intensity;
}
}),
new LightingExplorerTableColumn(LightingExplorerTableColumn.DataType.Float, HDStyles.Intensity, "m_Intensity", 60, (r, prop, dep) => // 9: Unit
new LightingExplorerTableColumn(LightingExplorerTableColumn.DataType.Float, HDStyles.Unit, "m_Intensity", 60, (r, prop, dep) => // 9: Unit
{
Light light = prop.serializedObject.targetObject as Light;
LightUnit unit = lightDataPairing[light].hdAdditionalLightData.lightUnit;

com.unity.render-pipelines.high-definition/HDRP/Material/LayeredLit/LayeredLitData.hlsl (6 changes)


// If the user provides a bent normal then we compute a better term
#if (defined(_BENTNORMALMAP0) || defined(_BENTNORMALMAP1) || defined(_BENTNORMALMAP2) || defined(_BENTNORMALMAP3)) && defined(_ENABLESPECULAROCCLUSION)
// If we have a bent normal and ambient occlusion, compute specular occlusion
surfaceData.specularOcclusion = GetSpecularOcclusionFromBentAO(V, bentNormalWS, surfaceData);
#ifdef SPECULAR_OCCLUSION_USE_SPTD
surfaceData.specularOcclusion = GetSpecularOcclusionFromBentAOPivot(V, bentNormalWS, surfaceData.normalWS, surfaceData.ambientOcclusion, PerceptualSmoothnessToPerceptualRoughness(surfaceData.perceptualSmoothness));
#else
surfaceData.specularOcclusion = GetSpecularOcclusionFromBentAO(V, bentNormalWS, surfaceData.normalWS, surfaceData.ambientOcclusion, PerceptualSmoothnessToRoughness(surfaceData.perceptualSmoothness));
#endif
#elif defined(_MASKMAP0) || defined(_MASKMAP1) || defined(_MASKMAP2) || defined(_MASKMAP3)
surfaceData.specularOcclusion = GetSpecularOcclusionFromAmbientOcclusion(dot(surfaceData.normalWS, V), surfaceData.ambientOcclusion, PerceptualSmoothnessToRoughness(surfaceData.perceptualSmoothness));
#else

com.unity.render-pipelines.high-definition/HDRP/Material/Lit/Lit.hlsl (8 changes)


preLightData.ltcTransformSpecular._m00_m02_m11_m20 = SAMPLE_TEXTURE2D_ARRAY_LOD(_LtcData, s_linear_clamp_sampler, uv, LTC_GGX_MATRIX_INDEX, 0);
// Construct a right-handed view-dependent orthogonal basis around the normal
preLightData.orthoBasisViewNormal[0] = normalize(V - N * preLightData.NdotV); // Do not clamp NdotV here
preLightData.orthoBasisViewNormal[2] = N;
preLightData.orthoBasisViewNormal[1] = cross(preLightData.orthoBasisViewNormal[2], preLightData.orthoBasisViewNormal[0]);
preLightData.orthoBasisViewNormal = GetOrthoBasisViewNormal(V, N, preLightData.NdotV);
preLightData.ltcTransformCoat = 0.0;
if (HasFlag(bsdfData.materialFeatures, MATERIALFEATUREFLAGS_LIT_CLEAR_COAT))

// If refraction is enabled we use the transmittanceMask to lerp between the current diffuse lighting and the refraction value
// Physically speaking, transmittanceMask should be 1, but for artistic reasons, we let the value vary
//
// Note we also transfer the refracted light (lighting.indirect.specularTransmitted) into diffuseLighting
// since we know it won't be further processed: it is called at the end of the LightLoop(), but doing this
// enables opacity to affect it (in ApplyBlendMode()) while the rest of specularLighting escapes it.
#if HAS_REFRACTION
diffuseLighting = lerp(diffuseLighting, lighting.indirect.specularTransmitted, bsdfData.transmittanceMask);
#endif

com.unity.render-pipelines.high-definition/HDRP/Material/Lit/LitData.hlsl (20 changes)


#include "HDRP/Material/MaterialUtilities.hlsl"
#include "HDRP/Material/Decal/DecalUtilities.hlsl"
// TODO: move this function to commonLighting.hlsl once it is validated to work correctly
float GetSpecularOcclusionFromBentAO(float3 V, float3 bentNormalWS, SurfaceData surfaceData)
{
// Retrieve cone angle
// Ambient occlusion is cosine weighted, thus use the following equation. See slide 129
float cosAv = sqrt(1.0 - surfaceData.ambientOcclusion);
float roughness = max(PerceptualSmoothnessToRoughness(surfaceData.perceptualSmoothness), 0.01); // Clamp to 0.01 to avoid edge cases
float cosAs = exp2((-log(10.0)/log(2.0)) * Sq(roughness));
float cosB = dot(bentNormalWS, reflect(-V, surfaceData.normalWS));
return SphericalCapIntersectionSolidArea(cosAv, cosAs, cosB) / (TWO_PI * (1.0 - cosAs));
}
//#include "HDRP/Material/SphericalCapPivot/SPTDistribution.hlsl"
//#define SPECULAR_OCCLUSION_USE_SPTD
// Struct that gathers UVMapping info of all layers + common calculations
// This is used to abstract the mapping, which can differ per layer

// If the user provides a bent normal then we compute a better term
#if defined(_BENTNORMALMAP) && defined(_ENABLESPECULAROCCLUSION)
// If we have a bent normal and ambient occlusion, compute specular occlusion
surfaceData.specularOcclusion = GetSpecularOcclusionFromBentAO(V, bentNormalWS, surfaceData);
#ifdef SPECULAR_OCCLUSION_USE_SPTD
surfaceData.specularOcclusion = GetSpecularOcclusionFromBentAOPivot(V, bentNormalWS, surfaceData.normalWS, surfaceData.ambientOcclusion, PerceptualSmoothnessToPerceptualRoughness(surfaceData.perceptualSmoothness));
#else
surfaceData.specularOcclusion = GetSpecularOcclusionFromBentAO(V, bentNormalWS, surfaceData.normalWS, surfaceData.ambientOcclusion, PerceptualSmoothnessToRoughness(surfaceData.perceptualSmoothness));
#endif
#elif defined(_MASKMAP)
surfaceData.specularOcclusion = GetSpecularOcclusionFromAmbientOcclusion(ClampNdotV(dot(surfaceData.normalWS, V)), surfaceData.ambientOcclusion, PerceptualSmoothnessToRoughness(surfaceData.perceptualSmoothness));
#else

com.unity.render-pipelines.high-definition/HDRP/Material/Lit/LitDataIndividualLayer.hlsl (16 changes)


#ifdef SURFACE_GRADIENT
normalTS += detailNormalTS * detailMask;
#else
normalTS = lerp(normalTS, BlendNormalRNM(normalTS, detailNormalTS), detailMask);
normalTS = lerp(normalTS, BlendNormalRNM(normalTS, detailNormalTS), detailMask); // todo: detailMask should lerp the angle of the quaternion rotation, not the normals
#endif
#endif
#else

surfaceData.baseColor = SAMPLE_UVMAPPING_TEXTURE2D(ADD_IDX(_BaseColorMap), ADD_ZERO_IDX(sampler_BaseColorMap), ADD_IDX(layerTexCoord.base)).rgb * ADD_IDX(_BaseColor).rgb;
#ifdef _DETAIL_MAP_IDX
// Goal: we want the detail albedo map to be able to darken down to black and brighten up to white the surface albedo.
// The scale controls the speed of the gradient. We simply remap detailAlbedo from [0..1] to [-1..1] then perform a lerp to black or white
// with a factor based on speed.
float albedoDetailSpeed = saturate(abs(detailAlbedo) * ADD_IDX(_DetailAlbedoScale));
float3 baseColorOverlay = lerp(sqrt(surfaceData.baseColor), (detailAlbedo < 0.0) ? float3(0.0, 0.0, 0.0) : float3(1.0, 1.0, 1.0), albedoDetailSpeed * albedoDetailSpeed);
baseColorOverlay *= baseColorOverlay;
// Lerp with details mask
surfaceData.baseColor = lerp(surfaceData.baseColor, saturate(baseColorOverlay), detailMask);
#endif

#ifdef _DETAIL_MAP_IDX
// See comment for baseColorOverlay
float smoothnessDetailSpeed = saturate(abs(detailSmoothness) * ADD_IDX(_DetailSmoothnessScale));
float smoothnessOverlay = lerp(surfaceData.perceptualSmoothness, (detailSmoothness < 0.0) ? 0.0 : 1.0, smoothnessDetailSpeed);
// Lerp with details mask
surfaceData.perceptualSmoothness = lerp(surfaceData.perceptualSmoothness, saturate(smoothnessOverlay), detailMask);
#endif

com.unity.render-pipelines.high-definition/HDRP/Material/MaterialEvaluation.hlsl (40 changes)


float3 indirectSpecularOcclusion;
};
void GetScreenSpaceAmbientOcclusion(float2 positionSS, float NdotV, float perceptualRoughness, float ambientOcclusionFromData, float specularOcclusionFromData, out AmbientOcclusionFactor aoFactor)
// Get screen space ambient occlusion only:
float GetScreenSpaceDiffuseOcclusion(float2 positionSS)
// Ambient occlusion used for indirect lighting (reflection probe, baked diffuse lighting)
// Ambient occlusion used for direct lighting (directional, punctual, area)
float directAmbientOcclusion = lerp(1.0, indirectAmbientOcclusion, _AmbientOcclusionParam.w);
float directAmbientOcclusion = 1.0;
return indirectAmbientOcclusion;
}
void GetScreenSpaceAmbientOcclusion(float2 positionSS, float NdotV, float perceptualRoughness, float ambientOcclusionFromData, float specularOcclusionFromData, out AmbientOcclusionFactor aoFactor)
{
float indirectAmbientOcclusion = GetScreenSpaceDiffuseOcclusion(positionSS);
float directAmbientOcclusion = lerp(1.0, indirectAmbientOcclusion, _AmbientOcclusionParam.w);
float roughness = PerceptualRoughnessToRoughness(perceptualRoughness);
float specularOcclusion = GetSpecularOcclusionFromAmbientOcclusion(ClampNdotV(NdotV), indirectAmbientOcclusion, roughness);

aoFactor.directAmbientOcclusion = lerp(_AmbientOcclusionParam.rgb, float3(1.0, 1.0, 1.0), directAmbientOcclusion);
}
// Use GTAOMultiBounce approximation for ambient occlusion (allows getting a tint from the diffuseColor)
// Note: When we ImageLoad outside of the texture size, the value returned by Load is 0 (Note: on Metal it may clamp to the texture's edge value, which is also fine)
// We use this property to have a neutral value for AO that doesn't consume a sampler and also works with compute shaders (i.e. using ImageLoad)
// We store inverse AO, so neutral is black. So whether we sample inside or outside the texture, it returns 0 in the neutral case
// Ambient occlusion used for indirect lighting (reflection probe, baked diffuse lighting)
#ifndef _SURFACE_TYPE_TRANSPARENT
float indirectAmbientOcclusion = 1.0 - LOAD_TEXTURE2D(_AmbientOcclusionTexture, positionSS).x;
// Ambient occlusion used for direct lighting (directional, punctual, area)
float indirectAmbientOcclusion = GetScreenSpaceDiffuseOcclusion(positionSS);
#else
float indirectAmbientOcclusion = 1.0;
float directAmbientOcclusion = 1.0;
#endif
float roughness = PerceptualRoughnessToRoughness(perceptualRoughness);
float specularOcclusion = GetSpecularOcclusionFromAmbientOcclusion(ClampNdotV(NdotV), indirectAmbientOcclusion, roughness);
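
For context, a C# sketch of the GTAOMultiBounce fit referenced above, using the cubic coefficients published in "Practical Realtime Strategies for Accurate Indirect Occlusion"; the exact HDRP implementation is not shown in this diff, so treat this as the paper's version:

using UnityEngine;

public static class GtaoMultiBounceSketch
{
    public static Vector3 MultiBounce(float visibility, Vector3 albedo)
    {
        Vector3 a =  2.0404f * albedo - 0.3324f * Vector3.one;
        Vector3 b = -4.7951f * albedo + 0.6417f * Vector3.one;
        Vector3 c =  2.7552f * albedo + 0.3480f * Vector3.one;
        float x = visibility;
        Vector3 poly = ((x * a + b) * x + c) * x;  // per-channel cubic in x
        return Vector3.Max(x * Vector3.one, poly); // never darker than the raw AO
    }
}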

void ApplyAmbientOcclusionFactor(AmbientOcclusionFactor aoFactor, inout BuiltinData builtinData, inout AggregateLighting lighting)
{
// Note: in case of deferred Lit, builtinData.bakeDiffuseLighting contains indirect diffuse * surfaceData.ambientOcclusion + emissive,
// so emissive is affected by SSAO and we get a double darkening from SSAO and from AO, which is incorrect, but we accept the tradeoff
// Note: In case of deferred Lit, builtinData.bakeDiffuseLighting contains indirect diffuse * surfaceData.ambientOcclusion + emissive,
// so SSAO is multiplied by emissive which is wrong.
// Also, we have double occlusion for diffuse lighting since it already had precomputed AO (aka "FromData") applied
// (the * surfaceData.ambientOcclusion above)
// This is a tradeoff to avoid storing the precomputed (from data) AO in the GBuffer.
// (This is also why GetScreenSpaceAmbientOcclusion*() is effectively called with AOFromData = 1.0 in Lit:PostEvaluateBSDF() in the
// deferred case since DecodeFromGBuffer will init bsdfData.ambientOcclusion to 1.0 and we will only have SSAO in the aoFactor here)
builtinData.bakeDiffuseLighting *= aoFactor.indirectAmbientOcclusion;
lighting.indirect.specularReflected *= aoFactor.indirectSpecularOcclusion;
lighting.direct.diffuse *= aoFactor.directAmbientOcclusion;

com.unity.render-pipelines.high-definition/HDRP/RenderPipeline/HDRenderPipeline.cs (16 changes)


public void ApplyDebugDisplaySettings(HDCamera hdCamera, CommandBuffer cmd)
{
// See ShaderPassForward.hlsl: for forward shaders, if DEBUG_DISPLAY is enabled and no DebugLightingMode or DebugMipMapMode
// modes have been set, lighting is automatically skipped (to avoid crashes due to the lighting RT not being set on consoles).
// However, debug modes like colorPickerModes and false color don't need DEBUG_DISPLAY and must work with the lighting.
// So we enable DEBUG_DISPLAY independently
// Enable globally the keyword DEBUG_DISPLAY on shaders that support it with multicompile
CoreUtils.SetKeyword(cmd, "DEBUG_DISPLAY", m_CurrentDebugDisplaySettings.IsDebugDisplayEnabled());
// enable globally the keyword DEBUG_DISPLAY on shaders that support it with multicompile
cmd.EnableShaderKeyword("DEBUG_DISPLAY");
// This is for texture streaming
m_CurrentDebugDisplaySettings.UpdateMaterials();

// The DebugNeedsExposure test allows us to set a neutral value if exposure is not needed. This way we don't need to make various tests inside shaders but only in this function.
cmd.SetGlobalFloat(HDShaderIDs._DebugExposure, m_CurrentDebugDisplaySettings.DebugNeedsExposure() ? lightingDebugSettings.debugExposure : 0.0f);
}
else
{
// TODO: Be sure that if there is no change in the state of this keyword, it doesn't imply any work on the CPU side! Otherwise we will need to save the state somewhere
cmd.DisableShaderKeyword("DEBUG_DISPLAY");
}
}
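
A minimal sketch of what a helper like CoreUtils.SetKeyword does, and why it replaces the Enable/Disable pair above (illustrative; the real implementation presumably lives in the CoreRP package):

using UnityEngine.Rendering;

public static class KeywordUtil
{
    // One call site instead of an if/else at every keyword toggle.
    public static void SetKeyword(CommandBuffer cmd, string keyword, bool state)
    {
        if (state)
            cmd.EnableShaderKeyword(keyword);
        else
            cmd.DisableShaderKeyword(keyword);
    }
}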

com.unity.render-pipelines.high-definition/HDRP/Editor/AssetProcessors/NormalMapFilteringTexturePostprocessor.cs (272 changes)


using System;
using UnityEngine;
using System.IO;
namespace UnityEditor.Experimental.Rendering.HDPipeline
{
public class NormalMapFilteringTexturePostprocessor : AssetPostprocessor
{
// This class will process a normal map and store the encoded variance (derived from the average normal length) in the blue or alpha channel
// The texture is saved as BC7.
// Tangent space normal map: BC7 RGB (normal xy - encoded variance)
// Object space normal map: BC7 RGBA (normal xyz - encoded variance)
static string s_Suffix = "_NF";
static string s_SuffixOS = "_OSNF"; // Suffix for object space case
bool IsAssetTaggedAsTangentSpaceNormalMap()
{
return Path.GetFileNameWithoutExtension(assetPath).EndsWith(s_Suffix, StringComparison.InvariantCultureIgnoreCase);
}
bool IsAssetTaggedAsObjectSpaceNormalMap()
{
return Path.GetFileNameWithoutExtension(assetPath).EndsWith(s_SuffixOS, StringComparison.InvariantCultureIgnoreCase);
}
void OnPreprocessTexture()
{
bool isNormalMapTangentSpace = IsAssetTaggedAsTangentSpaceNormalMap();
bool isNormalMapObjectSpace = isNormalMapTangentSpace ? false : IsAssetTaggedAsObjectSpaceNormalMap();
if (isNormalMapTangentSpace || isNormalMapObjectSpace)
{
// Make sure we don't convert as a normal map.
TextureImporter textureImporter = (TextureImporter)assetImporter;
textureImporter.convertToNormalmap = false;
//textureImporter.alphaSource = isNormalMapTangentSpace ? TextureImporterAlphaSource.None : TextureImporterAlphaSource.FromInput;
//bool inputHasAlphaChannel = textureImporter.DoesSourceTextureHaveAlpha();
// Hack:
// We need Unity to create an alpha channel when using object space normal maps!
// If it doesn't find one in the input, and we set TextureImporterAlphaSource.FromInput, it will ignore the alpha values we
// set with SetPixels in OnPostprocessTexture, even if the Texture2D format is e.g. RGBA.
// So here, we force it to create a dummy one in all cases (from gray scale)
textureImporter.alphaSource = isNormalMapTangentSpace ? TextureImporterAlphaSource.None : TextureImporterAlphaSource.FromGrayScale;
textureImporter.alphaIsTransparency = false;
textureImporter.mipmapEnabled = true;
textureImporter.textureCompression = TextureImporterCompression.CompressedHQ; // This is BC7 for Mac/PC
//textureImporter.textureCompression = TextureImporterCompression.Uncompressed;
// We can also force a format like TextureImporterFormat.BC6H or TextureImporterFormat.BC7:
var settings = textureImporter.GetPlatformTextureSettings("Standalone");
settings.format = TextureImporterFormat.BC7;
settings.overridden = true;
textureImporter.SetPlatformTextureSettings(settings);
textureImporter.isReadable = true;
// ...works without it, but the Unity docs say to set it if we need read access during OnPostprocessTexture:
// https://docs.unity3d.com/ScriptReference/AssetPostprocessor.OnPostprocessTexture.html
#pragma warning disable 618 // remove obsolete warning for this one
textureImporter.linearTexture = true; // Says deprecated but won't work without it.
#pragma warning restore 618
textureImporter.sRGBTexture = false; // But we're setting the new property just in case it changes later...
}
}
private static Color GetColor(Color[] source, int x, int y, int width, int height)
{
x = (x + width) % width; // for NPOT textures
y = (y + height) % height;
int index = y * width + x;
var c = source[index];
return c;
}
private static Vector3 GetNormal(Color[] source, int x, int y, int width, int height)
{
Vector3 n = (Vector4)GetColor(source, x, y, width, height);
n = 2.0f * n - Vector3.one;
n.Normalize();
return n;
}
private static Vector3 GetAverageNormal(Color[] source, int x, int y, int width, int height, int texelFootprintW, int texelFootprintH)
{
Vector3 averageNormal = new Vector3(0, 0, 0);
// Calculate the average color over the texel footprint.
for (int i = 0; i < texelFootprintH; ++i)
{
for (int j = 0; j < texelFootprintW; ++j)
{
averageNormal += GetNormal(source, x + j, y + i, width, height);
}
}
averageNormal /= (texelFootprintW * texelFootprintH);
return averageNormal;
}
// Converts averageNormalLength to variance and
// thresholds and remaps variance from [0, highestVarianceAllowed] to [0, 1]
private static float GetEncodedVariance(float averageNormalLength)
{
// Caution: This constant must be in sync with CommonMaterial.hlsl #define NORMALMAP_HIGHEST_VARIANCE
const float highestVarianceAllowed = 0.03125f; // 0.25 * 0.25 / 2 = 0.0625 / 2 = 0.03125;
// To decide to store or not the averageNormalLength directly we need to consider:
//
// 1) useful range vs block compression and bit encoding of that range,
// 2) the possibly nonlinear conversion to variance,
// 3) computation in the shader.
//
// For 2) we need something that can be linearly filtered by the hardware as much as possible.
// Averages of length(average normal) are obviously not equal to length(average of normals)
// (that's the point of normal map inferred-NDF filtering vs just using mip-mapping hardware on normal maps),
// and the formula to get to variance via the vMF lobe fit is also quite nonlinear, although not
// everywhere. We show below that the most useful part of this fit (near the 1.0 end of the length
// of the average normal) is linear and so if we would store and filter averageNormalLength anyway
// but limit our range to that part, we could just store and filter directly the variance in that
// range too. (Note though that moments are linearly filterable cf LEAN, LEADR).
// For 1), compression can further compound artifacts too so we need to consider the useful range.
//
// We recall:
//
// Ref: Frequency Domain Normal Map Filtering - http://www.cs.columbia.edu/cg/normalmap/normalmap.pdf
// (equation 21)
// The relationship between the standard deviation of a Gaussian distribution and
// the roughness parameter of a Beckmann distribution is: roughness^2 = 2 * variance
// Ref: Filtering Distributions of Normals for Shading Antialiasing, equation just after (14).
// Relationship between gaussian lobe and vMF lobe is 2 * variance = 1 / (2 * kappa) = roughness^2
// (Equation 36 of Normal map filtering based on The Order : 1886 SIGGRAPH course notes implementation).
//
// So to get variance we must use variance = 1 / (4 * kappa)
// where
// kappa = (averageNormalLength*(3.0f - averageNormalLengthSquared)) / (1.0f - averageNormalLengthSquared);
//
float averageNormalLengthSquared = averageNormalLength * averageNormalLength;
float variance = 0.0f;
if (averageNormalLengthSquared < 1.0f)
{
float kappa = (averageNormalLength * (3.0f - averageNormalLengthSquared)) / (1.0f - averageNormalLengthSquared);
variance = 1.0f / (4.0f * kappa);
}
// The variance as a function of (averageNormalLength) is quite steep near 0 length, and
// from about averageNormalLength = 0.376, variance stays under 0.2, and
// from about averageNormalLength = 0.603, variance stays under 0.1, and goes to 0 quite
// linearly as averageNormalLength goes to 1 with a slope of -1/4
// http://www.wolframalpha.com/input/?i=y(x)+:%3D+(1+-+x*x)%2F(4*(3x+-+x*x*x));+x+from+0+to+1
// Remember we do "+ min(2.0 * variance, threshold * threshold)" in NormalFiltering of CommonMaterial.hlsl
// to effectively limit the added_roughness^2
// when doing normal map filtering by modifying underlying BSDF roughness.
//
// An added variance of 0.1 gives an increase of roughness = sqrt(2 * 0.1) = 0.447, which is a huge increase already.
// An added variance of 0.03125 gives an increase of roughness = sqrt(2 * 0.03125) = 0.25, which is still a lot
//
// Also remember that we use a user specified threshold to effectively limit the added_roughness^2,
// as shown above with + min(2.0 * variance, threshold * threshold).
// We consider the relationship between the considered range of useful added variance vs that threshold:
// We have
// 2*variance_max_allowed = roughness_threshold_max_allowed^2
// variance_max_allowed = 0.5 * roughness_threshold_max_allowed^2
//
// Let's say we think an increased roughness^2 of threshold * threshold = 0.250^2 is enough such
// that we will never set the threshold in the UI to anything higher than 0.250.
// We then have:
//
// (0.250)^2 = (2 * variance_max_allowed)
// variance_max_allowed = 0.25*0.25 / 2 = 0.0625/2 = 0.03125
// 0.03125 = (1-xx)/(4*(3*x-x*x*x)) where x is lowestAverageNormalLengthAllowed
// http://www.wolframalpha.com/input/?i=0.03125+%3D++(1+-+x*x)%2F(4*(3x+-+x*x*x));+solve+for+x+from+0+to+1
// which gives our constants:
//
// highestVarianceAllowed = 0.03125f
// lowestAverageNormalLengthAllowed = 0.8695f;
//
float encodedVariance = Math.Min(variance, highestVarianceAllowed) / highestVarianceAllowed;
return encodedVariance;
}
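
A numeric spot-check of the derivation above (editorial sketch, not part of the PR): plugging the quoted lowestAverageNormalLengthAllowed back into the variance formula recovers highestVarianceAllowed.

using System;
using UnityEngine;

public static class VarianceConstantsCheck
{
    public static void Verify()
    {
        const float x = 0.8695f; // lowestAverageNormalLengthAllowed
        float x2 = x * x;
        float kappa = (x * (3.0f - x2)) / (1.0f - x2);  // ~8.0
        float variance = 1.0f / (4.0f * kappa);         // ~0.03126
        Debug.Assert(Math.Abs(variance - 0.03125f) < 1e-3f); // matches the constant above
    }
}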
void OnPostprocessTexture(Texture2D texture)
{
bool isNormalMapTangentSpace = IsAssetTaggedAsTangentSpaceNormalMap();
bool isNormalMapObjectSpace = isNormalMapTangentSpace ? false : IsAssetTaggedAsObjectSpaceNormalMap();
if (isNormalMapTangentSpace || isNormalMapObjectSpace)
{
// Based on The Order : 1886 SIGGRAPH course notes implementation. Sample all normal map
// texels from the base mip level that are within the footprint of the current mipmap texel.
Color[] source = texture.GetPixels(0);
for (int m = 1; m < texture.mipmapCount; m++)
{
Color[] c = texture.GetPixels(m);
int mipWidth = Math.Max(1, texture.width >> m);
int mipHeight = Math.Max(1, texture.height >> m);
int texelFootprintW = texture.width / mipWidth;
int texelFootprintH = texture.height / mipHeight;
for (int y = 0; y < mipHeight; ++y)
{
for (int x = 0; x < mipWidth; ++x)
{
Vector3 averageNormal = GetAverageNormal(source, x * texelFootprintW, y * texelFootprintH,
texture.width, texture.height, texelFootprintW, texelFootprintH);
int outputPosition = y * mipWidth + x;
// Note: As an optimization we could check what is generated in the mipmap (as it is supposed to be the average already)
// TODO: Do some tests and see if it is equivalent; for now reprocess all normals from the top mip.
// Vector3 existingAverageNormal = (Vector4)c[outputPosition];
// existingAverageNormal = 2.0f * existingAverageNormal - Vector3.one;
// Clamp to avoid any issue (shouldn't be required but sanitizes the normal map if needed)
// We will also write the custom data into the blue channel to streamline the unpacking
// shader code to fetch a 2 channel normal in RG whether we use normal map filtering or not.
float averageNormalLength = Math.Max(0.0f, Math.Min(1.0f, averageNormal.magnitude));
float outputValue = GetEncodedVariance(averageNormalLength);
// Finally, note that since we need to add custom data in a map channel, we can't use the Unity
// importer UI settings TextureType == NormalMap, since it leaves channel control to Unity in
// that case and will make it interpret the x,y,z input as the normal to compress (which it might
// do eg in 2 channels BC5)
//
// We need to normalize the resulting average normal and store the x,y components in the
// proper (n + 1)/2 range encoded values in the R,G channels:
averageNormal.Normalize();
c[outputPosition].r = (averageNormal.x + 1.0f) / 2.0f;
c[outputPosition].g = (averageNormal.y + 1.0f) / 2.0f;
if (isNormalMapTangentSpace)
{
c[outputPosition].b = outputValue;
c[outputPosition].a = 1.0f;
}
else
{
// Object space normal map needs 3 channels
c[outputPosition].b = (averageNormal.z + 1.0f) / 2.0f;
c[outputPosition].a = outputValue;
}
}
}
texture.SetPixels(c, m);
}
// Now overwrite the first mip's average-normal channel - order is important since above we read mip 0
// For mip 0, set the normal length to 1.
{
Color[] c = texture.GetPixels(0);
float outputValue = GetEncodedVariance(1.0f);
for (int i = 0; i < c.Length; i++)
{
if (isNormalMapTangentSpace)
{
c[i].b = outputValue;
c[i].a = 1.0f;
}
else
{
c[i].a = outputValue;
}
}
texture.SetPixels(c, 0);
}
// Compression will be applied after this.
texture.Apply(updateMipmaps: false, makeNoLongerReadable: true);
}
}
}
}
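
A hypothetical convenience script (not part of this PR) showing how the suffix convention is exercised: force-reimporting existing "_NF" textures so the postprocessor above runs on assets imported before it existed. The menu path and class name are ours.

using UnityEditor;

public static class NormalFilteringReimport
{
    [MenuItem("Tools/Reimport _NF Normal Maps")] // hypothetical menu path
    static void Reimport()
    {
        // Name filter plus type filter; matches assets whose name contains "_NF".
        foreach (string guid in AssetDatabase.FindAssets("_NF t:Texture2D"))
        {
            string path = AssetDatabase.GUIDToAssetPath(guid);
            AssetDatabase.ImportAsset(path, ImportAssetOptions.ForceUpdate);
        }
    }
}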

com.unity.render-pipelines.high-definition/HDRP/Editor/AssetProcessors/NormalMapAverageLengthTexturePostprocessor.cs (124 changes)


using System;
using UnityEngine;
using System.IO;
namespace UnityEditor.Experimental.Rendering.HDPipeline
{
public class NormalMapAverageLengthTexturePostprocessor : AssetPostprocessor
{
// This class will process a normal map and add the value of the average normal length to the blue or alpha channel
// The texture is saved as BC7.
// Tangent space normal map: BC7 RGB (normal xy - average normal length)
// Object space normal map: BC7 RGBA (normal xyz - average normal length)
static string s_Suffix = "_NA";
//static string s_SuffixOS = "_OSNA"; // Suffix for object space case - TODO
void OnPreprocessTexture()
{
// Any texture with the _NA suffix will store the average normal length in alpha
if (Path.GetFileNameWithoutExtension(assetPath).EndsWith(s_Suffix, StringComparison.InvariantCultureIgnoreCase))
{
// Make sure we don't convert as a normal map.
TextureImporter textureImporter = (TextureImporter)assetImporter;
textureImporter.convertToNormalmap = false;
textureImporter.alphaSource = TextureImporterAlphaSource.None;
textureImporter.mipmapEnabled = true;
textureImporter.textureCompression = TextureImporterCompression.CompressedHQ; // This is BC7 for Mac/PC
#pragma warning disable 618 // remove obsolete warning for this one
textureImporter.linearTexture = true; // Says deprecated but won't work without it.
#pragma warning restore 618
textureImporter.sRGBTexture = false; // But we're setting the new property just in case it changes later...
}
}
private static Color GetColor(Color[] source, int x, int y, int width, int height)
{
x = (x + width) % width;
y = (y + height) % height;
int index = y * width + x;
var c = source[index];
return c;
}
private static Vector3 GetNormal(Color[] source, int x, int y, int width, int height)
{
Vector3 n = (Vector4)GetColor(source, x, y, width, height);
n = 2.0f * n - Vector3.one;
n.Normalize();
return n;
}
private static Vector3 GetAverageNormal(Color[] source, int x, int y, int width, int height, int texelFootprint)
{
Vector3 averageNormal = new Vector3(0, 0, 0);
// Calculate the average color over the texel footprint.
for (int i = 0; i < texelFootprint; ++i)
{
for (int j = 0; j < texelFootprint; ++j)
{
averageNormal += GetNormal(source, x + i, y + j, width, height);
}
}
averageNormal /= (texelFootprint * texelFootprint);
return averageNormal;
}
void OnPostprocessTexture(Texture2D texture)
{
if (Path.GetFileNameWithoutExtension(assetPath).EndsWith(s_Suffix, StringComparison.InvariantCultureIgnoreCase))
{
// Based on The Order : 1886 SIGGRAPH course notes implementation. Sample all normal map
// texels from the base mip level that are within the footprint of the current mipmap texel.
Color[] source = texture.GetPixels(0);
for (int m = 1; m < texture.mipmapCount; m++)
{
Color[] c = texture.GetPixels(m);
int mipWidth = Math.Max(1, texture.width >> m);
int mipHeight = Math.Max(1, texture.height >> m);
for (int x = 0; x < mipWidth; ++x)
{
for (int y = 0; y < mipHeight; ++y)
{
int texelFootprint = 1 << m;
Vector3 averageNormal = GetAverageNormal(source, x * texelFootprint, y * texelFootprint,
texture.width, texture.height, texelFootprint);
// Store the normal length for the average normal.
int outputPosition = y * mipWidth + x;
// Clamp to avoid any issue (TODO: Check this)
// Write into the blue channel
float averageNormalLength = Math.Max(0.0f, Math.Min(1.0f, averageNormal.magnitude));
c[outputPosition].b = averageNormalLength;
c[outputPosition].a = 1.0f;
}
}
texture.SetPixels(c, m);
}
// Now overwrite the first mip's average-normal channel - order is important since above we read mip 0
// For mip 0, set the normal length to 1.
{
Color[] c = texture.GetPixels(0);
for (int i = 0; i < c.Length; i++)
{
c[i].b = 1.0f;
c[i].a = 1.0f;
}
texture.SetPixels(c, 0);
}
}
}
}
}
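
A worked example of the footprint arithmetic used above (standalone sketch; the numbers are illustrative):

using System;
using UnityEngine;

public static class FootprintExample
{
    public static void Show()
    {
        int width = 512, m = 3;
        int mipWidth = Math.Max(1, width >> m); // 64 texels across at mip 3
        int texelFootprint = 1 << m;            // each averages an 8x8 block of mip 0
        // Output texel (x, y) averages the mip-0 texels starting at (x * 8, y * 8).
        Debug.Log($"mipWidth={mipWidth}, footprint={texelFootprint}");
    }
}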

/com.unity.render-pipelines.high-definition/HDRP/Editor/AssetProcessors/NormalMapAverageLengthTexturePostprocessor.cs.meta → /com.unity.render-pipelines.high-definition/HDRP/Editor/AssetProcessors/NormalMapFilteringTexturePostprocessor.cs.meta
