diff --git a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Direct3D11/None/2551_SSR.png b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Direct3D11/None/2551_SSR.png
index 5072855f828..11d290bcef7 100644
--- a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Direct3D11/None/2551_SSR.png
+++ b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Direct3D11/None/2551_SSR.png
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8683fa5746afb3e8639585cc601c1911884aa5cbdcea098428c36872a2046640
-size 147475
+oid sha256:235f591a18ba1f53e0144f5996fcfb91b3a2704ae65459618ece20bafcffd284
+size 146936
diff --git a/com.unity.render-pipelines.core/Runtime/Common/ComponentSingleton.cs b/com.unity.render-pipelines.core/Runtime/Common/ComponentSingleton.cs
index fb02eab90cb..993c1283d03 100644
--- a/com.unity.render-pipelines.core/Runtime/Common/ComponentSingleton.cs
+++ b/com.unity.render-pipelines.core/Runtime/Common/ComponentSingleton.cs
@@ -20,7 +20,7 @@ public static TType instance
         {
             if (s_Instance == null)
             {
-                GameObject go = new GameObject("Default " + typeof(TType)) { hideFlags = HideFlags.HideAndDontSave };
+                GameObject go = new GameObject("Default " + typeof(TType).Name) { hideFlags = HideFlags.HideAndDontSave };
                 go.SetActive(false);
                 s_Instance = go.AddComponent<TType>();
             }
@@ -28,5 +28,18 @@ public static TType instance
             return s_Instance;
         }
     }
+
+    /// <summary>
+    /// Release the component singleton.
+    /// </summary>
+    public static void Release()
+    {
+        if (s_Instance != null)
+        {
+            var go = s_Instance.gameObject;
+            CoreUtils.Destroy(go);
+            s_Instance = null;
+        }
+    }
 }
}
diff --git a/com.unity.render-pipelines.core/Runtime/Utilities/BitArray.cs b/com.unity.render-pipelines.core/Runtime/Utilities/BitArray.cs
index 252c36efed9..ad773481f85 100644
--- a/com.unity.render-pipelines.core/Runtime/Utilities/BitArray.cs
+++ b/com.unity.render-pipelines.core/Runtime/Utilities/BitArray.cs
@@ -9,35 +9,35 @@ namespace UnityEngine.Rendering
 /// </summary>
 public interface IBitArray
 {
-    /// <summary>Number of elements in the bit array.</summary>
+    /// <summary>Gets the capacity of this BitArray. This is the number of bits that are usable.</summary>
     uint capacity { get; }
-    /// <summary>True if all bits are 0.</summary>
+    /// <summary>Return `true` if all the bits of this BitArray are set to 0. Returns `false` otherwise.</summary>
     bool allFalse { get; }
-    /// <summary>True if all bits are 1.</summary>
+    /// <summary>Return `true` if all the bits of this BitArray are set to 1. Returns `false` otherwise.</summary>
     bool allTrue { get; }
     /// <summary>
-    /// Returns the state of the bit at a specific index.
+    /// An indexer that allows access to the bit at a given index. This provides both read and write access.
     /// </summary>
     /// <param name="index">Index of the bit.</param>
     /// <value>State of the bit at the provided index.</value>
     bool this[uint index] { get; set; }
-    /// <summary>Returns the bit array in a human readable form.</summary>
+    /// <summary>Writes the bits in the array in a human-readable form. This is as a string of 0s and 1s packed by 8 bits. This is useful for debugging.</summary>
     string humanizedData { get; }
     /// <summary>
-    /// Bit-wise And operation.
+    /// Perform an AND bitwise operation between this BitArray and the one you pass into the function and return the result. Both BitArrays must have the same capacity. This will not change current BitArray values.
     /// </summary>
-    /// <param name="other">Bit array with which to the And operation.</param>
+    /// <param name="other">BitArray with which to do the And operation.</param>
     /// <returns>The resulting bit array.</returns>
     IBitArray BitAnd(IBitArray other);
     /// <summary>
-    /// Bit-wise Or operation.
+    /// Perform an OR bitwise operation between this BitArray and the one you pass into the function and return the result. Both BitArrays must have the same capacity. This will not change current BitArray values.
     /// </summary>
-    /// <param name="other">Bit array with which to the Or operation.</param>
+    /// <param name="other">BitArray with which to do the Or operation.</param>
     /// <returns>The resulting bit array.</returns>
     IBitArray BitOr(IBitArray other);
     /// <summary>
-    /// Invert the bit array.
+    /// Return the BitArray with every bit inverted.
     /// </summary>
     /// <returns></returns>
     IBitArray BitNot();
diff --git a/com.unity.render-pipelines.high-definition/CHANGELOG.md b/com.unity.render-pipelines.high-definition/CHANGELOG.md
index 95651cd1f71..40d167030e9 100644
--- a/com.unity.render-pipelines.high-definition/CHANGELOG.md
+++ b/com.unity.render-pipelines.high-definition/CHANGELOG.md
@@ -9,6 +9,10 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
 ### Added
 - Add XR setting to control camera jitter for temporal effects #6259
 - Added an error message in the DrawRenderers custom pass when rendering opaque objects with an HDRP asset in DeferredOnly mode.
+- Added support for specular AA from geometric curvature in AxF
+- Added support for baked AO (no input for now) in AxF
+- Added an info box to warn about depth test artifacts when rendering an object twice in custom passes with MSAA.
+- Added support for rasterized area light shadows in StackLit

 ### Fixed
 - Fixed an issue where a dynamic sky changing any frame may not update the ambient probe.
@@ -39,6 +43,13 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
 - Fixed an issue where changing the default volume profile from another inspector would not update the default volume editor.
 - Fixed path validation when creating new volume profile (case 1229933)
 - Fix for range compression factor for probes going negative (now clamped to positive values).
+- Fixed various object leaks in HDRP.
+- Fix for assertion triggering sometimes when saving a newly created lit shader graph (case 1230996)
+- Fixed MSAA depth resolve when there are no motion vectors
+- Fix issue causing wrong planar reflection rendering when more than one camera is present.
+- Fixed culling of planar reflection probes that change position (case 1218651)
+- Fixed null reference when processing lightprobe (case 1235285)
+- Fix black screen in XR when HDRP package is present but not used.

 ### Changed
 - Rejecting history for ray traced reflections based on a threshold evaluated on the neighborhood of the sampled history.
@@ -98,6 +109,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
 - Fixed an issue with refraction model and ray traced recursive rendering (case 1198578).
 - Fixed cubemap thumbnail generation at project load time.
 - Half fixed shuriken particle light that cast shadows (only the first one will be correct)
+- Fixed an issue with the specularFGD term being used when the material has a clear coat (lit shader).

 ### Changed
 - Renamed the cubemap used for diffuse convolution to a more explicit name for the memory profiler.
@@ -263,6 +275,94 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
 - Fixed an issue with MipRatio debug mode showing _DebugMatCapTexture not being set.
 - Fixed missing initialization of input params in Blit for VR.
 - Fix Inf source in LTC for area lights.
+- Fix issue with AO being misaligned when multiple views are visible.
+- Fix issue that caused the clamp of camera rotation motion for motion blur to be ineffective.
+- Fixed issue with AssetPostprocessors dependencies causing models to be imported twice when upgrading the package version.
+- Fixed culling of lights with XR SDK
+- Fixed memory stomp in shadow caching code, leading to overflow of Shadow request array and runtime errors.
+- Fixed an issue related to transparent objects reading the ray traced indirect diffuse buffer
+- Fixed an issue with filtering ray traced area lights when the intensity is high or there is an exposure.
+- Fixed ill-formed include path in Depth Of Field shader.
+- Fixed shader graph and ray tracing after the shader target PR.
+- Fixed a bug in semi-transparent shadows (object further than the light casting shadows)
+- Fix state enabled of default volume profile when in package.
+- Fixed removal of MeshRenderer and MeshFilter on adding Light component.
+- Fixed Ray Traced SubSurface Scattering not working with ray traced area lights
+- Fixed Ray Traced SubSurface Scattering not working in forward mode.
+- Fixed a bug in debug light volumes.
+- Fixed a bug related to ray traced area light shadow history.
+- Fixed an issue where fog sky color mode could sample NaNs in the sky cubemap.
+- Fixed a leak in the PBR sky renderer.
+- Added a tooltip to the Ambient Mode parameter in the Visual Environment volume component.
+- Static lighting sky now takes the default volume into account (this fixes discrepancies between baked and realtime lighting).
+- Fixed a leak in the sky system.
+- Removed MSAA Buffers allocation when lit shader mode is set to "deferred only".
+- Fixed invalid cast for realtime reflection probes (case 1220504)
+- Fixed invalid game view rendering when disabling all cameras in the scene (case 1105163)
+- Hide reflection probes in the renderer components.
+- Fixed infinite reload loop while displaying Light's Shadow's Link Light Layer in Inspector of Prefab Asset.
+- Fixed the "culling was not disposed" error in the build log.
+- Fixed the cookie atlas size and planar atlas size being too big after an upgrade of the HDRP asset.
+- Fixed transparent SSR for shader graph.
+- Fixed an issue with emissive light meshes not being in the RAS.
+- Fixed DXR player build
+- Fixed the HDRP asset migration code not being called after an upgrade of the package
+- Fixed draw renderers custom pass out of bound exception
+- Fixed the PBR shader rendering in deferred
+- Fixed some typos in debug menu (case 1224594)
+- Fixed ray traced point and spot lights shadows not rejecting history when semi-transparent or colored.
+- Fixed a warning due to StaticLightingSky when reloading domain in some cases.
+- Fixed the MaxLightCount being displayed when the light volume debug menu is on ColorAndEdge.
+- Fixed issue with unclear naming of debug menu for decals.
+- Fixed z-fighting in scene view when scene lighting is off (case 1203927)
+- Fixed issue that prevented cubemap thumbnails from rendering.
+- Fixed ray tracing with VR single-pass
+- Fix an exception in ray tracing that happens if two LOD levels are using the same mesh renderer.
+- Fixed error in the console when switching shader to decal in the material UI.
+- Fixed an issue with refraction model and ray traced recursive rendering (case 1198578).
+- Fixed an issue where a dynamic sky changing any frame may not update the ambient probe.
+- Fixed cubemap thumbnail generation at project load time.
+- Fixed XR culling with multiple cameras
+- Fixed XR single-pass with Mock HMD plugin
+- Fixed sRGB mismatch with XR SDK
+- Fixed an issue where default volume would not update when switching profile.
+- Fixed issue with uncached reflection probe cameras resetting the debug mode (case 1224601)
+- Fixed an issue where AO override would not override specular occlusion.
+- Fixed an issue where Volume inspector might not refresh correctly in some cases.
+- Fixed render texture with XR
+- Fixed issue with resources being accessed before initialization process has been performed completely.
+- Half fixed shuriken particle light that cast shadows (only the first one will be correct)
+- Fixed issue with atmospheric fog turning black if a planar reflection probe is placed below ground level. (case 1226588)
+- Fixed custom pass GC alloc issue in CustomPassVolume.GetActiveVolumes().
+- Fixed a bug where instanced shadergraph shaders wouldn't compile on PS4.
+- Fixed an issue related to the envlightdatasrt not being bound in recursive rendering.
+- Fixed shadow cascade tooltip when using the metric mode (case 1229232)
+- Fixed how the area light influence volume is computed to match rasterization.
+- Focus on Decal uses the extents of the projectors
+- Fixed usage of light size data that are not available at runtime.
+- Fixed the depth buffer copy made before custom pass after opaque and normal injection point.
+- Fix for issue that prevented scene from being completely saved when baked reflection probes are present and lighting is set to auto generate.
+- Fixed drag area width at left of Light's intensity field in Inspector.
+- Fixed light type resolution when performing a reset on HDAdditionalLightData (case 1220931)
+- Fixed reliance on atan2 undefined behavior in motion vector debug shader.
+- Fixed a usage of a compute buffer not bound (1229964)
+- Fixed an issue where changing the default volume profile from another inspector would not update the default volume editor.
+- Fix issues in the post process system with RenderTexture being invalid in some cases, causing rendering problems.
+- Fixed an issue where unnecessarily serialized members in StaticLightingSky component would change each time the scene is changed.
+- Fixed a weird behavior in the scalable settings drawing when the space becomes tiny (1212045).
+- Fixed a regression in the ray traced indirect diffuse due to the new probe system.
+- Fix for range compression factor for probes going negative (now clamped to positive values).
+- Fixed path validation when creating new volume profile (case 1229933)
+- Fix reflection hierarchy for CARPAINT in AxF.
+- Fix precise fresnel for delta lights for SVBRDF in AxF.
+- Fixed the debug exposure mode for display sky reflection and debug view baked lighting
+- Fixed MSAA depth resolve when there are no motion vectors
+- Fixed various object leaks in HDRP.
+- Fixed compile error with XR SubsystemManager.
+- Fix for assertion triggering sometimes when saving a newly created lit shader graph (case 1230996)
+- Fixed culling of planar reflection probes that change position (case 1218651)
+- Fixed null reference when processing lightprobe (case 1235285)
+- Fix issue causing wrong planar reflection rendering when more than one camera is present.

 ### Changed
 - Hide unused LOD settings in Quality Settings legacy window.
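Usage note for the ComponentSingleton<TType>.Release method added in com.unity.render-pipelines.core/Runtime/Common/ComponentSingleton.cs at the top of this diff; several of the leak fixes listed in the changelog above follow the same destroy-and-null pattern. A minimal C# sketch of the acquire/release cycle: DummySingleton and the call site are hypothetical, only instance and Release() come from this changeset.

    using UnityEngine;
    using UnityEngine.Rendering;

    // Hypothetical component type; any Component-derived type can be TType.
    class DummySingleton : MonoBehaviour { }

    static class ComponentSingletonUsage
    {
        public static void Example()
        {
            // First access lazily creates a hidden, inactive "Default DummySingleton"
            // GameObject (HideFlags.HideAndDontSave) and adds the component to it.
            DummySingleton s = ComponentSingleton<DummySingleton>.instance;

            // ... use the component ...

            // Release() destroys the hidden GameObject and clears the cached
            // instance, so the next access to .instance recreates it. Calling it
            // on teardown avoids leaking the hidden object across pipeline reloads.
            ComponentSingleton<DummySingleton>.Release();
        }
    }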
diff --git a/com.unity.render-pipelines.high-definition/Documentation~/Glossary.md b/com.unity.render-pipelines.high-definition/Documentation~/Glossary.md
index 65bbdd53b8e..698fd6ac263 100644
--- a/com.unity.render-pipelines.high-definition/Documentation~/Glossary.md
+++ b/com.unity.render-pipelines.high-definition/Documentation~/Glossary.md
@@ -137,3 +137,12 @@ A function that describes a wave that represents the human eye’s relative sens
 #### punctual lights:
 A light is considered to be punctual if it emits light from a single point. HDRP's Spot and Point Lights are punctual.
+## Rendering Artifacts
+
+
+#### disocclusion
+A rendering artifact that occurs when a GameObject that was previously occluded becomes visible.
+
+
+#### ghosting
+A rendering artifact that occurs when a moving GameObject leaves a trail of pixels behind it.
\ No newline at end of file
diff --git a/com.unity.render-pipelines.high-definition/Documentation~/Light-Component.md b/com.unity.render-pipelines.high-definition/Documentation~/Light-Component.md
index f5029e94aaa..fd537c504a4 100644
--- a/com.unity.render-pipelines.high-definition/Documentation~/Light-Component.md
+++ b/com.unity.render-pipelines.high-definition/Documentation~/Light-Component.md
@@ -184,7 +184,7 @@ These settings define the volumetric behavior of this Light. Alter these setting
 ### **Shadows**

-Use this section to adjust the Shadows cast by this Light. Note that Area Lights can't currently cast shadows for GameObjects that use a **StackLit** Material.
+Use this section to adjust the Shadows cast by this Light.

 Unity exposes extra properties in this section depending on the **Mode** you set in the [General](#GeneralProperties) section. Unity also exposes extra properties depending on the **Filtering Quality** set in your Unity Project’s [HDRP Asset](HDRP-Asset.html).
diff --git a/com.unity.render-pipelines.high-definition/Documentation~/Shadows-in-HDRP.md b/com.unity.render-pipelines.high-definition/Documentation~/Shadows-in-HDRP.md
index 4ecd5cdca8b..2dede000014 100644
--- a/com.unity.render-pipelines.high-definition/Documentation~/Shadows-in-HDRP.md
+++ b/com.unity.render-pipelines.high-definition/Documentation~/Shadows-in-HDRP.md
@@ -53,9 +53,10 @@ Using high shadow bias values may result in light "leaking" through Meshes. This
 After HDRP captures a shadow map, it processes filtering on the map in order to decrease the aliasing effect that occurs on low resolution shadow maps. Different filters affect the perceived sharpness of shadows.

-To change which filter HDRP uses, change the **Filtering Quality** property in your Unity Project’s [HDRP Asset](HDRP-Asset.html). There are currently four filter quality presets for directional and punctual lights. For information on the available filter qualities, see the [Filtering Qualities table](HDRP-Asset.html#FilteringQualities).
+How you change which filter HDRP uses depends on which filter quality you want to use and whether your HDRP Project uses [forward or deferred rendering](Forward-And-Deferred-Rendering.md).

-Currently, if you want to use **High** quality (PCSS) filtering in [deferred](Forward-And-Deferred-Rendering.html) mode, you need to enable it in the [HDRP Config package](HDRP-Config-Package.html). For information on how to do this, see the [Example section](HDRP-Config-Package.html#Example) of the Config package documentation.
+* **Forward rendering**: Change the **Filtering Quality** property in your Unity Project’s [HDRP Asset](HDRP-Asset.html). This method works for every filter quality. There are currently three filter quality presets for directional and punctual lights. For information on the available filter qualities, see the [Filtering Qualities table](HDRP-Asset.html#FilteringQualities). +* **Deferred rendering**: For **Low** and **Medium** filter qualities, use the same method as forward rendering. If you want to use **High** quality (PCSS) filtering, you need to enable it in the [HDRP Config package](HDRP-Config-Package.html). For information on how to do this, see the [Example section](HDRP-Config-Package.html#Example) of the Config package documentation. ## Shadowmasks diff --git a/com.unity.render-pipelines.high-definition/Editor/AssetProcessors/ShaderGraphMaterialsUpdater.cs b/com.unity.render-pipelines.high-definition/Editor/AssetProcessors/ShaderGraphMaterialsUpdater.cs index 3a3f94a9084..770e36cd1ef 100644 --- a/com.unity.render-pipelines.high-definition/Editor/AssetProcessors/ShaderGraphMaterialsUpdater.cs +++ b/com.unity.render-pipelines.high-definition/Editor/AssetProcessors/ShaderGraphMaterialsUpdater.cs @@ -55,13 +55,13 @@ static void OnShaderGraphSaved(Shader shader, object saveContext) // Free the materials every 200 iterations, on big project loading all materials in memory can lead to a crash if ((i % 200 == 0) && i != 0) - EditorUtility.UnloadUnusedAssetsImmediate(false); + EditorUtility.UnloadUnusedAssetsImmediate(true); } } finally { EditorUtility.ClearProgressBar(); - EditorUtility.UnloadUnusedAssetsImmediate(false); + EditorUtility.UnloadUnusedAssetsImmediate(true); } } } diff --git a/com.unity.render-pipelines.high-definition/Editor/Material/AxF/AxFGUI.cs b/com.unity.render-pipelines.high-definition/Editor/Material/AxF/AxFGUI.cs index b18aceccd86..51c0ab3c4a5 100644 --- a/com.unity.render-pipelines.high-definition/Editor/Material/AxF/AxFGUI.cs +++ b/com.unity.render-pipelines.high-definition/Editor/Material/AxF/AxFGUI.cs @@ -26,7 +26,7 @@ class AxFGUI : ShaderGUI { new SurfaceOptionUIBlock(MaterialUIBlock.Expandable.Base, features: SurfaceOptionUIBlock.Features.Unlit | SurfaceOptionUIBlock.Features.ReceiveSSR), new AxfSurfaceInputsUIBlock(MaterialUIBlock.Expandable.Input), - new AdvancedOptionsUIBlock(MaterialUIBlock.Expandable.Advance, AdvancedOptionsUIBlock.Features.Instancing | AdvancedOptionsUIBlock.Features.AddPrecomputedVelocity), + new AdvancedOptionsUIBlock(MaterialUIBlock.Expandable.Advance, AdvancedOptionsUIBlock.Features.Instancing | AdvancedOptionsUIBlock.Features.SpecularOcclusion | AdvancedOptionsUIBlock.Features.AddPrecomputedVelocity), }; public override void OnGUI(MaterialEditor materialEditor, MaterialProperty[] props) @@ -46,7 +46,9 @@ public override void OnGUI(MaterialEditor materialEditor, MaterialProperty[] pro ///////////////////////////////////////////////////////////////////////////////////////////////// // AxF material keywords - static string m_AxF_BRDFTypeText = "_AxF_BRDFType"; + const string kAxF_BRDFType = "_AxF_BRDFType"; + const string kEnableGeometricSpecularAA = "_EnableGeometricSpecularAA"; + const string kSpecularOcclusionMode = "_SpecularOcclusionMode"; // match AdvancedOptionsUIBlock.kSpecularOcclusionMode : TODO move both to HDStringConstants. // All Setup Keyword functions must be static. 
It allows creating scripts that automatically update the shaders when the code changes.
    static public void SetupMaterialKeywordsAndPass(Material material)
@@ -54,7 +56,7 @@ static public void SetupMaterialKeywordsAndPass(Material material)
        material.SetupBaseUnlitKeywords();
        material.SetupBaseUnlitPass();

-       AxfBrdfType BRDFType = (AxfBrdfType)material.GetFloat(m_AxF_BRDFTypeText);
+       AxfBrdfType BRDFType = (AxfBrdfType)material.GetFloat(kAxF_BRDFType);

        CoreUtils.SetKeyword(material, "_AXF_BRDF_TYPE_SVBRDF", BRDFType == AxfBrdfType.SVBRDF);
        CoreUtils.SetKeyword(material, "_AXF_BRDF_TYPE_CAR_PAINT", BRDFType == AxfBrdfType.CAR_PAINT);
@@ -65,6 +67,8 @@ static public void SetupMaterialKeywordsAndPass(Material material)
        CoreUtils.SetKeyword(material, "_DISABLE_DECALS", decalsEnabled == false);
        bool ssrEnabled = material.HasProperty(kEnableSSR) && material.GetFloat(kEnableSSR) > 0.0f;
        CoreUtils.SetKeyword(material, "_DISABLE_SSR", ssrEnabled == false);
+       CoreUtils.SetKeyword(material, "_ENABLE_GEOMETRIC_SPECULAR_AA", material.HasProperty(kEnableGeometricSpecularAA) && material.GetFloat(kEnableGeometricSpecularAA) > 0.0f);
+       CoreUtils.SetKeyword(material, "_SPECULAR_OCCLUSION_NONE", material.HasProperty(kSpecularOcclusionMode) && material.GetFloat(kSpecularOcclusionMode) == 0.0f);

        BaseLitGUI.SetupStencil(material, receivesSSR: ssrEnabled, useSplitLighting: false);
diff --git a/com.unity.render-pipelines.high-definition/Editor/RenderPipeline/CustomPass/DrawRenderersCustomPassDrawer.cs b/com.unity.render-pipelines.high-definition/Editor/RenderPipeline/CustomPass/DrawRenderersCustomPassDrawer.cs
index 63d835a7615..0d28dd585c9 100644
--- a/com.unity.render-pipelines.high-definition/Editor/RenderPipeline/CustomPass/DrawRenderersCustomPassDrawer.cs
+++ b/com.unity.render-pipelines.high-definition/Editor/RenderPipeline/CustomPass/DrawRenderersCustomPassDrawer.cs
@@ -56,10 +56,11 @@ private class Styles
            public static string unlitShaderMessage = "HDRP Unlit shaders will force the shader passes to \"ForwardOnly\"";
            public static string hdrpLitShaderMessage = "HDRP Lit shaders are not supported in a custom pass";
            public static string opaqueObjectWithDeferred = "Your HDRP settings does not support ForwardOnly, some object might not render.";
+           public static string objectRendererTwiceWithMSAA = "MSAA is enabled, re-rendering the same object twice will cause depth test artifacts in Before/After Post Process injection points";
        }

        //Headers and layout
-       private int m_FilterLines = 3;
+       private int m_FilterLines = 2;
        private int m_MaterialLines = 2;

        // Foldouts
@@ -86,6 +87,8 @@ private class Styles
        ReorderableList m_ShaderPassesList;

+       CustomPassVolume m_Volume;
+
        bool customDepthIsNone => (CustomPass.TargetBuffer)m_TargetDepthBuffer.intValue == CustomPass.TargetBuffer.None;

        protected override void Initialize(SerializedProperty customPass)
@@ -112,6 +115,8 @@ protected override void Initialize(SerializedProperty customPass)
            m_DepthCompareFunction = customPass.FindPropertyRelative("depthCompareFunction");
            m_DepthWrite = customPass.FindPropertyRelative("depthWrite");

+           m_Volume = customPass.serializedObject.targetObject as CustomPassVolume;
+
            m_ShaderPassesList = new ReorderableList(null, m_ShaderPasses, true, true, true, true);

            m_ShaderPassesList.drawElementCallback =
@@ -132,6 +137,14 @@ protected override void Initialize(SerializedProperty customPass)

        protected override void DoPassGUI(SerializedProperty customPass, Rect rect)
        {
+           if (ShowMsaaObjectInfo())
+           {
+               Rect helpBoxRect = rect;
+               helpBoxRect.height = 
Styles.helpBoxHeight; + EditorGUI.HelpBox(helpBoxRect, Styles.objectRendererTwiceWithMSAA, MessageType.Info); + rect.y += Styles.helpBoxHeight; + } + DoFilters(ref rect); m_RendererFoldout.boolValue = EditorGUI.Foldout(rect, m_RendererFoldout.boolValue, Styles.renderHeader, true); @@ -156,7 +169,7 @@ protected override void DoPassGUI(SerializedProperty customPass, Rect rect) } } - // Tel if we need to show a warning for rendering opaque object and we're in deferred. + // Tell if we need to show a warning for rendering opaque object and we're in deferred. bool ShowOpaqueObjectWarning() { // Only opaque objects are concerned @@ -173,6 +186,18 @@ bool ShowOpaqueObjectWarning() return true; } + // Tell if we need to show the MSAA message info + bool ShowMsaaObjectInfo() + { + if (!HDRenderPipeline.currentAsset.currentPlatformRenderPipelineSettings.supportMSAA) + return false; + + if (m_Volume.injectionPoint != CustomPassInjectionPoint.AfterPostProcess && m_Volume.injectionPoint != CustomPassInjectionPoint.BeforePostProcess) + return false; + + return true; + } + void DoFilters(ref Rect rect) { m_FilterFoldout.boolValue = EditorGUI.Foldout(rect, m_FilterFoldout.boolValue, Styles.filtersHeader, true); @@ -296,9 +321,11 @@ protected override float GetPassHeight(SerializedProperty customPass) { float height = Styles.defaultLineSpace; + height += ShowMsaaObjectInfo() ? Styles.helpBoxHeight : 0; + if (m_FilterFoldout.boolValue) { - height *= m_FilterLines; + height += Styles.defaultLineSpace * m_FilterLines; height += ShowOpaqueObjectWarning() ? Styles.helpBoxHeight : 0; } diff --git a/com.unity.render-pipelines.high-definition/Runtime/Core/Textures/TextureCacheCubemap.cs b/com.unity.render-pipelines.high-definition/Runtime/Core/Textures/TextureCacheCubemap.cs index bb2ca768c52..82c52aec49a 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Core/Textures/TextureCacheCubemap.cs +++ b/com.unity.render-pipelines.high-definition/Runtime/Core/Textures/TextureCacheCubemap.cs @@ -189,7 +189,9 @@ public void Release() CoreUtils.Destroy(m_CubeBlitMaterial); } - m_Cache.Release(); + CoreUtils.Destroy(m_BlitCubemapFaceMaterial); + + CoreUtils.Destroy(m_Cache); } private bool TransferToPanoCache(CommandBuffer cmd, int sliceIndex, Texture[] textureArray) diff --git a/com.unity.render-pipelines.high-definition/Runtime/Lighting/LightLoop/LightLoop.cs b/com.unity.render-pipelines.high-definition/Runtime/Lighting/LightLoop/LightLoop.cs index 253b45ecd8a..a1b12726020 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Lighting/LightLoop/LightLoop.cs +++ b/com.unity.render-pipelines.high-definition/Runtime/Lighting/LightLoop/LightLoop.cs @@ -2409,17 +2409,17 @@ int PreprocessVisibleProbes(HDCamera hdCamera, CullingResults cullResults, HDPro { var probe = cullResults.visibleReflectionProbes[probeIndex]; + if (probe.reflectionProbe == null + || probe.reflectionProbe.Equals(null) || !probe.reflectionProbe.isActiveAndEnabled + || !aovRequest.IsLightEnabled(probe.reflectionProbe.gameObject)) + continue; + ref ProcessedProbeData processedData = ref m_ProcessedReflectionProbeData[probeIndex]; PreprocessReflectionProbeData(ref processedData, probe, hdCamera); if (TrivialRejectProbe(processedData, hdCamera)) continue; - if (probe.reflectionProbe == null - || probe.reflectionProbe.Equals(null) || !probe.reflectionProbe.isActiveAndEnabled - || !aovRequest.IsLightEnabled(probe.reflectionProbe.gameObject)) - continue; - // Work around the data issues. 
if (probe.localToWorldMatrix.determinant == 0)
            {
diff --git a/com.unity.render-pipelines.high-definition/Runtime/Lighting/Reflection/HDProbe.cs b/com.unity.render-pipelines.high-definition/Runtime/Lighting/Reflection/HDProbe.cs
index 9704c75d253..d7acda444b7 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/Lighting/Reflection/HDProbe.cs
+++ b/com.unity.render-pipelines.high-definition/Runtime/Lighting/Reflection/HDProbe.cs
@@ -248,7 +248,11 @@ public Texture SetTexture(ProbeSettings.Mode targetMode, Texture texture)
        /// </summary>
        public RenderData renderData => GetRenderData(mode);
        /// <summary>
-       /// Get the render data of a specific mode
+       /// Get the render data of a specific mode.
+       ///
+       /// Note: The HDProbe stores only one RenderData per mode, even for view-dependent probes with multiple viewers.
+       /// In that case, make sure that you have set the RenderData relative to the expected viewer before rendering.
+       /// Otherwise the data retrieved by this function will be wrong.
        /// </summary>
        /// <param name="targetMode">The mode to query</param>
        /// <returns>The requested render data</returns>
@@ -264,7 +268,10 @@ public RenderData GetRenderData(ProbeSettings.Mode targetMode)
            }
        }
        /// <summary>
-       /// Set the render data for a specific mode
+       /// Set the render data for a specific mode.
+       ///
+       /// Note: The HDProbe stores only one RenderData per mode, even for view-dependent probes with multiple viewers.
+       /// In that case, make sure that you have set the RenderData relative to the expected viewer before rendering.
        /// </summary>
        /// <param name="targetMode">The mode to update</param>
        /// <param name="renderData">The data to set</param>
diff --git a/com.unity.render-pipelines.high-definition/Runtime/Lighting/Reflection/HDProbeSystem.cs b/com.unity.render-pipelines.high-definition/Runtime/Lighting/Reflection/HDProbeSystem.cs
index 4dab12b248a..5a2bcc6dac5 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/Lighting/Reflection/HDProbeSystem.cs
+++ b/com.unity.render-pipelines.high-definition/Runtime/Lighting/Reflection/HDProbeSystem.cs
@@ -246,7 +246,7 @@ internal HDProbeCullState PrepareCull(Camera camera)
            if (m_PlanarProbeCullingGroup == null)
                return default;

-           RemoveDestroyedProbes(m_PlanarProbes, m_PlanarProbeBounds, ref m_PlanarProbeCount);
+           UpdateBoundsAndRemoveDestroyedProbes(m_PlanarProbes, m_PlanarProbeBounds, ref m_PlanarProbeCount);

            m_PlanarProbeCullingGroup.targetCamera = camera;
            m_PlanarProbeCullingGroup.SetBoundingSpheres(m_PlanarProbeBounds);
@@ -286,7 +286,7 @@ static void RemoveDestroyedProbes(List<HDProbe> probes)
            }
        }

-       static void RemoveDestroyedProbes(PlanarReflectionProbe[] probes, BoundingSphere[] bounds, ref int count)
+       static void UpdateBoundsAndRemoveDestroyedProbes(PlanarReflectionProbe[] probes, BoundingSphere[] bounds, ref int count)
        {
            for (int i = 0; i < count; ++i)
            {
@@ -297,6 +297,11 @@ static void RemoveDestroyedProbes(PlanarReflectionProbe[] probes, BoundingSphere
                    probes[count - 1] = null;
                    --count;
                }
+
+               if (probes[i])
+               {
+                   bounds[i] = probes[i].boundingSphere;
+               }
            }
        }
diff --git a/com.unity.render-pipelines.high-definition/Runtime/Lighting/Shadow/HDShadowManager.cs b/com.unity.render-pipelines.high-definition/Runtime/Lighting/Shadow/HDShadowManager.cs
index 159ac9e7d04..345b8f95eda 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/Lighting/Shadow/HDShadowManager.cs
+++ b/com.unity.render-pipelines.high-definition/Runtime/Lighting/Shadow/HDShadowManager.cs
@@ -257,6 +257,8 @@ partial class HDShadowManager : IDisposable
        int m_CascadeCount;
        int m_ShadowResolutionRequestCounter;

+       Material m_ClearShadowMaterial;
+
        private static HDShadowManager s_Instance = new HDShadowManager();
        public 
static HDShadowManager instance { get { return s_Instance; } } @@ -268,7 +270,7 @@ private HDShadowManager() public void InitShadowManager(RenderPipelineResources renderPipelineResources, DepthBits directionalShadowDepthBits, HDShadowInitParameters.HDShadowAtlasInitParams punctualLightAtlasInfo, HDShadowInitParameters.HDShadowAtlasInitParams areaLightAtlasInfo, int maxShadowRequests, Shader clearShader) { - Material clearMaterial = CoreUtils.CreateEngineMaterial(clearShader); + m_ClearShadowMaterial = CoreUtils.CreateEngineMaterial(clearShader); // Prevent the list from resizing their internal container when we add shadow requests m_ShadowDatas.Capacity = Math.Max(maxShadowRequests, m_ShadowDatas.Capacity); @@ -282,13 +284,13 @@ public void InitShadowManager(RenderPipelineResources renderPipelineResources, D } // The cascade atlas will be allocated only if there is a directional light - m_Atlas = new HDShadowAtlas(renderPipelineResources, punctualLightAtlasInfo.shadowAtlasResolution, punctualLightAtlasInfo.shadowAtlasResolution, HDShaderIDs._ShadowmapAtlas, HDShaderIDs._ShadowAtlasSize, clearMaterial, maxShadowRequests, depthBufferBits: punctualLightAtlasInfo.shadowAtlasDepthBits, name: "Shadow Map Atlas"); + m_Atlas = new HDShadowAtlas(renderPipelineResources, punctualLightAtlasInfo.shadowAtlasResolution, punctualLightAtlasInfo.shadowAtlasResolution, HDShaderIDs._ShadowmapAtlas, HDShaderIDs._ShadowAtlasSize, m_ClearShadowMaterial, maxShadowRequests, depthBufferBits: punctualLightAtlasInfo.shadowAtlasDepthBits, name: "Shadow Map Atlas"); // Cascade atlas render texture will only be allocated if there is a shadow casting directional light HDShadowAtlas.BlurAlgorithm cascadeBlur = GetDirectionalShadowAlgorithm() == DirectionalShadowAlgorithm.IMS ? HDShadowAtlas.BlurAlgorithm.IM : HDShadowAtlas.BlurAlgorithm.None; - m_CascadeAtlas = new HDShadowAtlas(renderPipelineResources, 1, 1, HDShaderIDs._ShadowmapCascadeAtlas, HDShaderIDs._CascadeShadowAtlasSize, clearMaterial, maxShadowRequests, cascadeBlur, depthBufferBits: directionalShadowDepthBits, name: "Cascade Shadow Map Atlas"); + m_CascadeAtlas = new HDShadowAtlas(renderPipelineResources, 1, 1, HDShaderIDs._ShadowmapCascadeAtlas, HDShaderIDs._CascadeShadowAtlasSize, m_ClearShadowMaterial, maxShadowRequests, cascadeBlur, depthBufferBits: directionalShadowDepthBits, name: "Cascade Shadow Map Atlas"); if (ShaderConfig.s_AreaLights == 1) - m_AreaLightShadowAtlas = new HDShadowAtlas(renderPipelineResources, areaLightAtlasInfo.shadowAtlasResolution, areaLightAtlasInfo.shadowAtlasResolution, HDShaderIDs._AreaLightShadowmapAtlas, HDShaderIDs._AreaShadowAtlasSize, clearMaterial, maxShadowRequests, HDShadowAtlas.BlurAlgorithm.EVSM, depthBufferBits: areaLightAtlasInfo.shadowAtlasDepthBits, name: "Area Light Shadow Map Atlas", momentAtlasShaderID: HDShaderIDs._AreaShadowmapMomentAtlas); + m_AreaLightShadowAtlas = new HDShadowAtlas(renderPipelineResources, areaLightAtlasInfo.shadowAtlasResolution, areaLightAtlasInfo.shadowAtlasResolution, HDShaderIDs._AreaLightShadowmapAtlas, HDShaderIDs._AreaShadowAtlasSize, m_ClearShadowMaterial, maxShadowRequests, HDShadowAtlas.BlurAlgorithm.EVSM, depthBufferBits: areaLightAtlasInfo.shadowAtlasDepthBits, name: "Area Light Shadow Map Atlas", momentAtlasShaderID: HDShaderIDs._AreaShadowmapMomentAtlas); m_ShadowDataBuffer = new ComputeBuffer(maxShadowRequests, System.Runtime.InteropServices.Marshal.SizeOf(typeof(HDShadowData))); m_DirectionalShadowDataBuffer = new ComputeBuffer(1, 
System.Runtime.InteropServices.Marshal.SizeOf(typeof(HDDirectionalShadowData))); @@ -822,6 +824,8 @@ public void Dispose() if (ShaderConfig.s_AreaLights == 1) m_AreaLightShadowAtlas.Release(); m_CascadeAtlas.Release(); + + CoreUtils.Destroy(m_ClearShadowMaterial); } } } diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxF.cs b/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxF.cs index b0fc37dadc1..2d161eea64e 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxF.cs +++ b/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxF.cs @@ -44,6 +44,13 @@ public enum FeatureFlags [GenerateHLSL(PackingRules.Exact, false, false, true, 1200)] public struct SurfaceData { + [MaterialSharedPropertyMapping(MaterialSharedProperty.AmbientOcclusion)] + [SurfaceDataAttributes("Ambient Occlusion")] + public float ambientOcclusion; + + [SurfaceDataAttributes("Specular Occlusion")] + public float specularOcclusion; + [MaterialSharedPropertyMapping(MaterialSharedProperty.Normal)] [SurfaceDataAttributes(new string[] {"Normal", "Normal View Space"}, true)] public Vector3 normalWS; @@ -64,7 +71,7 @@ public struct SurfaceData public Vector3 fresnelF0; [SurfaceDataAttributes("Specular Lobe")] - public Vector2 specularLobe; + public Vector3 specularLobe; // .xy for SVBRDF, .xyz for CARPAINT2, for _CarPaint2_CTSpreads per lobe roughnesses [SurfaceDataAttributes("Height")] public float height_mm; @@ -102,6 +109,9 @@ public struct SurfaceData [GenerateHLSL(PackingRules.Exact, false, false, true, 1250)] public struct BSDFData { + public float ambientOcclusion; + public float specularOcclusion; + [SurfaceDataAttributes(new string[] { "Normal WS", "Normal View Space" }, true)] public Vector3 normalWS; [SurfaceDataAttributes("", true)] @@ -113,7 +123,7 @@ public struct BSDFData public Vector3 diffuseColor; public Vector3 specularColor; public Vector3 fresnelF0; - public Vector2 roughness; + public Vector3 roughness; // .xy for SVBRDF, .xyz for CARPAINT2, for _CarPaint2_CTSpreads per lobe roughnesses public float height_mm; // Car Paint Variables diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxF.cs.hlsl b/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxF.cs.hlsl index 12b8d46ab0b..7362aefc55d 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxF.cs.hlsl +++ b/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxF.cs.hlsl @@ -18,53 +18,59 @@ // // UnityEngine.Rendering.HighDefinition.AxF+SurfaceData: static fields // -#define DEBUGVIEW_AXF_SURFACEDATA_NORMAL (1200) -#define DEBUGVIEW_AXF_SURFACEDATA_NORMAL_VIEW_SPACE (1201) -#define DEBUGVIEW_AXF_SURFACEDATA_TANGENT (1202) -#define DEBUGVIEW_AXF_SURFACEDATA_DIFFUSE_COLOR (1203) -#define DEBUGVIEW_AXF_SURFACEDATA_SPECULAR_COLOR (1204) -#define DEBUGVIEW_AXF_SURFACEDATA_FRESNEL_F0 (1205) -#define DEBUGVIEW_AXF_SURFACEDATA_SPECULAR_LOBE (1206) -#define DEBUGVIEW_AXF_SURFACEDATA_HEIGHT (1207) -#define DEBUGVIEW_AXF_SURFACEDATA_ANISOTROPIC_ANGLE (1208) -#define DEBUGVIEW_AXF_SURFACEDATA_FLAKES_UV (1209) -#define DEBUGVIEW_AXF_SURFACEDATA_FLAKES_MIP (1210) -#define DEBUGVIEW_AXF_SURFACEDATA_CLEARCOAT_COLOR (1211) -#define DEBUGVIEW_AXF_SURFACEDATA_CLEARCOAT_NORMAL (1212) -#define DEBUGVIEW_AXF_SURFACEDATA_CLEARCOAT_IOR (1213) -#define DEBUGVIEW_AXF_SURFACEDATA_GEOMETRIC_NORMAL (1214) -#define DEBUGVIEW_AXF_SURFACEDATA_GEOMETRIC_NORMAL_VIEW_SPACE (1215) +#define DEBUGVIEW_AXF_SURFACEDATA_AMBIENT_OCCLUSION (1200) 
+#define DEBUGVIEW_AXF_SURFACEDATA_SPECULAR_OCCLUSION (1201) +#define DEBUGVIEW_AXF_SURFACEDATA_NORMAL (1202) +#define DEBUGVIEW_AXF_SURFACEDATA_NORMAL_VIEW_SPACE (1203) +#define DEBUGVIEW_AXF_SURFACEDATA_TANGENT (1204) +#define DEBUGVIEW_AXF_SURFACEDATA_DIFFUSE_COLOR (1205) +#define DEBUGVIEW_AXF_SURFACEDATA_SPECULAR_COLOR (1206) +#define DEBUGVIEW_AXF_SURFACEDATA_FRESNEL_F0 (1207) +#define DEBUGVIEW_AXF_SURFACEDATA_SPECULAR_LOBE (1208) +#define DEBUGVIEW_AXF_SURFACEDATA_HEIGHT (1209) +#define DEBUGVIEW_AXF_SURFACEDATA_ANISOTROPIC_ANGLE (1210) +#define DEBUGVIEW_AXF_SURFACEDATA_FLAKES_UV (1211) +#define DEBUGVIEW_AXF_SURFACEDATA_FLAKES_MIP (1212) +#define DEBUGVIEW_AXF_SURFACEDATA_CLEARCOAT_COLOR (1213) +#define DEBUGVIEW_AXF_SURFACEDATA_CLEARCOAT_NORMAL (1214) +#define DEBUGVIEW_AXF_SURFACEDATA_CLEARCOAT_IOR (1215) +#define DEBUGVIEW_AXF_SURFACEDATA_GEOMETRIC_NORMAL (1216) +#define DEBUGVIEW_AXF_SURFACEDATA_GEOMETRIC_NORMAL_VIEW_SPACE (1217) // // UnityEngine.Rendering.HighDefinition.AxF+BSDFData: static fields // -#define DEBUGVIEW_AXF_BSDFDATA_NORMAL_WS (1250) -#define DEBUGVIEW_AXF_BSDFDATA_NORMAL_VIEW_SPACE (1251) -#define DEBUGVIEW_AXF_BSDFDATA_TANGENT_WS (1252) -#define DEBUGVIEW_AXF_BSDFDATA_BI_TANGENT_WS (1253) -#define DEBUGVIEW_AXF_BSDFDATA_DIFFUSE_COLOR (1254) -#define DEBUGVIEW_AXF_BSDFDATA_SPECULAR_COLOR (1255) -#define DEBUGVIEW_AXF_BSDFDATA_FRESNEL_F0 (1256) -#define DEBUGVIEW_AXF_BSDFDATA_ROUGHNESS (1257) -#define DEBUGVIEW_AXF_BSDFDATA_HEIGHT_MM (1258) -#define DEBUGVIEW_AXF_BSDFDATA_FLAKES_UV (1259) -#define DEBUGVIEW_AXF_BSDFDATA_FLAKES_MIP (1260) -#define DEBUGVIEW_AXF_BSDFDATA_CLEARCOAT_COLOR (1261) -#define DEBUGVIEW_AXF_BSDFDATA_CLEARCOAT_NORMAL_WS (1262) -#define DEBUGVIEW_AXF_BSDFDATA_CLEARCOAT_IOR (1263) -#define DEBUGVIEW_AXF_BSDFDATA_GEOMETRIC_NORMAL (1264) -#define DEBUGVIEW_AXF_BSDFDATA_GEOMETRIC_NORMAL_VIEW_SPACE (1265) +#define DEBUGVIEW_AXF_BSDFDATA_AMBIENT_OCCLUSION (1250) +#define DEBUGVIEW_AXF_BSDFDATA_SPECULAR_OCCLUSION (1251) +#define DEBUGVIEW_AXF_BSDFDATA_NORMAL_WS (1252) +#define DEBUGVIEW_AXF_BSDFDATA_NORMAL_VIEW_SPACE (1253) +#define DEBUGVIEW_AXF_BSDFDATA_TANGENT_WS (1254) +#define DEBUGVIEW_AXF_BSDFDATA_BI_TANGENT_WS (1255) +#define DEBUGVIEW_AXF_BSDFDATA_DIFFUSE_COLOR (1256) +#define DEBUGVIEW_AXF_BSDFDATA_SPECULAR_COLOR (1257) +#define DEBUGVIEW_AXF_BSDFDATA_FRESNEL_F0 (1258) +#define DEBUGVIEW_AXF_BSDFDATA_ROUGHNESS (1259) +#define DEBUGVIEW_AXF_BSDFDATA_HEIGHT_MM (1260) +#define DEBUGVIEW_AXF_BSDFDATA_FLAKES_UV (1261) +#define DEBUGVIEW_AXF_BSDFDATA_FLAKES_MIP (1262) +#define DEBUGVIEW_AXF_BSDFDATA_CLEARCOAT_COLOR (1263) +#define DEBUGVIEW_AXF_BSDFDATA_CLEARCOAT_NORMAL_WS (1264) +#define DEBUGVIEW_AXF_BSDFDATA_CLEARCOAT_IOR (1265) +#define DEBUGVIEW_AXF_BSDFDATA_GEOMETRIC_NORMAL (1266) +#define DEBUGVIEW_AXF_BSDFDATA_GEOMETRIC_NORMAL_VIEW_SPACE (1267) // Generated from UnityEngine.Rendering.HighDefinition.AxF+SurfaceData // PackingRules = Exact struct SurfaceData { + float ambientOcclusion; + float specularOcclusion; float3 normalWS; float3 tangentWS; float3 diffuseColor; float3 specularColor; float3 fresnelF0; - float2 specularLobe; + float3 specularLobe; float height_mm; float anisotropyAngle; float2 flakesUV; @@ -79,13 +85,15 @@ struct SurfaceData // PackingRules = Exact struct BSDFData { + float ambientOcclusion; + float specularOcclusion; float3 normalWS; float3 tangentWS; float3 biTangentWS; float3 diffuseColor; float3 specularColor; float3 fresnelF0; - float2 roughness; + float3 roughness; float height_mm; float2 flakesUV; float 
flakesMipLevel; @@ -102,6 +110,12 @@ void GetGeneratedSurfaceDataDebug(uint paramId, SurfaceData surfacedata, inout f { switch (paramId) { + case DEBUGVIEW_AXF_SURFACEDATA_AMBIENT_OCCLUSION: + result = surfacedata.ambientOcclusion.xxx; + break; + case DEBUGVIEW_AXF_SURFACEDATA_SPECULAR_OCCLUSION: + result = surfacedata.specularOcclusion.xxx; + break; case DEBUGVIEW_AXF_SURFACEDATA_NORMAL: result = surfacedata.normalWS * 0.5 + 0.5; break; @@ -123,7 +137,7 @@ void GetGeneratedSurfaceDataDebug(uint paramId, SurfaceData surfacedata, inout f result = surfacedata.fresnelF0; break; case DEBUGVIEW_AXF_SURFACEDATA_SPECULAR_LOBE: - result = float3(surfacedata.specularLobe, 0.0); + result = surfacedata.specularLobe; break; case DEBUGVIEW_AXF_SURFACEDATA_HEIGHT: result = surfacedata.height_mm.xxx; @@ -162,6 +176,12 @@ void GetGeneratedBSDFDataDebug(uint paramId, BSDFData bsdfdata, inout float3 res { switch (paramId) { + case DEBUGVIEW_AXF_BSDFDATA_AMBIENT_OCCLUSION: + result = bsdfdata.ambientOcclusion.xxx; + break; + case DEBUGVIEW_AXF_BSDFDATA_SPECULAR_OCCLUSION: + result = bsdfdata.specularOcclusion.xxx; + break; case DEBUGVIEW_AXF_BSDFDATA_NORMAL_WS: result = bsdfdata.normalWS * 0.5 + 0.5; break; @@ -184,7 +204,7 @@ void GetGeneratedBSDFDataDebug(uint paramId, BSDFData bsdfdata, inout float3 res result = bsdfdata.fresnelF0; break; case DEBUGVIEW_AXF_BSDFDATA_ROUGHNESS: - result = float3(bsdfdata.roughness, 0.0); + result = bsdfdata.roughness; break; case DEBUGVIEW_AXF_BSDFDATA_HEIGHT_MM: result = bsdfdata.height_mm.xxx; diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxF.hlsl b/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxF.hlsl index 7709d6c8176..96dd992632d 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxF.hlsl +++ b/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxF.hlsl @@ -375,6 +375,31 @@ float GetScalarRoughnessFromAnisoRoughness(float roughnessT, float roughnessB) return 0.5 * (roughnessT + roughnessB); } +float GetScalarRoughness(float3 roughness) +{ + float singleRoughness = 0.5; + +#if defined(_AXF_BRDF_TYPE_SVBRDF) + + singleRoughness = (HasAnisotropy()) ? GetScalarRoughnessFromAnisoRoughness(roughness.x, roughness.y) : roughness.x; + +#elif defined(_AXF_BRDF_TYPE_CAR_PAINT) + float sumCoeffXRoughness = 0.0; + float sumCoeff = 0.0; + UNITY_UNROLL + for (uint lobeIndex = 0; lobeIndex < CARPAINT2_LOBE_COUNT; lobeIndex++) // TODO remove all variable lobecnt code + { + float coeff = _CarPaint2_CTCoeffs[lobeIndex]; + float spread = roughness[lobeIndex]; + sumCoeff += coeff; + sumCoeffXRoughness += spread * coeff; + } + singleRoughness = min(1.0, SafeDiv(sumCoeffXRoughness,sumCoeff)); +#endif + + return singleRoughness; +} + NormalData ConvertSurfaceDataToNormalData(SurfaceData surfaceData) { NormalData normalData; @@ -389,31 +414,9 @@ NormalData ConvertSurfaceDataToNormalData(SurfaceData surfaceData) { normalData.normalWS = surfaceData.normalWS; -#if defined(_AXF_BRDF_TYPE_SVBRDF) - float roughness = (HasAnisotropy()) ? 
GetScalarRoughnessFromAnisoRoughness(surfaceData.specularLobe.x, surfaceData.specularLobe.y) : surfaceData.specularLobe.x; - normalData.perceptualRoughness = RoughnessToPerceptualRoughness(roughness); - -#elif defined(_AXF_BRDF_TYPE_CAR_PAINT) // Hack: try to get a "single equivalent" roughness - normalData.perceptualRoughness = 0.0; - - float sumCoeffXRoughness = 0.0; - float sumCoeff = 0.0; - - UNITY_UNROLL - for (uint lobeIndex = 0; lobeIndex < CARPAINT2_LOBE_COUNT; lobeIndex++) - { - float coeff = _CarPaint2_CTCoeffs[lobeIndex]; - float spread = _CarPaint2_CTSpreads[lobeIndex]; - - sumCoeff += coeff; - sumCoeffXRoughness += spread * coeff; - } - normalData.perceptualRoughness = RoughnessToPerceptualRoughness(min(1.0, SafeDiv(sumCoeffXRoughness,sumCoeff))); -#else - // This is only possible if the AxF is a BTF type. However, there is a bunch of ifdefs do not support this third case - normalData.perceptualRoughness = 0.0; -#endif + float roughness = GetScalarRoughness(surfaceData.specularLobe); + normalData.perceptualRoughness = RoughnessToPerceptualRoughness(roughness); } return normalData; @@ -487,6 +490,7 @@ float3 RefractSaturateToTIR(float3 incoming, float3 normal, float eta, out floa float sinThetaCrit = saturate(rcp(eta)); float cosThetaCrit = sqrt(1 - Sq(sinThetaCrit)); float3 incOrthoN = (incoming - c * normal) * /*normalize the ortho component:*/rcp(sqrt(sinIncSq)); + // Note: sqrt(sinIncSq) shouldn't be close to 0, since b < 0 <=> (sinIncSq) > 1/Sq(eta) and eta shouldn't be close to 1/sqrt(eps)! criticalDir = sinThetaCrit * incOrthoN + cosThetaCrit * normal; @@ -543,7 +547,7 @@ float CT_F(float H_V, float F0) return F0 + (1.0 - F0) * f_1_sub_cos_fifth; } -float MultiLobesCookTorrance(float NdotL, float NdotV, float NdotH, float VdotH) +float MultiLobesCookTorrance(BSDFData bsdfData, float NdotL, float NdotV, float NdotH, float VdotH) { // Ensure numerical stability if (NdotV < 0.00174532836589830883577820272085 || NdotL < 0.00174532836589830883577820272085) //sin(0.1 deg ) @@ -554,7 +558,7 @@ float MultiLobesCookTorrance(float NdotL, float NdotV, float NdotH, float VdotH { float F0 = _CarPaint2_CTF0s[lobeIndex]; float coeff = _CarPaint2_CTCoeffs[lobeIndex]; - float spread = _CarPaint2_CTSpreads[lobeIndex]; + float spread = bsdfData.roughness[lobeIndex]; // _CarPaint2_CTSpreads[lobeIndex]; specularIntensity += coeff * CT_D(NdotH, spread) * CT_F(VdotH, F0); } @@ -684,10 +688,14 @@ BSDFData ConvertSurfaceDataToBSDFData(uint2 positionSS, SurfaceData surfaceData) BSDFData bsdfData; // ZERO_INITIALIZE(BSDFData, data); + bsdfData.ambientOcclusion = surfaceData.ambientOcclusion; + bsdfData.specularOcclusion = surfaceData.specularOcclusion; + bsdfData.normalWS = surfaceData.normalWS; bsdfData.tangentWS = surfaceData.tangentWS; bsdfData.biTangentWS = cross(bsdfData.normalWS, bsdfData.tangentWS); + bsdfData.roughness = 0; //----------------------------------------------------------------------------- #ifdef _AXF_BRDF_TYPE_SVBRDF bsdfData.diffuseColor = surfaceData.diffuseColor; @@ -696,7 +704,7 @@ BSDFData ConvertSurfaceDataToBSDFData(uint2 positionSS, SurfaceData surfaceData) bsdfData.fresnelF0 = surfaceData.fresnelF0; // See AxfData.hlsl: the actual sampled texture is always 1 channel, if we ever find otherwise, we will use the others. bsdfData.height_mm = surfaceData.height_mm; - bsdfData.roughness = HasAnisotropy() ? surfaceData.specularLobe : surfaceData.specularLobe.xx; + bsdfData.roughness.xy = HasAnisotropy() ? 
surfaceData.specularLobe.xy : surfaceData.specularLobe.xx;

     bsdfData.clearcoatColor = surfaceData.clearcoatColor;
     bsdfData.clearcoatNormalWS = HasClearcoat() ? surfaceData.clearcoatNormalWS : surfaceData.normalWS;
@@ -717,7 +725,7 @@ BSDFData ConvertSurfaceDataToBSDFData(uint2 positionSS, SurfaceData surfaceData)
     bsdfData.specularColor = GetCarPaintSpecularColor();
     bsdfData.fresnelF0 = GetCarPaintFresnelF0();
-    bsdfData.roughness = 0;
+    bsdfData.roughness.xyz = surfaceData.specularLobe.xyz; // the latter stores per lobe possibly modified (for geometric specular AA) _CarPaint2_CTSpreads
     bsdfData.height_mm = 0;
 #endif
@@ -744,8 +752,8 @@ struct PreLightData
     float3 viewWS_UnderCoat;    // View vector after optional clear-coat refraction.

     // IBL
-    float3 iblDominantDirectionWS_UnderCoat;    // Dominant specular direction, used for IBL in EvaluateBSDF_Env()
+    float3 iblDominantDirectionWS_BottomLobeOnTop;    // Dominant specular direction, for the bottom lobe but as it exits on top, used for IBL in EvaluateBSDF_Env()
     float3 iblDominantDirectionWS_Clearcoat;    // Dominant specular direction, used for IBL in EvaluateBSDF_Env() and also in area lights when clearcoat is enabled
 #ifdef _AXF_BRDF_TYPE_SVBRDF
     float iblPerceptualRoughness;
     float3 specularFGD;
@@ -983,7 +991,7 @@ float3 CarPaint_BTF(float thetaH, float thetaD, BSDFData bsdfData)
 }
 #endif //...#if defined(_AXF_BRDF_TYPE_CAR_PAINT)

-float3 FindAverageBaseLobeDirOnTop(BSDFData bsdfData, PreLightData preLightData)
+float3 FindAverageBaseLobeDirOnTop(BSDFData bsdfData, PreLightData preLightData, out float3 lobeDirUndercoat)
 {
     float3 outDir;
@@ -1010,7 +1018,8 @@ float3 FindAverageBaseLobeDirOnTop(BSDFData bsdfData, PreLightData preLightData)
     float3 incomingSaturated;
     float rayIntensity;
     outDir = RefractSaturateToTIR(-vRefractedBottomReflected, -bsdfData.clearcoatNormalWS, bsdfData.clearcoatIOR, rayIntensity, incomingSaturated);
-#endif
+    lobeDirUndercoat = -incomingSaturated; // incoming is away from the top interface from under the surface so *-1 to reverse quadrant.
+#endif

     return outDir;
 }
@@ -1045,13 +1054,19 @@ PreLightData GetPreLightData(float3 viewWS_Clearcoat, PositionInputs posInput
     // Handle IBL + multiscattering
     // todo_dir:
     // todo_dir todo_modes todo_pseudorefract: cant use undercoat like that, but better than to lose the bottom normal effect for now...
-    preLightData.iblDominantDirectionWS_UnderCoat = reflect(-preLightData.viewWS_UnderCoat, bsdfData.normalWS);
+    float3 reflectedLobeDirUndercoat = reflect(-preLightData.viewWS_UnderCoat, bsdfData.normalWS);
+    preLightData.iblDominantDirectionWS_BottomLobeOnTop = reflectedLobeDirUndercoat;
     if (HasClearcoatAndRefraction())
     {
-        preLightData.iblDominantDirectionWS_UnderCoat = FindAverageBaseLobeDirOnTop(bsdfData, preLightData); // much better
+        preLightData.iblDominantDirectionWS_BottomLobeOnTop = FindAverageBaseLobeDirOnTop(bsdfData, preLightData, reflectedLobeDirUndercoat); // much better
+        // reflectedLobeDirUndercoat is now adjusted to correspond to the refracted-back on top direction returned by FindAverageBaseLobeDirOnTop()
+
+        //sanity check: If both normals are equal, then this shouldn't change the output:
+        //preLightData.iblDominantDirectionWS_BottomLobeOnTop = reflect(-viewWS_Clearcoat, bsdfData.clearcoatNormalWS);
+        //reflectedLobeDirUndercoat = reflect(-preLightData.viewWS_UnderCoat, bsdfData.normalWS);
     }
     preLightData.iblDominantDirectionWS_Clearcoat = reflect(-viewWS_Clearcoat, bsdfData.clearcoatNormalWS);
-    //preLightData.iblDominantDirectionWS_UnderCoat = preLightData.iblDominantDirectionWS_Clearcoat;
+    //preLightData.iblDominantDirectionWS_BottomLobeOnTop = preLightData.iblDominantDirectionWS_Clearcoat;

 #ifdef _AXF_BRDF_TYPE_SVBRDF
     // @TODO => Anisotropic IBL?
@@ -1103,11 +1118,32 @@ PreLightData GetPreLightData(float3 viewWS_Clearcoat, PositionInputs posInput
     preLightData.specularCTFGDReflectivity = 0;
     preLightData.ltcTransformSpecularCT = (float3x3[MAX_CT_LOBE_COUNT])0;

-    // TODO_diffuseFGDColor: better one, averaged maybe...
+    // TODO_diffuseFGDColor: better one, averaged maybe: ie depending on roughness also
     preLightData.singleBRDFColor = 1.0;
     float thetaH = 0; //acos(clamp(NdotH, 0, 1));
     float thetaD = acos(clamp(preLightData.NdotV_UnderCoat, 0, 1));
-
+    // The above is the same as
+    //float3 lightDir = reflect(-preLightData.viewWS_UnderCoat, bsdfData.normalWS);
+    //float3 H = normalize(preLightData.viewWS_UnderCoat + lightDir);
+    //float NdotH = dot(bsdfData.normalWS, H);
+    //float LdotH = dot(H, lightDir);
+    //thetaH = acos(clamp(NdotH, 0, 1));
+    //thetaD = acos(clamp(LdotH, 0, 1));
+
+    // Also, could use reflectedLobeDirUndercoat here (and see TODO_diffuseFGDColor: if we make it depend on roughness, one per lobe)
+    // This is relevant only if both normals aren't the same obviously.
+    // In the case of CARPAINT, this means a clearcoat normal map.
+ // (ie orange peel) + if (false) + { + float3 H = normalize(preLightData.viewWS_UnderCoat + reflectedLobeDirUndercoat); + float NdotH = dot(bsdfData.normalWS, H); + + float LdotH = dot(H, reflectedLobeDirUndercoat); + thetaH = acos(clamp(NdotH, 0, 1)); + thetaD = acos(clamp(LdotH, 0, 1)); + } + preLightData.singleBRDFColor *= GetBRDFColor(thetaH, thetaD); preLightData.singleFlakesComponent = CarPaint_BTF(thetaH, thetaD, bsdfData); @@ -1116,7 +1152,7 @@ PreLightData GetPreLightData(float3 viewWS_Clearcoat, PositionInputs posInput { float F0 = _CarPaint2_CTF0s[lobeIndex]; float coeff = _CarPaint2_CTCoeffs[lobeIndex]; - float spread = _CarPaint2_CTSpreads[lobeIndex]; + float spread = bsdfData.roughness[lobeIndex]; // _CarPaint2_CTSpreads[lobeIndex]; #if !USE_COOK_TORRANCE_MULTI_LOBES // Computes weighted average of roughness values sumCoeff += coeff; @@ -1161,8 +1197,9 @@ PreLightData GetPreLightData(float3 viewWS_Clearcoat, PositionInputs posInput float oneOverLobeCnt = rcp(CARPAINT2_LOBE_COUNT); preLightData.iblPerceptualRoughness = RoughnessToPerceptualRoughness(sumRoughness * oneOverLobeCnt); tempF0 = sumF0 * oneOverLobeCnt; - // todo_BeckmannToGGX + // todo_BeckmannToGGX GetPreIntegratedFGDCookTorranceAndLambert(NdotV_UnderCoat, preLightData.iblPerceptualRoughness, tempF0 * preLightData.singleBRDFColor, specularFGD, diffuseFGD, reflectivity); + preLightData.iblPerceptualRoughness = PerceptualRoughnessBeckmannToGGX(preLightData.iblPerceptualRoughness); specularFGD *= GetPreIntegratedFGDCookTorranceSampleMutiplier(); preLightData.specularCTFGDSingleLobe = specularFGD * sumCoeff; #endif @@ -1419,7 +1456,7 @@ float3 ComputeWard(float3 H, float LdotH, float NdotL, float NdotV, PreLightData float F = 1.0; switch (_SVBRDF_BRDFVariants & 3) { - case 1: F_FresnelDieletricSafe(Fresnel0ToIorSafe(bsdfData.fresnelF0.r), LdotH); break; + case 1: F = F_FresnelDieletricSafe(Fresnel0ToIorSafe(bsdfData.fresnelF0.r), LdotH); break; case 2: F = F_Schlick(bsdfData.fresnelF0.r, LdotH); break; } @@ -1427,8 +1464,8 @@ float3 ComputeWard(float3 H, float LdotH, float NdotL, float NdotV, PreLightData float3 tsH = float3(dot(H, bsdfData.tangentWS), dot(H, bsdfData.biTangentWS), dot(H, bsdfData.normalWS)); //float2 rotH = tsH.xy / tsH.z; float2 rotH = tsH.xy / max(0.00001, tsH.z); - //float2 roughness = bsdfData.roughness; - float2 roughness = max(0.0001, bsdfData.roughness); + //float2 roughness = bsdfData.roughness.xy; + float2 roughness = max(0.0001, bsdfData.roughness.xy); //if (bsdfData.roughness.y == 0.0) bsdfData.specularColor = float3(1,0,0); if (roughness.x * roughness.y <= 0.0001 && tsH.z < 1.0) @@ -1452,7 +1489,7 @@ float3 ComputeWard(float3 H, float LdotH, float NdotL, float NdotV, PreLightData float3 ComputeBlinnPhong(float3 H, float LdotH, float NdotL, float NdotV, PreLightData preLightData, BSDFData bsdfData) { - float2 exponents = exp2(bsdfData.roughness); + float2 exponents = exp2(bsdfData.roughness.xy); // Evaluate normal distribution function float3 tsH = float3(dot(H, bsdfData.tangentWS), dot(H, bsdfData.biTangentWS), dot(H, bsdfData.normalWS)); @@ -1623,6 +1660,7 @@ CBSDF EvaluateBSDF(float3 viewWS_Clearcoat, float3 lightWS_Clearcoat, PreLightDa float3 GetCarPaintSpecularFGDForLobe(PreLightData preLightData, uint lobeIndex) { return lerp(preLightData.specularCTFGDAtZeroF0[lobeIndex], preLightData.specularCTFGDReflectivity[lobeIndex], _CarPaint2_CTF0s[lobeIndex]*preLightData.singleBRDFColor); + //return lerp(preLightData.specularCTFGDAtZeroF0[lobeIndex], 
preLightData.specularCTFGDReflectivity[lobeIndex], _CarPaint2_CTF0s[lobeIndex])*preLightData.singleBRDFColor; } @@ -1689,7 +1727,7 @@ CBSDF EvaluateBSDF(float3 viewWS_Clearcoat, float3 lightWS_Clearcoat, PreLightDa float3 diffuseTerm = Lambert(); // Apply multi-lobes Cook-Torrance - float3 specularTerm = MultiLobesCookTorrance(NdotL, NdotV, NdotH, VdotH); + float3 specularTerm = MultiLobesCookTorrance(bsdfData, NdotL, NdotV, NdotH, VdotH); // Apply BRDF color float3 BRDFColor = GetBRDFColor(thetaH, thetaD); @@ -1963,7 +2001,7 @@ DirectLighting EvaluateBSDF_Line( LightLoopContext lightLoopContext, // We project the point onto the area light's plane using the reflected view direction and recompute the light direction from this position // todo_dir: #if 0 - float3 bestLightWS_Specular = ComputeBestLightDirection_Line(lightPositionRWS, preLightData.iblDominantDirectionWS_UnderCoat, lightData); + float3 bestLightWS_Specular = ComputeBestLightDirection_Line(lightPositionRWS, preLightData.iblDominantDirectionWS_BottomLobeOnTop, lightData); // todo_dir todo_pseudorefract // refract light dir here for GetBRDFColor since it is a fresnel-like effect, but @@ -2182,7 +2220,7 @@ DirectLighting EvaluateBSDF_Rect(LightLoopContext lightLoopContext, // We project the point onto the area light's plane using the reflected view direction and recompute the light direction from this position // TODO_dir: #if 0 - float3 bestLightWS_Specular = ComputeBestLightDirection_Rectangle(lightPositionRWS, preLightData.iblDominantDirectionWS_UnderCoat, lightData); + float3 bestLightWS_Specular = ComputeBestLightDirection_Rectangle(lightPositionRWS, preLightData.iblDominantDirectionWS_BottomLobeOnTop, lightData); // TODO_dir: refract light dir for GetBRDFColor here since it is a fresnel-like effect, but // compute LTC / env fetching using *non refracted dir* @@ -2402,24 +2440,43 @@ IndirectLighting EvaluateBSDF_Env( LightLoopContext lightLoopContext, float weight = 1.0; // TODO_dir: this shouldn't be undercoat. - float3 environmentSamplingDirectionWS_UnderCoat = preLightData.iblDominantDirectionWS_UnderCoat; + float3 envSamplingDirForBottomLayer = preLightData.iblDominantDirectionWS_BottomLobeOnTop; #if defined(_AXF_BRDF_TYPE_SVBRDF) float3 envLighting = 0.0; float NdotV = ClampNdotV(preLightData.NdotV_UnderCoat); - - environmentSamplingDirectionWS_UnderCoat = GetModifiedEnvSamplingDir(lightData, bsdfData.normalWS, preLightData.iblDominantDirectionWS_UnderCoat, preLightData.iblPerceptualRoughness, NdotV); - - // Note: using _influenceShapeType and projectionShapeType instead of (lightData|proxyData).shapeType allow to make compiler optimization in case the type is know (like for sky) - EvaluateLight_EnvIntersection(positionWS, bsdfData.normalWS, lightData, _influenceShapeType, environmentSamplingDirectionWS_UnderCoat, weight); + // Here we use bsdfData.clearcoatNormalWS: if there's no coat, bsdfData.clearcoatNormalWS == bsdfData.normalWS anyway. + // The reason is that, normally, since GetModifiedEnvSamplingDir (off-specular effect) is roughness dependent, + // we would have to store another direction (lightData is only used to escape the modification in case of planar probe) + // and in case of carpaint, one for each lobe. However, if we would like to "correctly" take into account the effect, we would have + // to calculate the effect on the bottom layer where directions are different, and then use FindAverageBaseLobeDirOnTop(). + // We decide to just apply the effect on top instead. 
+ // (FindAverageBaseLobeDirOnTop is already an approximation ignoring under-horizon or TIR. If we saturated to the critical angle undercoat
+ // and thus grazing when exiting on top, a tilt back for off-specular effect might in fact have no effect since the lobe could still
+ // be under horizon. On the other hand, if we didn't have to saturate, a little tilt-back toward normal (from GetModifiedEnvSamplingDir)
+ // should have translated into a bigger one on top because of angle range decompression.)
+ envSamplingDirForBottomLayer = GetModifiedEnvSamplingDir(lightData, bsdfData.clearcoatNormalWS, preLightData.iblDominantDirectionWS_BottomLobeOnTop, preLightData.iblPerceptualRoughness, NdotV);
+
+ // Note: using _influenceShapeType and projectionShapeType instead of (lightData|proxyData).shapeType allows compiler optimizations when the type is known (like for the sky)
+ EvaluateLight_EnvIntersection(positionWS, bsdfData.clearcoatNormalWS, lightData, _influenceShapeType, envSamplingDirForBottomLayer, weight);
+ // ...here the normal is only used for the normal fading mode of the influence volume.
+
+ // Another problem with having even two fetch directions is that the reflection hierarchy only supports one weight.
+ // (TODO: We could have a vector tracking multiplied weights already applied per lobe that we update and that is
+ // passed back by the light loop but otherwise opaque to it, with the single hierarchyWeight tracked alongside.
+ // That way no "overlighting" would be done, and by returning hierarchyWeight = min(all weights) up to now,
+ // we could potentially avoid artifacts where e.g. the clearcoat reflection is not available from one influence volume
+ // while the base has full weight reflection. As it is, a blend for the coat reflection is always prevented when the
+ // bottom reflection is full. Lit doesn't have this problem too much in practice since only GetModifiedEnvSamplingDir
+ // changes the direction vs the coat.)

 float IBLMipLevel;
 IBLMipLevel = GetEnvMipLevel(lightData, preLightData.iblPerceptualRoughness);

 // Sample the pre-integrated environment lighting
- float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, environmentSamplingDirectionWS_UnderCoat, IBLMipLevel, lightData.rangeCompressionFactorCompensation);
+ float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, envSamplingDirForBottomLayer, IBLMipLevel, lightData.rangeCompressionFactorCompensation);
 weight *= preLD.w; // Used by planar reflection to discard pixel

 envLighting = GetSpecularIndirectDimmer() * preLightData.specularFGD * preLD.xyz;
@@ -2429,39 +2486,37 @@ IndirectLighting EvaluateBSDF_Env( LightLoopContext lightLoopContext,

 float3 envLighting = 0.0;

- float NdotV = ClampNdotV(preLightData.NdotV_UnderCoat);
-
 // A part of this BRDF depends on thetaH and thetaD and should thus have entered
 // the split sum pre-integration. We do a further approximation by pulling those
 // terms out and evaluating them in the specular dominant direction,
- // for BRDFColor and flakes.
- float3 viewWS_UnderCoat = preLightData.viewWS_UnderCoat;
- float3 lightWS_UnderCoat = environmentSamplingDirectionWS_UnderCoat;
+ // for BRDFColor and flakes, see GetPreLightData.
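// Illustrative sketch (editorial addition, not part of this change): the removed lines just
// below computed thetaH/thetaD with L taken as the environment fetch direction; at the
// dominant specular direction this collapses to thetaH = 0 and thetaD = acos(NdotV), which is
// what GetPreLightData now precomputes. 'DominantDirThetaHD_Sketch' is a hypothetical helper,
// shown only to make that equivalence explicit:
float2 DominantDirThetaHD_Sketch(float3 N, float3 V)
{
    float3 L = reflect(-V, N);                    // dominant specular direction
    float3 H = normalize(V + L);                  // V + L = 2*dot(N,V)*N, so H == N for NdotV > 0
    float thetaH = acos(clamp(dot(N, H), 0, 1));  // dot(N,H) == 1  => thetaH == 0
    float thetaD = acos(clamp(dot(V, H), 0, 1));  // dot(V,H) == NdotV => thetaD == acos(NdotV)
    return float2(thetaH, thetaD);
}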
- float3 H = normalize(viewWS_UnderCoat + lightWS_UnderCoat);
- float NdotH = dot(bsdfData.normalWS, H);
- float VdotH = dot(viewWS_UnderCoat, H);
-
- // TODO_dir: so this is just thetaH = 0, etc. CHECK and remove.
- float thetaH = acos(clamp(NdotH, 0, 1));
- float thetaD = acos(clamp(VdotH, 0, 1));
+ // Note: we don't use GetModifiedEnvSamplingDir() per lobe here, and see comment above about reflection hierarchy.
+ EvaluateLight_EnvIntersection(positionWS, bsdfData.clearcoatNormalWS, lightData, _influenceShapeType, envSamplingDirForBottomLayer, weight);

 #if USE_COOK_TORRANCE_MULTI_LOBES
 // Multi-lobes approach
 // Each CT lobe samples the environment with the appropriate roughness
- float sumWeights = 0.0;
+ float probeSkipFactor = 1;
 for (uint lobeIndex = 0; lobeIndex < CARPAINT2_LOBE_COUNT; lobeIndex++)
 {
 float coeff = _CarPaint2_CTCoeffs[lobeIndex];
 float lobeMipLevel = PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness[lobeIndex]);
- float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, lightWS_UnderCoat, lobeMipLevel, lightData.rangeCompressionFactorCompensation);
+ float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, envSamplingDirForBottomLayer, lobeMipLevel, lightData.rangeCompressionFactorCompensation);

 //todotodo: try removing coeff
 envLighting += coeff * GetCarPaintSpecularFGDForLobe(preLightData, lobeIndex) * preLD.xyz;
- sumWeights += preLD.w;
+ // Note: preLD.w is only used by planar probes, returning 0 if outside the captured direction and 1 otherwise (the influence volume weight fades, not this).
+ // Since this is only used for planar probes, even if we had used GetModifiedEnvSamplingDir() above, all directions would be the same in that case anyway
+ // since GetModifiedEnvSamplingDir() doesn't do anything for planar probes.
+ // For that reason, only one preLD.w needs to be used, no need to average them, they should all be the same.
+ // sumWeights += preLD.w;
+ probeSkipFactor = preLD.w;
 }
+ // See discussion about reflection hierarchy above for SVBRDF, same thing here: when we evaluate the coat, we will ignore its weight.
+ weight *= probeSkipFactor; envLighting *= GetSpecularIndirectDimmer(); //now already in rebuilt specularFGD: envLighting *= GetBRDFColor(thetaH, thetaD); @@ -2469,8 +2524,7 @@ IndirectLighting EvaluateBSDF_Env( LightLoopContext lightLoopContext, //TODO_FLAKES float flakesMipLevel = 0; // Flakes are supposed to be perfect mirrors //envLighting += preLightData.flakesFGD * CarPaint_BTF(thetaH, thetaD, bsdfData) * SampleEnv(lightLoopContext, lightData.envIndex, lightWS_UnderCoat, flakesMipLevel, lightData.rangeCompressionFactorCompensation).xyz; - envLighting += preLightData.singleFlakesComponent * SampleEnv(lightLoopContext, lightData.envIndex, lightWS_UnderCoat, flakesMipLevel, lightData.rangeCompressionFactorCompensation).xyz; - weight *= sumWeights / CARPAINT2_LOBE_COUNT; + envLighting += preLightData.singleFlakesComponent * SampleEnv(lightLoopContext, lightData.envIndex, envSamplingDirForBottomLayer, flakesMipLevel, lightData.rangeCompressionFactorCompensation).xyz; #else // USE_COOK_TORRANCE_MULTI_LOBES @@ -2480,10 +2534,10 @@ IndirectLighting EvaluateBSDF_Env( LightLoopContext lightLoopContext, IBLMipLevel = GetEnvMipLevel(lightData, preLightData.iblPerceptualRoughness); // Sample the actual environment lighting - float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, lightWS_UnderCoat, IBLMipLevel, lightData.rangeCompressionFactorCompensation); + float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, envSamplingDirForBottomLayer, IBLMipLevel, lightData.rangeCompressionFactorCompensation); float3 envLighting; - envLighting = preLightData.specularCTFGDSingleLobe * GetSpecularIndirectDimmer() * GetBRDFColor(thetaH, thetaD); + envLighting = preLightData.specularCTFGDSingleLobe * GetSpecularIndirectDimmer(); //TODO_FLAKES //envLighting += preLightData.flakesFGD * CarPaint_BTF(thetaH, thetaD, bsdfData); envLighting += preLightData.singleFlakesComponent; @@ -2538,9 +2592,11 @@ void PostEvaluateBSDF( LightLoopContext lightLoopContext, { // There is no AmbientOcclusion from data with AxF, but let's apply our SSAO AmbientOcclusionFactor aoFactor; - GetScreenSpaceAmbientOcclusionMultibounce( posInput.positionSS, preLightData.NdotV_UnderCoat, - RoughnessToPerceptualRoughness(GetScalarRoughnessFromAnisoRoughness(bsdfData.roughness.x, bsdfData.roughness.y)), - 1.0, 1.0, GetColorBaseDiffuse(bsdfData), GetColorBaseFresnelF0(bsdfData), aoFactor); + GetScreenSpaceAmbientOcclusionMultibounce(posInput.positionSS, preLightData.NdotV_UnderCoat, + RoughnessToPerceptualRoughness(GetScalarRoughness(bsdfData.roughness)), + bsdfData.ambientOcclusion, bsdfData.specularOcclusion, + GetColorBaseDiffuse(bsdfData), GetColorBaseFresnelF0(bsdfData), aoFactor); + ApplyAmbientOcclusionFactor(aoFactor, builtinData, lighting); diffuseLighting = bsdfData.diffuseColor * lighting.direct.diffuse + builtinData.bakeDiffuseLighting; diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxF.shader b/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxF.shader index 802760a38b9..c125cc62582 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxF.shader +++ b/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxF.shader @@ -63,6 +63,10 @@ Shader "HDRP/AxF" _CarPaint2_CTCoeffs("_CarPaint2_CTCoeffs", Vector) = (1,1,1,1) _CarPaint2_CTSpreads("_CarPaint2_CTSpreads", Vector) = (1,1,1,1) + // GUI inspector only - saves state in material meta, read back from SetupMaterialKeywordsAndPass + //[Enum(Off, 0, From Ambient Occlusion, 1, From Bent 
Normals, 2)] _SpecularOcclusionMode("Specular Occlusion Mode", Int) = 1 + [Enum(Off, 0, From Ambient Occlusion, 1)] _SpecularOcclusionMode("Specular Occlusion Mode", Int) = 1 + [ToggleUI] _UseShadowThreshold("_UseShadowThreshold", Float) = 0.0 [ToggleUI] _AlphaCutoffEnable("Alpha Cutoff Enable", Float) = 0.0 _AlphaCutoff("Alpha Cutoff", Range(0.0, 1.0)) = 0.5 @@ -101,6 +105,10 @@ Shader "HDRP/AxF" [Enum(Flip, 0, Mirror, 1, None, 2)] _DoubleSidedNormalMode("Double sided normal mode", Float) = 1 // This is for the editor only, see BaseLitUI.cs: _DoubleSidedConstants will be set based on the mode. [HideInInspector] _DoubleSidedConstants("_DoubleSidedConstants", Vector) = (1, 1, -1, 0) + [ToggleUI] _EnableGeometricSpecularAA("EnableGeometricSpecularAA", Float) = 0.0 + _SpecularAAScreenSpaceVariance("SpecularAAScreenSpaceVariance", Range(0.0, 1.0)) = 0.1 + _SpecularAAThreshold("SpecularAAThreshold", Range(0.0, 1.0)) = 0.2 + // Caution: C# code in BaseLitUI.cs call LightmapEmissionFlagsProperty() which assume that there is an existing "_EmissionColor" // value that exist to identify if the GI emission need to be enabled. // In our case we don't use such a mechanism but need to keep the code quiet. We declare the value and always enable it. @@ -129,11 +137,14 @@ Shader "HDRP/AxF" //------------------------------------------------------------------------------------- #pragma shader_feature_local _AXF_BRDF_TYPE_SVBRDF _AXF_BRDF_TYPE_CAR_PAINT _AXF_BRDF_TYPE_BTF + #pragma shader_feature_local _ _SPECULAR_OCCLUSION_NONE //_SPECULAR_OCCLUSION_FROM_BENT_NORMAL_MAP + #pragma shader_feature_local _ALPHATEST_ON #pragma shader_feature_local _DOUBLESIDED_ON #pragma shader_feature_local _DISABLE_DECALS #pragma shader_feature_local _DISABLE_SSR + #pragma shader_feature_local _ENABLE_GEOMETRIC_SPECULAR_AA #pragma shader_feature_local _ADD_PRECOMPUTED_VELOCITY diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxFData.hlsl b/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxFData.hlsl index 2a9e98df610..8e919f6ddfe 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxFData.hlsl +++ b/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxFData.hlsl @@ -38,6 +38,9 @@ void ApplyDecalToSurfaceData(DecalSurfaceData decalSurfaceData, inout SurfaceDat surfaceData.specularLobe.x = PerceptualSmoothnessToRoughness(RoughnessToPerceptualSmoothness(surfaceData.specularLobe.x) * decalSurfaceData.mask.w + decalSurfaceData.mask.z); surfaceData.specularLobe.y = PerceptualSmoothnessToRoughness(RoughnessToPerceptualSmoothness(surfaceData.specularLobe.y) * decalSurfaceData.mask.w + decalSurfaceData.mask.z); +#ifdef _AXF_BRDF_TYPE_CAR_PAINT + surfaceData.specularLobe.z = PerceptualSmoothnessToRoughness(RoughnessToPerceptualSmoothness(surfaceData.specularLobe.z) * decalSurfaceData.mask.w + decalSurfaceData.mask.z); +#endif } #endif } @@ -60,11 +63,15 @@ void GetSurfaceAndBuiltinData(FragInputs input, float3 V, inout PositionInputs p float alpha = 1.0; + surfaceData.ambientOcclusion = 1.0; + surfaceData.specularOcclusion = 1.0; + surfaceData.specularLobe = 0; + #ifdef _AXF_BRDF_TYPE_SVBRDF surfaceData.diffuseColor = SAMPLE_TEXTURE2D(_SVBRDF_DiffuseColorMap, sampler_SVBRDF_DiffuseColorMap, UV0).xyz; surfaceData.specularColor = SAMPLE_TEXTURE2D(_SVBRDF_SpecularColorMap, sampler_SVBRDF_SpecularColorMap, UV0).xyz; - surfaceData.specularLobe = _SVBRDF_SpecularLobeMapScale * SAMPLE_TEXTURE2D(_SVBRDF_SpecularLobeMap, sampler_SVBRDF_SpecularLobeMap, UV0).xy; + 
surfaceData.specularLobe.xy = _SVBRDF_SpecularLobeMapScale * SAMPLE_TEXTURE2D(_SVBRDF_SpecularLobeMap, sampler_SVBRDF_SpecularLobeMap, UV0).xy;

 // The AxF models include both a general coloring term that they call "specular color" while the f0 is actually another term,
 // seemingly always scalar:
@@ -98,6 +105,8 @@ void GetSurfaceAndBuiltinData(FragInputs input, float3 V, inout PositionInputs p
 surfaceData.diffuseColor = _CarPaint2_CTDiffuse;
 surfaceData.clearcoatIOR = max(1.001, _CarPaint2_ClearcoatIOR); // Can't be exactly 1 otherwise the precise fresnel divides by 0!

+ surfaceData.specularLobe = _CarPaint2_CTSpreads.xyz; // We may want to modify these (eg for Specular AA)
+
 surfaceData.normalWS = input.tangentToWorld[2].xyz;
 GetNormalWS(input, 2.0 * SAMPLE_TEXTURE2D(_ClearcoatNormalMap, sampler_ClearcoatNormalMap, UV0).xyz - 1.0, surfaceData.clearcoatNormalWS, doubleSidedConstants);
@@ -116,13 +125,29 @@ void GetSurfaceAndBuiltinData(FragInputs input, float3 V, inout PositionInputs p
 // Useless for car paint BSDF
 surfaceData.specularColor = 0;
- surfaceData.specularLobe = 0;
 surfaceData.fresnelF0 = 0;
 surfaceData.height_mm = 0;
 surfaceData.anisotropyAngle = 0;
 surfaceData.clearcoatColor = 0;
 #endif

+ // TODO
+ // Assume same xyz encoding for AxF bent normal as other normal maps.
+ //float3 bentNormalWS;
+ //GetNormalWS(input, 2.0 * SAMPLE_TEXTURE2D(_BentNormalMap, sampler_BentNormalMap, UV0).xyz - 1.0, bentNormalWS, doubleSidedConstants);
+
+ float perceptualRoughness = RoughnessToPerceptualRoughness(GetScalarRoughness(surfaceData.specularLobe));
+
+ //TODO
+//#if defined(_SPECULAR_OCCLUSION_FROM_BENT_NORMAL_MAP)
+ // Note: we use normalWS as it will always exist and be equal to clearcoatNormalWS if there's no coat
+ // (otherwise we do SO with the base lobe, which might be wrong depending on the way AO is computed, and will be wrong either way with a single non-lobe-specific value)
+ //surfaceData.specularOcclusion = GetSpecularOcclusionFromBentAO(V, bentNormalWS, surfaceData.normalWS, surfaceData.ambientOcclusion, perceptualRoughness);
+//#endif
+#if !defined(_SPECULAR_OCCLUSION_NONE)
+ surfaceData.specularOcclusion = GetSpecularOcclusionFromAmbientOcclusion(ClampNdotV(dot(surfaceData.normalWS, V)), surfaceData.ambientOcclusion, perceptualRoughness);
+#endif
+
 // Propagate the geometry normal
 surfaceData.geomNormalWS = input.tangentToWorld[2];
@@ -166,6 +191,15 @@ void GetSurfaceAndBuiltinData(FragInputs input, float3 V, inout PositionInputs p
 #endif
 #endif

+#if defined(_ENABLE_GEOMETRIC_SPECULAR_AA)
+ // Specular AA for geometric curvature
+
+ surfaceData.specularLobe.x = PerceptualSmoothnessToRoughness(GeometricNormalFiltering(RoughnessToPerceptualSmoothness(surfaceData.specularLobe.x), input.tangentToWorld[2], _SpecularAAScreenSpaceVariance, _SpecularAAThreshold));
+ surfaceData.specularLobe.y = PerceptualSmoothnessToRoughness(GeometricNormalFiltering(RoughnessToPerceptualSmoothness(surfaceData.specularLobe.y), input.tangentToWorld[2], _SpecularAAScreenSpaceVariance, _SpecularAAThreshold));
+#if defined(_AXF_BRDF_TYPE_CAR_PAINT)
+ surfaceData.specularLobe.z = PerceptualSmoothnessToRoughness(GeometricNormalFiltering(RoughnessToPerceptualSmoothness(surfaceData.specularLobe.z), input.tangentToWorld[2], _SpecularAAScreenSpaceVariance, _SpecularAAThreshold));
+#endif
+#endif
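// Illustrative sketch (editorial addition, not part of this change): the GeometricNormalFiltering()
// calls above implement specular AA from geometric curvature in the usual way (cf. Tokuyoshi &
// Kaplanyan, "Improved Geometric Specular Antialiasing"): estimate the screen-space variance of
// the geometric normal from its derivatives and widen the lobe roughness accordingly. The function
// below is a conceptual re-sketch under that assumption, not HDRP's exact implementation:
float FilteredSmoothness_Sketch(float perceptualSmoothness, float3 geomNormalWS,
                                float screenSpaceVariance, float threshold)
{
    float3 dNdx = ddx(geomNormalWS);
    float3 dNdy = ddy(geomNormalWS);
    // Variance of the normal over the pixel footprint, scaled by the user parameter:
    float variance = screenSpaceVariance * (dot(dNdx, dNdx) + dot(dNdy, dNdy));
    float kernelRoughness2 = min(2.0 * variance, threshold); // threshold caps the widening
    float roughness = PerceptualSmoothnessToRoughness(perceptualSmoothness);
    // Add the variances in squared-roughness (alpha^2) space:
    float filteredRoughness = sqrt(saturate(roughness * roughness + kernelRoughness2));
    return RoughnessToPerceptualSmoothness(filteredRoughness);
}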
 #if defined(DEBUG_DISPLAY)
 if (_DebugMipMapMode != DEBUGMIPMAPMODE_NONE)

diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxFProperties.hlsl b/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxFProperties.hlsl
index 6f7a6527e3f..9c3383f73da 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxFProperties.hlsl
+++ b/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxFProperties.hlsl
@@ -107,6 +107,11 @@ float _UseShadowThreshold;
 float _AlphaCutoffShadow;
 float4 _DoubleSidedConstants;

+// Specular AA
+float _EnableGeometricSpecularAA;
+float _SpecularAAScreenSpaceVariance;
+float _SpecularAAThreshold;
+
 // Caution: C# code in BaseLitUI.cs call LightmapEmissionFlagsProperty() which assume that there is an existing "_EmissionColor"
 // value that exist to identify if the GI emission need to be enabled.
 // In our case we don't use such a mechanism but need to keep the code quiet. We declare the value and always enable it.
diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/GGXConvolution/ComputeGgxIblSampleData.compute b/com.unity.render-pipelines.high-definition/Runtime/Material/GGXConvolution/ComputeGgxIblSampleData.compute
index de02fb64dcf..d8445386ed7 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/Material/GGXConvolution/ComputeGgxIblSampleData.compute
+++ b/com.unity.render-pipelines.high-definition/Runtime/Material/GGXConvolution/ComputeGgxIblSampleData.compute
@@ -11,7 +11,7 @@
 #define MAX_IBL_SAMPLE_CNT 89
 #endif

-RWTexture2D<float4> output; // [MAX_SAMPLE_CNT x UNITY_SPECCUBE_LOD_STEPS]
+RWTexture2D<float4> outputResult; // [MAX_SAMPLE_CNT x UNITY_SPECCUBE_LOD_STEPS]

 #pragma kernel ComputeGgxIblSampleData
@@ -33,7 +33,7 @@ void ComputeGgxIblSampleData(uint3 groupThreadId : SV_GroupThreadID)

 if (sampleIndex >= sampleCount)
 {
- output[texCoord] = float4(0, 0, 0, 0);
+ outputResult[texCoord] = float4(0, 0, 0, 0);
 return;
 }
@@ -82,6 +82,6 @@ void ComputeGgxIblSampleData(uint3 groupThreadId : SV_GroupThreadID)
 float pdf = 0.25 * D_GGX(NdotH, roughness);
 float omegaS = rcp(sampleCount) * rcp(pdf);

- output[texCoord] = float4(localL, omegaS);
+ outputResult[texCoord] = float4(localL, omegaS);
 }
 }
diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/GGXConvolution/IBLFilterGGX.cs b/com.unity.render-pipelines.high-definition/Runtime/Material/GGXConvolution/IBLFilterGGX.cs
index 1f3df886b42..5ef402930e4 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/Material/GGXConvolution/IBLFilterGGX.cs
+++ b/com.unity.render-pipelines.high-definition/Runtime/Material/GGXConvolution/IBLFilterGGX.cs
@@ -67,7 +67,7 @@ public override void Initialize(CommandBuffer cmd)
 void InitializeGgxIblSampleData(CommandBuffer cmd)
 {
- m_ComputeGgxIblSampleDataCS.SetTexture(m_ComputeGgxIblSampleDataKernel, "output", m_GgxIblSampleData);
+ m_ComputeGgxIblSampleDataCS.SetTexture(m_ComputeGgxIblSampleDataKernel, "outputResult", m_GgxIblSampleData);
 cmd.DispatchCompute(m_ComputeGgxIblSampleDataCS, m_ComputeGgxIblSampleDataKernel, 1, 1, 1);
 }
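// Illustrative sketch (editorial addition, not part of this change): the Lit.hlsl hunk below
// stops applying the base specularFGD to SSR when a clear coat is present. Conceptually, the
// SSR ray reflects off the smooth coat interface, so the coat Fresnel should weight it instead,
// blended by coatMask. 'SsrCoatWeight_Sketch' is a hypothetical name for the blend the change
// introduces:
float3 SsrCoatWeight_Sketch(float3 baseSpecularFGD, float coatMask, float clampedNdotV)
{
    // Smooth dielectric coat: a plain Schlick Fresnel stands in for a pre-integrated FGD term.
    float coatFresnel = F_Schlick(CLEAR_COAT_F0, clampedNdotV);
    return lerp(baseSpecularFGD, float3(coatFresnel, coatFresnel, coatFresnel), coatMask);
}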
diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/Lit/Lit.hlsl b/com.unity.render-pipelines.high-definition/Runtime/Material/Lit/Lit.hlsl
index d4dcfc77194..7da9be1d3ed 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/Material/Lit/Lit.hlsl
+++ b/com.unity.render-pipelines.high-definition/Runtime/Material/Lit/Lit.hlsl
@@ -1710,7 +1710,12 @@ IndirectLighting EvaluateBSDF_ScreenSpaceReflection(PositionInputs posInput,
 ApplyScreenSpaceReflectionWeight(ssrLighting);

 // TODO: we should multiply all indirect lighting by the FGD value only ONCE.
- lighting.specularReflected = ssrLighting.rgb * preLightData.specularFGD;
+ // In case this material has a clear coat, we should not be using the specularFGD. The condition for it is a combination
+ // of a material feature and the coat mask.
+ float clampedNdotV = ClampNdotV(preLightData.NdotV);
+ lighting.specularReflected = ssrLighting.rgb * (HasFlag(bsdfData.materialFeatures, MATERIALFEATUREFLAGS_LIT_CLEAR_COAT) ?
+ lerp(preLightData.specularFGD, F_Schlick(CLEAR_COAT_F0, clampedNdotV), bsdfData.coatMask)
+ : preLightData.specularFGD);
 reflectionHierarchyWeight = ssrLighting.a;

 return lighting;
diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/SharedRTManager.cs b/com.unity.render-pipelines.high-definition/Runtime/Material/SharedRTManager.cs
index f9b4a7f732b..4c261ced47d 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/Material/SharedRTManager.cs
+++ b/com.unity.render-pipelines.high-definition/Runtime/Material/SharedRTManager.cs
@@ -94,6 +94,8 @@ public void InitSharedBuffers(GBufferManager gbufferManager, RenderPipelineSetti
 // Create the required resolve materials
 m_DepthResolveMaterial = CoreUtils.CreateEngineMaterial(resources.shaders.depthValuesPS);
 m_ColorResolveMaterial = CoreUtils.CreateEngineMaterial(resources.shaders.colorResolvePS);
+
+ CoreUtils.SetKeyword(m_DepthResolveMaterial, "_HAS_MOTION_VECTORS", m_MotionVectorsSupport);
 }

 AllocateCoarseStencilBuffer(RTHandles.maxWidth, RTHandles.maxHeight, TextureXR.slices);
@@ -342,18 +344,30 @@ public void ResolveSharedRT(CommandBuffer cmd, HDCamera hdCamera)
 Debug.Assert(m_MSAASupported);
 using (new ProfilingScope(cmd, ProfilingSampler.Get(HDProfileId.ResolveMSAADepth)))
 {
- // Grab the RTIs and set the output render targets
- m_RTIDs3[0] = m_CameraDepthValuesBuffer.nameID;
- m_RTIDs3[1] = m_NormalRT.nameID;
- m_RTIDs3[2] = m_MotionVectorsRT.nameID;
- CoreUtils.SetRenderTarget(cmd, m_RTIDs3, m_CameraDepthStencilBuffer);
-
- // Set the input textures
+ if (m_MotionVectorsSupport)
+ {
+ // Grab the RTIs and set the output render targets
+ m_RTIDs3[0] = m_CameraDepthValuesBuffer.nameID;
+ m_RTIDs3[1] = m_NormalRT.nameID;
+ m_RTIDs3[2] = m_MotionVectorsRT.nameID;
+ CoreUtils.SetRenderTarget(cmd, m_RTIDs3, m_CameraDepthStencilBuffer);
+
+ // Set the motion vector input texture
+ Shader.SetGlobalTexture(HDShaderIDs._MotionVectorTextureMS, m_MotionVectorsMSAART);
+ }
+ else
+ {
+ // Grab the RTIs and set the output render targets
+ m_RTIDs2[0] = m_CameraDepthValuesBuffer.nameID;
+ m_RTIDs2[1] = m_NormalRT.nameID;
+ CoreUtils.SetRenderTarget(cmd, m_RTIDs2, m_CameraDepthStencilBuffer);
+ }
+
+ // Set the depth and normal input textures
 Shader.SetGlobalTexture(HDShaderIDs._NormalTextureMS, m_NormalMSAART);
 Shader.SetGlobalTexture(HDShaderIDs._DepthTextureMS, m_DepthAsColorMSAART);
- Shader.SetGlobalTexture(HDShaderIDs._MotionVectorTextureMS, m_MotionVectorsMSAART);

- // Resolve the depth and normal buffers
+ // Resolve the buffers
 cmd.DrawProcedural(Matrix4x4.identity, m_DepthResolveMaterial, SampleCountToPassIndex(m_MSAASamples), MeshTopology.Triangles, 3, 1);
 }
 }
diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/StackLit/StackLit.hlsl b/com.unity.render-pipelines.high-definition/Runtime/Material/StackLit/StackLit.hlsl
index d6d064d5f95..80a53b76a8a 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/Material/StackLit/StackLit.hlsl
+++ b/com.unity.render-pipelines.high-definition/Runtime/Material/StackLit/StackLit.hlsl
@@ -31,6 +31,12 @@
 // #define
STACK_LIT_DISPLAY_REFERENCE_IBL #endif +#ifndef SKIP_RASTERIZED_SHADOWS +#define RASTERIZED_AREA_LIGHT_SHADOWS 1 +#else +#define RASTERIZED_AREA_LIGHT_SHADOWS 0 +#endif + //----------------------------------------------------------------------------- // Texture and constant buffer declaration //----------------------------------------------------------------------------- @@ -178,7 +184,7 @@ void GetAmbientOcclusionFactor(float3 indirectAmbientOcclusion, float3 indirectS #define TOP_DIR_IDX 0 #define BOTTOM_DIR_IDX (NB_LV_DIR-1) -// BASE_NB_LOBES will never be 1, we let the compiler optimize +// BASE_NB_LOBES will never be 1, we let the compiler optimize // everything out from bsdfData.lobeMix = 0; #define BASE_NB_LOBES 2 // use numeric indices for these arrays #define TOTAL_NB_LOBES (BASE_NB_LOBES+COAT_NB_LOBES) // use *_LOBE?_IDX for these arrays. @@ -252,9 +258,9 @@ bool IsCoatNormalMapEnabled(BSDFData bsdfData) // based on Fresnel terms (hack to reduce pre-integrated FGD fetches TODOENERGY). // // Normally when shading with normal maps, we clamp / saturate diverse values -// (eg see here BSDF_SetupNormalsAndAngles or CommonLighting's GetBSDFAngle) to avoid +// (eg see here BSDF_SetupNormalsAndAngles or CommonLighting's GetBSDFAngle) to avoid // special casing the BSDF evaluation but still shade according to the normal maps. -// Fresnel is normally evaluated with the LdotH angle, but this normally never "clips" +// Fresnel is normally evaluated with the LdotH angle, but this normally never "clips" // to the hemisphere (oriented on the normal) the complete BSDF evaluation as LdotH is // never negative (H is at most 90 degrees away from L and V). // @@ -271,9 +277,9 @@ bool IsCoatNormalMapEnabled(BSDFData bsdfData) // Obviously this is still a hack as stated but is more pleasing and is roughly akin to // having the top layer "folds" as dual-faced. // -// When no recompute per light is done or we are doing ComputeAdding in the first call in +// When no recompute per light is done or we are doing ComputeAdding in the first call in // GetPreLightData for split-sum type of lights (non dirac), we don't have a particular L -// to use and use clamped NdotV. In that case, the normal is taken as the H vector, and +// to use and use clamped NdotV. In that case, the normal is taken as the H vector, and // there will be regions where NdotV can be negative so is clamped near zero. In that case, // being in the "grazing angle region", integrated FGD or Fresnel would yeld reflectance // operators that yield zero energy transmitted to the bottom layer, and everything reflected @@ -285,15 +291,15 @@ bool IsCoatNormalMapEnabled(BSDFData bsdfData) // the geometric normal on the top since it should not be back facing to begin the // computations - in that case, we lose the Fresnel variations induced by the top normal map // and it only affects other parts of BSDF evaluations later for all types of lights. -// +// // This is VLAYERED_DUAL_NORMALS_TOP_FIX_GEOM_NORMAL. // // Otherwise, we also provide a behavior similar to flipping of the normal, and we even // saturate a bit less close to zero (than ClampNdotV) to remove the effect of the grazing -// angle. +// angle. 
// // This is VLAYERED_DUAL_NORMALS_TOP_FIX_FLIP_NORMAL -// +// #define VLAYERED_DUAL_NORMALS_TOP_FIX_DEFAULT 0 // do nothing #define VLAYERED_DUAL_NORMALS_TOP_FIX_GEOM_NORMAL 1 #define VLAYERED_DUAL_NORMALS_TOP_FIX_FLIP_NORMAL 2 @@ -565,7 +571,7 @@ void ApplyDebugToSurfaceData(float3x3 tangentToWorld, inout SurfaceData surfaceD // There is no metallic with SSS and specular color mode float metallic = HasFlag(surfaceData.materialFeatures, MATERIALFEATUREFLAGS_STACK_LIT_SPECULAR_COLOR | MATERIALFEATUREFLAGS_STACK_LIT_SUBSURFACE_SCATTERING | MATERIALFEATUREFLAGS_STACK_LIT_TRANSMISSION) ? 0.0 : surfaceData.metallic; - + float3 diffuseColor = ComputeDiffuseColor(surfaceData.baseColor, metallic); bool specularWorkflow = HasFlag(surfaceData.materialFeatures, MATERIALFEATUREFLAGS_STACK_LIT_SPECULAR_COLOR); float3 specularColor = specularWorkflow ? surfaceData.specularColor : ComputeFresnel0(surfaceData.baseColor, surfaceData.metallic, IorToFresnel0(surfaceData.dielectricIor)); @@ -635,7 +641,7 @@ NormalData ConvertSurfaceDataToNormalData(SurfaceData surfaceData) { // In HazyGloss mode. ConvertSurfaceDataToNormalData() would need positionSS and to call // ConvertSurfaceDataToBSDFData, might be too heavy for a prepass, maybe find a lightweight approximation - // of HazeMapping. + // of HazeMapping. // This is a moot point though: mixing two roughnesses directly in one is already a hack, the // resulting lobe isn't representative of this. But for what ConvertSurfaceDataToNormalData() influences // (like SSR and shadows), it might be sufficient. @@ -672,7 +678,7 @@ NormalData ConvertSurfaceDataToNormalData(SurfaceData surfaceData) void HazeMapping(float3 fresnel0, float roughnessAT, float roughnessAB, float haziness, float hazeExtent, float hazeExtentAnisotropy, float3 hazyGlossMaxf0, inout BSDFData bsdfData) { float w = 10.0; // interpolation steepness weight (Bezier weight of central point) - bool useBezierToMapKh = true; + bool useBezierToMapKh = true; float3 r_c = fresnel0; // We can use clamping of roughnessA here to avoid a "p == 0/0" case if roughnessA == 0. @@ -705,7 +711,7 @@ void HazeMapping(float3 fresnel0, float roughnessAT, float roughnessAB, float ha // maximum core roughness and since this primary roughness (of lobe A) can be textured, we // don't know it). float p = alpha_n_xy/alpha_w_xy; // peak ratio formula at theta_d = 0 (ie p is in the paper := P(0)) - + float r_c_max = Max3(r_c.r, r_c.g, r_c.b); float k_h_max = 0.0; @@ -714,13 +720,13 @@ void HazeMapping(float3 fresnel0, float roughnessAT, float roughnessAB, float ha bsdfData.lobeMix = 0.0; } //else if (alpha_w_xy <= FLT_EPS) { bsdfData.lobeMix = beta_h; } - else + else { if (useBezierToMapKh) { // Smooth out C1 discontinuity at k_h = p with a Bezier curve // (loose some hazeExtent in the process). - + float b = 2*(r_c_max*(1-w)+w*p); float u; // parametric coordinate for rational Bezier curve if (abs(2*(b-1)) <= FLT_EPS) @@ -740,18 +746,18 @@ void HazeMapping(float3 fresnel0, float roughnessAT, float roughnessAB, float ha // Interpolation between 0 and positivity and energy constraints: these are lines // but form a triangle so there's a discontinuity at k_h := K_h(0) = p, hence the // branch here: - k_h_max = (r_c_max > p) ? beta_h*(1-r_c_max)/(1-p) : beta_h*r_c_max/p; + k_h_max = (r_c_max > p) ? 
beta_h*(1-r_c_max)/(1-p) : beta_h*r_c_max/p; } - + float r_max = r_c_max + (1-p)*k_h_max; // compound reflectivity (max color channel) float3 chromaVec = r_c/r_c_max; - + bsdfData.fresnel0 = r_max*chromaVec; bsdfData.fresnel0 = min(bsdfData.fresnel0, hazyGlossMaxf0); bsdfData.lobeMix = k_h_max / r_max; //bsdfData.lobeMix = 0.5; - // For IBL, convert back to the scalar roughness + anisotropy parametrization for the + // For IBL, convert back to the scalar roughness + anisotropy parametrization for the // secondary lobe: float anisotropyB; float roughnessB; @@ -829,7 +835,7 @@ BSDFData ConvertSurfaceDataToBSDFData(uint2 positionSS, SurfaceData surfaceData) // It is important to deal with the hazy gloss parametrization after we have fresnel0 for the base but // before the effect of the coat is applied on it. When hazy gloss is used, the current fresnel0 at this // point is reinterpreted as a pseudo-f0 ("core lobe reflectivity" or Fc(0) or r_c in the paper) - // + // if (HasFlag(surfaceData.materialFeatures, MATERIALFEATUREFLAGS_STACK_LIT_HAZY_GLOSS)) { // reminder: ComputeFresnel0 lerps from last param to first param using middle param as lerp factor. @@ -1070,7 +1076,7 @@ struct PreLightData float screenSpaceAmbientOcclusion; // Keep a copy of the screen space occlusion texture fetch between // PreLightData and PostEvaluateBSDF. float3 hemiSpecularOcclusion[TOTAL_NB_LOBES]; // Specular occlusion calculated from roughness and for an unknown - // (the less sparse / more uniform the better) light structure + // (the less sparse / more uniform the better) light structure // potentially covering the whole hemisphere. }; @@ -1096,7 +1102,7 @@ struct PreLightData // 1b) Clamp input roughnesses before the stack computations, so that the new top roughness also impacts the bottom. // // 2) For 1b), we also could interpret the minRoughness as clamping the coat only: since the bottom will get the -// impact of the clamp indirectly, this could suffice. +// impact of the clamp indirectly, this could suffice. // // As the light.minRoughness is a hack that can be used to simulate a sphere light from a point light, all options // can be valid, it depends on what appearance the user wants. 
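// Illustrative sketch (editorial addition, not part of this change) of option 1b discussed
// above: clamp the stack *input* roughnesses before ComputeAdding() so that the per-light
// minRoughness also propagates to the bottom lobes through the layering statistics. This is a
// hypothetical helper, not the file's actual code; the field names follow the BSDFData members
// used elsewhere in this file:
void ClampStackInputRoughness_Sketch(float minRoughness, inout BSDFData bsdfData)
{
    bsdfData.coatRoughness = max(minRoughness, bsdfData.coatRoughness);
    bsdfData.roughnessAT   = max(minRoughness, bsdfData.roughnessAT);
    bsdfData.roughnessAB   = max(minRoughness, bsdfData.roughnessAB);
    bsdfData.roughnessBT   = max(minRoughness, bsdfData.roughnessBT);
    bsdfData.roughnessBB   = max(minRoughness, bsdfData.roughnessBB);
    // Option 2 would clamp only coatRoughness here and let ComputeAdding() spread the
    // widening down to the base lobes indirectly.
}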
@@ -1146,7 +1152,7 @@ void ClampRoughness(inout PreLightData preLightData, inout BSDFData bsdfData, fl // we don't update this, no need to: bsdfData.coatPerceptualRoughness = RoughnessToPerceptualRoughness(bsdfData.coatRoughness); } } - + if (!GetRecomputeStackPerLightOption()) { preLightData.layeredRoughnessT[0] = max(minRoughness, preLightData.layeredRoughnessT[0]); @@ -1164,7 +1170,7 @@ void ClampRoughness(inout PreLightData preLightData, inout BSDFData bsdfData, fl { preLightData.layeredCoatRoughness = max(minRoughness, preLightData.layeredCoatRoughness); } - + preLightData.layeredRoughnessT[0] = max(minRoughness, preLightData.layeredRoughnessT[0]); preLightData.layeredRoughnessT[1] = max(minRoughness, preLightData.layeredRoughnessT[1]); preLightData.layeredRoughnessB[0] = max(minRoughness, preLightData.layeredRoughnessB[0]); @@ -1261,7 +1267,7 @@ float3 GetOrthogonalComponent(float3 V, float3 N, bool testSingularity = false) if (testSingularity && (abs(1.0 - VdotN) <= FLT_EPS)) { // In this case N == V, and azimuth orientation around N shouldn't matter for the caller, - // we can use any quaternion-based method, like Frisvad or Reynold's (Pixar): + // we can use any quaternion-based method, like Frisvad or Reynold's (Pixar): float3x3 orthoBasis = GetLocalFrame(N); unitVOrtho = orthoBasis[0]; // we pick any axis, compiler should optimize out calculation of [1] } @@ -1522,7 +1528,7 @@ void ComputeStatistics(in float cti, in float3 V, in float3 vOrthoGeomN, in bo if( stt <= 1.0f ) { // See p5 fig5 a) vs b) : using a refraction as a peak mean is the dotted line, while the ref is the solid line. - // The scale is a hack to reproduce this effect: + // The scale is a hack to reproduce this effect: // As roughness -> 1, remove the effect of changing angle of entry. // Note that we never track complete means per se because of symmetry, we have no azimuth, so the "space" of the // means sin(theta) (here sti and stt) is just a line perpendicular to the normal in the plane of incidence. @@ -1704,9 +1710,9 @@ void ComputeAdding(float _cti, float3 V, in BSDFData bsdfData, inout PreLightDat { // Just a precaution minRoughness = 0.0; - // ie We will only take it into account if called per light. + // ie We will only take it into account if called per light. // If GetHonorPerLightMinRoughness(), we will still escape the default clamp of ClampRoughnessForDiracLightsByDefault() though. - // The net result if we're never recomputing the stack per light but signal we honor the per-light minRoughness is that we + // The net result if we're never recomputing the stack per light but signal we honor the per-light minRoughness is that we // won't clamp anything in ComputeAdding and just late clamp the resulting roughnesses at each light evaluation via ClampRoughness(). // The change in coat roughness will obviously not affect the bottom roughness in that case and the results will be wrong, but // depending on the scene setup, could be acceptable. @@ -1876,7 +1882,7 @@ void ComputeAdding(float _cti, float3 V, in BSDFData bsdfData, inout PreLightDat // Update mean - // Avoid grazing angle black artefacts and instead of + // Avoid grazing angle black artefacts and instead of // cti = ctt; cti = ClampNdotV(ctt); @@ -2476,10 +2482,10 @@ void PreLightData_SetupOcclusion(PositionInputs posInput, BSDFData bsdfData, flo // -We have 3 lobes with different roughnesses, and these have been placed unclamped and modified by vlayering in // iblPerceptualRoughness[]. // -We might have 2 different shading normals to consider. 
- // -Bentnormal is always considered if the algorithm permits it, but it might trivially be the normal if no bent + // -Bentnormal is always considered if the algorithm permits it, but it might trivially be the normal if no bent // normals were given by the user. // - // -Finally, our pre-calculated specular occlusion will serve for IBL for now, which have unknown structure so the + // -Finally, our pre-calculated specular occlusion will serve for IBL for now, which have unknown structure so the // whole hemisphere around the normal is taken as potential light visibility region, that's why the pre-calculated // values are identified as "hemiSpecularOcclusion". This would potentially need to be different per light type, // or even per light: @@ -2679,7 +2685,7 @@ PreLightData GetPreLightData(float3 V, PositionInputs posInput, inout BSDFData b float diffuseFGDTmp; // unused, for coat layer FGD fetch - // We will do the coat specific FGD fetch here: + // We will do the coat specific FGD fetch here: // (FGD fetches used for IBL + area light + multiscattering) GetPreIntegratedFGDGGXAndDisneyDiffuse(NdotV[COAT_NORMAL_IDX], preLightData.iblPerceptualRoughness[COAT_LOBE_IDX], @@ -2688,7 +2694,7 @@ PreLightData GetPreLightData(float3 V, PositionInputs posInput, inout BSDFData b diffuseFGDTmp, specularReflectivity[COAT_LOBE_IDX]); - // We apply the coatMask here since even an f0 of 0 in the fetch above will give a + // We apply the coatMask here since even an f0 of 0 in the fetch above will give a // directional albedo (aka specular reflectivity) that is non zero: preLightData.specularFGD[COAT_LOBE_IDX] *= bsdfData.coatMask; // This is for the base FGD fetches factored out of "if vlayering or not": @@ -2775,7 +2781,7 @@ PreLightData GetPreLightData(float3 V, PositionInputs posInput, inout BSDFData b if (AREA_LIGHTS_ANISOTROPY_ENABLED == false) { // If area lights don't support anisotropy, we can setup area lights here and occlusion after, as the former - // don't need the anisotropic modified normal and roughness (IBL anisotropy hack) and the later can use + // don't need the anisotropic modified normal and roughness (IBL anisotropy hack) and the later can use // the area lights preLightData.orthoBasisViewNormal: PreLightData_SetupAreaLights(bsdfData, V, N, NdotV, preLightData); @@ -2803,7 +2809,7 @@ PreLightData GetPreLightData(float3 V, PositionInputs posInput, inout BSDFData b // isn't really needed, as if no vlayering, COAT_LOBE_IDX will be == to one of the BASE_LOBE?_IDX // and the following line will be pruned out by the compiler: preLightData.partLambdaV[COAT_LOBE_IDX] = GetSmithJointGGXPartLambdaV(NdotV[COAT_NORMAL_IDX], preLightData.layeredCoatRoughness); - + preLightData.partLambdaV[BASE_LOBEA_IDX] = GetSmithJointGGXAnisoPartLambdaV(TdotV, BdotV, NdotV[BASE_NORMAL_IDX], preLightData.layeredRoughnessT[0], preLightData.layeredRoughnessB[0]); preLightData.partLambdaV[BASE_LOBEB_IDX] = GetSmithJointGGXAnisoPartLambdaV(TdotV, BdotV, NdotV[BASE_NORMAL_IDX], @@ -2941,7 +2947,7 @@ void ModifyBakedDiffuseLighting(float3 V, PositionInputs posInput, SurfaceData s builtinData.bakeDiffuseLighting += builtinData.backBakeDiffuseLighting * bsdfData.transmittance; } - // For SSS we need to take into account the state of diffuseColor + // For SSS we need to take into account the state of diffuseColor if (HasFlag(bsdfData.materialFeatures, MATERIALFEATUREFLAGS_STACK_LIT_SUBSURFACE_SCATTERING)) { bsdfData.diffuseColor = GetModifiedDiffuseColorForSSS(bsdfData); @@ -2953,7 +2959,7 @@ void 
ModifyBakedDiffuseLighting(float3 V, PositionInputs posInput, SurfaceData s // Note: When baking reflection probes, we approximate the diffuse with the fresnel0 builtinData.bakeDiffuseLighting *= preLightData.diffuseFGD * preLightData.diffuseEnergy * GetDiffuseOrDefaultColor(bsdfData, _ReplaceDiffuseForIndirect).rgb; - // The lobe specific specular occlusion data, along with the result of the screen space occlusion sampling + // The lobe specific specular occlusion data, along with the result of the screen space occlusion sampling // will be computed in PreLightData. } @@ -2979,7 +2985,7 @@ float GetInferredMetallic(float dielectricF0, float3 inDiffuseColor, float3 inFr if (dielectricF0 <= 0.0001) { // The baseColor + metallic parameterization gives (note that this is used to build - // a possible conversion, but the given fresnel0, diffuseColor and dielectricF0 might not + // a possible conversion, but the given fresnel0, diffuseColor and dielectricF0 might not // be possible with a baseColor + metallic parameterization): // // (A) fresnel0 = metallic * basecolor + (1.0 - metallic) * dielectricF0; @@ -3005,7 +3011,7 @@ float GetInferredMetallic(float dielectricF0, float3 inDiffuseColor, float3 inFr // metallic = 1/(diffuseColor/fresnel0 + 1); // metallic = fresnel0/(diffuseColor + fresnel0); // - // So we will use that formula when dielectricF0 == 0 since it outputs plausible values: + // So we will use that formula when dielectricF0 == 0 since it outputs plausible values: // // -When fresnel0 is 0, it will always output a desired (for the reasons discussed above) value of metallic = 0. // -When input values are possible for (A) and (B), the formula is correct (for when dielectricF0 == 0 of course). @@ -3410,9 +3416,9 @@ void GetNLForDirectionalPunctualLights(BSDFData bsdfData, PreLightData preLightD // For the rest, we will use the N which produces the biggest NdotL, as we don't want // to early out eg from the bottom layer when the top should have a highlight, // while the final BSDF evaluation will take care of applying the proper NdotL in any - // case. + // case. // We could increase the cost and complexity of all this and actually - // commit fully to making all these diract-light evaluations local to this file and + // commit fully to making all these diract-light evaluations local to this file and // pass to BSDF the L[], V[], etc. arrays instead of hacking our way around just here. float maxNdotL = max(NdotL[COAT_NORMAL_IDX], NdotL[BASE_NORMAL_IDX]); @@ -3569,7 +3575,7 @@ CBSDF EvaluateBSDF(float3 inV, float3 inL, PreLightData preLightData, BSDFData b // NO VLAYERING: // -------------------------------------------------------------------- - // Note: See GetPreLightData(), in that case, + // Note: See GetPreLightData(), in that case, // preLightData.layeredRoughnessT[0] = bsdfData.roughnessAT; // preLightData.layeredRoughnessB[0] = bsdfData.roughnessAB; // preLightData.layeredRoughnessT[1] = bsdfData.roughnessBT; @@ -3743,147 +3749,154 @@ DirectLighting EvaluateBSDF_Line( LightLoopContext lightLoopContext, // Terminate if the shaded point is too far away. if (intensity == 0.0) - return lighting; - - lightData.diffuseDimmer *= intensity; - lightData.specularDimmer *= intensity; + { + lightData.diffuseDimmer *= intensity; + lightData.specularDimmer *= intensity; - // Translate the light s.t. the shaded point is at the origin of the coordinate system. - lightData.positionRWS -= positionWS; + // Translate the light s.t. the shaded point is at the origin of the coordinate system. 
+ lightData.positionRWS -= positionWS; - // TODO: some of this could be precomputed. - float3 P1 = lightData.positionRWS - T * (0.5 * len); - float3 P2 = lightData.positionRWS + T * (0.5 * len); + // TODO: some of this could be precomputed. + float3 P1 = lightData.positionRWS - T * (0.5 * len); + float3 P2 = lightData.positionRWS + T * (0.5 * len); - // Setup the default local canonical frame with X-Y aligned to the reflection plane - // using orthoBasisViewNormal: without the anisotropic hack, this is only dependent on - // if we have dual normal maps or not: + // Setup the default local canonical frame with X-Y aligned to the reflection plane + // using orthoBasisViewNormal: without the anisotropic hack, this is only dependent on + // if we have dual normal maps or not: - // Rotate the endpoints into the local coordinate system. - float3 localP1 = mul(P1, transpose(preLightData.orthoBasisViewNormal[BASE_NORMAL_IDX])); - float3 localP2 = mul(P2, transpose(preLightData.orthoBasisViewNormal[BASE_NORMAL_IDX])); - // Compute the binormal in the local coordinate system. - float3 B = normalize(cross(localP1, localP2)); + // Rotate the endpoints into the local coordinate system. + float3 localP1 = mul(P1, transpose(preLightData.orthoBasisViewNormal[BASE_NORMAL_IDX])); + float3 localP2 = mul(P2, transpose(preLightData.orthoBasisViewNormal[BASE_NORMAL_IDX])); + // Compute the binormal in the local coordinate system. + float3 B = normalize(cross(localP1, localP2)); - if (AREA_LIGHTS_ANISOTROPY_ENABLED) // statically known, so no need for if else, just overwrite the above - { - // Since we proceed with calculating diffuse and transmission irradiance, we setup - // the points for the diffuse frame. - // There's no anisotropy on the diffuse component and this is oriented considering - // the proper base layer normal: - localP1 = mul(P1, transpose(preLightData.orthoBasisViewNormalDiffuse)); - localP2 = mul(P2, transpose(preLightData.orthoBasisViewNormalDiffuse)); - B = normalize(cross(localP1, localP2)); - } - - // Calculate the L irradiance (ltcValue) first for the diffuse part and transmission, - // then for the specular base layer and finishing with the coat. - float ltcValue; + if (AREA_LIGHTS_ANISOTROPY_ENABLED) // statically known, so no need for if else, just overwrite the above + { + // Since we proceed with calculating diffuse and transmission irradiance, we setup + // the points for the diffuse frame. + // There's no anisotropy on the diffuse component and this is oriented considering + // the proper base layer normal: + localP1 = mul(P1, transpose(preLightData.orthoBasisViewNormalDiffuse)); + localP2 = mul(P2, transpose(preLightData.orthoBasisViewNormalDiffuse)); + B = normalize(cross(localP1, localP2)); + } - // Evaluate the diffuse part - ltcValue = LTCEvaluate(localP1, localP2, B, preLightData.ltcTransformDiffuse); - ltcValue *= lightData.diffuseDimmer; - // We don't multiply by 'bsdfData.diffuseColor' here. It's done only once in PostEvaluateBSDF(). - lighting.diffuse = preLightData.diffuseFGD * preLightData.diffuseEnergy * ltcValue; + // Calculate the L irradiance (ltcValue) first for the diffuse part and transmission, + // then for the specular base layer and finishing with the coat. + float ltcValue; - UNITY_BRANCH if (HasFlag(bsdfData.materialFeatures, MATERIALFEATUREFLAGS_STACK_LIT_TRANSMISSION)) - { - // Flip the view vector and the normal. The bitangent stays the same. 
- float3x3 flipMatrix = float3x3(-1, 0, 0, - 0, 1, 0, - 0, 0, -1); - - // Use the Lambertian approximation for performance reasons. - // The matrix multiplication should not generate any extra ALU on GCN. - // TODO: double evaluation is very inefficient! This is a temporary solution. - ltcValue = LTCEvaluate(localP1, localP2, B, mul(flipMatrix, k_identity3x3)); + // Evaluate the diffuse part + ltcValue = LTCEvaluate(localP1, localP2, B, preLightData.ltcTransformDiffuse); ltcValue *= lightData.diffuseDimmer; - - // VLAYERED_DIFFUSE_ENERGY_HACKED_TERM: - // In Lit with Lambert, there's no diffuseFGD, it is one. In our case, we also - // need a diffuse energy term when vlayered. - - // We use diffuse lighting for accumulation since it is going to be blurred during the SSS pass. // We don't multiply by 'bsdfData.diffuseColor' here. It's done only once in PostEvaluateBSDF(). - lighting.diffuse += bsdfData.transmittance * ltcValue * preLightData.diffuseEnergy; - } + lighting.diffuse = preLightData.diffuseFGD * preLightData.diffuseEnergy * ltcValue; - // Evaluate the specular lobes for the stack - IF_DEBUG( if ( _DebugLobeMask.y != 0.0) ) - { - if (AREA_LIGHTS_ANISOTROPY_ENABLED) + UNITY_BRANCH if (HasFlag(bsdfData.materialFeatures, MATERIALFEATUREFLAGS_STACK_LIT_TRANSMISSION)) { - // In that case, instead of only considering possibly dual normal maps and thus two - // local canonical frames we have lobe specific frames because of the anisotropic hack: - localP1 = mul(P1, transpose(preLightData.orthoBasisViewNormal[ORTHOBASIS_VN_BASE_LOBEA_IDX])); - localP2 = mul(P2, transpose(preLightData.orthoBasisViewNormal[ORTHOBASIS_VN_BASE_LOBEA_IDX])); - B = normalize(cross(localP1, localP2)); + // Flip the view vector and the normal. The bitangent stays the same. + float3x3 flipMatrix = float3x3(-1, 0, 0, + 0, 1, 0, + 0, 0, -1); + + // Use the Lambertian approximation for performance reasons. + // The matrix multiplication should not generate any extra ALU on GCN. + // TODO: double evaluation is very inefficient! This is a temporary solution. + ltcValue = LTCEvaluate(localP1, localP2, B, mul(flipMatrix, k_identity3x3)); + ltcValue *= lightData.diffuseDimmer; + + // VLAYERED_DIFFUSE_ENERGY_HACKED_TERM: + // In Lit with Lambert, there's no diffuseFGD, it is one. In our case, we also + // need a diffuse energy term when vlayered. + + // We use diffuse lighting for accumulation since it is going to be blurred during the SSS pass. + // We don't multiply by 'bsdfData.diffuseColor' here. It's done only once in PostEvaluateBSDF(). 
+ lighting.diffuse += bsdfData.transmittance * ltcValue * preLightData.diffuseEnergy; } - ltcValue = LTCEvaluate(localP1, localP2, B, preLightData.ltcTransformSpecular[BASE_LOBEA_IDX]); - // See EvaluateBSDF_Env TODOENERGY: - lighting.specular += preLightData.energyCompensationFactor[BASE_LOBEA_IDX] * preLightData.specularFGD[BASE_LOBEA_IDX] * ltcValue; - } - IF_DEBUG( if ( _DebugLobeMask.z != 0.0) ) - { - if (AREA_LIGHTS_ANISOTROPY_ENABLED) - { - // In that case, instead of only considering possibly dual normal maps and thus two - // local canonical frames we have lobe specific frames because of the anisotropic hack: - localP1 = mul(P1, transpose(preLightData.orthoBasisViewNormal[ORTHOBASIS_VN_BASE_LOBEB_IDX])); - localP2 = mul(P2, transpose(preLightData.orthoBasisViewNormal[ORTHOBASIS_VN_BASE_LOBEB_IDX])); - B = normalize(cross(localP1, localP2)); - } - ltcValue = LTCEvaluate(localP1, localP2, B, preLightData.ltcTransformSpecular[BASE_LOBEB_IDX]); - lighting.specular += preLightData.energyCompensationFactor[BASE_LOBEB_IDX] * preLightData.specularFGD[BASE_LOBEB_IDX] * ltcValue; - } - if (IsVLayeredEnabled(bsdfData)) - { - IF_DEBUG( if ( _DebugLobeMask.x != 0.0) ) + // Evaluate the specular lobes for the stack + IF_DEBUG( if ( _DebugLobeMask.y != 0.0) ) { - if (IsCoatNormalMapEnabled(bsdfData)) + if (AREA_LIGHTS_ANISOTROPY_ENABLED) { - localP1 = mul(P1, transpose(preLightData.orthoBasisViewNormal[COAT_NORMAL_IDX])); - localP2 = mul(P2, transpose(preLightData.orthoBasisViewNormal[COAT_NORMAL_IDX])); - B = normalize(cross(localP1, localP2)); + // In that case, instead of only considering possibly dual normal maps and thus two + // local canonical frames we have lobe specific frames because of the anisotropic hack: + localP1 = mul(P1, transpose(preLightData.orthoBasisViewNormal[ORTHOBASIS_VN_BASE_LOBEA_IDX])); + localP2 = mul(P2, transpose(preLightData.orthoBasisViewNormal[ORTHOBASIS_VN_BASE_LOBEA_IDX])); + B = normalize(cross(localP1, localP2)); } - if (AREA_LIGHTS_ANISOTROPY_ENABLED) // statically known, so no need for if else, just overwrite the above + ltcValue = LTCEvaluate(localP1, localP2, B, preLightData.ltcTransformSpecular[BASE_LOBEA_IDX]); + // See EvaluateBSDF_Env TODOENERGY: + lighting.specular += preLightData.energyCompensationFactor[BASE_LOBEA_IDX] * preLightData.specularFGD[BASE_LOBEA_IDX] * ltcValue; + } + IF_DEBUG( if ( _DebugLobeMask.z != 0.0) ) + { + if (AREA_LIGHTS_ANISOTROPY_ENABLED) { - // No need to check if we have dual normal maps here: alread taken care via iblN[COAT_LOBE_IDX] - // in GetPreLightData and setup in preLightData.orthoBasisViewNormal[ORTHOBASIS_VN_COAT_LOBE_IDX]. - - // we have lobe specific frames because of the anisotropic hack (there's no anisotropy for the - // coat, but the index of the ortho basis is lobe-based still because of the base layer lobes which - // can have anisotropy). 
- localP1 = mul(P1, transpose(preLightData.orthoBasisViewNormal[ORTHOBASIS_VN_COAT_LOBE_IDX]));
- localP2 = mul(P2, transpose(preLightData.orthoBasisViewNormal[ORTHOBASIS_VN_COAT_LOBE_IDX]));
+ // In that case, instead of only considering possibly dual normal maps and thus two
+ // local canonical frames we have lobe specific frames because of the anisotropic hack:
+ localP1 = mul(P1, transpose(preLightData.orthoBasisViewNormal[ORTHOBASIS_VN_BASE_LOBEB_IDX]));
+ localP2 = mul(P2, transpose(preLightData.orthoBasisViewNormal[ORTHOBASIS_VN_BASE_LOBEB_IDX]));
 B = normalize(cross(localP1, localP2));
 }
- ltcValue = LTCEvaluate(localP1, localP2, B, preLightData.ltcTransformSpecular[COAT_LOBE_IDX]);
- lighting.specular += preLightData.energyCompensationFactor[COAT_LOBE_IDX] * preLightData.specularFGD[COAT_LOBE_IDX] * ltcValue;
+ ltcValue = LTCEvaluate(localP1, localP2, B, preLightData.ltcTransformSpecular[BASE_LOBEB_IDX]);
+ lighting.specular += preLightData.energyCompensationFactor[BASE_LOBEB_IDX] * preLightData.specularFGD[BASE_LOBEB_IDX] * ltcValue;
 }
- }
- lighting.specular *= lightData.specularDimmer;
+
+ if (IsVLayeredEnabled(bsdfData))
+ {
+ IF_DEBUG( if ( _DebugLobeMask.x != 0.0) )
+ {
+ if (IsCoatNormalMapEnabled(bsdfData))
+ {
+ localP1 = mul(P1, transpose(preLightData.orthoBasisViewNormal[COAT_NORMAL_IDX]));
+ localP2 = mul(P2, transpose(preLightData.orthoBasisViewNormal[COAT_NORMAL_IDX]));
+ B = normalize(cross(localP1, localP2));
+ }
+ if (AREA_LIGHTS_ANISOTROPY_ENABLED) // statically known, so no need for if else, just overwrite the above
+ {
+ // No need to check if we have dual normal maps here: already taken care of via iblN[COAT_LOBE_IDX]
+ // in GetPreLightData and setup in preLightData.orthoBasisViewNormal[ORTHOBASIS_VN_COAT_LOBE_IDX].
+
+ // we have lobe specific frames because of the anisotropic hack (there's no anisotropy for the
+ // coat, but the index of the ortho basis is lobe-based still because of the base layer lobes which
+ // can have anisotropy).
+ localP1 = mul(P1, transpose(preLightData.orthoBasisViewNormal[ORTHOBASIS_VN_COAT_LOBE_IDX]));
+ localP2 = mul(P2, transpose(preLightData.orthoBasisViewNormal[ORTHOBASIS_VN_COAT_LOBE_IDX]));
+ B = normalize(cross(localP1, localP2));
+ }
+ ltcValue = LTCEvaluate(localP1, localP2, B, preLightData.ltcTransformSpecular[COAT_LOBE_IDX]);
+ lighting.specular += preLightData.energyCompensationFactor[COAT_LOBE_IDX] * preLightData.specularFGD[COAT_LOBE_IDX] * ltcValue;
+ }
+ }
+ lighting.specular *= lightData.specularDimmer;

- // Save ALU by applying 'lightData.color' only once.
+ lighting.diffuse *= lightData.color;
+ lighting.specular *= lightData.color;
-#ifdef DEBUG_DISPLAY
- if (_DebugLightingMode == DEBUGLIGHTINGMODE_LUX_METER)
- {
- // Make sure we're using the base layer frame:
- localP1 = mul(P1, transpose(preLightData.orthoBasisViewNormal[BASE_NORMAL_IDX]));
- localP2 = mul(P2, transpose(preLightData.orthoBasisViewNormal[BASE_NORMAL_IDX]));
- B = normalize(cross(localP1, localP2));
-
- // Only lighting, not BSDF
- // Apply area light on lambert then multiply by PI to cancel Lambert
- lighting.diffuse = LTCEvaluate(localP1, localP2, B, k_identity3x3);
- lighting.diffuse *= PI * lightData.diffuseDimmer;
+ #ifdef DEBUG_DISPLAY
+ if (_DebugLightingMode == DEBUGLIGHTINGMODE_LUX_METER)
+ {
+ // Make sure we're using the base layer frame:
+ localP1 = mul(P1, transpose(preLightData.orthoBasisViewNormal[BASE_NORMAL_IDX]));
+ localP2 = mul(P2, transpose(preLightData.orthoBasisViewNormal[BASE_NORMAL_IDX]));
+ if (AREA_LIGHTS_ANISOTROPY_ENABLED)
+ {
+ // In that case orthoBasisViewNormal[] is per lobe due to anisotropic hack,
+ // use orthoBasisViewNormalDiffuse:
+ localP1 = mul(P1, transpose(preLightData.orthoBasisViewNormalDiffuse));
+ localP2 = mul(P2, transpose(preLightData.orthoBasisViewNormalDiffuse));
+ }
+ B = normalize(cross(localP1, localP2));
+
+ // Only lighting, not BSDF
+ // Apply area light on lambert then multiply by PI to cancel Lambert
+ lighting.diffuse = LTCEvaluate(localP1, localP2, B, k_identity3x3);
+ lighting.diffuse *= PI * lightData.diffuseDimmer;
+ }
+ #endif
}
-#endif
#endif // STACK_LIT_DISPLAY_REFERENCE_AREA
@@ -3916,211 +3929,255 @@ DirectLighting EvaluateBSDF_Rect( LightLoopContext lightLoopContext,
#else
float3 unL = lightData.positionRWS - positionWS;
- if (dot(lightData.forward, unL) >= 0.0001)
+ // if (dot(lightData.forward, unL) >= eps), all points on the light are back-facing:
+ // (Don't early return to guard against compiler bug for if / quick early return constructs)
+ if (dot(lightData.forward, unL) < FLT_EPS)
{
- // The light is back-facing.
- return lighting;
- }
-
- // Rotate the light direction into the light space.
- float3x3 lightToWorld = float3x3(lightData.right, lightData.up, -lightData.forward);
- unL = mul(unL, transpose(lightToWorld));
-
- // TODO: This could be precomputed.
- float halfWidth = lightData.size.x * 0.5;
- float halfHeight = lightData.size.y * 0.5;
-
- // Define the dimensions of the attenuation volume.
- // TODO: This could be precomputed.
- float range = lightData.range;
- float3 invHalfDim = rcp(float3(range + halfWidth,
- range + halfHeight,
- range));
-
- // Compute the light attenuation.
-#ifdef ELLIPSOIDAL_ATTENUATION
- // The attenuation volume is an axis-aligned ellipsoid s.t.
- // r1 = (r + w / 2), r2 = (r + h / 2), r3 = r.
- float intensity = EllipsoidalDistanceAttenuation(unL, invHalfDim,
- lightData.rangeAttenuationScale,
- lightData.rangeAttenuationBias);
-#else
- // The attenuation volume is an axis-aligned box s.t.
- // hX = (r + w / 2), hY = (r + h / 2), hZ = r.
- float intensity = BoxDistanceAttenuation(unL, invHalfDim,
- lightData.rangeAttenuationScale,
- lightData.rangeAttenuationBias);
-#endif
-
- // Terminate if the shaded point is too far away.
- if (intensity == 0.0)
- return lighting;
-
- lightData.diffuseDimmer *= intensity;
- lightData.specularDimmer *= intensity;
- // Translate the light s.t. the shaded point is at the origin of the coordinate system.
- lightData.positionRWS -= positionWS;
-
- float4x3 lightVerts;
-
- // TODO: some of this could be precomputed. 
- lightVerts[0] = lightData.positionRWS + lightData.right * -halfWidth + lightData.up * -halfHeight; // LL - lightVerts[1] = lightData.positionRWS + lightData.right * -halfWidth + lightData.up * halfHeight; // UL - lightVerts[2] = lightData.positionRWS + lightData.right * halfWidth + lightData.up * halfHeight; // UR - lightVerts[3] = lightData.positionRWS + lightData.right * halfWidth + lightData.up * -halfHeight; // LR + // Rotate the light direction into the light space. + float3x3 lightToWorld = float3x3(lightData.right, lightData.up, -lightData.forward); + unL = mul(unL, transpose(lightToWorld)); + + // TODO: This could be precomputed. + float halfWidth = lightData.size.x * 0.5; + float halfHeight = lightData.size.y * 0.5; + + // Define the dimensions of the attenuation volume. + // TODO: This could be precomputed. + float range = lightData.range; + float3 invHalfDim = rcp(float3(range + halfWidth, + range + halfHeight, + range)); + + // Compute the light attenuation. + #ifdef ELLIPSOIDAL_ATTENUATION + // The attenuation volume is an axis-aligned ellipsoid s.t. + // r1 = (r + w / 2), r2 = (r + h / 2), r3 = r. + float intensity = EllipsoidalDistanceAttenuation(unL, invHalfDim, + lightData.rangeAttenuationScale, + lightData.rangeAttenuationBias); + #else + // The attenuation volume is an axis-aligned box s.t. + // hX = (r + w / 2), hY = (r + h / 2), hZ = r. + float intensity = BoxDistanceAttenuation(unL, invHalfDim, + lightData.rangeAttenuationScale, + lightData.rangeAttenuationBias); + #endif + + // If the shaded point is too far away we avoid shading. + // (guard against compiler bug for if / quick early return constructs) + if (intensity != 0.0) + { + lightData.diffuseDimmer *= intensity; + lightData.specularDimmer *= intensity; + + // Translate the light s.t. the shaded point is at the origin of the coordinate system. + lightData.positionRWS -= positionWS; + + float4x3 lightVerts; + + // TODO: some of this could be precomputed. + lightVerts[0] = lightData.positionRWS + lightData.right * -halfWidth + lightData.up * -halfHeight; // LL + lightVerts[1] = lightData.positionRWS + lightData.right * -halfWidth + lightData.up * halfHeight; // UL + lightVerts[2] = lightData.positionRWS + lightData.right * halfWidth + lightData.up * halfHeight; // UR + lightVerts[3] = lightData.positionRWS + lightData.right * halfWidth + lightData.up * -halfHeight; // LR + + // Rotate the endpoints into the local coordinate system. + float4x3 localLightVerts = mul(lightVerts, transpose(preLightData.orthoBasisViewNormal[BASE_NORMAL_IDX])); + + if (AREA_LIGHTS_ANISOTROPY_ENABLED) // statically known, so no need for if else, just overwrite the above + { + // Since we proceed with calculating diffuse and transmission irradiance, we setup + // the points for the diffuse frame. + // There's no anisotropy on the diffuse component and this is oriented considering + // the proper base layer normal: + localLightVerts = mul(lightVerts, transpose(preLightData.orthoBasisViewNormalDiffuse)); + } + + // Calculate the L irradiance (ltcValue) first for the diffuse part and transmission, + // then for the specular base layer and finishing with the coat. + float3 ltcValue; + + // Evaluate the diffuse part + // Polygon irradiance in the transformed configuration. 
+ float4x3 LD = mul(localLightVerts, preLightData.ltcTransformDiffuse); + ltcValue = PolygonIrradiance(LD); + ltcValue *= lightData.diffuseDimmer; + // Only apply cookie if there is one + if ( lightData.cookieMode != COOKIEMODE_NONE ) + { + // Compute the cookie data for the diffuse term + float3 formFactorD = PolygonFormFactor(LD); + ltcValue *= SampleAreaLightCookie(lightData.cookieScaleOffset, LD, formFactorD); + } + // We don't multiply by 'bsdfData.diffuseColor' here. It's done only once in PostEvaluateBSDF(). + lighting.diffuse = preLightData.diffuseFGD * preLightData.diffuseEnergy * ltcValue; + + UNITY_BRANCH if (HasFlag(bsdfData.materialFeatures, MATERIALFEATUREFLAGS_STACK_LIT_TRANSMISSION)) + { + // Flip the view vector and the normal. The bitangent stays the same. + float3x3 flipMatrix = float3x3(-1, 0, 0, + 0, 1, 0, + 0, 0, -1); + + // Use the Lambertian approximation for performance reasons. + // The matrix multiplication should not generate any extra ALU on GCN. + float3x3 ltcTransform = mul(flipMatrix, k_identity3x3); + + // Polygon irradiance in the transformed configuration. + // TODO: double evaluation is very inefficient! This is a temporary solution. + float4x3 LTD = mul(localLightVerts, ltcTransform); + ltcValue = PolygonIrradiance(LTD); + ltcValue *= lightData.diffuseDimmer; + // Only apply cookie if there is one + if ( lightData.cookieMode != COOKIEMODE_NONE ) + { + // Compute the cookie data for the transmission diffuse term + float3 formFactorTD = PolygonFormFactor(LTD); + ltcValue *= SampleAreaLightCookie(lightData.cookieScaleOffset, LTD, formFactorTD); + } + // VLAYERED_DIFFUSE_ENERGY_HACKED_TERM: + // In Lit with Lambert, there's no diffuseFGD, it is one. In our case, we also + // need a diffuse energy term when vlayered. + + // We use diffuse lighting for accumulation since it is going to be blurred during the SSS pass. + // We don't multiply by 'bsdfData.diffuseColor' here. It's done only once in PostEvaluateBSDF(). + lighting.diffuse += bsdfData.transmittance * ltcValue * preLightData.diffuseEnergy; + } + + // Evaluate the specular lobes for the stack + IF_DEBUG( if ( _DebugLobeMask.y != 0.0) ) + { + if (AREA_LIGHTS_ANISOTROPY_ENABLED) + { + // In that case, instead of only considering possibly dual normal maps and thus two + // local canonical frames we have lobe specific frames because of the anisotropic hack: + localLightVerts = mul(lightVerts, transpose(preLightData.orthoBasisViewNormal[ORTHOBASIS_VN_BASE_LOBEA_IDX])); + } + // Polygon irradiance in the transformed configuration. 
+ float4x3 LAS = mul(localLightVerts, preLightData.ltcTransformSpecular[BASE_LOBEA_IDX]); + ltcValue = PolygonIrradiance(LAS); + // Only apply cookie if there is one + if ( lightData.cookieMode != COOKIEMODE_NONE ) + { + // Compute the cookie data for the specular term + float3 formFactorAS = PolygonFormFactor(LAS); + ltcValue *= SampleAreaLightCookie(lightData.cookieScaleOffset, LAS, formFactorAS); + } + + // See EvaluateBSDF_Env TODOENERGY: + lighting.specular += preLightData.energyCompensationFactor[BASE_LOBEA_IDX] * preLightData.specularFGD[BASE_LOBEA_IDX] * ltcValue; + } + IF_DEBUG( if ( _DebugLobeMask.z != 0.0) ) + { + if (AREA_LIGHTS_ANISOTROPY_ENABLED) + { + // In that case, instead of only considering possibly dual normal maps and thus two + // local canonical frames we have lobe specific frames because of the anisotropic hack: + localLightVerts = mul(lightVerts, transpose(preLightData.orthoBasisViewNormal[ORTHOBASIS_VN_BASE_LOBEB_IDX])); + } + float4x3 LS = mul(localLightVerts, preLightData.ltcTransformSpecular[BASE_LOBEB_IDX]); + ltcValue = PolygonIrradiance(LS); + // Only apply cookie if there is one + if ( lightData.cookieMode != COOKIEMODE_NONE ) + { + // Compute the cookie data for the specular term + float3 formFactorS = PolygonFormFactor(LS); + ltcValue *= SampleAreaLightCookie(lightData.cookieScaleOffset, LS, formFactorS); + } + + lighting.specular += preLightData.energyCompensationFactor[BASE_LOBEB_IDX] * preLightData.specularFGD[BASE_LOBEB_IDX] * ltcValue; + } + + if (IsVLayeredEnabled(bsdfData)) + { + if (IsCoatNormalMapEnabled(bsdfData)) + { + localLightVerts = mul(lightVerts, transpose(preLightData.orthoBasisViewNormal[COAT_NORMAL_IDX])); + } + if (AREA_LIGHTS_ANISOTROPY_ENABLED) + { + // In that case, instead of only considering possibly dual normal maps and thus two + // local canonical frames we have lobe specific frames because of the anisotropic hack: + localLightVerts = mul(lightVerts, transpose(preLightData.orthoBasisViewNormal[ORTHOBASIS_VN_COAT_LOBE_IDX])); + } + IF_DEBUG( if ( _DebugLobeMask.x != 0.0) ) + { + float4x3 LSCC = mul(localLightVerts, preLightData.ltcTransformSpecular[COAT_LOBE_IDX]); + ltcValue = PolygonIrradiance(LSCC); + // Only apply cookie if there is one + if ( lightData.cookieMode != COOKIEMODE_NONE ) + { + // Compute the cookie data for the specular term + float3 formFactorS = PolygonFormFactor(LSCC); + ltcValue *= SampleAreaLightCookie(lightData.cookieScaleOffset, LSCC, formFactorS); + } + lighting.specular += preLightData.energyCompensationFactor[COAT_LOBE_IDX] * preLightData.specularFGD[COAT_LOBE_IDX] * ltcValue; + } + } + lighting.specular *= lightData.specularDimmer; + + + // Save ALU by applying 'lightData.color' only once. + lighting.diffuse *= lightData.color; + lighting.specular *= lightData.color; - // Rotate the endpoints into the local coordinate system. 
- float4x3 localLightVerts = mul(lightVerts, transpose(preLightData.orthoBasisViewNormal[BASE_NORMAL_IDX]));
+ #ifdef DEBUG_DISPLAY
+ if (_DebugLightingMode == DEBUGLIGHTINGMODE_LUX_METER)
+ {
+ // Make sure we're using the base layer frame:
+ localLightVerts = mul(lightVerts, transpose(preLightData.orthoBasisViewNormal[BASE_NORMAL_IDX]));
+ if (AREA_LIGHTS_ANISOTROPY_ENABLED)
+ {
+ // In that case orthoBasisViewNormal[] is per lobe due to anisotropic hack,
+ // use orthoBasisViewNormalDiffuse:
+ localLightVerts = mul(lightVerts, transpose(preLightData.orthoBasisViewNormalDiffuse));
+ }
- if (AREA_LIGHTS_ANISOTROPY_ENABLED) // statically known, so no need for if else, just overwrite the above
- {
- // Since we proceed with calculating diffuse and transmission irradiance, we setup
- // the points for the diffuse frame.
- // There's no anisotropy on the diffuse component and this is oriented considering
- // the proper base layer normal:
- localLightVerts = mul(lightVerts, transpose(preLightData.orthoBasisViewNormalDiffuse));
- }
+ // Only lighting, not BSDF
+ // Apply area light on lambert then multiply by PI to cancel Lambert
+ lighting.diffuse = PolygonIrradiance(mul(localLightVerts, k_identity3x3));
+ lighting.diffuse *= PI * lightData.diffuseDimmer;
+ }
+ #endif
- // Calculate the L irradiance (ltcValue) first for the diffuse part and transmission,
- // then for the specular base layer and finishing with the coat.
- float3 ltcValue;
-
- // Evaluate the diffuse part
- // Polygon irradiance in the transformed configuration.
- float4x3 LD = mul(localLightVerts, preLightData.ltcTransformDiffuse);
- ltcValue = PolygonIrradiance(LD);
- ltcValue *= lightData.diffuseDimmer;
- // Only apply cookie if there is one
- if ( lightData.cookieMode != COOKIEMODE_NONE )
- {
- // Compute the cookie data for the diffuse term
- float3 formFactorD = PolygonFormFactor(LD);
- ltcValue *= SampleAreaLightCookie(lightData.cookieScaleOffset, LD, formFactorD);
- }
- // We don't multiply by 'bsdfData.diffuseColor' here. It's done only once in PostEvaluateBSDF().
- lighting.diffuse = preLightData.diffuseFGD * preLightData.diffuseEnergy * ltcValue;
+ } // if light not too far
- UNITY_BRANCH if (HasFlag(bsdfData.materialFeatures, MATERIALFEATUREFLAGS_STACK_LIT_TRANSMISSION))
- {
- // Flip the view vector and the normal. The bitangent stays the same.
- float3x3 flipMatrix = float3x3(-1, 0, 0,
- 0, 1, 0,
- 0, 0, -1);
-
- // Use the Lambertian approximation for performance reasons.
- // The matrix multiplication should not generate any extra ALU on GCN.
- float3x3 ltcTransform = mul(flipMatrix, k_identity3x3);
-
- // Polygon irradiance in the transformed configuration.
- // TODO: double evaluation is very inefficient! This is a temporary solution.
- float4x3 LTD = mul(localLightVerts, ltcTransform);
- ltcValue = PolygonIrradiance(LTD);
- ltcValue *= lightData.diffuseDimmer;
- // Only apply cookie if there is one
- if ( lightData.cookieMode != COOKIEMODE_NONE )
- {
- // Compute the cookie data for the transmission diffuse term
- float3 formFactorTD = PolygonFormFactor(LTD);
- ltcValue *= SampleAreaLightCookie(lightData.cookieScaleOffset, LTD, formFactorTD);
- }
- // VLAYERED_DIFFUSE_ENERGY_HACKED_TERM:
- // In Lit with Lambert, there's no diffuseFGD, it is one. In our case, we also
- // need a diffuse energy term when vlayered.
+ } // if light not back-facing
- // We use diffuse lighting for accumulation since it is going to be blurred during the SSS pass.
- // We don't multiply by 'bsdfData.diffuseColor' here. 
It's done only once in PostEvaluateBSDF(). - lighting.diffuse += bsdfData.transmittance * ltcValue * preLightData.diffuseEnergy; - } + float shadow = 1.0; + float shadowMask = 1.0; +#ifdef SHADOWS_SHADOWMASK + // shadowMaskSelector.x is -1 if there is no shadow mask + // Note that we override shadow value (in case we don't have any dynamic shadow) + shadow = shadowMask = (lightData.shadowMaskSelector.x >= 0.0) ? dot(BUILTIN_DATA_SHADOW_MASK, lightData.shadowMaskSelector) : 1.0; +#endif - // Evaluate the specular lobes for the stack - IF_DEBUG( if ( _DebugLobeMask.y != 0.0) ) +#if defined(SCREEN_SPACE_SHADOWS) && !defined(_SURFACE_TYPE_TRANSPARENT) + if ((lightData.screenSpaceShadowIndex & SCREEN_SPACE_SHADOW_INDEX_MASK) != INVALID_SCREEN_SPACE_SHADOW) { - if (AREA_LIGHTS_ANISOTROPY_ENABLED) - { - // In that case, instead of only considering possibly dual normal maps and thus two - // local canonical frames we have lobe specific frames because of the anisotropic hack: - localLightVerts = mul(lightVerts, transpose(preLightData.orthoBasisViewNormal[ORTHOBASIS_VN_BASE_LOBEA_IDX])); - } - // Polygon irradiance in the transformed configuration. - float4x3 LAS = mul(localLightVerts, preLightData.ltcTransformSpecular[BASE_LOBEA_IDX]); - ltcValue = PolygonIrradiance(LAS); - // Only apply cookie if there is one - if ( lightData.cookieMode != COOKIEMODE_NONE ) - { - // Compute the cookie data for the specular term - float3 formFactorAS = PolygonFormFactor(LAS); - ltcValue *= SampleAreaLightCookie(lightData.cookieScaleOffset, LAS, formFactorAS); - } - - // See EvaluateBSDF_Env TODOENERGY: - lighting.specular += preLightData.energyCompensationFactor[BASE_LOBEA_IDX] * preLightData.specularFGD[BASE_LOBEA_IDX] * ltcValue; + shadow = GetScreenSpaceShadow(posInput, lightData.screenSpaceShadowIndex); } - IF_DEBUG( if ( _DebugLobeMask.z != 0.0) ) + else +#endif // ENABLE_RAYTRACING + if (lightData.shadowIndex != -1) { - if (AREA_LIGHTS_ANISOTROPY_ENABLED) - { - // In that case, instead of only considering possibly dual normal maps and thus two - // local canonical frames we have lobe specific frames because of the anisotropic hack: - localLightVerts = mul(lightVerts, transpose(preLightData.orthoBasisViewNormal[ORTHOBASIS_VN_BASE_LOBEB_IDX])); - } - float4x3 LS = mul(localLightVerts, preLightData.ltcTransformSpecular[BASE_LOBEB_IDX]); - ltcValue = PolygonIrradiance(LS); - // Only apply cookie if there is one - if ( lightData.cookieMode != COOKIEMODE_NONE ) - { - // Compute the cookie data for the specular term - float3 formFactorS = PolygonFormFactor(LS); - ltcValue *= SampleAreaLightCookie(lightData.cookieScaleOffset, LS, formFactorS); - } - - lighting.specular += preLightData.energyCompensationFactor[BASE_LOBEB_IDX] * preLightData.specularFGD[BASE_LOBEB_IDX] * ltcValue; - } +#if RASTERIZED_AREA_LIGHT_SHADOWS + // lightData.positionRWS now contains the Light vector. + shadow = GetAreaLightAttenuation(lightLoopContext.shadowContext, posInput.positionSS, posInput.positionWS, bsdfData.normalWS, lightData.shadowIndex, normalize(lightData.positionRWS), length(lightData.positionRWS)); +#ifdef SHADOWS_SHADOWMASK + // See comment for punctual light shadow mask + shadow = lightData.nonLightMappedOnly ? 
min(shadowMask, shadow) : shadow; +#endif + shadow = lerp(shadowMask, shadow, lightData.shadowDimmer); - if (IsVLayeredEnabled(bsdfData)) - { - if (IsCoatNormalMapEnabled(bsdfData)) - { - localLightVerts = mul(lightVerts, transpose(preLightData.orthoBasisViewNormal[COAT_NORMAL_IDX])); - } - if (AREA_LIGHTS_ANISOTROPY_ENABLED) - { - // In that case, instead of only considering possibly dual normal maps and thus two - // local canonical frames we have lobe specific frames because of the anisotropic hack: - localLightVerts = mul(lightVerts, transpose(preLightData.orthoBasisViewNormal[ORTHOBASIS_VN_COAT_LOBE_IDX])); - } - IF_DEBUG( if ( _DebugLobeMask.x != 0.0) ) - { - float4x3 LSCC = mul(localLightVerts, preLightData.ltcTransformSpecular[COAT_LOBE_IDX]); - ltcValue = PolygonIrradiance(LSCC); - // Only apply cookie if there is one - if ( lightData.cookieMode != COOKIEMODE_NONE ) - { - // Compute the cookie data for the specular term - float3 formFactorS = PolygonFormFactor(LSCC); - ltcValue *= SampleAreaLightCookie(lightData.cookieScaleOffset, LSCC, formFactorS); - } - lighting.specular += preLightData.energyCompensationFactor[COAT_LOBE_IDX] * preLightData.specularFGD[COAT_LOBE_IDX] * ltcValue; - } +#endif } - lighting.specular *= lightData.specularDimmer; - - // Save ALU by applying 'lightData.color' only once. - lighting.diffuse *= lightData.color; - lighting.specular *= lightData.color; - -#ifdef DEBUG_DISPLAY - if (_DebugLightingMode == DEBUGLIGHTINGMODE_LUX_METER) - { - // Make sure we're using the base layer frame: - localLightVerts = mul(lightVerts, transpose(preLightData.orthoBasisViewNormal[BASE_NORMAL_IDX])); - - // Only lighting, not BSDF - // Apply area light on lambert then multiply by PI to cancel Lambert - lighting.diffuse = PolygonIrradiance(mul(localLightVerts, k_identity3x3)); - lighting.diffuse *= PI * lightData.diffuseDimmer; - } +#if RASTERIZED_AREA_LIGHT_SHADOWS || SUPPORTS_RAYTRACED_AREA_SHADOWS + float3 shadowColor = ComputeShadowColor(shadow, lightData.shadowTint, lightData.penumbraTint); + lighting.diffuse *= shadowColor; + lighting.specular *= shadowColor; #endif #endif // STACK_LIT_DISPLAY_REFERENCE_AREA @@ -4163,7 +4220,7 @@ IndirectLighting EvaluateBSDF_ScreenSpaceReflection(PositionInputs posInput, ApplyScreenSpaceReflectionWeight(ssrLighting); // For performance reasons, SSR doesn't allow us to be discriminating per lobe, ie wrt direction, roughness, - // anisotropy, etc. + // anisotropy, etc. // At least the vlayered BSDF stack model already represents the stack with a single interface with multiple // effective/equivalent lobes. @@ -4173,7 +4230,7 @@ IndirectLighting EvaluateBSDF_ScreenSpaceReflection(PositionInputs posInput, // This is the approach we take since roughnesses between coat and base lobes can be very different, while // if the coat exist, ConvertSurfaceDataToNormalData will output the roughness of the coat and we don't need // a boost of sharp reflections from a potentially rough bottom layer. - + float3 reflectanceFactor = (float3)0.0; if (IsVLayeredEnabled(bsdfData)) @@ -4389,7 +4446,7 @@ void PostEvaluateBSDF( LightLoopContext lightLoopContext, // bsdfData.diffuseColor is not appropriate to use when vlayered when doing GTAOMultiBounce here, but we can // try something with (bsdfData.diffuseColor * bsdfData.coatExtinction) (for specular occlusion with f0, it's // even worse but both are a hack anyway) We could also try "renormalizing diffuseEnergy" to the luminance of - // diffuseColor. + // diffuseColor. 
// For now, we use (bsdfData.diffuseColor * preLightData.diffuseEnergy) directly:
float3 GTAOMultiBounceTintBase = (bsdfData.diffuseColor * preLightData.diffuseEnergy);
GetApplyScreenSpaceDiffuseOcclusionForDirect(GTAOMultiBounceTintBase, preLightData.screenSpaceAmbientOcclusion, directAmbientOcclusion, lighting);
diff --git a/com.unity.render-pipelines.high-definition/Runtime/PostProcessing/PostProcessSystem.cs b/com.unity.render-pipelines.high-definition/Runtime/PostProcessing/PostProcessSystem.cs
index 4629f347a0e..0d1e26a9a19 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/PostProcessing/PostProcessSystem.cs
+++ b/com.unity.render-pipelines.high-definition/Runtime/PostProcessing/PostProcessSystem.cs
@@ -269,6 +269,8 @@ public void Cleanup()
RTHandles.Release(m_InternalLogLut);
CoreUtils.Destroy(m_FinalPassMaterial);
CoreUtils.Destroy(m_ClearBlackMaterial);
+ CoreUtils.Destroy(m_SMAAMaterial);
+ CoreUtils.Destroy(m_TemporalAAMaterial);
CoreUtils.SafeRelease(m_BokehNearKernel);
CoreUtils.SafeRelease(m_BokehFarKernel);
CoreUtils.SafeRelease(m_BokehIndirectCmd);
@@ -285,6 +287,8 @@ public void Cleanup()
m_InternalLogLut = null;
m_FinalPassMaterial = null;
m_ClearBlackMaterial = null;
+ m_SMAAMaterial = null;
+ m_TemporalAAMaterial = null;
m_BokehNearKernel = null;
m_BokehFarKernel = null;
m_BokehIndirectCmd = null;
diff --git a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/HDRenderPipeline.cs b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/HDRenderPipeline.cs
index be8af6df0b4..ab4468b6cad 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/HDRenderPipeline.cs
+++ b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/HDRenderPipeline.cs
@@ -940,6 +940,8 @@ void DisposeProbeCameraPool()
}
CameraCaptureBridge.enabled = false;
+
+ HDUtils.ReleaseComponentSingletons();
}
@@ -1147,6 +1149,7 @@ public struct Target
// Indices of render request to render before this one
public List dependsOnRenderRequestIndices;
public CameraSettings cameraSettings;
+ public List<(HDProbe.RenderData, HDProbe)> viewDependentProbesData;
}
struct HDCullingResults
{
@@ -1366,7 +1369,8 @@ protected override void Render(ScriptableRenderContext renderContext, Camera[] c
},
dependsOnRenderRequestIndices = ListPool.Get(),
index = renderRequests.Count,
- cameraSettings = CameraSettings.From(hdCamera)
+ cameraSettings = CameraSettings.From(hdCamera),
+ viewDependentProbesData = ListPool<(HDProbe.RenderData, HDProbe)>.Get()
// TODO: store DecalCullResult
};
renderRequests.Add(request);
@@ -1450,6 +1454,8 @@ float ComputeVisibility(int visibleInIndex, HDProbe visibleProbe)
parentCamera = visibleInRenderRequest.hdCamera.camera;
+ var renderDatas = ListPool<HDProbe.RenderData>.Get();
+
AddHDProbeRenderRequests(
visibleProbe,
viewerTransform,
visibilities,
HDUtils.GetSceneCullingMaskFromCamera(visibleInRenderRequest.hdCamera.camera),
parentCamera,
visibleInRenderRequest.hdCamera.camera.fieldOfView,
- visibleInRenderRequest.hdCamera.camera.aspect
+ visibleInRenderRequest.hdCamera.camera.aspect,
+ ref renderDatas
);
+
+ foreach (var renderData in renderDatas)
+ {
+ visibleInRenderRequest.viewDependentProbesData.Add((renderData, visibleProbe));
+ }
+
+ ListPool<HDProbe.RenderData>.Release(renderDatas);
}
}
else
@@ -1473,7 +1487,11 @@ float ComputeVisibility(int visibleInIndex, HDProbe visibleProbe)
visibleInOneViewer = true;
}
if (visibleInOneViewer)
- AddHDProbeRenderRequests(visibleProbe, null, 
visibilities, 0, parentCamera);
+ {
+ var renderDatas = ListPool<HDProbe.RenderData>.Get();
+ AddHDProbeRenderRequests(visibleProbe, null, visibilities, 0, parentCamera, referenceFieldOfView: 90, referenceAspect: 1, ref renderDatas);
+ ListPool<HDProbe.RenderData>.Release(renderDatas);
+ }
}
}
foreach (var pair in renderRequestIndicesWhereTheProbeIsVisible)
@@ -1487,8 +1505,9 @@ void AddHDProbeRenderRequests(
List<(int index, float weight)> visibilities,
ulong overrideSceneCullingMask,
Camera parentCamera,
- float referenceFieldOfView = 90,
- float referenceAspect = 1
+ float referenceFieldOfView,
+ float referenceAspect,
+ ref List<HDProbe.RenderData> renderDatas
)
{
var position = ProbeCapturePositionSettings.ComputeFrom(
@@ -1587,16 +1606,20 @@ ref _cullingResults
if (!visibleProbe.realtimeTexture.IsCreated())
visibleProbe.realtimeTexture.Create();
- visibleProbe.SetRenderData(
- ProbeSettings.Mode.Realtime,
- new HDProbe.RenderData(
camera.worldToCameraMatrix,
camera.projectionMatrix,
camera.transform.position,
camera.transform.rotation,
cameraSettings[j].frustum.fieldOfView,
cameraSettings[j].frustum.aspect
- )
+ var renderData = new HDProbe.RenderData(
camera.worldToCameraMatrix,
camera.projectionMatrix,
camera.transform.position,
camera.transform.rotation,
cameraSettings[j].frustum.fieldOfView,
cameraSettings[j].frustum.aspect
);
+
+ renderDatas.Add(renderData);
+
+ visibleProbe.SetRenderData(
+ ProbeSettings.Mode.Realtime,
+ renderData
);
// TODO: Assign the actual final target to render to.
@@ -1613,7 +1636,8 @@ ref _cullingResults
clearCameraSettings = true,
dependsOnRenderRequestIndices = ListPool.Get(),
index = renderRequests.Count,
- cameraSettings = cameraSettings[j]
+ cameraSettings = cameraSettings[j],
+ viewDependentProbesData = ListPool<(HDProbe.RenderData, HDProbe)>.Get()
// TODO: store DecalCullResult
};
@@ -1736,6 +1760,15 @@ ref _cullingResults
target.id = m_TemporaryTargetForCubemaps;
}
+ // An HDProbe stores only one RenderData per probe; however, RenderData can be view dependent (e.g. planar probes).
+ // To avoid using the render data of the wrong view, we store a copy of the render data for each viewer
+ // beforehand, and we set it on the probe right before said viewer is rendered. 
+ foreach (var probeDataPair in renderRequest.viewDependentProbesData) + { + var probe = probeDataPair.Item2; + var probeRenderData = probeDataPair.Item1; + probe.SetRenderData(ProbeSettings.Mode.Realtime, probeRenderData); + } // var aovRequestIndex = 0; foreach (var aovRequest in renderRequest.hdCamera.aovRequests) @@ -1775,6 +1808,7 @@ ref _cullingResults renderRequest.hdCamera.camera.targetTexture = null; ListPool.Release(renderRequest.dependsOnRenderRequestIndices); + ListPool<(HDProbe.RenderData, HDProbe)>.Release(renderRequest.viewDependentProbesData); // Culling results can be shared between render requests: clear only when required if (!skipClearCullingResults.Contains(renderRequest.index)) diff --git a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/RenderPass/MSAA/DepthValues.shader b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/RenderPass/MSAA/DepthValues.shader index e3d3c2f7bba..bdb882276dd 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/RenderPass/MSAA/DepthValues.shader +++ b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/RenderPass/MSAA/DepthValues.shader @@ -3,6 +3,8 @@ Shader "Hidden/HDRP/DepthValues" HLSLINCLUDE #pragma target 4.5 #pragma only_renderers d3d11 playstation xboxone vulkan metal switch + #pragma multi_compile _ _HAS_MOTION_VECTORS + #include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Common.hlsl" #include "Packages/com.unity.render-pipelines.high-definition/Runtime/ShaderLibrary/ShaderVariables.hlsl" // #pragma enable_d3d11_debug_symbols @@ -10,7 +12,9 @@ Shader "Hidden/HDRP/DepthValues" // Target multisampling textures TEXTURE2D_X_MSAA(float, _DepthTextureMS); TEXTURE2D_X_MSAA(float4, _NormalTextureMS); + #ifdef _HAS_MOTION_VECTORS TEXTURE2D_X_MSAA(float2, _MotionVectorTextureMS); + #endif struct Attributes { @@ -29,7 +33,9 @@ Shader "Hidden/HDRP/DepthValues" { float4 depthValues : SV_Target0; float4 normal : SV_Target1; + #ifdef _HAS_MOTION_VECTORS float2 motionVectors : SV_Target2; + #endif float actualDepth : SV_Depth; }; @@ -51,7 +57,9 @@ Shader "Hidden/HDRP/DepthValues" float depthVal = LOAD_TEXTURE2D_X_MSAA(_DepthTextureMS, pixelCoords, 0).x; fragO.depthValues = float4(depthVal, depthVal, depthVal, 0.0f); fragO.normal = LOAD_TEXTURE2D_X_MSAA(_NormalTextureMS, pixelCoords, 0); + #ifdef _HAS_MOTION_VECTORS fragO.motionVectors = LOAD_TEXTURE2D_X_MSAA(_MotionVectorTextureMS, pixelCoords, 0); + #endif fragO.actualDepth = fragO.depthValues.x; return fragO; } @@ -77,8 +85,10 @@ Shader "Hidden/HDRP/DepthValues" fragO.depthValues.z *= 0.5; fragO.actualDepth = fragO.depthValues.x; fragO.normal = LOAD_TEXTURE2D_X_MSAA(_NormalTextureMS, pixelCoords, 0); + #ifdef _HAS_MOTION_VECTORS // We pick the closest sample to camera, not really a great solution, but resolving motion vectors is ill defined. fragO.motionVectors = LOAD_TEXTURE2D_X_MSAA(_MotionVectorTextureMS, pixelCoords, closestSample); + #endif return fragO; } @@ -103,8 +113,10 @@ Shader "Hidden/HDRP/DepthValues" fragO.depthValues.z *= 0.25; fragO.actualDepth = fragO.depthValues.x; fragO.normal = LOAD_TEXTURE2D_X_MSAA(_NormalTextureMS, pixelCoords, 0); + #ifdef _HAS_MOTION_VECTORS // We pick the closest sample to camera, not really a great solution, but resolving motion vectors is ill defined. 
fragO.motionVectors = LOAD_TEXTURE2D_X_MSAA(_MotionVectorTextureMS, pixelCoords, closestSample);
+ #endif
return fragO;
}
@@ -129,8 +141,10 @@ Shader "Hidden/HDRP/DepthValues"
fragO.depthValues.z *= 0.125;
fragO.actualDepth = fragO.depthValues.x;
fragO.normal = LOAD_TEXTURE2D_X_MSAA(_NormalTextureMS, pixelCoords, 0);
+ #ifdef _HAS_MOTION_VECTORS
// We pick the closest sample to camera, not really a great solution, but resolving motion vectors is ill defined.
fragO.motionVectors = LOAD_TEXTURE2D_X_MSAA(_MotionVectorTextureMS, pixelCoords, closestSample);
+ #endif
return fragO;
}
ENDHLSL
diff --git a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/RenderPass/MipGenerator.cs b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/RenderPass/MipGenerator.cs
index d16186a2d52..f413e1df6ad 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/RenderPass/MipGenerator.cs
+++ b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/RenderPass/MipGenerator.cs
@@ -39,6 +39,8 @@ public void Release()
RTHandles.Release(m_TempDownsamplePyramid[i]);
m_TempDownsamplePyramid[i] = null;
}
+
+ CoreUtils.Destroy(m_ColorPyramidPSMat);
}
private int tmpTargetCount
@@ -222,4 +224,4 @@ public int RenderColorGaussianPyramid(CommandBuffer cmd, Vector2Int size, Textur
return srcMipLevel + 1;
}
}
-} \ No newline at end of file
+}
diff --git a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/SceneViewDrawMode.cs b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/SceneViewDrawMode.cs
index 507732895d6..72613d64978 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/SceneViewDrawMode.cs
+++ b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/SceneViewDrawMode.cs
@@ -19,8 +19,7 @@ static private bool RejectDrawMode(SceneView.CameraMode cameraMode)
cameraMode.drawMode == DrawCameraMode.DeferredSmoothness ||
cameraMode.drawMode == DrawCameraMode.DeferredNormal ||
cameraMode.drawMode == DrawCameraMode.ValidateAlbedo ||
- cameraMode.drawMode == DrawCameraMode.ValidateMetalSpecular ||
- cameraMode.drawMode == DrawCameraMode.LightOverlap
+ cameraMode.drawMode == DrawCameraMode.ValidateMetalSpecular
)
return false;
diff --git a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/Settings/FrameSettings.cs b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/Settings/FrameSettings.cs
index 1bad98da64e..b0757bcbf66 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/Settings/FrameSettings.cs
+++ b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/Settings/FrameSettings.cs
@@ -318,7 +318,9 @@ public enum FrameSettingsField
[DebuggerDisplay("{mask.humanizedData}")]
public struct FrameSettingsOverrideMask
{
- /// Mask of overridden values.
+ /// Gets the underlying BitArray HDRP uses to store the override mask, and thus specifies which fields are overridden.
+ /// Note: BitArray128 implements IBitArray and therefore has the scripting API described below. It is recommended to use the interface, as the exact BitArray can evolve from one version of the package to another if we need more capacity here. 
+ /// [SerializeField] public BitArray128 mask; } @@ -501,59 +503,54 @@ partial struct FrameSettings BitArray128 bitDatas; /// - /// if lodBiasMode == LODBiasMode.Fixed, then this value will overwrite QualitySettings.lodBias - /// if lodBiasMode == LODBiasMode.ScaleQualitySettings, then this value will scale QualitySettings.lodBias + /// If lodBiasMode is LODBiasMode.Fixed, then this value overwrites QualitySettings.lodBias. + /// If lodBiasMode is LODBiasMode.ScaleQualitySettings, then this value scales QualitySettings.lodBias. /// [SerializeField] public float lodBias; - /// Define how the QualitySettings.lodBias value is set. + /// Specifies how HDRP calculates QualitySettings.lodBias. [SerializeField] public LODBiasMode lodBiasMode; - /// The quality level to use when fetching the quality setting value. + /// The quality level the rendering component uses when it fetches the quality setting value. [SerializeField] public int lodBiasQualityLevel; /// - /// if maximumLODLevelMode == MaximumLODLevelMode.FromQualitySettings, then this value will overwrite QualitySettings.maximumLODLevel - /// if maximumLODLevelMode == MaximumLODLevelMode.OffsetQualitySettings, then this value will offset QualitySettings.maximumLODLevel + /// If maximumLODLevelMode is MaximumLODLevelMode.FromQualitySettings, then this value overwrites QualitySettings.maximumLODLevel + /// If maximumLODLevelMode is MaximumLODLevelMode.OffsetQualitySettings, then this value offsets QualitySettings.maximumLODLevel /// [SerializeField] public int maximumLODLevel; - /// Define how the QualitySettings.maximumLODLevel value is set. + /// Specifies how HDRP calculates QualitySettings.maximumLODLevel. [SerializeField] public MaximumLODLevelMode maximumLODLevelMode; - /// The quality level to use when fetching the quality setting value. + /// The maximum quality level the rendering component uses when it fetches the quality setting value. [SerializeField] public int maximumLODLevelQualityLevel; /// - /// The material quality level to use for this rendering. - /// if materialQuality == 0, then the material quality from the current quality settings - /// (in HDRP Asset) will be used. + /// The material quality level this rendering component uses. + /// If materialQuality == 0, the rendering component uses the material quality from the current quality settings in the HDRP Asset. /// public MaterialQuality materialQuality; - /// Helper to see binary saved data on LitShaderMode as a LitShaderMode enum. + /// Specifies the rendering path this rendering component uses. Here you can use the LitShaderMode enum to specify whether the rendering component uses forward or deferred rendering. public LitShaderMode litShaderMode { get => bitDatas[(uint)FrameSettingsField.LitShaderMode] ? LitShaderMode.Deferred : LitShaderMode.Forward; set => bitDatas[(uint)FrameSettingsField.LitShaderMode] = value == LitShaderMode.Deferred; } - /// - /// Get stored data for this field. - /// + /// Gets the stored override value for the passed in Frame Setting. Use this to access boolean values. /// Requested field. /// True if the field is enabled. public bool IsEnabled(FrameSettingsField field) => bitDatas[(uint)field]; - /// - /// Set stored data for this field. - /// + /// Sets the stored override value for the passed in Frame Setting. Use this to access boolean values. /// Requested field. /// State to set to the field. 
public void SetEnabled(FrameSettingsField field, bool value) => bitDatas[(uint)field] = value;
/// -
/// Compute the LOD bias value to use
+ /// Calculates the LOD bias value to use.
///
/// The HDRP Assets to use
/// The LOD Bias to use
@@ -570,7 +567,7 @@ public float GetResolvedLODBias(HDRenderPipelineAsset hdrp)
}
/// -
/// Compute the Maximum LOD level to use
+ /// Calculates the Maximum LOD level to use.
///
/// The HDRP Asset to use
/// The Maximum LOD level to use.
@@ -732,7 +729,7 @@ internal static void AggregateFrameSettings(ref FrameSettings aggregatedFrameSet
}
/// -
/// Equality operator.
+ /// Equality operator between two FrameSettings. Returns `true` if the two instances are equivalent (compares content).
///
/// First frame settings.
/// Second frame settings.
@@ -748,7 +745,7 @@ internal static void AggregateFrameSettings(ref FrameSettings aggregatedFrameSet
&& a.materialQuality == b.materialQuality;
/// -
/// Inequality operator.
+ /// Inequality operator between two FrameSettings. Returns `true` if the two instances differ (compares content).
///
/// First frame settings.
/// Second frame settings.
@@ -764,7 +761,7 @@ internal static void AggregateFrameSettings(ref FrameSettings aggregatedFrameSet
|| a.materialQuality != b.materialQuality;
/// -
/// Equality operator.
+ /// Equality operator between two FrameSettings. Returns `true` if the two instances are equivalent (compares content).
///
/// Frame Settings to compare to.
/// True if both settings are equal.
@@ -780,7 +777,7 @@ public override bool Equals(object obj)
&& materialQuality.Equals(((FrameSettings)obj).materialQuality);
/// -
/// Returns the hash code of the frame settings.
+ /// Returns the hash code of this object.
///
/// Hash code of the frame settings.
public override int GetHashCode()
diff --git a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/Utility/HDUtils.cs b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/Utility/HDUtils.cs
index b4b6796ad28..0560b4eaebb 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/Utility/HDUtils.cs
+++ b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/Utility/HDUtils.cs
@@ -24,7 +24,7 @@ public class HDUtils
static internal HDAdditionalLightData s_DefaultHDAdditionalLightData { get { return ComponentSingleton.instance; } }
/// Default HDAdditionalCameraData
static internal HDAdditionalCameraData s_DefaultHDAdditionalCameraData { get { return ComponentSingleton.instance; } }
- 
+
static List m_TempCustomPassVolumeList = new List();
static Texture3D m_ClearTexture3D;
@@ -473,7 +473,7 @@ internal static RenderPipelineAsset SwitchToBuiltinRenderPipeline(out bool asset
}
// Set the renderPipelineAsset, either on the quality settings if it was unset from there or in GraphicsSettings.
- // IMPORTANT: RenderPipelineManager.currentPipeline won't be HDRP until a camera.Render() call is made. 
+ // IMPORTANT: RenderPipelineManager.currentPipeline won't be HDRP until a camera.Render() call is made. 
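A minimal sketch of how the FrameSettings override API from the FrameSettings.cs hunk above fits together, using the documented `mask` indexer and `litShaderMode` accessor. The `HDAdditionalCameraData` members used here (`customRenderingSettings`, `renderingPathCustomFrameSettings`, `renderingPathCustomFrameSettingsOverrideMask`) are assumptions about the surrounding HDRP API and are not part of this diff:

```csharp
using UnityEngine.Rendering.HighDefinition;

// Hedged sketch: force a camera to the Deferred lit shader mode through the
// documented override-mask + accessor pair. Member names on
// HDAdditionalCameraData are assumptions and may vary per HDRP version.
static class FrameSettingsOverrideSketch
{
    public static void ForceDeferred(HDAdditionalCameraData cameraData)
    {
        // Opt the camera into per-camera frame settings (assumed field).
        cameraData.customRenderingSettings = true;

        // Flag LitShaderMode as overridden in the BitArray128-backed mask...
        cameraData.renderingPathCustomFrameSettingsOverrideMask
            .mask[(uint)FrameSettingsField.LitShaderMode] = true;

        // ...then set the value itself through the documented accessor.
        cameraData.renderingPathCustomFrameSettings.litShaderMode = LitShaderMode.Deferred;
    }
}
```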
internal static void RestoreRenderPipelineAsset(bool wasUnsetFromQuality, RenderPipelineAsset renderPipelineAsset) { if(wasUnsetFromQuality) @@ -1001,5 +1001,12 @@ internal static void DisplayUnsupportedAPIMessage(string graphicAPI = null) string msg = "Platform " + currentPlatform + " with device " + graphicAPI + " is not supported with High Definition Render Pipeline, no rendering will occur"; DisplayUnsupportedMessage(msg); } + + internal static void ReleaseComponentSingletons() + { + ComponentSingleton.Release(); + ComponentSingleton.Release(); + ComponentSingleton.Release(); + } } } diff --git a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/XR/XRSystem.cs b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/XR/XRSystem.cs index e7d5d48222d..bb117be26be 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/XR/XRSystem.cs +++ b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/XR/XRSystem.cs @@ -73,6 +73,9 @@ internal XRSystem(RenderPipelineResources.ShaderResources shaders) [RuntimeInitializeOnLoadMethod(RuntimeInitializeLoadType.BeforeSplashScreen)] internal static void XRSystemInit() { + if (GraphicsSettings.currentRenderPipeline == null) + return; + SubsystemManager.GetInstances(displayList); for (int i = 0; i < displayList.Count; i++) diff --git a/com.unity.render-pipelines.high-definition/Runtime/Sky/PhysicallyBasedSky/PhysicallyBasedSkyRenderer.cs b/com.unity.render-pipelines.high-definition/Runtime/Sky/PhysicallyBasedSky/PhysicallyBasedSkyRenderer.cs index a9f1a4b69e7..6c206f55888 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Sky/PhysicallyBasedSky/PhysicallyBasedSkyRenderer.cs +++ b/com.unity.render-pipelines.high-definition/Runtime/Sky/PhysicallyBasedSky/PhysicallyBasedSkyRenderer.cs @@ -32,7 +32,7 @@ public enum PbrSkyConfig static ComputeShader s_GroundIrradiancePrecomputationCS; static ComputeShader s_InScatteredRadiancePrecomputationCS; - static Material s_PbrSkyMaterial; + Material s_PbrSkyMaterial; static MaterialPropertyBlock s_PbrSkyMaterialProperties; static GraphicsFormat s_ColorFormat = GraphicsFormat.R16G16B16A16_SFloat; @@ -80,8 +80,7 @@ public override void Build() s_InScatteredRadiancePrecomputationCS = hdrpResources.shaders.inScatteredRadiancePrecomputationCS; s_PbrSkyMaterialProperties = new MaterialPropertyBlock(); - if (s_PbrSkyMaterial == null) // Material instance is static. - s_PbrSkyMaterial = CoreUtils.CreateEngineMaterial(hdrpResources.shaders.physicallyBasedSkyPS); + s_PbrSkyMaterial = CoreUtils.CreateEngineMaterial(hdrpResources.shaders.physicallyBasedSkyPS); Debug.Assert(s_GroundIrradiancePrecomputationCS != null); Debug.Assert(s_InScatteredRadiancePrecomputationCS != null); @@ -126,6 +125,8 @@ public override void Cleanup() RTHandles.Release(m_InScatteredRadianceTables[3]); m_InScatteredRadianceTables[3] = null; RTHandles.Release(m_InScatteredRadianceTables[4]); m_InScatteredRadianceTables[4] = null; + CoreUtils.Destroy(s_PbrSkyMaterial); + m_LastPrecomputedBounce = 0; } diff --git a/com.unity.shadergraph/Documentation~/Baked-GI-Node.md b/com.unity.shadergraph/Documentation~/Baked-GI-Node.md index 1b29650f742..e4c27f44dfa 100644 --- a/com.unity.shadergraph/Documentation~/Baked-GI-Node.md +++ b/com.unity.shadergraph/Documentation~/Baked-GI-Node.md @@ -9,7 +9,7 @@ Note: The behavior of this [Node](Node.md) is undefined globally. Shader Graph d Different Render Pipelines may produce different results. 
If you're building a shader in one Render Pipeline that you want to use in both, try checking it in both pipelines before production. A [Node](Node.md) might be defined in one Render Pipeline and undefined in the other. If this [Node](Node.md) is undefined, it returns 0 (black).
#### Unity Render Pipelines Support
-- High Definition Render Pipeline
+- High Definition Render Pipeline. However, this Node does not work in a Shader Graph that targets HDRP's [Unlit Master Node](https://docs.unity3d.com/Packages/com.unity.render-pipelines.high-definition@latest/index.html?subfolder=/manual/Master-Node-Unlit.html).
- Universal Render Pipeline
## Ports
diff --git a/com.unity.visualeffectgraph/Documentation~/ComponentAPI.md b/com.unity.visualeffectgraph/Documentation~/ComponentAPI.md
index a819e258e83..f9da96d6bce 100644
--- a/com.unity.visualeffectgraph/Documentation~/ComponentAPI.md
+++ b/com.unity.visualeffectgraph/Documentation~/ComponentAPI.md
@@ -1,5 +1,3 @@
-
Draft: The content on this page is complete, but it has not been reviewed yet.
- # Visual Effect component API To create an instance of a [Visual Effect Graph](VisualEffectGraphAsset.md) in a Scene, Unity uses the [Visual Effect component](VisualEffectComponent.md). The Visual Effect component attaches to GameObjects in your Scene and references a Visual Effect Graph which defines the visual effect. This allows you to create different instances of effects at various positions and orientations, and control each effect independently. To control an effect at runtime, Unity provides C# API that you can use to modify the Visual Effect component and set [Property](Properties.md) overrides. diff --git a/com.unity.visualeffectgraph/Documentation~/Contexts.md b/com.unity.visualeffectgraph/Documentation~/Contexts.md index 6cef77728e7..b88f7430276 100644 --- a/com.unity.visualeffectgraph/Documentation~/Contexts.md +++ b/com.unity.visualeffectgraph/Documentation~/Contexts.md @@ -1,137 +1,142 @@ -
Draft: The content on this page is complete, but it has not been reviewed yet.
# Contexts -Contexts are the main elements of the Graph Workflow logic (vertical) and define the succession and the relationships of operations and simulations. Every context defines one stage of computing, for example computing how many particles need to be spawned, creating new particles or updating all living particles. +Contexts are the main element of the Visual Effect Graph's **processing** (vertical) workflow and determine how particles spawn and simulate. The way you organize Contexts on the graph defines order of operation for the processing workflow. For information on the processing workflow, see [Visual Effect Graph Logic](GraphLogicAndPhilosophy.md). Every Context defines one stage of computation. For example a Context can: -Context connect to each other when there is meaning : After creating new particles, an Initialize context can connect to a Update Particle context, or directly to a Output Particle Context to render the particles without simulating them. +* Calculate how many particles the effect should spawn. +* Create new particles. +* Update all living particles. -## Creating and Connecting Contexts +Contexts connect to one another sequentially to define the lifecycle of particles. After a graph creates new particles, the **Initialize** Context can connect to an **Update Particle** Context to simulate each particle. Also, the **Initialize** Context can instead connect directly to an **Output Particle** Context to render the particles without simulating any behavior. -Contexts are Graph elements, so they can be created using the Right Click > Add Node Menu, Spacebar Menu or by making a workflow (vertical) connection from another context (providing only compatible contexts) +## Creating and connecting Contexts -Contexts connect to each other using the Ports at the top and the bottom. +A Context is a type of [graph element](GraphLogicAndPhilosophy.md#graph-elements) so to create one, see [Adding graph elements](VisualEffectGraphWindow.md#adding-graph-elements). + +Contexts connect to one another in a vertical, linear order. To achieve this, they use [flow slots](). Depending on which part of the particle lifecycle a Context defines, it may have flow slots on its top, its bottom, or both. ## Configuring Contexts -Adjusting Context [Settings](GraphLogicAndPhilosophy.md#settings) in the Node UI or the Inspector can change the way the Operator looks and behaves. +To change the behavior of the Context, adjust its [settings](GraphLogicAndPhilosophy.md#settings) in the Node UI or the Inspector. -> For instance, Changing the UV Mode of a `Quad Output` Context, from *Simple* to *FlipbookMotionBlend* will add Extra *Flipbook Size*, *Motion Vector Map* and *Motion Vector Scale* Properties to the Context Header. +Some settings also change how the Context looks. For example in a **Quad Output** Context, if you set the UV Mode to **FlipbookMotionBlend**, Unity adds the following extra properties to the Context header: **Flipbook Size**, **Motion Vector Map**, and **Motion Vector Scale**. -## Flow Compatibility +## Flow compatibility -Not all contexts can be connected altogether, in any order. Some rules apply to keep a consistent workflow: +Not all Contexts can connect to one another. To keep a consistent workflow, the following rules apply: -* Contexts connect by compatible input/output data type. -* Events can connect to one or many events / initialize contexts. 
-* Initialize contexts can have one or many SpawnEvent source or one or many GPUSpawnEvent source, but these data type are mutually exclusive.
-* Only One Initialize can be connected to one Update Context
-* You can connect any Output Contexts to a Initialize / Update context.
+* Contexts only connect to compatible input/output data types.
+* [Events](Events.md) can connect to one or many Events or **Initialize** Contexts.
+* **Initialize** Contexts can have one or many **SpawnEvent** sources or one or many **GPUSpawnEvent** sources, but these data types are mutually exclusive.
+* Only one **Initialize** Context can connect to one **Update** Context.
+* You can connect an **Output** Context to an **Initialize** or **Update** Context.
 
- Here is a recap table of the context compatibility:
+For a breakdown of context compatibility, see the table below.
 
| Context | Input Data Type | Output Data Type | Specific Comments |
-| ------------------ | ------------------------------------ | ---------------- | ------------------------------------------------------------ |
-| Event | None | SpawnEvent (1+) | |
-| Spawn | SpawnEvent (1+) | SpawnEvent (1+) | Two input pins, start and stop the spawn context |
-| GPU Event | None | SpawnEvent | Outputs to Initialize Context |
-| Initialize | SpawnEvent (1+) / GPUSpawnEvent (1+) | Particle (1) | Can output to Particle Update or Particle Output. Input types SpawnEvent/GPUSpawnEvent are mutually exclusive. |
-| Update | Particle (1) | Particle (1+) | Can output to a Particle Update or Particle Output |
-| Particle Output | Particle (1) | None | Can either have input from an Initialize or Update |
-| Static Mesh Output | None | None | Standalone Context |
+| ---------------------- | --------------------------------------------- | ------------------- | ------------------------------------------------------------ |
+| **Event** | **None** | **SpawnEvent** (1+) | **None** |
+| **Spawn** | **SpawnEvent** (1+) | **SpawnEvent** (1+) | Has two input flow slots which start and stop the **Spawn** context respectively. |
+| **GPU Event** | **None** | **SpawnEvent** | Outputs to **Initialize** Context |
+| **Initialize** | **SpawnEvent** (1+) or **GPUSpawnEvent** (1+) | **Particle** (1) | Input types are either **SpawnEvent** or **GPUSpawnEvent**. These input types are mutually exclusive.
Can output to **Particle Update** or **Particle Output**. | +| **Update** | **Particle** (1) | **Particle** (1+) | Can output to a **Particle Update** or **Particle Output**. | +| **Particle Output** | **Particle** (1) | **None** | Can either have input from an **Initialize** or **Update** Context.
No output. | +| **Static Mesh Output** | **None** | **None** | Standalone Context. | -# Context Type Overview +# Context type overview -This section covers all the common settings of every kind of context. For more details about specific contexts, see [Context Library]() +This section covers all the common settings for every kind of Context. ## Event -Event Contexts only display a Name as a string that need to be called on the Component API in order to Send this event to the graph and activate a workflow from this Node. +Event Contexts only display their name, which is a string. To trigger an Event Context and activate a workflow from it, use the Event Context's name in the [component API](ComponentApi.md). For information on how to do this, see [Sending Events](ComponentApi.md#sending-events). ## Spawn -Spawn Contexts are standalone systems that have three States : Playing, Stopped and Delayed. - -* **Looping** (Running) state means that the Blocks are computed and will perform spawn of new particles -* **Finished** (Idle) state means that the spawn machine is off and will not spawn particles -* **DelayingBeforeLoop/DelayingAfterLoop** (Waiting) state stops spawning particles until the end of a user-set delay, then restarts spawning particles. +Spawn Contexts are standalone systems that have three States: Running, Idle, and Waiting. -Spawn contexts can be customized using compatible **Blocks**. +* **Looping** (Running): This state means that Unity computes the Blocks in the Context and spawns new particles. +* **Finished** (Idle): This state means that the spawn machine is off and does not compute Blocks in the Context or spawn particles. +* **DelayingBeforeLoop/DelayingAfterLoop** (Waiting): This state pauses the Context for the duration of a delay time which you can specify. After the delay, the Context resumes, computes Blocks in the Context, and spawns particles. -You can find Spawn Context API Reference [here](https://docs.unity3d.com/2019.3/Documentation/ScriptReference/VFX.VFXSpawnerLoopState.html). +To customize **Spawn** Contexts, you can add compatible **Blocks** to them. For information on the Spawn Context API, see the [Script Reference](https://docs.unity3d.com/2019.3/Documentation/ScriptReference/VFX.VFXSpawnerLoopState.html). -### Turning On and Off +### Enabling and disabling -Spawn Contexts expose two [Flow Input Slots](GraphLogicAndPhilosophy.md#processing-workflow-vertical-logic): Start and Stop: +Spawn Contexts expose two [flow slots](GraphLogicAndPhilosophy.md#processing-workflow-vertical-logic): **Start** and **Stop**: -- Start input **Resets** and/or **Start** the Spawn System : if not connected, it is implicitly bound to the `OnPlay` [Event](Events.md) . Hitting Start many times has the same effect as pushing it once. -- Stop input **Stops** the Spawn System : if not connected, it is implicitly bound to the `OnStop` [Event](Events.md) +- The **Start** input resets/starts the Spawn Context. If you do not connect anything to this flow slot, it implicitly uses the **OnPlay** [Event](Events.md). Using **Start** many times has the same effect as using it once. +- The **Stop** input stops the Spawn System. If you do not connect anything to this flow slot, it implicitly uses the **OnStop** [Event](Events.md). -### Looping and Delaying +### Looping and delaying -Spawn contexts contains a state and will perform spawning particles based on a looping system. +Each Spawn Context contains a state to determine when the Context spawns particles. 
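A minimal sketch of the Start/Stop flow described above, driven from the component API: `Play()` and `Stop()` raise the implicit **OnPlay**/**OnStop** events when the flow slots are left unconnected, and `SendEvent` targets a named Event Context (`"MySpawnEvent"` below is a placeholder name, not from this page):

```csharp
using UnityEngine;
using UnityEngine.VFX;

// Minimal sketch: toggle a Spawn Context through its implicit events,
// and trigger a custom Event Context by name.
[RequireComponent(typeof(VisualEffect))]
public class SpawnContextToggle : MonoBehaviour
{
    VisualEffect m_Vfx;

    void Awake() => m_Vfx = GetComponent<VisualEffect>();

    void Update()
    {
        if (Input.GetKeyDown(KeyCode.P))
            m_Vfx.Play();                    // raises the implicit OnPlay event (Start slot)
        if (Input.GetKeyDown(KeyCode.O))
            m_Vfx.Stop();                    // raises the implicit OnStop event (Stop slot)
        if (Input.GetKeyDown(KeyCode.E))
            m_Vfx.SendEvent("MySpawnEvent"); // placeholder custom Event Context name
    }
}
```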
-* The spawn context can emit during **loops of defined duration** (meaning the internal spawn time will reset at each loop's beginning) . By default the duration is **infinite**. - * In order to set the loop mode, select the context in the graph and change the loop duration popup in the Inspector. (Possible Values : Infinite, Constant, Random) -* Spawn contexts can perform **one**, **many** or an **infinity** of **loops**. - * In order to set this setting, select the spawn context in the graph and change the Loop count popup in the Inspector (Possible Values : Infinite, Constant, Random) -* Spawn contexts can perform a **delay** **before** and/or a**delay after** each loop. During a delay, the spawn time elapses normally but no spawn is performed. - * In order to set these setting, select the spawn context in the graph and change the Delay Before Loop and Delay After Loop popups in the Inspector (Possible Values: None, Constant, Random) +* The Spawn Context emits particles during loops of a particular duration. This means the internal spawn time resets when each loop starts. By default, the duration is **infinite**, but you can change this.
To set the loop mode: + 1. Select the Spawn Context in the graph. + 2. In the Inspector, click the **Loop Duration** drop-down. + 3. From the list, select **Infinite**, **Constant**, or **Random**. +* Spawn Contexts can perform one, many, or an infinite number of loops.<br>
To set the number of loops: + 1. Select the Spawn Context in the graph. + 2. In the Inspector, click the **Loop** drop-down. + 3. From the list, select **Infinite**, **Constant**, or **Random**. +* Spawn Contexts can perform a delay before and/or after each loop. During a delay, the spawn time elapses normally but the Spawn Context does not spawn any particles.<br>
To set the delay duration: + 1. Select the Spawn Context in the graph. + 2. In the Inspector, click either the **Delay Before Loop** or **Delay After Loop** drop-down. + 3. From the list, select **None**, **Constant**, or **Random**. -Here is a visual illustration of the Looping and Delay System. +If you set **Loop Duration**, **Loop**, **Delay Before Loop**, or **Delay After Loop** to either **Constant** or **Random**, the Spawn Context displays extra properties in its header to control each behavior. To evaluate the values you set, Unity uses the following rules: -![Figure explaining the Loop/Delay System](Images/LoopDelaySystem.png) +- If set, Unity evaluates **Loop Count** when the **Start** flow input of the Context triggers. +- If set, Unity evaluates **Loop Duration** every time a loop starts. +- If set, Unity evaluates **Delay Before Loop**/**Delay After Loop** every time a delay starts. -Setting a loop count, loop duration and / or delays will display new connectable properties on the context's header. Evaluation of these values will follow these rules: +For a visualization of the looping and delay system, see the following illustration: -* If set : **Loop Count** is evaluated when the Start workflow input of the context is hit. -* If set : **Loop Duration** is evaluated every time a loop starts -* If set : **Loop Delay** (Before/After) is evaluated every time a delay starts. +![Figure explaining the Loop/Delay System](Images/LoopDelaySystem.png) ## GPU Event -GPU Event contexts are experimental contexts that connect inputs to output GPU Events from other systems. They differ from Traditional Spawn as they are computed by the GPU. Only one kind of Spawn can be connected to an Initialize Context (GPU Event and Spawn/Events are mutually Exclusive) +GPU Event Contexts are experimental Contexts that connect inputs to output GPU Events from other systems. They differ from the normal Event Contexts in two ways: -> GPU Event contexts cannot be customized with Blocks. -> +* The GPU computes GPU Events and the CPU computes normal Events. +* You can't customize GPU Event Contexts with Blocks. + +**Note**: When you connect Spawn Events to an Initialize Context, be aware that GPU Spawn Events and normal Spawn Events are mutually exclusive. You can only connect one type of Spawn Event to an **Initialize** Context at a time. ## Initialize -Initialize Contexts will generate new particles based on **SpawnEvent** Data, computed from Events, Spawn or GPU Event contexts. +Initialize Contexts generate new particles based on **SpawnEvent** Data, which Unity computes from Events, Spawn Contexts, or GPU Event Contexts. -> For example: upon receiving an order of creation of 200 new particles from a spawn context, the context will be processed and will result in executing the context's Blocks for all 200 new particles. +For example: If a Spawn Context states that the effect should create 200 new particles, the Initialize Context processes its Blocks for all 200 new particles. -Initialize contexts can be customized using compatible **Blocks**. +To customize **Initialize** Contexts, you can add compatible **Blocks** to them. -Initialize contexts are the entry point of new systems. +Initialize Contexts are the entry point of new systems. <br>
As such, they display the following information and configuration details in their header: | Property/Setting | Description | -| ------------------ | ------------------------------------------- | -| Bounds (Property) | Controls the Bounding box of the System | -| Capacity (Setting) | Controls the allocation count of the System | - - +| ---------------------- | -------------------------------------------- | +| **Bounds** (Property) | Controls the bounding box of the System. | +| **Capacity** (Setting) | Controls the allocation count of the System. | ## Update -Update contexts update all living particles based on **Particle** Data computed from Initialize and Update Contexts. These contexts are executed every frame and will update every particle. +Update Contexts update all living particles in the system based on **Particle** Data, which Unity computes from Initialize and Update Contexts. Unity executes Update Contexts every frame, and they in turn update every particle. -Particle Update Contexts also process automatically some computations for particles in order to simplify common editing tasks. +Particle Update Contexts also automatically perform some calculations for particles in order to simplify common editing tasks. -Update contexts can be customized using compatible **Blocks**. +To customize **Update** Contexts, you can add compatible **Blocks** to them. | Setting | Description | -| ------------------- | ------------------------------------------------------------ | -| Integration | None : No velocity Integration<br>
Euler : Applies simple Euler velocity integration to the particles positions every frame. | -| Angular Integration | None : No velocity Integration
Euler : Applies simple Euler angular velocity integration to the particles angles every frame. | -| Age Particles | If Age attribute is used, Controls whether update will make particles age over time | -| Reap Particles | If Age and Lifetime attributes are used, Control whether update will kill all particles which age is greater than its lifetime. | +| ----------------------- | ------------------------------------------------------------ | +| **Update Position** | Specifies whether Unity applies velocity integration to the particles. When enabled, Unity applies simple Euler velocity integration to each particle's position every frame. When disabled, Unity does not apply any velocity integration. | +| **Update Rotation** | Specifies whether Unity applies angular integration to the particles. When enabled, Unity applies simple Euler integration to each particle's rotation every frame. When disabled, Unity does not apply any angular integration. | +| **Age Particles** | If the Context uses the Age attribute, this controls whether the Update Context makes particles age over time. | +| **Reap Particles** | If the Context uses the Age and Lifetime attributes, this controls whether the Update Context removes a particle if the particle's age is greater than its lifetime. | ## Output -Output Contexts renders a system with different modes and settings depending on Particle Data incoming from an **Initialize** or **Update** context. Every element will be rendered using a specific configuration as a specific primitive. - -Output contexts can be customized using compatible **Blocks**. +Output Contexts render the particles in a system. They render the particles with different modes and settings depending on the particle Data from the **Initialize** and **Update** Contexts in the same system. The Output Context then renders each particle as a particular primitive shape. -For more information, and a comprehensive list of all output contexts and their settings, see [Output Contexts Reference]() +To customize **Output** Contexts, you can add compatible **Blocks** to them. diff --git a/com.unity.visualeffectgraph/Documentation~/Events.md b/com.unity.visualeffectgraph/Documentation~/Events.md index 863e7c084dc..9778fa82eaa 100644 --- a/com.unity.visualeffectgraph/Documentation~/Events.md +++ b/com.unity.visualeffectgraph/Documentation~/Events.md @@ -1,60 +1,62 @@ -<div>
Draft: The content on this page is complete, but it has not been reviewed yet.
# Events -Events are the Processing Workflow inputs of a Visual Effect Graph. Through Events, a Visual Effect can : +Events define the inputs for a Visual Effect Graph's [**processing** workflow](GraphLogicAndPhilosophy.md#processing-workflow-vertical-logic). The Spawn and Initialize [Contexts](Contexts.md) use Events as their inputs. Through Events, a Visual Effect Graph can: -* Start and stop spawning particles, -* Read Attribute payloads sent from C# - -Events are used in the graph as inputs for Spawn Contexts and Initialize +* Start and stop spawning particles. +* Read [Event Attribute payloads](#event-attribute-payloads) sent from C# scripts. ## Creating Events ![](Images/EventContexts.png) -You can Create Events using Event Contexts. These contexts have no Flow input and connect to Spawn or Initialize Contexts. +In general, an Event is just a string that represents the Event's name. To receive an Event in the Visual Effect Graph, create an Event [Context](Contexts.md) and type the name of the Event you want to receive in the **Event Name** property. Event Contexts have no input flow slots and can only connect their output flow slot to Spawn or Initialize Contexts. + +To create an Event Context: -In order to Create an Event Context, right click in an empty space of the Workspace and select Create Node, then Select **Event (Context)** from the Node Creation menu. +1. In the [Visual Effect Graph window](VisualEffectGraphWindow.md), right-click in an empty space. +2. From the menu, click **Create Node**. +3. In the Node Creation menu, click **Contexts > Event (Context)**. +4. In the **Event Name** input field, type the name of your Event. ## Default Events -Visual Effect Graphs provide two Default Events that are implicitly bound to the Start and Stop Flow Inputs of the Spawn Contexts: +The Visual Effect Graph provides two default Events: -* `OnPlay` for the intent *Enabling the Spawn of Particles*, is implicitly bound to the Start Flow input of any Spawn Context. -* `OnStop` for the intent of *Stopping the Spawn of Particles*, is implicitly bound to the Stop Flow input of any Spawn Context. +* **OnPlay**: To enable the spawning of particles. If you do not assign an Event to a Spawn Context's **Start** input flow slot, the Visual Effect Graph implicitly binds this Event to that input flow slot instead. +* **OnStop**: To disable the spawning of particles. If you do not assign an Event to a Spawn Context's **Stop** input flow slot, the Visual Effect Graph implicitly binds this Event to that input flow slot instead. -Connecting Event Contexts on the Start and Stop Flow inputs of a Spawn Contexts will remove the implicit binding to the `OnPlay` and `OnStop` Events +If you connect an Event Context to a Spawn Context's **Start** or **Stop** input flow slot, this removes the implicit binding to the **OnPlay** and **OnStop** Events, respectively. ## Custom Events -Custom Events can be created inside Visual Effect Graphs using Event Contexts. +If you do not want to use the default Events, you can use an Event Context to create your own custom Event. -In order to create a custom event, create an event using the **Create Node** menu, then change its name in the **Event Name** field +To do this, first [create an Event Context](#creating-events), then type the name of your custom Event in the **Event Name** property. -## EventAttribute Payloads +## Event Attribute Payloads -Event Attribute payloads are attributes attached on one event. <br>
You can set these attributes in Visual Effect Graph using the **Set [Attribute] Event Attribute>** Blocks in Spawn Contexts, but you can also attach them to events sent from the scene using the [Component API](ComponentAPI.md#event-attributes) . +Event Attribute payloads are attributes that you can attach to an Event. To set these attributes in a Visual Effect Graph, you can use the **Set [Attribute]** Blocks in Spawn Contexts, but you can also attach them to Events you send from C# scripts. For information on how to do the latter, see the [Component API](ComponentAPI.md#event-attributes). For a short illustrative sketch, see the example after the list of Trigger Blocks in the GPU Events section below. -EventAttribute Payloads are attributes that will implicitly travel through the graph from Events, through Spawn Systems, and that can be caught in Initialize Contexts using **Get Source Attribute Operators** and **Inherit [Attribute] Blocks** +Event Attribute Payloads are attributes that implicitly travel through the graph from Events, through Spawn Contexts, and eventually to an Initialize Context. To catch a payload in an Initialize Context, use **Get Source Attribute** Operators or **Inherit [Attribute]** Blocks. -## Default VisualEffect Event +## Default Visual Effect Event -The default Visual Effect Event defines the name of the event that is implicitly sent when a `Reset` is performed on a [Visual Effect](VisualEffectComponent.md) instance (this can happen at first start or any restart of the effect). +The default Visual Effect Event defines the name of the Event that the Visual Effect Graph implicitly sends when a [Visual Effect](VisualEffectComponent.md) instance **Resets**. This happens when the effect first starts, or when the effect restarts. -Default VisualEffect Event is defined in the [Visual Effect Graph Asset Inspector](VisualEffectGraphAsset.md) but can be overridden in any [Visual Effect Inspector](VisualEffectComponent.md) for any instance in the scene. +You can define the default Visual Effect Event for each [Visual Effect Graph Asset](VisualEffectGraphAsset.md) independently. You can also override this value for every instance of the Visual Effect Graph Asset. To override the default Visual Effect Event for an instance, see **Initial Event Name** in the [Visual Effect Inspector](VisualEffectComponent.md). ## GPU Events -GPU Events is an **Experimental feature** of Visual Effect Graph : It enables particle spawn based on other Particles. You can enable this option in [Visual Effect Preferences](VisualEffectPreferences.md) . +GPU Events are an **Experimental feature** of the Visual Effect Graph. They allow you to spawn particles based on other particles. To enable this feature, enable the **Experimental Operators/Blocks** checkbox in the [Visual Effect Preferences](VisualEffectPreferences.md). -![](Images/GPUEvent.png) +GPU Events are Event Contexts that rely on data sent from other systems, for example, when a particle dies. The following Update Blocks can send GPU Event Data: -GPU Events are Event Contexts that relies on Data sent from other Systems, for instance when a particle dies, or other conditions. The following Update Blocks can send GPU Event Data: +* **Trigger Event On Die**: Spawns N Particles on another system when a particle dies. +* **Trigger Event Rate**: Spawns N Particles per second (or per distance travelled), based on a particle from a system. +* **Trigger Event Always**: Spawns N Particles every frame. <br>
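+Before the GPU Events discussion continues, here is the short illustrative sketch referenced in [Event Attribute Payloads](#event-attribute-payloads) above. It is a minimal, hedged example of sending an Event with a payload from C#; the "position" attribute and the `vfx` field are assumptions for illustration:
+
+```csharp
+using UnityEngine;
+using UnityEngine.VFX;
+
+// Illustrative sketch: send an Event with an attribute payload from a script.
+public class BurstSender : MonoBehaviour
+{
+    public VisualEffect vfx; // assign a Visual Effect instance in the Inspector
+
+    void OnEnable()
+    {
+        // Create a payload and attach a "position" attribute to it.
+        VFXEventAttribute payload = vfx.CreateVFXEventAttribute();
+        payload.SetVector3("position", transform.position);
+
+        // Send the default OnPlay Event with the payload attached. An Initialize
+        // Context can then read the payload back with a Get Source Attribute Operator.
+        vfx.SendEvent("OnPlay", payload);
+    }
+}
+```
+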
-* **Trigger Event On Die** : Spawns N Particles on another system when a particle dies -* **Trigger Event Rate** : Spawn N Particles per second (or per distance travelled), based on a particle from a system -* **Trigger Event Always** : Spawns N Particles every Frame. +The Trigger Blocks listed above connect to a **GPUEvent** Context. This Context does not handle any Blocks, but instead connects to an Initialize Context of a child system. -These Blocks connect to a **GPUEvent** Context. This context does not handle any Blocks but instead connects to a Initialize Context of a child system. +To gather data from the parent particle, a child system must refer to [Source Attributes](Attributes.md) in its Initialize Context. To do this, a child system can use a **Get Source Attribute** Operator, or an **Inherit Attribute** Block. For a visual example, see the image below. -In order to gather data from the parent particle, the child system must refer to [Source Attributes](Attributes.md) in its Initialize Context, by using **Get Source Attribute Operator**, or **Inherit Attribute Block**,as shown as in the example above : The child System inherits the source position of the particle that created it, and inherits roughly 50% of its speed. +![](Images/GPUEvent.png)*In this example, the child System inherits the source position of the particle that created it. It also inherits roughly 50% of the parent particle's speed.* diff --git a/com.unity.visualeffectgraph/Documentation~/GettingStarted.md b/com.unity.visualeffectgraph/Documentation~/GettingStarted.md index 6e026959837..887e79b30e2 100644 --- a/com.unity.visualeffectgraph/Documentation~/GettingStarted.md +++ b/com.unity.visualeffectgraph/Documentation~/GettingStarted.md @@ -69,7 +69,7 @@ To preview an effect, you can: This lets you edit parameters directly in the Scene, see the lighting on your effect, and use the [Target GameObject Panel](VisualEffectGraphWindow.md#target-visual-effect-gameobject) features for the specific target instance of your effect. -## Manipulating Graph Elements +## Manipulating graph elements When you open an Asset inside the Visual Effect Graph window, you can see and edit the graph for that specific Asset. A Visual Effect Graph contains [Operator Nodes](Operators.md) and [Blocks](Blocks.md). Each Node is in charge of processing its input properties. You can link Nodes together to perform a series of calculations. All Nodes end up connecting into a Block (or a context) : A Block defines an operation on an effect, based on its input properties. diff --git a/com.unity.visualeffectgraph/Documentation~/GraphLogicAndPhilosophy.md b/com.unity.visualeffectgraph/Documentation~/GraphLogicAndPhilosophy.md index 9eccb1e57c6..ba09cc4c79c 100644 --- a/com.unity.visualeffectgraph/Documentation~/GraphLogicAndPhilosophy.md +++ b/com.unity.visualeffectgraph/Documentation~/GraphLogicAndPhilosophy.md @@ -9,7 +9,7 @@ The Visual Effect Graph uses two distinct workflows: ## Processing workflow (vertical logic) The processing workflow links together a succession of customizable stages to define the complete system logic. This is where you can determine when the spawn, initialization, updating, and rendering of the particles happen during the effect. -The processing workflow connects Contexts using their **Flow Slots** located at the top and the bottom of the Context Node. +The processing workflow connects Contexts using their **flow slots** located at the top and the bottom of the Context Node. <br>
The processing logic defines the different stages of processing of a visual effect. Each stage consists of a large colored container called a [Contexts](Contexts.md). Each Context connects to another compatible Context, which defines how the next stage of processing uses the current Context. @@ -23,13 +23,13 @@ The Visual Effect Graph comes with a large Block and Node library that you can t To customize how particles behave, you can connect horizontal Nodes to a Block to create a custom a mathematical expression. To do this, use the **Create Node** context menu to add Nodes, change their values, then connect the Nodes to Block properties. -## Graph Elements +## Graph elements -A Visual Effect Graph provides a workspace where you can create Graph Elements and connect them together to define effect behaviors. The Visual Effect Graph comes with many different types of Graph Elements that fit into the workspace. +A Visual Effect Graph provides a workspace where you can create graph elements and connect them together to define effect behaviors. The Visual Effect Graph comes with many different types of graph elements that fit into the workspace. ### Workspace -A Visual Effect Graph provides a **Workspace** where you can create Graph Elements and connect them together to define effect behaviors. +A Visual Effect Graph provides a **Workspace** where you can create graph elements and connect them together to define effect behaviors. ![The vertical workflow contains Systems, which then contain Contexts, which then contain Blocks. Together, they determine when something happens during the “lifecycle” of the visual effect.](Images/SystemVisual.png) @@ -74,7 +74,7 @@ While the graph elements are different, their contents and behavior tend to be t #### Settings -Settings are Fields that you cannot connect to using the property workflow. Every Graph Element displays settings: +Settings are Fields that you cannot connect to using the property workflow. Every graph element displays settings: * In the **Graph** : Between the Title and the property container in the Graph. * In the **Inspector** : When you select a Node, the Inspector displays additional, advanced settings. @@ -85,7 +85,7 @@ If you change the value of a setting, you need to recompile the Graph to see the [Properties](Properties.md) are Fields that you can edit and connect to using the property workflow. You can connect them to other properties contained in other graph elements. -## Other Graph Elements +## Other graph elements ### Groups diff --git a/com.unity.visualeffectgraph/Documentation~/Properties.md b/com.unity.visualeffectgraph/Documentation~/Properties.md index 3d8f1168ad6..631d286a367 100644 --- a/com.unity.visualeffectgraph/Documentation~/Properties.md +++ b/com.unity.visualeffectgraph/Documentation~/Properties.md @@ -1,7 +1,7 @@
Draft: The content on this page is complete, but it has not been reviewed yet.
# Properties -Properties are editable fields that you can connect to graph elements using [Property workflow](GraphLogicAndPhilosophy.md). They can be found on Graph Elements such as [Contexts](Contexts.md), [Blocks](Blocks.md) and [Operators](Operators.md). +Properties are editable fields that you can connect to graph elements using the [property workflow](GraphLogicAndPhilosophy.md). They can be found on graph elements such as [Contexts](Contexts.md), [Blocks](Blocks.md) and [Operators](Operators.md). ## Using Properties diff --git a/com.unity.visualeffectgraph/Documentation~/StickyNotes.md b/com.unity.visualeffectgraph/Documentation~/StickyNotes.md new file mode 100644 index 00000000000..b354091c749 --- /dev/null +++ b/com.unity.visualeffectgraph/Documentation~/StickyNotes.md @@ -0,0 +1,46 @@ +# Sticky Notes + +Sticky Notes are objects in a graph view that you can write in. They are the graph view equivalent of a comment in code, and consist of a title and body. You can create as many as you want in the graph, and use them for a variety of purposes, for example: + +- To describe how a section of your graph works. +- To leave notes for yourself or others collaborating in your Unity Project. +- As a to-do list that includes tasks to complete at a later date. + +## Using Sticky Notes + +To create a Sticky Note, right-click an empty space in the graph view and, in the context menu, click **Create Sticky note**. You can now customize and add content to the new Sticky Note. There are two text areas that you can write to: + +- **Title**: The text area at the top of the Sticky Note is the title. Here you can concisely describe what the Sticky Note contains in its body text area. For example, this could be the name of the graph section the Sticky Note describes. +- **Body**: The larger text area below the title area is the body. You can write the full contents of the note here. + +### Controls + +This section describes how to edit text, move and resize the Sticky Note, and perform other actions. + +#### Editing text + +To edit text on a Sticky Note, double-click on a text area. This also selects all of the text in the area, so be sure to move the text cursor before you edit the text. + +#### Moving and resizing + +You can move Sticky Notes anywhere on the graph. For information on how to do this, see [manipulating graph elements](VisualEffectGraphWindow.md#moving-elements). + +You can also resize Sticky Notes, either manually or automatically so that they fit their contents. For information on how to make the Sticky Note resize itself, see the **Fit To Text** option in the [Context menu](#context-menu). For information on how to resize the Sticky Note manually, see [manipulating graph elements](VisualEffectGraphWindow.md#moving-elements). + +#### Duplicating + +You can also cut, copy, paste, and duplicate Sticky Notes. For information on how to perform these actions, see [duplicating elements](VisualEffectGraphWindow.md#copy-cut-and-paste-and-duplicate-elements). + +#### Context menu + +To open the context menu for the Sticky Note, right-click anywhere on the Sticky Note. The options in the context menu are as follows. + +| **Option** | **Description** | +| -------------------------- | ------------------------------------------------------------ | +| **Dark Theme/Light Theme** | Toggles the color theme of the Sticky Note between light theme and dark theme. <br>
| +| **Text Size** | Resizes the font in the text areas to the following values: | +| Small | Title: 20
Body: 11 | +| Medium | Title: 40
Body: 24 | +| Large | Title: 60
Body: 36 | +| Huge | Title: 80
Body: 56 | +| **Fit To Text** | Resizes the Sticky Note so that it precisely fits the text areas.
**Note**: If your title spreads over more than a single line, this horizontally resizes the Sticky Note to the smallest size where the title text fits on a single line. | diff --git a/com.unity.visualeffectgraph/Documentation~/TableOfContents.md b/com.unity.visualeffectgraph/Documentation~/TableOfContents.md index 01a144e55e7..46e0417c594 100644 --- a/com.unity.visualeffectgraph/Documentation~/TableOfContents.md +++ b/com.unity.visualeffectgraph/Documentation~/TableOfContents.md @@ -12,6 +12,7 @@ * [Attributes](Attributes.md) * [Subgraph](Subgraph.md) * [Blackboard](Blackboard.md) + * [Sticky Notes](StickyNotes.md) * [Project Settings](VisualEffectProjectSettings.md) * [Preferences](VisualEffectPreferences.md) * [The Visual Effect Component](VisualEffectComponent.md) diff --git a/com.unity.visualeffectgraph/Documentation~/VisualEffectGraphWindow.md b/com.unity.visualeffectgraph/Documentation~/VisualEffectGraphWindow.md index 01162247281..3bc325ed149 100644 --- a/com.unity.visualeffectgraph/Documentation~/VisualEffectGraphWindow.md +++ b/com.unity.visualeffectgraph/Documentation~/VisualEffectGraphWindow.md @@ -100,7 +100,7 @@ The navigation controls for the Node Workspace are similar to those that other g * **Duplicate**: Ctrl+D. * **Duplicate with edges**: Ctrl+Alt+D. -### Adding Graph Elements +### Adding graph elements To add graph elements, you can use any of the following methods: