diff --git a/TestProjects/HDRP_DXR_Tests/LocalPackages/com.unity.render-pipelines.high-definition-config/Runtime/ShaderConfig.cs b/TestProjects/HDRP_DXR_Tests/LocalPackages/com.unity.render-pipelines.high-definition-config/Runtime/ShaderConfig.cs index 697159b4b37..22ae4a9b21d 100644 --- a/TestProjects/HDRP_DXR_Tests/LocalPackages/com.unity.render-pipelines.high-definition-config/Runtime/ShaderConfig.cs +++ b/TestProjects/HDRP_DXR_Tests/LocalPackages/com.unity.render-pipelines.high-definition-config/Runtime/ShaderConfig.cs @@ -58,6 +58,9 @@ public enum ShaderOptions // Changing a value in this enum Config here require to regenerate the hlsl include and recompile C# and shaders public class ShaderConfig { + public const int k_XRMaxViewsForCBuffer = 2; // REALLY IMPORTANT! This needs to be the maximum possible XrMaxViews for any supported platform! + // this needs to be constant and not vary like XrMaxViews does as it is used to generate the cbuffer declarations + public static int s_CameraRelativeRendering = (int)ShaderOptions.CameraRelativeRendering; public static int s_PreExposition = (int)ShaderOptions.PreExposition; public static int s_XrMaxViews = (int)ShaderOptions.XrMaxViews; diff --git a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/LinuxEditor/Vulkan/None/2203_PlanarProbes.png b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/LinuxEditor/Vulkan/None/2203_PlanarProbes.png index fdb71657a76..468a8bccd4f 100644 --- a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/LinuxEditor/Vulkan/None/2203_PlanarProbes.png +++ b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/LinuxEditor/Vulkan/None/2203_PlanarProbes.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8b9468ad4eaf403e3d7ece2949564efe9b7bc4b9151cbf75e645abb12b711eb4 -size 287818 +oid sha256:fbb807957524dba90475ee0fd8dadc4482a99bb171fdc141395a4f7a6554c59f +size 250643 diff --git 
a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/LinuxEditor/Vulkan/None/5010_CloudLayer.png.meta b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/LinuxEditor/Vulkan/None/5010_CloudLayer.png.meta new file mode 100644 index 00000000000..b5595fb91c4 --- /dev/null +++ b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/LinuxEditor/Vulkan/None/5010_CloudLayer.png.meta @@ -0,0 +1,96 @@ +fileFormatVersion: 2 +guid: 6dc6c9db19227ba46a3eba77174abf9e +TextureImporter: + internalIDToNameTable: [] + externalObjects: {} + serializedVersion: 11 + mipmaps: + mipMapMode: 0 + enableMipMap: 1 + sRGBTexture: 1 + linearTexture: 0 + fadeOut: 0 + borderMipMap: 0 + mipMapsPreserveCoverage: 0 + alphaTestReferenceValue: 0.5 + mipMapFadeDistanceStart: 1 + mipMapFadeDistanceEnd: 3 + bumpmap: + convertToNormalMap: 0 + externalNormalMap: 0 + heightScale: 0.25 + normalMapFilter: 0 + isReadable: 0 + streamingMipmaps: 0 + streamingMipmapsPriority: 0 + vTOnly: 0 + grayScaleToAlpha: 0 + generateCubemap: 6 + cubemapConvolution: 0 + seamlessCubemap: 0 + textureFormat: 1 + maxTextureSize: 2048 + textureSettings: + serializedVersion: 2 + filterMode: -1 + aniso: -1 + mipBias: -100 + wrapU: -1 + wrapV: -1 + wrapW: -1 + nPOTScale: 1 + lightmap: 0 + compressionQuality: 50 + spriteMode: 0 + spriteExtrude: 1 + spriteMeshType: 1 + alignment: 0 + spritePivot: {x: 0.5, y: 0.5} + spritePixelsToUnits: 100 + spriteBorder: {x: 0, y: 0, z: 0, w: 0} + spriteGenerateFallbackPhysicsShape: 1 + alphaUsage: 1 + alphaIsTransparency: 0 + spriteTessellationDetail: -1 + textureType: 0 + textureShape: 1 + singleChannelComponent: 0 + flipbookRows: 1 + flipbookColumns: 1 + maxTextureSizeSet: 0 + compressionQualitySet: 0 + textureFormatSet: 0 + ignorePngGamma: 0 + applyGammaDecoding: 0 + platformSettings: + - serializedVersion: 3 + buildTarget: DefaultTexturePlatform + maxTextureSize: 2048 + resizeAlgorithm: 0 + textureFormat: -1 + textureCompression: 1 + compressionQuality: 50 + crunchedCompression: 0 + 
allowsAlphaSplitting: 0 + overridden: 0 + androidETC2FallbackOverride: 0 + forceMaximumCompressionQuality_BC6H_BC7: 0 + spriteSheet: + serializedVersion: 2 + sprites: [] + outline: [] + physicsShape: [] + bones: [] + spriteID: + internalID: 0 + vertices: [] + indices: + edges: [] + weights: [] + secondaryTextures: [] + spritePackingTag: + pSDRemoveMatte: 0 + pSDShowRemoveMatteOption: 0 + userData: + assetBundleName: + assetBundleVariant: diff --git a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/OSXEditor/Metal/None/2203_PlanarProbes.png b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/OSXEditor/Metal/None/2203_PlanarProbes.png index 9f3de7e29d3..9d3392e8eb7 100644 --- a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/OSXEditor/Metal/None/2203_PlanarProbes.png +++ b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/OSXEditor/Metal/None/2203_PlanarProbes.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7b6dc89292419a8e6fa1b16df9d87786737834c5f8205d7e88d028f66f1ba215 -size 249122 +oid sha256:1fa30284b8cfd475c55be0d839fc7d68f58b1222f2297050563d50d500a27988 +size 253523 diff --git a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Direct3D11/None/2203_PlanarProbes.png b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Direct3D11/None/2203_PlanarProbes.png index 165ef79ea3c..b6fdb8c4797 100644 --- a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Direct3D11/None/2203_PlanarProbes.png +++ b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Direct3D11/None/2203_PlanarProbes.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:60c1060c81f32c243029cdbec19c825ae19b8316d8973d3035b249109f29fcdc -size 256035 +oid sha256:05fa4c6ce0cd88b7554d0a3b57fe4301d832b8bcb19caeb1e0bfd8eab9ab18da +size 250503 diff --git a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Direct3D11/None/2501_LightLayers.png 
b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Direct3D11/None/2501_LightLayers.png index c4c6d4d2f1d..47463655def 100644 --- a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Direct3D11/None/2501_LightLayers.png +++ b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Direct3D11/None/2501_LightLayers.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8908b4238dc180c24f4ccdb465034610ca4801a048997ce060fb9690977498a9 -size 120500 +oid sha256:45d38b91f49dfec90d7c2672cc67ccc7ea51baeb146c9122876909ac047e7aeb +size 120529 diff --git a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Direct3D12/None/2203_PlanarProbes.png b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Direct3D12/None/2203_PlanarProbes.png index 9381fcaa296..b6fdb8c4797 100644 --- a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Direct3D12/None/2203_PlanarProbes.png +++ b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Direct3D12/None/2203_PlanarProbes.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9e6e6ea9401358eda14f0242daabfdd89e75005034437fa20435322ecf169b20 -size 287739 +oid sha256:05fa4c6ce0cd88b7554d0a3b57fe4301d832b8bcb19caeb1e0bfd8eab9ab18da +size 250503 diff --git a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Vulkan/None/2203_PlanarProbes.png b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Vulkan/None/2203_PlanarProbes.png index 6548a948b05..43ef90b0a39 100644 --- a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Vulkan/None/2203_PlanarProbes.png +++ b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Vulkan/None/2203_PlanarProbes.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:977995200e0b81238a2c98040f6e2f467dc34a5aa8aa24a5ba929fc370ac3fac -size 256309 +oid 
sha256:4321afcd6d2dbdc4b35f1efb6ad1a9fd80bbac1a8c2f7d7cd1c8811703d15cb8 +size 250643 diff --git a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsPlayer/Direct3D11.meta b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsPlayer/Direct3D11.meta new file mode 100644 index 00000000000..4c5f58601d4 --- /dev/null +++ b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsPlayer/Direct3D11.meta @@ -0,0 +1,8 @@ +fileFormatVersion: 2 +guid: 40f20039054a4404fbdd318125f5ca49 +folderAsset: yes +DefaultImporter: + externalObjects: {} + userData: + assetBundleName: + assetBundleVariant: diff --git a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsPlayer/Direct3D11/None.meta b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsPlayer/Direct3D11/None.meta new file mode 100644 index 00000000000..9f01d137b78 --- /dev/null +++ b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsPlayer/Direct3D11/None.meta @@ -0,0 +1,8 @@ +fileFormatVersion: 2 +guid: bc05cddb8e44c194b8f5101dc99c70ca +folderAsset: yes +DefaultImporter: + externalObjects: {} + userData: + assetBundleName: + assetBundleVariant: diff --git a/TestProjects/HDRP_Tests/Assets/Tests/TestFilters/TestCaseFilters.asset b/TestProjects/HDRP_Tests/Assets/Tests/TestFilters/TestCaseFilters.asset index 81568f58c09..46a68617523 100644 --- a/TestProjects/HDRP_Tests/Assets/Tests/TestFilters/TestCaseFilters.asset +++ b/TestProjects/HDRP_Tests/Assets/Tests/TestFilters/TestCaseFilters.asset @@ -178,17 +178,17 @@ MonoBehaviour: - FilteredScene: {fileID: 102900000, guid: 09baf18a7e1f6584f86675e6a2141c66, type: 3} ColorSpace: 1 BuildPlatform: -2 - GraphicsDevice: 18 + GraphicsDevice: 21 XrSdk: StereoModes: 0 - Reason: Randomly fail on yamato + Reason: flip flop - FilteredScene: {fileID: 102900000, guid: 09baf18a7e1f6584f86675e6a2141c66, type: 3} ColorSpace: 1 BuildPlatform: -2 - GraphicsDevice: 21 + GraphicsDevice: 18 XrSdk: StereoModes: 0 - Reason: flip flop + Reason: Randomly 
fail on yamato - FilteredScene: {fileID: 102900000, guid: 292a7700209ba7c49ab0bcef8b7c8940, type: 3} ColorSpace: 1 BuildPlatform: -2 @@ -203,6 +203,13 @@ MonoBehaviour: XrSdk: StereoModes: 0 Reason: 'unstable: Geometry explode on Yamato' + - FilteredScene: {fileID: 102900000, guid: 9fdfbe7bced924d4791cf6fa031218d2, type: 3} + ColorSpace: 1 + BuildPlatform: -2 + GraphicsDevice: 16 + XrSdk: + StereoModes: 0 + Reason: 'unstable: Geometry explode on Yamato' - FilteredScene: {fileID: 102900000, guid: c034386e3fb58f44783743edf9b0f4a4, type: 3} ColorSpace: 1 BuildPlatform: -2 @@ -437,17 +444,17 @@ MonoBehaviour: - FilteredScene: {fileID: 102900000, guid: e75b9916869e2384b8e53bc6bffb1314, type: 3} ColorSpace: 1 BuildPlatform: -2 - GraphicsDevice: 21 + GraphicsDevice: 18 XrSdk: StereoModes: 0 - Reason: flip flop + Reason: unstable - FilteredScene: {fileID: 102900000, guid: e75b9916869e2384b8e53bc6bffb1314, type: 3} ColorSpace: 1 BuildPlatform: -2 - GraphicsDevice: 18 + GraphicsDevice: 21 XrSdk: StereoModes: 0 - Reason: unstable + Reason: flip flop - FilteredScene: {fileID: 102900000, guid: 277b6e81295b84afaa297d0889efb8a1, type: 3} ColorSpace: 1 BuildPlatform: -2 @@ -504,6 +511,13 @@ MonoBehaviour: XrSdk: StereoModes: 0 Reason: flip flop + - FilteredScene: {fileID: 102900000, guid: d00305c429b50834eb16887388118552, type: 3} + ColorSpace: 1 + BuildPlatform: -2 + GraphicsDevice: 16 + XrSdk: + StereoModes: 0 + Reason: 'unstable: Geometry explode on Yamato' - FilteredScene: {fileID: 102900000, guid: d00305c429b50834eb16887388118552, type: 3} ColorSpace: 1 BuildPlatform: -2 @@ -518,7 +532,7 @@ MonoBehaviour: XrSdk: StereoModes: 0 Reason: flip flop - - FilteredScene: {fileID: 102900000, guid: d00305c429b50834eb16887388118552, type: 3} + - FilteredScene: {fileID: 102900000, guid: c94380e34bef7d24c9d95f520ee006b1, type: 3} ColorSpace: 1 BuildPlatform: -2 GraphicsDevice: 16 @@ -532,7 +546,7 @@ MonoBehaviour: XrSdk: StereoModes: 0 Reason: flip flop - - FilteredScene: {fileID: 
102900000, guid: c94380e34bef7d24c9d95f520ee006b1, type: 3} + - FilteredScene: {fileID: 102900000, guid: 4f2cb986918804f4aad481246d4b54ba, type: 3} ColorSpace: 1 BuildPlatform: -2 GraphicsDevice: 16 @@ -549,17 +563,17 @@ MonoBehaviour: - FilteredScene: {fileID: 102900000, guid: 6cfc34a76b5a8334b850c3129d9069c7, type: 3} ColorSpace: 1 BuildPlatform: -2 - GraphicsDevice: 16 + GraphicsDevice: 21 XrSdk: StereoModes: 0 - Reason: 'unstable: Geometry explode on Yamato' + Reason: flip flop - FilteredScene: {fileID: 102900000, guid: 6cfc34a76b5a8334b850c3129d9069c7, type: 3} ColorSpace: 1 BuildPlatform: -2 - GraphicsDevice: 21 + GraphicsDevice: 16 XrSdk: StereoModes: 0 - Reason: flip flop + Reason: 'unstable: Geometry explode on Yamato' - FilteredScene: {fileID: 102900000, guid: aa2b8ed7d0d276340b794c91ebc7cdce, type: 3} ColorSpace: 1 BuildPlatform: -2 @@ -612,24 +626,24 @@ MonoBehaviour: - FilteredScene: {fileID: 102900000, guid: 520aec2f141eeaf4daceb4e016157b78, type: 3} ColorSpace: 1 BuildPlatform: -2 - GraphicsDevice: 21 + GraphicsDevice: 16 XrSdk: StereoModes: 0 - Reason: unstable + Reason: unstable on yamato - FilteredScene: {fileID: 102900000, guid: 520aec2f141eeaf4daceb4e016157b78, type: 3} ColorSpace: 1 BuildPlatform: -2 - GraphicsDevice: 16 + GraphicsDevice: 18 XrSdk: StereoModes: 0 - Reason: unstable on yamato + Reason: unstable with Yamato - FilteredScene: {fileID: 102900000, guid: 520aec2f141eeaf4daceb4e016157b78, type: 3} ColorSpace: 1 BuildPlatform: -2 - GraphicsDevice: 18 + GraphicsDevice: 21 XrSdk: StereoModes: 0 - Reason: unstable with Yamato + Reason: unstable - FilteredScene: {fileID: 102900000, guid: 78446c36daacd444c8a07b5671274a51, type: 3} ColorSpace: 1 BuildPlatform: -2 diff --git a/com.unity.render-pipelines.core/Runtime/RenderGraph/RenderGraphResourceRegistry.cs b/com.unity.render-pipelines.core/Runtime/RenderGraph/RenderGraphResourceRegistry.cs index ad5997e6590..7de11bc5a74 100644 --- 
a/com.unity.render-pipelines.core/Runtime/RenderGraph/RenderGraphResourceRegistry.cs +++ b/com.unity.render-pipelines.core/Runtime/RenderGraph/RenderGraphResourceRegistry.cs @@ -118,6 +118,19 @@ public enum TextureSizeMode Functor } + /// + /// Subset of the texture desc containing information for fast memory allocation (when platform supports it) + /// + public struct FastMemoryDesc + { + ///Whether the texture will be in fast memory. + public bool inFastMemory; + ///Flag to determine what parts of the render target is spilled if not fully resident in fast memory. + public FastMemoryFlags flags; + ///How much of the render target is to be switched into fast memory (between 0 and 1). + public float residencyFraction; + } + /// /// Descriptor used to create texture resources /// @@ -169,6 +182,8 @@ public struct TextureDesc public RenderTextureMemoryless memoryless; ///Texture name. public string name; + ///Descriptor to determine how the texture will be in fast memory on platform that supports it. + public FastMemoryDesc fastMemoryDesc; // Initial state. Those should not be used in the hash ///Texture needs to be cleared on first use. 
@@ -544,6 +559,12 @@ internal void CreateAndClearTexture(RenderGraphContext rgContext, TextureHandle { CreateTextureForPass(ref resource); + var fastMemDesc = resource.desc.fastMemoryDesc; + if(fastMemDesc.inFastMemory) + { + resource.rt.SwitchToFastMemory(rgContext.cmd, fastMemDesc.residencyFraction, fastMemDesc.flags); + } + if (resource.desc.clearBuffer || m_RenderGraphDebug.clearRenderTargetsAtCreation) { bool debugClear = m_RenderGraphDebug.clearRenderTargetsAtCreation && !resource.desc.clearBuffer; diff --git a/com.unity.render-pipelines.core/Runtime/Textures/RTHandle.cs b/com.unity.render-pipelines.core/Runtime/Textures/RTHandle.cs index 9a87fa0c8d3..68dd42878ef 100644 --- a/com.unity.render-pipelines.core/Runtime/Textures/RTHandle.cs +++ b/com.unity.render-pipelines.core/Runtime/Textures/RTHandle.cs @@ -145,5 +145,51 @@ public Vector2Int GetScaledSize(Vector2Int refSize) ); } } + + /// + /// Switch the render target to fast memory on platform that have it. + /// + /// Command buffer used for rendering. + /// How much of the render target is to be switched into fast memory (between 0 and 1). + /// Flag to determine what parts of the render target is spilled if not fully resident in fast memory. + /// Whether the content of render target are copied or not when switching to fast memory. + + public void SwitchToFastMemory(CommandBuffer cmd, + float residencyFraction = 1.0f, + FastMemoryFlags flags = FastMemoryFlags.SpillTop, + bool copyContents = false + ) + { +#if UNITY_2020_2_OR_NEWER + residencyFraction = Mathf.Clamp01(residencyFraction); + cmd.SwitchIntoFastMemory(m_RT, flags, residencyFraction, copyContents); +#endif + } + + /// + /// Switch the render target to fast memory on platform that have it and copies the content. + /// + /// Command buffer used for rendering. + /// How much of the render target is to be switched into fast memory (between 0 and 1). 
+ /// Flag to determine what parts of the render target is spilled if not fully resident in fast memory. + public void CopyToFastMemory(CommandBuffer cmd, + float residencyFraction = 1.0f, + FastMemoryFlags flags = FastMemoryFlags.SpillTop + ) + { + SwitchToFastMemory(cmd, residencyFraction, flags, copyContents: true); + } + + /// + /// Switch out the render target from fast memory back to main memory on platforms that have fast memory. + /// + /// Command buffer used for rendering. + /// Whether the content of render target are copied or not when switching out fast memory. + public void SwitchOutFastMemory(CommandBuffer cmd, bool copyContents = true) + { +#if UNITY_2020_2_OR_NEWER + cmd.SwitchOutOfFastMemory(m_RT, copyContents); +#endif + } } } diff --git a/com.unity.render-pipelines.core/ShaderLibrary/EntityLighting.hlsl b/com.unity.render-pipelines.core/ShaderLibrary/EntityLighting.hlsl index 55c0ccf99f9..1e0dac36490 100644 --- a/com.unity.render-pipelines.core/ShaderLibrary/EntityLighting.hlsl +++ b/com.unity.render-pipelines.core/ShaderLibrary/EntityLighting.hlsl @@ -78,6 +78,7 @@ half3 SampleSH9(half4 SHCoefficients[7], half3 N) return res; } #endif + float3 SampleSH9(float4 SHCoefficients[7], float3 N) { float4 shAr = SHCoefficients[0]; @@ -105,8 +106,10 @@ float3 SampleSH9(float4 SHCoefficients[7], float3 N) // TODO: the packing here is inefficient as we will fetch values far away from each other and they may not fit into the cache - Suggest we pack RGB continuously // TODO: The calcul of texcoord could be perform with a single matrix multicplication calcualted on C++ side that will fold probeVolumeMin and probeVolumeSizeInv into it and handle the identity case, no reasons to do it in C++ (ask Ionut about it) // It should also handle the camera relative path (if the render pipeline use it) -float3 SampleProbeVolumeSH4(TEXTURE3D_PARAM(SHVolumeTexture, SHVolumeSampler), float3 positionWS, float3 normalWS, float4x4 WorldToTexture, - float transformToLocal, 
float texelSizeX, float3 probeVolumeMin, float3 probeVolumeSizeInv) +// bakeDiffuseLighting and backBakeDiffuseLighting must be initialize outside the function +void SampleProbeVolumeSH4(TEXTURE3D_PARAM(SHVolumeTexture, SHVolumeSampler), float3 positionWS, float3 normalWS, float3 backNormalWS, float4x4 WorldToTexture, + float transformToLocal, float texelSizeX, float3 probeVolumeMin, float3 probeVolumeSizeInv, + inout float3 bakeDiffuseLighting, inout float3 backBakeDiffuseLighting) { float3 position = (transformToLocal == 1.0) ? mul(WorldToTexture, float4(positionWS, 1.0)).xyz : positionWS; float3 texCoord = (position - probeVolumeMin) * probeVolumeSizeInv.xyz; @@ -123,14 +126,30 @@ float3 SampleProbeVolumeSH4(TEXTURE3D_PARAM(SHVolumeTexture, SHVolumeSampler), f texCoord.x += 0.25; float4 shAb = SAMPLE_TEXTURE3D_LOD(SHVolumeTexture, SHVolumeSampler, texCoord, 0); - return SHEvalLinearL0L1(normalWS, shAr, shAg, shAb); + bakeDiffuseLighting += SHEvalLinearL0L1(normalWS, shAr, shAg, shAb); + backBakeDiffuseLighting += SHEvalLinearL0L1(backNormalWS, shAr, shAg, shAb); +} + +// Just a shortcut that call function above +float3 SampleProbeVolumeSH4(TEXTURE3D_PARAM(SHVolumeTexture, SHVolumeSampler), float3 positionWS, float3 normalWS, float4x4 WorldToTexture, + float transformToLocal, float texelSizeX, float3 probeVolumeMin, float3 probeVolumeSizeInv) +{ + float3 backNormalWSUnused = 0.0; + float3 bakeDiffuseLighting = 0.0; + float3 backBakeDiffuseLightingUnused = 0.0; + SampleProbeVolumeSH4(TEXTURE3D_ARGS(SHVolumeTexture, SHVolumeSampler), positionWS, normalWS, backNormalWSUnused, WorldToTexture, + transformToLocal, texelSizeX, probeVolumeMin, probeVolumeSizeInv, + bakeDiffuseLighting, backBakeDiffuseLightingUnused); + return bakeDiffuseLighting; } // The SphericalHarmonicsL2 coefficients are packed into 7 coefficients per color channel instead of 9. 
// The packing from 9 to 7 is done from engine code and will use the alpha component of the pixel to store an additional SH coefficient. // The 3D atlas texture will contain 7 SH coefficient parts. -float3 SampleProbeVolumeSH9(TEXTURE3D_PARAM(SHVolumeTexture, SHVolumeSampler), float3 positionWS, float3 normalWS, float4x4 WorldToTexture, - float transformToLocal, float texelSizeX, float3 probeVolumeMin, float3 probeVolumeSizeInv) +// bakeDiffuseLighting and backBakeDiffuseLighting must be initialize outside the function +void SampleProbeVolumeSH9(TEXTURE3D_PARAM(SHVolumeTexture, SHVolumeSampler), float3 positionWS, float3 normalWS, float3 backNormalWS, float4x4 WorldToTexture, + float transformToLocal, float texelSizeX, float3 probeVolumeMin, float3 probeVolumeSizeInv, + inout float3 bakeDiffuseLighting, inout float3 backBakeDiffuseLighting) { float3 position = (transformToLocal == 1.0f) ? mul(WorldToTexture, float4(positionWS, 1.0)).xyz : positionWS; float3 texCoord = (position - probeVolumeMin) * probeVolumeSizeInv; @@ -152,8 +171,23 @@ float3 SampleProbeVolumeSH9(TEXTURE3D_PARAM(SHVolumeTexture, SHVolumeSampler), f SHCoefficients[i] = SAMPLE_TEXTURE3D_LOD(SHVolumeTexture, SHVolumeSampler, texCoord, 0); } - return SampleSH9(SHCoefficients, normalize(normalWS)); + bakeDiffuseLighting += SampleSH9(SHCoefficients, normalize(normalWS)); + backBakeDiffuseLighting += SampleSH9(SHCoefficients, normalize(backNormalWS)); } + +// Just a shortcut that call function above +float3 SampleProbeVolumeSH9(TEXTURE3D_PARAM(SHVolumeTexture, SHVolumeSampler), float3 positionWS, float3 normalWS, float4x4 WorldToTexture, + float transformToLocal, float texelSizeX, float3 probeVolumeMin, float3 probeVolumeSizeInv) +{ + float3 backNormalWSUnused = 0.0; + float3 bakeDiffuseLighting = 0.0; + float3 backBakeDiffuseLightingUnused = 0.0; + SampleProbeVolumeSH9(TEXTURE3D_ARGS(SHVolumeTexture, SHVolumeSampler), positionWS, normalWS, backNormalWSUnused, WorldToTexture, + transformToLocal, 
texelSizeX, probeVolumeMin, probeVolumeSizeInv, + bakeDiffuseLighting, backBakeDiffuseLightingUnused); + return bakeDiffuseLighting; +} + #endif float4 SampleProbeOcclusion(TEXTURE3D_PARAM(SHVolumeTexture, SHVolumeSampler), float3 positionWS, float4x4 WorldToTexture, @@ -252,7 +286,8 @@ real3 SampleSingleLightmap(TEXTURE2D_PARAM(lightmapTex, lightmapSampler), float2 return illuminance; } -real3 SampleDirectionalLightmap(TEXTURE2D_PARAM(lightmapTex, lightmapSampler), TEXTURE2D_PARAM(lightmapDirTex, lightmapDirSampler), float2 uv, float4 transform, float3 normalWS, bool encodedLightmap, real4 decodeInstructions) +void SampleDirectionalLightmap(TEXTURE2D_PARAM(lightmapTex, lightmapSampler), TEXTURE2D_PARAM(lightmapDirTex, lightmapDirSampler), float2 uv, float4 transform, + float3 normalWS, float3 backNormalWS, bool encodedLightmap, real4 decodeInstructions, inout real3 bakeDiffuseLighting, inout real3 backBakeDiffuseLighting) { // In directional mode Enlighten bakes dominant light direction // in a way, that using it for half Lambert and then dividing by a "rebalancing coefficient" @@ -276,8 +311,24 @@ real3 SampleDirectionalLightmap(TEXTURE2D_PARAM(lightmapTex, lightmapSampler), T { illuminance = SAMPLE_TEXTURE2D(lightmapTex, lightmapSampler, uv).rgb; } + real halfLambert = dot(normalWS, direction.xyz - 0.5) + 0.5; - return illuminance * halfLambert / max(1e-4, direction.w); + bakeDiffuseLighting += illuminance * halfLambert / max(1e-4, direction.w); + + real backHalfLambert = dot(backNormalWS, direction.xyz - 0.5) + 0.5; + backBakeDiffuseLighting += illuminance * backHalfLambert / max(1e-4, direction.w); +} + +// Just a shortcut that call function above +real3 SampleDirectionalLightmap(TEXTURE2D_PARAM(lightmapTex, lightmapSampler), TEXTURE2D_PARAM(lightmapDirTex, lightmapDirSampler), float2 uv, float4 transform, float3 normalWS, bool encodedLightmap, real4 decodeInstructions) +{ + float3 backNormalWSUnused = 0.0; + real3 bakeDiffuseLighting = 0.0; + real3 
backBakeDiffuseLightingUnused = 0.0; + SampleDirectionalLightmap( lightmapTex, lightmapSampler, lightmapDirTex, lightmapDirSampler, uv, transform, + normalWS, backNormalWSUnused, encodedLightmap, decodeInstructions, bakeDiffuseLighting, backBakeDiffuseLightingUnused); + + return bakeDiffuseLighting; } #endif // UNITY_ENTITY_LIGHTING_INCLUDED diff --git a/com.unity.render-pipelines.core/ShaderLibrary/GeometricTools.hlsl b/com.unity.render-pipelines.core/ShaderLibrary/GeometricTools.hlsl index 5868d98babd..a84aeda1098 100644 --- a/com.unity.render-pipelines.core/ShaderLibrary/GeometricTools.hlsl +++ b/com.unity.render-pipelines.core/ShaderLibrary/GeometricTools.hlsl @@ -132,6 +132,23 @@ float3 IntersectRayPlane(float3 rayOrigin, float3 rayDirection, float3 planeOrig return rayOrigin + rayDirection * dist; } +// Same as above but return intersection distance and true / false if the ray hit/miss +bool IntersectRayPlane(float3 rayOrigin, float3 rayDirection, float3 planePosition, float3 planeNormal, out float t) +{ + bool res = false; + t = -1.0; + + float denom = dot(planeNormal, rayDirection); + if (abs(denom) > 1e-5) + { + float3 d = planePosition - rayOrigin; + t = dot(d, planeNormal) / denom; + res = (t >= 0); + } + + return res; +} + // Can support cones with an elliptic base: pre-scale 'coneAxisX' and 'coneAxisY' by (h/r_x) and (h/r_y). // Returns parametric distances 'tEntr' and 'tExit' along the ray, // subject to constraints 'tMin' and 'tMax'. 
diff --git a/com.unity.render-pipelines.core/ShaderLibrary/ImageBasedLighting.hlsl b/com.unity.render-pipelines.core/ShaderLibrary/ImageBasedLighting.hlsl index 4f19e17fa7e..8f0eaabe5d0 100644 --- a/com.unity.render-pipelines.core/ShaderLibrary/ImageBasedLighting.hlsl +++ b/com.unity.render-pipelines.core/ShaderLibrary/ImageBasedLighting.hlsl @@ -32,12 +32,6 @@ real PerceptualRoughnessToMipmapLevel(real perceptualRoughness) return PerceptualRoughnessToMipmapLevel(perceptualRoughness, UNITY_SPECCUBE_LOD_STEPS); } -// Mapping for convolved Texture2D, this is an empirical remapping to match GGX version of cubemap convolution -real PlanarPerceptualRoughnessToMipmapLevel(real perceptualRoughness, uint mipMapcount) -{ - return PositivePow(perceptualRoughness, 0.8) * uint(max(mipMapcount - 1, 0)); -} - // The *accurate* version of the non-linear remapping. It works by // approximating the cone of the specular lobe, and then computing the MIP map level // which (approximately) covers the footprint of the lobe with a single texel. diff --git a/com.unity.render-pipelines.high-definition-config/Runtime/ShaderConfig.cs b/com.unity.render-pipelines.high-definition-config/Runtime/ShaderConfig.cs index 6d773ae079c..dd3ff265869 100644 --- a/com.unity.render-pipelines.high-definition-config/Runtime/ShaderConfig.cs +++ b/com.unity.render-pipelines.high-definition-config/Runtime/ShaderConfig.cs @@ -60,6 +60,9 @@ public enum ShaderOptions // Changing a value in this enum Config here require to regenerate the hlsl include and recompile C# and shaders public class ShaderConfig { + public const int k_XRMaxViewsForCBuffer = 2; // REALLY IMPORTANT! This needs to be the maximum possible XrMaxViews for any supported platform! 
+ // this needs to be constant and not vary like XrMaxViews does as it is used to generate the cbuffer declarations + public static int s_CameraRelativeRendering = (int)ShaderOptions.CameraRelativeRendering; public static int s_PreExposition = (int)ShaderOptions.PreExposition; public static int s_XrMaxViews = (int)ShaderOptions.XrMaxViews; diff --git a/com.unity.render-pipelines.high-definition/CHANGELOG.md b/com.unity.render-pipelines.high-definition/CHANGELOG.md index b643a5f3075..6228fc388ff 100644 --- a/com.unity.render-pipelines.high-definition/CHANGELOG.md +++ b/com.unity.render-pipelines.high-definition/CHANGELOG.md @@ -143,6 +143,8 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0. - Added presets quality settings for RTAO and RTGI. - Added an override for the shadow culling that allows better directional shadow maps in ray tracing effects (RTR, RTGI, RTSSS and RR). - Added a Cloud Layer volume override. +- Added Fast Memory support for platform that support it. +- Added CPU and GPU timings for ray tracing effects. ### Fixed - Fix when rescale probe all direction below zero (1219246) @@ -648,6 +650,10 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0. - Fixed path traced DoF focusing issue - Fix an issue with the half resolution Mode (performance) - Fix an issue with the color intensity of emissive for performance rtgi +- Fixed issue with rendering being mostly broken when target platform disables VR. +- Workaround an issue caused by GetKernelThreadGroupSizes failing to retrieve correct group size. +- Fix issue with fast memory and rendergraph. +- Fixed transparent motion vector framesetting not sanitized. ### Changed - Improve MIP selection for decals on Transparents @@ -786,6 +792,8 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0. - Pre-warm the RTHandle system to reduce the amount of memory allocations and the total memory needed at all points. 
- DXR: Only read the geometric attributes that are required using the share pass info and shader graph defines. - DXR: Dispatch binned rays in 1D instead of 2D. +- Lit and LayeredLit tessellation cross lod fade don't used dithering anymore between LOD but fade the tessellation height instead. Allow a smoother transition +- Changed the way planar reflections are filtered in order to be a bit more "physically based". ## [7.1.1] - 2019-09-05 diff --git a/com.unity.render-pipelines.high-definition/Documentation~/Upgrading-from-2019.3-to-2020.1.md b/com.unity.render-pipelines.high-definition/Documentation~/Upgrading-from-2019.3-to-2020.1.md index 624f51a07a5..b669882f9ae 100644 --- a/com.unity.render-pipelines.high-definition/Documentation~/Upgrading-from-2019.3-to-2020.1.md +++ b/com.unity.render-pipelines.high-definition/Documentation~/Upgrading-from-2019.3-to-2020.1.md @@ -2,6 +2,10 @@ In the High Definition Render Pipeline (HDRP), some features work differently between major versions of Unity. This document helps you upgrade HDRP from Unity 2019.3 to 2020.1. +## Mesh LOD Transition + +From Unity 2020.1, HDRP no longer uses dithering for the LOD crossfade transition between a LOD that uses a material with tessellation and a LOD that uses a material with no tessellation. Instead, HDRP smoothly decreases the tessellation displacement strength. This improves the transition between the first high-quality LOD with tessellation and a second mid-quality LOD without tessellation. The remaining transitions between non-tessellation materials still use dithering. + ## Scene View Camera Settings From Unity 2020.1, the HDRP-specific settings of the scene view camera (anti-aliasing mode and stop NaNs) can be found in the same pop-up window as the standard scene camera settings, which are accessible by clicking the scene camera button on the toolbar of the scene window. These settings were previously in the HDRP preferences window (Edit > Preferences). 
@@ -12,4 +16,5 @@ From Unity 2020.1, Cookie on light are not taken into account for the lightmaps ## Default Volume Profile -From Unity 2020.1, the Default Volume Profile asset has changed so that the Exposure component sets the default Compensation to 0. This may cause a decrease of brightness of 1EV on scene that haven't change the default settings and aren't overriding it. +From Unity 2020.1, the default Volume Profile Asset has the [Exposure](Override-Exposure.md) override's **Compensation** set to 0. This may cause a decrease in brightness of 1[EV](Physical-Light-Units.md#EV) in scenes that have not changed the default settings and do not override them. + diff --git a/com.unity.render-pipelines.high-definition/Documentation~/Upgrading-from-2020.1-to-2020.2.md b/com.unity.render-pipelines.high-definition/Documentation~/Upgrading-from-2020.1-to-2020.2.md index 491a29aa72e..4bdfd3c45a8 100644 --- a/com.unity.render-pipelines.high-definition/Documentation~/Upgrading-from-2020.1-to-2020.2.md +++ b/com.unity.render-pipelines.high-definition/Documentation~/Upgrading-from-2020.1-to-2020.2.md @@ -4,26 +4,59 @@ In the High Definition Render Pipeline (HDRP), some features work differently be ## Constant Buffer API -From Unity 2020.2, HDRP is using a new constant buffer API that allow to setup uniform used during the Frame and sent to the shader in a single transfer instead of multiple one. The consequence is that it is no longer possible to setup any of the values declare in ShaderVariablesGlobal.cs individualy with cmd.SetVectorXXX() or related function. It is now required to update the value of ShaderVariablesGlobal to be able to update the values use in the shaders. +From Unity 2020.2, HDRP uses a new constant buffer API that allows it to set up uniforms during the frame and send them to the shader in a single transfer instead of multiple transfers.
To do this, the global variables that were declared individually are now all within the `ShaderVariablesGlobal` struct. The consequence of this is that it's no longer possible to set up any of the global values individually using `CommandBuffer.SetVectorXXX()` or its related functions. Instead, to change a global variable, you need to update the struct in its entirety. -## FrameSettings +Currently, the only publicly accessible variables in the `ShaderVariablesGlobal` struct are camera related and only available within [Custom Passes](Custom-Pass.md) via the following functions: -From Unity 2020.2, "MSAA Within Forward" Camera Frame Setting is enabled by default when new Render Pipeline asset is created. +* `RenderFromCamera()` +* `RenderDepthFromCamera()` +* `RenderNormalFromCamera()` +* `RenderTangentFromCamera()` + + +## Frame Settings + +From Unity 2020.2, if you create a new [HDRP Asset](HDRP-Asset.md), the **MSAA Within Forward** Frame Setting is enabled by default. ## Lighting -From Unity 2020.2, when the Sky component affected to the volume profile used for Static Lighting Sky in Environment settings of the Lighting panel is disabled. It now don't affect the bake lighting. Previously the Sky was still affecting the bake lighting even if disabled. +From Unity 2020.2, if you disable the sky override used as the **Static Lighting Sky** in the **Lighting** window, the sky no longer affects the baked lighting. Previously, the sky affected the baked lighting even when it was disabled. ## Shadows -From Unity 2020.2, it is not necessary to change the [HDRP Config package](HDRP-Config-Package.html) in order to set the [Shadows filtering quality](HDRP-Asset.html#FilteringQualities) for Deferred rendering. Instead the filtering quality can be simply set on the [HDRP Asset](HDRP-Asset.html#FilteringQualities) similarly to what was previously setting only the quality for Forward.
Note that if previously the Shadow filtering quality wasn't setup on medium on the HDRP Asset you will experience a change of shadow quality as now it will be taken into account. +From Unity 2020.2, it is no longer necessary to change the [HDRP Config package](HDRP-Config-Package.md) to set the [shadow filtering quality](HDRP-Asset.md#FilteringQualities) for deferred rendering. Instead, you can now change the filtering quality directly on the [HDRP Asset](HDRP-Asset.md#FilteringQualities). Note if you previously had not set the shadow filtering quality to **Medium** on the HDRP Asset, the automatic project upgrade process changes the shadow quality which means you may need to manually change it back to its original value. -Starting from 2020.2, HDRP now stores OnEnable and OnDemand shadows in a separate atlas and more API is available to handle them. For more information, see [Shadows in HDRP](Shadows-in-HDRP.md). +HDRP now stores OnEnable and OnDemand shadows in a separate atlas and more API is available to handle them. For more information, see [Shadows in HDRP](Shadows-in-HDRP.md). -From Unity 2020.2, the shader function `SampleShadow_PCSS` now requires you to pass in an additional float2 parameter which contains the shadow atlas resolution in x and the inverse of the atlas resolution in y. +The shader function `SampleShadow_PCSS` now requires you to pass in an additional float2 parameter which contains the shadow atlas resolution in x and the inverse of the atlas resolution in y. ## Shader code -A new structure is use to output the information of the LightLoop. LightLoop struct is use instead of the pair (float3 diffuseLighting, float3 specularLighting). This is to allow to export more information from the LightLoop in the future without breaking the API. The function LightLoop() - For rasterization and raytracing - PostEvaluateBSDF(), ApplyDebug() and PostEvaluateBSDFDebugDisplay now pass this structure instead of the Pair. 
The function LightLoop() will initialize this structure to zero. To upgrade existing shader, replace the declaration "float3 diffuseLighting; float3 specularLighting;" by "LightLoopOutput lightLoopOutput;" before call of LightLoop and repalce the argument pair "out float3 diffuseLighting, out float3 specularLighting" by "out LightLoopOutput lightLoopOutput" in all the function mention. +From Unity 2020.2, HDRP uses a new structure to output information from the LightLoop. It now uses a custom LightLoop struct instead of the `float3 diffuseLighting`, `float3 specularLighting` pair. This is to allow HDRP to export more information from the LightLoop in the future without breaking the API. + +The following functions now pass this structure instead of the pair: + +* LightLoop(), for both rasterization and raytracing. +* PostEvaluateBSDF() +* ApplyDebug() +* PostEvaluateBSDFDebugDisplay() + +To upgrade an existing shader, for all the above functions: + +1. Replace the declaration `float3 diffuseLighting; float3 specularLighting;` with `LightLoopOutput lightLoopOutput;` before the LightLoop call. +2. Replace the argument pair `out float3 diffuseLighting, out float3 specularLighting` with `out LightLoopOutput lightLoopOutput`. + + + +The prototype for the function `ModifyBakedDiffuseLighting()` in the various materials has changed from: +`void ModifyBakedDiffuseLighting(float3 V, PositionInputs posInput, SurfaceData surfaceData, inout BuiltinData builtinData)` +to: + `void ModifyBakedDiffuseLighting(float3 V, PositionInputs posInput, PreLightData preLightData, BSDFData bsdfData, inout BuiltinData builtinData)` + +There is also a new definition for `ModifyBakedDiffuseLighting()` that uses the former prototype definition and calls the new function prototype with the correct arguments. The purpose of this change is to prepare for future lighting features.
To update your custom shaders, in addition to the prototype update, you must remove the following lines: +``` +BSDFData bsdfData = ConvertSurfaceDataToBSDFData(posInput.positionSS, surfaceData); + +PreLightData preLightData = GetPreLightData(V, posInput, bsdfData); +``` -The prototype of the function ModifyBakedDiffuseLighting() in the various material have change from "void ModifyBakedDiffuseLighting(float3 V, PositionInputs posInput, SurfaceData surfaceData, inout BuiltinData builtinData)" to "void ModifyBakedDiffuseLighting(float3 V, PositionInputs posInput, PreLightData preLightData, BSDFData bsdfData, inout BuiltinData builtinData)". There is a new ModifyBakedDiffuseLighting using the former prototype added in the file BuiltinUtilities.hlsl which will call the new function prototype with the correct arguments. The purpose of this change it to prepare for future lighting features. To update code, in addition of the prototype update it is required to remove those line "BSDFData bsdfData = ConvertSurfaceDataToBSDFData(posInput.positionSS, surfaceData); PreLightData preLightData = GetPreLightData(V, posInput, bsdfData);" as it is now perform by the common code from BuiltinUtilities.hlsl.
diff --git a/com.unity.render-pipelines.high-definition/Editor/Lighting/HDLightUI.cs b/com.unity.render-pipelines.high-definition/Editor/Lighting/HDLightUI.cs index 528c612f80b..c6d08511ad1 100644 --- a/com.unity.render-pipelines.high-definition/Editor/Lighting/HDLightUI.cs +++ b/com.unity.render-pipelines.high-definition/Editor/Lighting/HDLightUI.cs @@ -1005,7 +1005,9 @@ static void DrawShadowMapContent(SerializedHDLight serialized, Editor owner) EditorGUILayout.PropertyField(serialized.filterTracedShadow, s_Styles.denoiseTracedShadow); EditorGUI.indentLevel++; EditorGUILayout.PropertyField(serialized.filterSizeTraced, s_Styles.denoiserRadius); - EditorGUILayout.PropertyField(serialized.distanceBasedFiltering, s_Styles.distanceBasedFiltering); + // We only support distance based filtering if we have a punctual light source (point or spot) + if (isPunctual) + EditorGUILayout.PropertyField(serialized.distanceBasedFiltering, s_Styles.distanceBasedFiltering); EditorGUI.indentLevel--; EditorGUI.indentLevel--; } diff --git a/com.unity.render-pipelines.high-definition/Editor/Material/TerrainLit/StandardsTerrainToHDTerrainLitUpgrader.cs b/com.unity.render-pipelines.high-definition/Editor/Material/TerrainLit/StandardsTerrainToHDTerrainLitUpgrader.cs new file mode 100644 index 00000000000..91d4812cf55 --- /dev/null +++ b/com.unity.render-pipelines.high-definition/Editor/Material/TerrainLit/StandardsTerrainToHDTerrainLitUpgrader.cs @@ -0,0 +1,21 @@ +using UnityEngine; +using UnityEngine.Rendering.HighDefinition; + +namespace UnityEditor.Rendering.HighDefinition +{ + class StandardsTerrainToHDTerrainLitUpgrader : MaterialUpgrader + { + + public StandardsTerrainToHDTerrainLitUpgrader(string sourceShaderName, string destShaderName, MaterialFinalizer finalizer = null) + { + RenameShader(sourceShaderName, destShaderName, finalizer); + } + + public override void Convert(Material srcMaterial, Material dstMaterial) + { + base.Convert(srcMaterial, dstMaterial); + + 
HDShaderUtils.ResetMaterialKeywords(dstMaterial); + } + } +} diff --git a/com.unity.render-pipelines.high-definition/Editor/Material/TerrainLit/StandardsTerrainToHDTerrainLitUpgrader.cs.meta b/com.unity.render-pipelines.high-definition/Editor/Material/TerrainLit/StandardsTerrainToHDTerrainLitUpgrader.cs.meta new file mode 100644 index 00000000000..5b33cc09037 --- /dev/null +++ b/com.unity.render-pipelines.high-definition/Editor/Material/TerrainLit/StandardsTerrainToHDTerrainLitUpgrader.cs.meta @@ -0,0 +1,11 @@ +fileFormatVersion: 2 +guid: 6d3e54e3dd1c6c249a59f788380085b1 +MonoImporter: + externalObjects: {} + serializedVersion: 2 + defaultReferences: [] + executionOrder: 0 + icon: {instanceID: 0} + userData: + assetBundleName: + assetBundleVariant: diff --git a/com.unity.render-pipelines.high-definition/Editor/RenderPipeline/Settings/FrameSettingsUI.Drawers.cs b/com.unity.render-pipelines.high-definition/Editor/RenderPipeline/Settings/FrameSettingsUI.Drawers.cs index 7a98ef3a26d..3e924151579 100644 --- a/com.unity.render-pipelines.high-definition/Editor/RenderPipeline/Settings/FrameSettingsUI.Drawers.cs +++ b/com.unity.render-pipelines.high-definition/Editor/RenderPipeline/Settings/FrameSettingsUI.Drawers.cs @@ -213,6 +213,7 @@ static void Drawer_SectionRenderingSettings(SerializedFrameSettings serialized, area.AmmendInfo(FrameSettingsField.RayTracing, overrideable: () => hdrpSettings.supportRayTracing); area.AmmendInfo(FrameSettingsField.MotionVectors, overrideable: () => hdrpSettings.supportMotionVectors); area.AmmendInfo(FrameSettingsField.ObjectMotionVectors, overrideable: () => hdrpSettings.supportMotionVectors); + area.AmmendInfo(FrameSettingsField.TransparentsWriteMotionVector, overrideable: () => hdrpSettings.supportMotionVectors); area.AmmendInfo(FrameSettingsField.Decals, overrideable: () => hdrpSettings.supportDecals); area.AmmendInfo(FrameSettingsField.Distortion, overrideable: () => hdrpSettings.supportDistortion); diff --git 
a/com.unity.render-pipelines.high-definition/Editor/Upgraders/UpgradeStandardShaderMaterials.cs b/com.unity.render-pipelines.high-definition/Editor/Upgraders/UpgradeStandardShaderMaterials.cs index d07e9d15459..3caefc2fd49 100644 --- a/com.unity.render-pipelines.high-definition/Editor/Upgraders/UpgradeStandardShaderMaterials.cs +++ b/com.unity.render-pipelines.high-definition/Editor/Upgraders/UpgradeStandardShaderMaterials.cs @@ -18,6 +18,9 @@ static List GetHDUpgraders() upgraders.Add(new UnlitsToHDUnlitUpgrader("Unlit/Texture", "HDRP/Unlit")); upgraders.Add(new UnlitsToHDUnlitUpgrader("Unlit/Transparent", "HDRP/Unlit")); upgraders.Add(new UnlitsToHDUnlitUpgrader("Unlit/Transparent Cutout", "HDRP/Unlit")); + + upgraders.Add(new StandardsTerrainToHDTerrainLitUpgrader("Nature/Terrain/Standard", "HDRP/TerrainLit")); + return upgraders; } diff --git a/com.unity.render-pipelines.high-definition/Runtime/Debug/DebugDisplay.cs b/com.unity.render-pipelines.high-definition/Runtime/Debug/DebugDisplay.cs index 5c052d933de..3d9c378d267 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Debug/DebugDisplay.cs +++ b/com.unity.render-pipelines.high-definition/Runtime/Debug/DebugDisplay.cs @@ -150,6 +150,7 @@ public class DebugDisplaySettings : IDebugData static bool needsRefreshingCameraFreezeList = true; List m_RecordedSamplers = new List(); + List m_RecordedSamplersRT = new List(); enum DebugProfilingType { CPU, @@ -691,6 +692,36 @@ void DisableProfilingRecorders() m_RecordedSamplers.Clear(); } + void EnableProfilingRecordersRT() + { + Debug.Assert(m_RecordedSamplersRT.Count == 0); + + m_RecordedSamplersRT.Add(ProfilingSampler.Get(HDProfileId.RaytracingBuildCluster)); + m_RecordedSamplersRT.Add(ProfilingSampler.Get(HDProfileId.RaytracingCullLights)); + m_RecordedSamplersRT.Add(ProfilingSampler.Get(HDProfileId.RaytracingIntegrateReflection)); + m_RecordedSamplersRT.Add(ProfilingSampler.Get(HDProfileId.RaytracingFilterReflection)); + 
m_RecordedSamplersRT.Add(ProfilingSampler.Get(HDProfileId.RaytracingAmbientOcclusion)); + m_RecordedSamplersRT.Add(ProfilingSampler.Get(HDProfileId.RaytracingFilterAmbientOcclusion)); + m_RecordedSamplersRT.Add(ProfilingSampler.Get(HDProfileId.RaytracingDirectionalLightShadow)); + m_RecordedSamplersRT.Add(ProfilingSampler.Get(HDProfileId.RaytracingLightShadow)); + m_RecordedSamplersRT.Add(ProfilingSampler.Get(HDProfileId.RaytracingIntegrateIndirectDiffuse)); + m_RecordedSamplersRT.Add(ProfilingSampler.Get(HDProfileId.RaytracingFilterIndirectDiffuse)); + m_RecordedSamplersRT.Add(ProfilingSampler.Get(HDProfileId.RaytracingDebugOverlay)); + m_RecordedSamplersRT.Add(ProfilingSampler.Get(HDProfileId.ForwardPreRefraction)); + m_RecordedSamplersRT.Add(ProfilingSampler.Get(HDProfileId.RayTracingRecursiveRendering)); + m_RecordedSamplersRT.Add(ProfilingSampler.Get(HDProfileId.RayTracingPrepass)); + } + + void DisableProfilingRecordersRT() + { + foreach (var sampler in m_RecordedSamplersRT) + { + sampler.enableRecording = false; + } + + m_RecordedSamplersRT.Clear(); + } + ObservableList BuildProfilingSamplerList(DebugProfilingType type) { var result = new ObservableList(); @@ -708,17 +739,40 @@ void DisableProfilingRecorders() return result; } + ObservableList BuildProfilingSamplerListRT(DebugProfilingType type) + { + var result = new ObservableList(); + foreach (var sampler in m_RecordedSamplersRT) + { + sampler.enableRecording = true; + result.Add(new DebugUI.Value + { + displayName = sampler.name, + getter = () => string.Format("{0:F2}", (type == DebugProfilingType.CPU) ? sampler.cpuElapsedTime : ((type == DebugProfilingType.GPU) ? 
sampler.gpuElapsedTime : sampler.inlineCpuElapsedTime)), + refreshRate = 1.0f / 5.0f + }); + } + + return result; + } + void RegisterDisplayStatsDebug() { var list = new List(); list.Add(new DebugUI.Value { displayName = "Frame Rate (fps)", getter = () => 1f / Time.smoothDeltaTime, refreshRate = 1f / 5f }); list.Add(new DebugUI.Value { displayName = "Frame Time (ms)", getter = () => Time.smoothDeltaTime * 1000f, refreshRate = 1f / 5f }); + EnableProfilingRecorders(); list.Add(new DebugUI.Foldout("CPU timings (Command Buffers)", BuildProfilingSamplerList(DebugProfilingType.CPU))); list.Add(new DebugUI.Foldout("GPU timings", BuildProfilingSamplerList(DebugProfilingType.GPU))); + if (HDRenderPipeline.currentAsset?.currentPlatformRenderPipelineSettings.supportRayTracing ?? true) + { + EnableProfilingRecordersRT(); + list.Add(new DebugUI.Foldout("CPU timings RT (Command Buffers)", BuildProfilingSamplerListRT(DebugProfilingType.CPU))); + list.Add(new DebugUI.Foldout("GPU timings RT", BuildProfilingSamplerListRT(DebugProfilingType.GPU))); + } list.Add(new DebugUI.Foldout("Inline CPU timings", BuildProfilingSamplerList(DebugProfilingType.InlineCPU))); - list.Add(new DebugUI.BoolField { displayName = "Count Rays (MRays/Frame)", getter = () => data.countRays, setter = value => data.countRays = value, onValueChanged = RefreshDisplayStatsDebug }); if (data.countRays) { @@ -1551,6 +1605,8 @@ internal void UnregisterDebug() UnregisterDebugItems(k_PanelDecals, m_DebugDecalsAffectingTransparentItems); DisableProfilingRecorders(); + if (HDRenderPipeline.currentAsset?.currentPlatformRenderPipelineSettings.supportRayTracing ?? 
true) + DisableProfilingRecordersRT(); UnregisterDebugItems(k_PanelDisplayStats, m_DebugDisplayStatsItems); UnregisterDebugItems(k_PanelMaterials, m_DebugMaterialItems); diff --git a/com.unity.render-pipelines.high-definition/Runtime/Lighting/LightLoop/LightLoop.cs b/com.unity.render-pipelines.high-definition/Runtime/Lighting/LightLoop/LightLoop.cs index 84890509e9f..ccfa707d79d 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Lighting/LightLoop/LightLoop.cs +++ b/com.unity.render-pipelines.high-definition/Runtime/Lighting/LightLoop/LightLoop.cs @@ -217,14 +217,14 @@ public enum TileClusterCategoryDebug : int [GenerateHLSL(needAccessors = false, generateCBuffer = true)] unsafe struct ShaderVariablesLightList { - [HLSLArray((int)ShaderOptions.XrMaxViews, typeof(Matrix4x4))] - public fixed float g_mInvScrProjectionArr[(int)ShaderOptions.XrMaxViews * 16]; - [HLSLArray((int)ShaderOptions.XrMaxViews, typeof(Matrix4x4))] - public fixed float g_mScrProjectionArr[(int)ShaderOptions.XrMaxViews * 16]; - [HLSLArray((int)ShaderOptions.XrMaxViews, typeof(Matrix4x4))] - public fixed float g_mInvProjectionArr[(int)ShaderOptions.XrMaxViews * 16]; - [HLSLArray((int)ShaderOptions.XrMaxViews, typeof(Matrix4x4))] - public fixed float g_mProjectionArr[(int)ShaderOptions.XrMaxViews * 16]; + [HLSLArray(ShaderConfig.k_XRMaxViewsForCBuffer, typeof(Matrix4x4))] + public fixed float g_mInvScrProjectionArr[ShaderConfig.k_XRMaxViewsForCBuffer * 16]; + [HLSLArray(ShaderConfig.k_XRMaxViewsForCBuffer, typeof(Matrix4x4))] + public fixed float g_mScrProjectionArr[ShaderConfig.k_XRMaxViewsForCBuffer * 16]; + [HLSLArray(ShaderConfig.k_XRMaxViewsForCBuffer, typeof(Matrix4x4))] + public fixed float g_mInvProjectionArr[ShaderConfig.k_XRMaxViewsForCBuffer * 16]; + [HLSLArray(ShaderConfig.k_XRMaxViewsForCBuffer, typeof(Matrix4x4))] + public fixed float g_mProjectionArr[ShaderConfig.k_XRMaxViewsForCBuffer * 16]; public Vector4 g_screenSize; @@ -1788,7 +1788,44 @@ internal bool 
GetEnvLightData(CommandBuffer cmd, HDCamera hdCamera, in Processed && !hdCamera.frameSettings.IsEnabled(FrameSettingsField.PlanarProbe)) break; - var scaleOffset = m_TextureCaches.reflectionPlanarProbeCache.FetchSlice(cmd, probe.texture, out int fetchIndex); + // Grab the render data that was used to render the probe + var renderData = planarProbe.renderData; + // Grab the world to camera matrix of the capture camera + var worldToCameraRHSMatrix = renderData.worldToCameraRHS; + // Grab the projection matrix that was used to render + var projectionMatrix = renderData.projectionMatrix; + // Build an alternative matrix for projection that is not oblique + var projectionMatrixNonOblique = Matrix4x4.Perspective(renderData.fieldOfView, probe.texture.width / probe.texture.height, probe.settings.cameraSettings.frustum.nearClipPlaneRaw, probe.settings.cameraSettings.frustum.farClipPlane); + + // Convert the projection matrices to their GPU version + var gpuProj = GL.GetGPUProjectionMatrix(projectionMatrix, true); + var gpuProjNonOblique = GL.GetGPUProjectionMatrix(projectionMatrixNonOblique, true); + + // Build the oblique and non oblique view projection matrices + var vp = gpuProj * worldToCameraRHSMatrix; + var vpNonOblique = gpuProjNonOblique * worldToCameraRHSMatrix; + + // We need to collect the set of parameters required for the filtering + IBLFilterBSDF.PlanarTextureFilteringParameters planarTextureFilteringParameters = new IBLFilterBSDF.PlanarTextureFilteringParameters(); + planarTextureFilteringParameters.probeNormal = Vector3.Normalize(hdCamera.camera.transform.position - renderData.capturePosition); + planarTextureFilteringParameters.probePosition = probe.gameObject.transform.position; + planarTextureFilteringParameters.captureCameraDepthBuffer = planarProbe.realtimeDepthTexture; + planarTextureFilteringParameters.captureCameraScreenSize = new Vector4(probe.texture.width, probe.texture.height, 1.0f / probe.texture.width, 1.0f / probe.texture.height); + 
planarTextureFilteringParameters.captureCameraIVP = vp.inverse; + planarTextureFilteringParameters.captureCameraIVP_NonOblique = vpNonOblique.inverse; + planarTextureFilteringParameters.captureCameraVP_NonOblique = vpNonOblique; + planarTextureFilteringParameters.captureCameraPosition = renderData.capturePosition; + planarTextureFilteringParameters.captureFOV = renderData.fieldOfView; + planarTextureFilteringParameters.captureNearPlane = probe.settings.cameraSettings.frustum.nearClipPlaneRaw; + planarTextureFilteringParameters.captureFarPlane = probe.settings.cameraSettings.frustum.farClipPlane; + + // Fetch the slice and do the filtering + var scaleOffset = m_TextureCaches.reflectionPlanarProbeCache.FetchSlice(cmd, probe.texture, ref planarTextureFilteringParameters, out int fetchIndex); + + // We don't need to provide the capture position + // It is already encoded in the 'worldToCameraRHSMatrix' + capturePosition = Vector3.zero; + // Indices start at 1, because -0 == 0, we can know from the bit sign which cache to use envIndex = scaleOffset == Vector4.zero ? 
int.MinValue : -(fetchIndex + 1); @@ -1800,19 +1837,7 @@ internal bool GetEnvLightData(CommandBuffer cmd, HDCamera hdCamera, in Processed } atlasScaleOffset = scaleOffset; - - var renderData = planarProbe.renderData; - var worldToCameraRHSMatrix = renderData.worldToCameraRHS; - var projectionMatrix = renderData.projectionMatrix; - - // We don't need to provide the capture position - // It is already encoded in the 'worldToCameraRHSMatrix' - capturePosition = Vector3.zero; - - // get the device dependent projection matrix - var gpuProj = GL.GetGPUProjectionMatrix(projectionMatrix, true); - var gpuView = worldToCameraRHSMatrix; - var vp = gpuProj * gpuView; + m_TextureCaches.env2DAtlasScaleOffset[fetchIndex] = scaleOffset; m_TextureCaches.env2DCaptureVP[fetchIndex] = vp; diff --git a/com.unity.render-pipelines.high-definition/Runtime/Lighting/PlanarReflectionFiltering.compute b/com.unity.render-pipelines.high-definition/Runtime/Lighting/PlanarReflectionFiltering.compute new file mode 100644 index 00000000000..426c8273d03 --- /dev/null +++ b/com.unity.render-pipelines.high-definition/Runtime/Lighting/PlanarReflectionFiltering.compute @@ -0,0 +1,194 @@ +#pragma kernel FilterPlanarReflection +#pragma kernel DownScale +#pragma kernel DepthConversion + +#pragma only_renderers d3d11 playstation xboxone vulkan metal switch +// #pragma enable_d3d11_debug_symbols + +// The process is done in 3 steps. We start by converting the depth from oblique to regular frustum depth. +// Then we build a mip chain of both the depth and the color. The depth is averaged in 2x2 and the color +// is filtered in a wider neighborhood (otherwise we get too much artifacts) when doing the actual filtering. +// The filtering estimates the pixel footprint of the blur based on the distance to the occluder, the roughness +// of the current mip and the distance to the pixel. we then select the input from the right mip (the idea) +// Is to avoid a 128x128 blur for the rougher values. 
+ +// HDRP generic includes +#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Common.hlsl" +#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/GeometricTools.hlsl" +#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/ImageBasedLighting.hlsl" +#include "Packages/com.unity.render-pipelines.high-definition/Runtime/ShaderLibrary/ShaderVariables.hlsl" +#include "Packages/com.unity.render-pipelines.high-definition/Runtime/Material/Material.hlsl" + +// Tile size of this compute +#define PLANAR_REFLECTION_TILE_SIZE 8 + +// Mip chain of depth and color +TEXTURE2D(_DepthTextureMipChain); +TEXTURE2D(_ReflectionColorMipChain); + +CBUFFER_START(ShaderVariablesPlanarReflectionFiltering) + // The screen size (width, height, 1.0 / width, 1.0 / height) that is produced by the capture + float4 _CaptureBaseScreenSize; + // The screen size (width, height, 1.0 / width, 1.0 / height) of the current level processed + float4 _CaptureCurrentScreenSize; + // Normal of the planar reflection plane + float3 _ReflectionPlaneNormal; + // World space position of the planar reflection (non camera relative) + float3 _ReflectionPlanePosition; + // FOV of the capture camera + float _CaptureCameraFOV; + // World space position of the capture camera (non camera relative) + float3 _CaptureCameraPositon; + // The mip index of the source data + uint _SourceMipIndex; + // Inverse view projection of the capture camera (oblique) + float4x4 _CaptureCameraIVP; + // Inverse view projection of the capture camera (non oblique) + float4x4 _CaptureCameraIVP_NO; + // View projection of the capture camera (non oblique) + float4x4 _CaptureCameraVP_NO; + // Given that sometimes our writing texture can be bigger than the current target, we need to apply a scale factor before using the sampling intrinsic + float _RTScaleFactor; + // Far plane of the capture camera + float _CaptureCameraFarPlane; + // The number of valid mips in the mip chain + uint _MaxMipLevels; +CBUFFER_END + 
+// Output buffer of our filtering code +RW_TEXTURE2D(float4, _FilteredPlanarReflectionBuffer); + +// These angles have been experimentally computed to match the result of reflection probes. Initially this was a table dependent on angle and roughness, but given that every planar has a +// finite number of LODs and those LODS have fixed roughness and the angle changes the result, but not that much. I changed it to a per LOD LUT +static const float reflectionProbeEquivalentAngles[UNITY_SPECCUBE_LOD_STEPS + 1] = {0.0, 0.04, 0.12, 0.4, 0.9, 1.2, 1.2}; + +[numthreads(PLANAR_REFLECTION_TILE_SIZE, PLANAR_REFLECTION_TILE_SIZE, 1)] +void FilterPlanarReflection(uint3 dispatchThreadId : SV_DispatchThreadID, uint2 groupThreadId : SV_GroupThreadID, uint2 groupId : SV_GroupID) +{ + UNITY_XR_ASSIGN_VIEW_INDEX(dispatchThreadId.z); + + // Compute the pixel position to process + uint2 currentCoord = (uint2)(groupId * PLANAR_REFLECTION_TILE_SIZE + groupThreadId); + + // Compute the coordinates that shall be used for sampling + float2 sampleCoords = (currentCoord << (int)(_SourceMipIndex)) * _CaptureBaseScreenSize.zw * _RTScaleFactor; + + // Fetch the depth value for the current pixel. 
+ float centerDepthValue = SAMPLE_TEXTURE2D_LOD(_DepthTextureMipChain, s_trilinear_clamp_sampler, sampleCoords, _SourceMipIndex).x; + + // Compute the world position of the tapped pixel + PositionInputs centralPosInput = GetPositionInput(currentCoord, _CaptureCurrentScreenSize.zw, centerDepthValue, _CaptureCameraIVP_NO, 0, 0); + + // Compute the direction to the reflection pixel + const float3 rayDirection = normalize(centralPosInput.positionWS - _CaptureCameraPositon); + + // Compute the position on the plane we shall be integrating from + float t = -1.0; + if (!IntersectRayPlane(_CaptureCameraPositon, rayDirection, _ReflectionPlanePosition, _ReflectionPlaneNormal, t)) + { + // If there is no plane intersection, there is nothing to filter (means that is a position that cannot be reflected) + _FilteredPlanarReflectionBuffer[currentCoord] = float4(0.0, 0.0, 0.0, 1.0); + return; + } + + // Compute the integration position (position on the plane) + const float3 integrationPositionRWS = _CaptureCameraPositon + rayDirection * t; + + // Evaluate the cone halfangle for the filtering + const float halfAngle = reflectionProbeEquivalentAngles[_SourceMipIndex]; + + // Compute the distances we need for our filtering + const float distanceCameraToPlane = length(integrationPositionRWS - _CaptureCameraPositon); + const float distancePlaneToObject = length(centralPosInput.positionWS - integrationPositionRWS); + + // Compute the cone footprint on the image reflection plane for this configuration + const float brdfConeRadius = tan(halfAngle) * distancePlaneToObject; + + // We need to compute the view cone radius + const float viewConeRadius = brdfConeRadius * distanceCameraToPlane / (distancePlaneToObject + distanceCameraToPlane); + + // Compute the view cone's half angle. 
This matches the FOV angle to see exactly the half of the cone (The tangent could be precomputed in the table) + const float viewConeHalfAngle = FastATanPos(viewConeRadius / distanceCameraToPlane); + // Given the camera's fov and pixel resolution convert the viewConeHalfAngle to a number of pixels + const float pixelDistance = viewConeHalfAngle / _CaptureCameraFOV * _CaptureCurrentScreenSize.x; + + // Convert this to a mip level shift starting from the mip 0 + const float miplevel = log2(pixelDistance / 2); + + // Because of the high level of aliasing that this algorithm causes, especially on the higher mips, we apply a mip bias during the sampling to try to hide it + const float mipBias = _SourceMipIndex > 3 ? lerp(0.0, 2.0, (_MaxMipLevels - _SourceMipIndex) / _MaxMipLevels) : 0.0; + + // Read the integration color that we should take + const float3 integrationColor = SAMPLE_TEXTURE2D_LOD(_ReflectionColorMipChain, s_trilinear_clamp_sampler, sampleCoords, clamp(miplevel + _SourceMipIndex + mipBias, 0, _MaxMipLevels)).xyz; + + // Write the output ray data + _FilteredPlanarReflectionBuffer[currentCoord] = float4(integrationColor, 1.0); +} + +// Half resolution output texture for our mip chain build. +RW_TEXTURE2D(float4, _HalfResReflectionBuffer); +RW_TEXTURE2D(float, _HalfResDepthBuffer); + +[numthreads(PLANAR_REFLECTION_TILE_SIZE, PLANAR_REFLECTION_TILE_SIZE, 1)] +void DownScale(uint3 dispatchThreadId : SV_DispatchThreadID, uint2 groupThreadId : SV_GroupThreadID, uint2 groupId : SV_GroupID) +{ + UNITY_XR_ASSIGN_VIEW_INDEX(dispatchThreadId.z); + + // Compute the pixel position to process + int2 currentCoord = (int2)(groupId * PLANAR_REFLECTION_TILE_SIZE + groupThreadId); + + // Unfortunately, we have to go wider than the simple 2x2 neighborhood or there is too much aliasing + float3 averageColor = 0.0; + float sumW = 0.0; + // In order to avoid a one pixel shift to the right, we need to center our down sample. 
+ for (int y = -1; y <= 2; ++y) + { + for (int x = -1; x <= 2; ++x) + { + const int2 tapCoord = currentCoord * 2 + uint2(x, y); + // If the pixel is outside the current screen size, its weight becomes zero + float weight = tapCoord.x > _CaptureCurrentScreenSize.x || tapCoord.x < 0 + || tapCoord.y > _CaptureCurrentScreenSize.y || tapCoord.y < 0 ? 0.0 : 1.0; + averageColor += LOAD_TEXTURE2D_LOD(_ReflectionColorMipChain, tapCoord, _SourceMipIndex).xyz * weight; + sumW += weight; + } + } + // Normalize and output + _HalfResReflectionBuffer[currentCoord] = float4(averageColor / sumW, 1.0); + + // We average the 4 depths and move on + _HalfResDepthBuffer[currentCoord] = (LOAD_TEXTURE2D_LOD(_DepthTextureMipChain, currentCoord * 2, _SourceMipIndex).x + + LOAD_TEXTURE2D_LOD(_DepthTextureMipChain, currentCoord * 2 + uint2(0,1), _SourceMipIndex).x + + LOAD_TEXTURE2D_LOD(_DepthTextureMipChain, currentCoord * 2 + uint2(1,0), _SourceMipIndex).x + + LOAD_TEXTURE2D_LOD(_DepthTextureMipChain, currentCoord * 2 + uint2(1,1), _SourceMipIndex).x) * 0.25; +} + +// Initial depth buffer (oblique) +TEXTURE2D(_DepthTextureOblique); +// Converted depth values (non oblique) +RW_TEXTURE2D(float, _DepthTextureNonOblique); + +[numthreads(PLANAR_REFLECTION_TILE_SIZE, PLANAR_REFLECTION_TILE_SIZE, 1)] +void DepthConversion(uint3 dispatchThreadId : SV_DispatchThreadID, uint2 groupThreadId : SV_GroupThreadID, uint2 groupId : SV_GroupID) +{ + UNITY_XR_ASSIGN_VIEW_INDEX(dispatchThreadId.z); + + // Compute the pixel position to process + int2 currentCoord = (int2)(groupId * PLANAR_REFLECTION_TILE_SIZE + groupThreadId); + + // Fetch the depth value for the current pixel. It would be great to use sample instead, but oblique matrices prevent us from doing it. 
+ float centerDepthValue = LOAD_TEXTURE2D_LOD(_DepthTextureOblique, currentCoord, 0).x; + + // Compute the world position of the tapped pixel + PositionInputs centralPosInput = GetPositionInput(currentCoord, _CaptureCurrentScreenSize.zw, centerDepthValue, _CaptureCameraIVP, 0, 0); + + // For some reason, with oblique matrices, when the point is on the background the reconstructed position ends up behind the camera and at the wrong position + float3 rayDirection = normalize(_CaptureCameraPositon - centralPosInput.positionWS); + rayDirection = centerDepthValue == 0.0 ? -rayDirection : rayDirection; + // Adjust the position + centralPosInput.positionWS = centerDepthValue == 0.0 ? _CaptureCameraPositon + rayDirection * _CaptureCameraFarPlane : centralPosInput.positionWS; + + // Re-do the projection, but this time without the oblique part and export it + float4 hClip = mul(_CaptureCameraVP_NO, float4(centralPosInput.positionWS, 1.0)); + _DepthTextureNonOblique[currentCoord] = saturate(hClip.z / hClip.w); +} diff --git a/com.unity.render-pipelines.high-definition/Runtime/Lighting/PlanarReflectionFiltering.compute.meta b/com.unity.render-pipelines.high-definition/Runtime/Lighting/PlanarReflectionFiltering.compute.meta new file mode 100644 index 00000000000..2277cb2d810 --- /dev/null +++ b/com.unity.render-pipelines.high-definition/Runtime/Lighting/PlanarReflectionFiltering.compute.meta @@ -0,0 +1,8 @@ +fileFormatVersion: 2 +guid: 9f3f8a01b8caaaa4595591dc96d43dd2 +ComputeShaderImporter: + externalObjects: {} + currentAPIMask: 4 + userData: + assetBundleName: + assetBundleVariant: diff --git a/com.unity.render-pipelines.high-definition/Runtime/Lighting/ProbeVolume/ProbeVolume.hlsl b/com.unity.render-pipelines.high-definition/Runtime/Lighting/ProbeVolume/ProbeVolume.hlsl index 75a0432cae0..b26f72f11c7 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Lighting/ProbeVolume/ProbeVolume.hlsl +++ 
b/com.unity.render-pipelines.high-definition/Runtime/Lighting/ProbeVolume/ProbeVolume.hlsl @@ -487,9 +487,8 @@ float3 EvaluateProbeVolumesLightLoop(inout float probeVolumeHierarchyWeight, Pos } // Fallback to global ambient probe lighting when probe volume lighting weight is not fully saturated. -float3 EvaluateProbeVolumeAmbientProbeFallback(inout float probeVolumeHierarchyWeight, float3 normalWS) +void EvaluateProbeVolumeAmbientProbeFallback(float3 normalWS, float3 backNormalWS, inout float3 bakeDiffuseLighting, inout float3 backBakeDiffuseLighting, inout float probeVolumeHierarchyWeight) { - float3 sampleAmbientProbeOutgoingRadiance = float3(0.0, 0.0, 0.0); if (probeVolumeHierarchyWeight < 1.0 #ifdef DEBUG_DISPLAY && (_DebugProbeVolumeMode != PROBEVOLUMEDEBUGMODE_VISUALIZE_DEBUG_COLORS) @@ -497,10 +496,11 @@ float3 EvaluateProbeVolumeAmbientProbeFallback(inout float probeVolumeHierarchyW #endif ) { - sampleAmbientProbeOutgoingRadiance = SampleSH9(_ProbeVolumeAmbientProbeFallbackPackedCoeffs, normalWS) * (1.0 - probeVolumeHierarchyWeight); + float fallbackWeight = 1.0 - probeVolumeHierarchyWeight; + bakeDiffuseLighting += SampleSH9(_ProbeVolumeAmbientProbeFallbackPackedCoeffs, normalWS) * fallbackWeight; + backBakeDiffuseLighting += SampleSH9(_ProbeVolumeAmbientProbeFallbackPackedCoeffs, backNormalWS) * fallbackWeight; probeVolumeHierarchyWeight = 1.0; } - return sampleAmbientProbeOutgoingRadiance; } #endif // __PROBEVOLUME_HLSL__ diff --git a/com.unity.render-pipelines.high-definition/Runtime/Lighting/Reflection/HDProbe.cs b/com.unity.render-pipelines.high-definition/Runtime/Lighting/Reflection/HDProbe.cs index 145981a32b3..c2a1babc366 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Lighting/Reflection/HDProbe.cs +++ b/com.unity.render-pipelines.high-definition/Runtime/Lighting/Reflection/HDProbe.cs @@ -118,6 +118,7 @@ float aspect // Runtime Data RenderTexture m_RealtimeTexture; + RenderTexture m_RealtimeDepthBuffer; RenderData 
m_RealtimeRenderData; bool m_WasRenderedSinceLastOnDemandRequest = true; @@ -189,6 +190,12 @@ public RenderTexture realtimeTexture set => m_RealtimeTexture = value; } + public RenderTexture realtimeDepthTexture + { + get => m_RealtimeDepthBuffer; + set => m_RealtimeDepthBuffer = value; + } + /// /// The texture used during lighting for this probe. /// @@ -231,6 +238,20 @@ public Texture SetTexture(ProbeSettings.Mode targetMode, Texture texture) } } + public Texture SetDepthTexture(ProbeSettings.Mode targetMode, Texture texture) + { + if (targetMode == ProbeSettings.Mode.Realtime && !(texture is RenderTexture)) + throw new ArgumentException("'texture' must be a RenderTexture for the Realtime mode."); + + switch (targetMode) + { + case ProbeSettings.Mode.Baked: return m_BakedTexture = texture; + case ProbeSettings.Mode.Custom: return m_CustomTexture = texture; + case ProbeSettings.Mode.Realtime: return m_RealtimeDepthBuffer = (RenderTexture)texture; + default: throw new ArgumentOutOfRangeException(); + } + } + /// /// The render data of the last bake /// diff --git a/com.unity.render-pipelines.high-definition/Runtime/Lighting/Reflection/PlanarReflectionProbeCache.cs b/com.unity.render-pipelines.high-definition/Runtime/Lighting/Reflection/PlanarReflectionProbeCache.cs index e9e37cceb4a..1143e6db8fa 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Lighting/Reflection/PlanarReflectionProbeCache.cs +++ b/com.unity.render-pipelines.high-definition/Runtime/Lighting/Reflection/PlanarReflectionProbeCache.cs @@ -109,7 +109,7 @@ void ConvertTexture(CommandBuffer cmd, Texture input, RenderTexture target) CoreUtils.DrawFullScreen(cmd, m_ConvertTextureMaterial, m_ConvertTextureMPB); } - Texture ConvolveProbeTexture(CommandBuffer cmd, Texture texture, out Vector4 sourceScaleOffset) + Texture ConvolveProbeTexture(CommandBuffer cmd, Texture texture, ref IBLFilterBSDF.PlanarTextureFilteringParameters planarTextureFilteringParameters, out Vector4 sourceScaleOffset) { // 
Probes can be either Cubemaps (for baked probes) or RenderTextures (for realtime probes) Texture2D texture2D = texture as Texture2D; @@ -158,12 +158,12 @@ Texture ConvolveProbeTexture(CommandBuffer cmd, Texture texture, out Vector4 sou float scaleX = (float)texture.width / m_ConvolutionTargetTexture.width; float scaleY = (float)texture.height / m_ConvolutionTargetTexture.height; sourceScaleOffset = new Vector4(scaleX, scaleY, 0, 0); - m_IBLFilterGGX.FilterPlanarTexture(cmd, convolutionSourceTexture, m_ConvolutionTargetTexture); + m_IBLFilterGGX.FilterPlanarTexture(cmd, convolutionSourceTexture, ref planarTextureFilteringParameters, m_ConvolutionTargetTexture); return m_ConvolutionTargetTexture; } - public Vector4 FetchSlice(CommandBuffer cmd, Texture texture, out int fetchIndex) + public Vector4 FetchSlice(CommandBuffer cmd, Texture texture, ref IBLFilterBSDF.PlanarTextureFilteringParameters planarTextureFilteringParameters, out int fetchIndex) { Vector4 scaleOffset = Vector4.zero; fetchIndex = m_FrameProbeIndex++; @@ -172,17 +172,17 @@ public Vector4 FetchSlice(CommandBuffer cmd, Texture texture, out int fetchIndex { // If the texture is already in the atlas, we update it only if needed if (NeedsUpdate(texture) || m_ProbeBakingState[scaleOffset] != ProbeFilteringState.Ready) - if (!UpdatePlanarTexture(cmd, texture, ref scaleOffset)) + if (!UpdatePlanarTexture(cmd, texture, ref planarTextureFilteringParameters, ref scaleOffset)) Debug.LogError("Can't convolve or update the planar reflection render target"); } else // Either we add it to the atlas - if (!UpdatePlanarTexture(cmd, texture, ref scaleOffset)) + if (!UpdatePlanarTexture(cmd, texture, ref planarTextureFilteringParameters, ref scaleOffset)) Debug.LogError("No more space in the planar reflection probe atlas. 
To solve this issue, increase the size of the Planar Reflection Probe Atlas in the HDRP settings."); return scaleOffset; } - bool UpdatePlanarTexture(CommandBuffer cmd, Texture texture, ref Vector4 scaleOffset) + bool UpdatePlanarTexture(CommandBuffer cmd, Texture texture, ref IBLFilterBSDF.PlanarTextureFilteringParameters planarTextureFilteringParameters, ref Vector4 scaleOffset) { bool success = false; @@ -192,7 +192,7 @@ bool UpdatePlanarTexture(CommandBuffer cmd, Texture texture, ref Vector4 scaleOf m_ProbeBakingState[scaleOffset] = ProbeFilteringState.Convolving; Vector4 sourceScaleOffset; - Texture convolvedTexture = ConvolveProbeTexture(cmd, texture, out sourceScaleOffset); + Texture convolvedTexture = ConvolveProbeTexture(cmd, texture, ref planarTextureFilteringParameters, out sourceScaleOffset); if (convolvedTexture == null) return false; diff --git a/com.unity.render-pipelines.high-definition/Runtime/Lighting/VolumetricLighting/VolumetricLighting.cs b/com.unity.render-pipelines.high-definition/Runtime/Lighting/VolumetricLighting/VolumetricLighting.cs index 7c3280d4d4f..6180f149bf1 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Lighting/VolumetricLighting/VolumetricLighting.cs +++ b/com.unity.render-pipelines.high-definition/Runtime/Lighting/VolumetricLighting/VolumetricLighting.cs @@ -43,8 +43,8 @@ public static DensityVolumeEngineData GetNeutralValues() [GenerateHLSL(needAccessors = false, generateCBuffer = true)] unsafe struct ShaderVariablesVolumetric { - [HLSLArray((int)ShaderOptions.XrMaxViews, typeof(Matrix4x4))] - public fixed float _VBufferCoordToViewDirWS[(int)ShaderOptions.XrMaxViews * 16]; + [HLSLArray(ShaderConfig.k_XRMaxViewsForCBuffer, typeof(Matrix4x4))] + public fixed float _VBufferCoordToViewDirWS[ShaderConfig.k_XRMaxViewsForCBuffer * 16]; public float _VBufferUnitDepthTexelSpacing; public uint _NumVisibleDensityVolumes; diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxF.hlsl 
b/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxF.hlsl index 6163a043f09..bf318142059 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxF.hlsl +++ b/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxF.hlsl @@ -2374,23 +2374,6 @@ IndirectLighting EvaluateBSDF_ScreenspaceRefraction( LightLoopContext lightLo //----------------------------------------------------------------------------- // EvaluateBSDF_Env // ---------------------------------------------------------------------------- -float GetEnvMipLevel(EnvLightData lightData, float iblPerceptualRoughness) -{ - float iblMipLevel; - - // TODO: We need to match the PerceptualRoughnessToMipmapLevel formula for planar, so we don't do this test (which is specific to our current lightloop) - // Specific case for Texture2Ds, their convolution is a gaussian one and not a GGX one - So we use another roughness mip mapping. - if (IsEnvIndexTexture2D(lightData.envIndex)) - { - // Empirical remapping - iblMipLevel = PlanarPerceptualRoughnessToMipmapLevel(iblPerceptualRoughness, _ColorPyramidLodCount); - } - else - { - iblMipLevel = PerceptualRoughnessToMipmapLevel(iblPerceptualRoughness); - } - return iblMipLevel; -} float3 GetModifiedEnvSamplingDir(EnvLightData lightData, float3 N, float3 iblR, float iblPerceptualRoughness, float clampedNdotV) { @@ -2468,11 +2451,8 @@ IndirectLighting EvaluateBSDF_Env( LightLoopContext lightLoopContext, // bottom reflection is full. Lit doesn't have this problem too much in practice since only GetModifiedEnvSamplingDir // changes the direction vs the coat.) 
- float IBLMipLevel; - IBLMipLevel = GetEnvMipLevel(lightData, preLightData.iblPerceptualRoughness); - // Sample the pre-integrated environment lighting - float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, envSamplingDirForBottomLayer, IBLMipLevel, lightData.rangeCompressionFactorCompensation); + float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, envSamplingDirForBottomLayer, PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness), lightData.rangeCompressionFactorCompensation); weight *= preLD.w; // Used by planar reflection to discard pixel envLighting = GetSpecularIndirectDimmer() * preLightData.specularFGD * preLD.xyz; @@ -2526,11 +2506,8 @@ IndirectLighting EvaluateBSDF_Env( LightLoopContext lightLoopContext, // Single lobe approach // We computed an average mip level stored in preLightData.iblPerceptualRoughness that we use for all CT lobes - float IBLMipLevel; - IBLMipLevel = GetEnvMipLevel(lightData, preLightData.iblPerceptualRoughness); - // Sample the actual environment lighting - float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, envSamplingDirForBottomLayer, IBLMipLevel, lightData.rangeCompressionFactorCompensation); + float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, envSamplingDirForBottomLayer, PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness), lightData.rangeCompressionFactorCompensation); float3 envLighting; envLighting = preLightData.specularCTFGDSingleLobe * GetSpecularIndirectDimmer(); diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/BuiltinGIUtilities.hlsl b/com.unity.render-pipelines.high-definition/Runtime/Material/BuiltinGIUtilities.hlsl index f5ad182e0de..0fc5da7c729 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Material/BuiltinGIUtilities.hlsl +++ b/com.unity.render-pipelines.high-definition/Runtime/Material/BuiltinGIUtilities.hlsl @@ -25,10 +25,8 @@ float4x4 GetProbeVolumeWorldToObject() return 
ApplyCameraTranslationToInverseMatrix(unity_ProbeVolumeWorldToObject); } -float3 EvaluateLightmap(float3 positionRWS, float3 normalWS, float2 uvStaticLightmap, float2 uvDynamicLightmap) +void EvaluateLightmap(float3 positionRWS, float3 normalWS, float3 backNormalWS, float2 uvStaticLightmap, float2 uvDynamicLightmap, inout float3 bakeDiffuseLighting, inout float3 backBakeDiffuseLighting) { - float3 bakeDiffuseLighting = float3(0.0, 0.0, 0.0); - #ifdef UNITY_LIGHTMAP_FULL_HDR bool useRGBMLightmap = false; float4 decodeInstructions = float4(0.0, 0.0, 0.0, 0.0); // Never used but needed for the interface since it supports gamma lightmaps @@ -43,28 +41,30 @@ float3 EvaluateLightmap(float3 positionRWS, float3 normalWS, float2 uvStaticLigh #ifdef LIGHTMAP_ON #ifdef DIRLIGHTMAP_COMBINED - bakeDiffuseLighting += SampleDirectionalLightmap(TEXTURE2D_ARGS(unity_Lightmap, samplerunity_Lightmap), + SampleDirectionalLightmap(TEXTURE2D_ARGS(unity_Lightmap, samplerunity_Lightmap), TEXTURE2D_ARGS(unity_LightmapInd, samplerunity_Lightmap), - uvStaticLightmap, unity_LightmapST, normalWS, useRGBMLightmap, decodeInstructions); + uvStaticLightmap, unity_LightmapST, normalWS, backNormalWS, useRGBMLightmap, decodeInstructions, bakeDiffuseLighting, backBakeDiffuseLighting); #else - bakeDiffuseLighting += SampleSingleLightmap(TEXTURE2D_ARGS(unity_Lightmap, samplerunity_Lightmap), uvStaticLightmap, unity_LightmapST, useRGBMLightmap, decodeInstructions); + float3 illuminance = SampleSingleLightmap(TEXTURE2D_ARGS(unity_Lightmap, samplerunity_Lightmap), uvStaticLightmap, unity_LightmapST, useRGBMLightmap, decodeInstructions); + bakeDiffuseLighting += illuminance; + backBakeDiffuseLighting += illuminance; #endif #endif #ifdef DYNAMICLIGHTMAP_ON #ifdef DIRLIGHTMAP_COMBINED - bakeDiffuseLighting += SampleDirectionalLightmap(TEXTURE2D_ARGS(unity_DynamicLightmap, samplerunity_DynamicLightmap), + SampleDirectionalLightmap(TEXTURE2D_ARGS(unity_DynamicLightmap, samplerunity_DynamicLightmap), 
TEXTURE2D_ARGS(unity_DynamicDirectionality, samplerunity_DynamicLightmap), - uvDynamicLightmap, unity_DynamicLightmapST, normalWS, false, decodeInstructions); + uvDynamicLightmap, unity_DynamicLightmapST, normalWS, backNormalWS, false, decodeInstructions, bakeDiffuseLighting, backBakeDiffuseLighting); #else - bakeDiffuseLighting += SampleSingleLightmap(TEXTURE2D_ARGS(unity_DynamicLightmap, samplerunity_DynamicLightmap), uvDynamicLightmap, unity_DynamicLightmapST, false, decodeInstructions); + float3 illuminance = SampleSingleLightmap(TEXTURE2D_ARGS(unity_DynamicLightmap, samplerunity_DynamicLightmap), uvDynamicLightmap, unity_DynamicLightmapST, false, decodeInstructions); + bakeDiffuseLighting += illuminance; + backBakeDiffuseLighting += illuminance; #endif #endif - - return bakeDiffuseLighting; } -float3 EvaluateProbeVolumeLegacy(float3 positionRWS, float3 normalWS) +void EvaluateLightProbeBuiltin(float3 positionRWS, float3 normalWS, float3 backNormalWS, inout float3 bakeDiffuseLighting, inout float3 backBakeDiffuseLighting) { if (unity_ProbeVolumeParams.x == 0.0) { @@ -78,22 +78,24 @@ float3 EvaluateProbeVolumeLegacy(float3 positionRWS, float3 normalWS) SHCoefficients[5] = unity_SHBb; SHCoefficients[6] = unity_SHC; - return SampleSH9(SHCoefficients, normalWS); + bakeDiffuseLighting += SampleSH9(SHCoefficients, normalWS); + backBakeDiffuseLighting += SampleSH9(SHCoefficients, backNormalWS); } else { #if RAYTRACING_ENABLED if (unity_ProbeVolumeParams.w == 1.0) - return SampleProbeVolumeSH9(TEXTURE3D_ARGS(unity_ProbeVolumeSH, samplerunity_ProbeVolumeSH), positionRWS, normalWS, GetProbeVolumeWorldToObject(), - unity_ProbeVolumeParams.y, unity_ProbeVolumeParams.z, unity_ProbeVolumeMin.xyz, unity_ProbeVolumeSizeInv.xyz); + SampleProbeVolumeSH9(TEXTURE3D_ARGS(unity_ProbeVolumeSH, samplerunity_ProbeVolumeSH), positionRWS, normalWS, backNormalWS, GetProbeVolumeWorldToObject(), + unity_ProbeVolumeParams.y, unity_ProbeVolumeParams.z, unity_ProbeVolumeMin.xyz, 
unity_ProbeVolumeSizeInv.xyz, bakeDiffuseLighting, backBakeDiffuseLighting); else #endif - return SampleProbeVolumeSH4(TEXTURE3D_ARGS(unity_ProbeVolumeSH, samplerunity_ProbeVolumeSH), positionRWS, normalWS, GetProbeVolumeWorldToObject(), - unity_ProbeVolumeParams.y, unity_ProbeVolumeParams.z, unity_ProbeVolumeMin.xyz, unity_ProbeVolumeSizeInv.xyz); + SampleProbeVolumeSH4(TEXTURE3D_ARGS(unity_ProbeVolumeSH, samplerunity_ProbeVolumeSH), positionRWS, normalWS, backNormalWS, GetProbeVolumeWorldToObject(), + unity_ProbeVolumeParams.y, unity_ProbeVolumeParams.z, unity_ProbeVolumeMin.xyz, unity_ProbeVolumeSizeInv.xyz, bakeDiffuseLighting, backBakeDiffuseLighting); } } -float3 EvaluateProbeVolumes(inout float probeVolumeHierarchyWeight, PositionInputs posInputs, float3 normalWS, uint renderingLayers) +void EvaluateProbeVolumes( PositionInputs posInputs, float3 normalWS, float3 backNormalWS, uint renderingLayers, + inout float3 bakeDiffuseLighting, inout float3 backBakeDiffuseLighting, inout float probeVolumeHierarchyWeight) { // SHADEROPTIONS_PROBE_VOLUMES can be defined in ShaderConfig.cs.hlsl but set to 0 for disabled. #if SHADEROPTIONS_PROBE_VOLUMES_EVALUATION_MODE == PROBEVOLUMESEVALUATIONMODES_LIGHT_LOOP @@ -113,46 +115,49 @@ float3 EvaluateProbeVolumes(inout float probeVolumeHierarchyWeight, PositionInpu posInputs.tileCoord = tileCoord; #endif - float3 combinedGI = EvaluateProbeVolumesMaterialPass(probeVolumeHierarchyWeight, posInputs, normalWS, renderingLayers); - combinedGI += EvaluateProbeVolumeAmbientProbeFallback(probeVolumeHierarchyWeight, normalWS); - return combinedGI; + // TODO: In a future PR, we will update EvaluateProbeVolumes to support a single call that evaluates front and back facing normals. + // For now, we simply call Evaluate 2x, and pay the additional cost when backBakeDiffuseLighting is in use. 
+ float backProbeVolumeHierarchyWeight = probeVolumeHierarchyWeight; + bakeDiffuseLighting += EvaluateProbeVolumesMaterialPass(posInputs, normalWS, renderingLayers, probeVolumeHierarchyWeight); + backBakeDiffuseLighting += EvaluateProbeVolumesMaterialPass(posInputs, backNormalWS, renderingLayers, backProbeVolumeHierarchyWeight); - #else - // !(SHADERPASS == SHADERPASS_GBUFFER || SHADERPASS == SHADERPASS_FORWARD) - return float3(0, 0, 0); + EvaluateProbeVolumeAmbientProbeFallback(normalWS, backNormalWS, bakeDiffuseLighting, backBakeDiffuseLighting, probeVolumeHierarchyWeight); #endif - #else - return float3(0, 0, 0); #endif // #ifdef SHADERPASS - #else - // SHADEROPTIONS_PROBE_VOLUMES_EVALUATION_MODE == PROBEVOLUMESEVALUATIONMODES_DISABLED - return float3(0, 0, 0); #endif } -// In unity we can have a mix of fully baked lightmap (static lightmap) + enlighten realtime lightmap (dynamic lightmap) -// for each case we can have directional lightmap or not. -// Else we have lightprobe for dynamic/moving entity. 
Either SH9 per object lightprobe or SH4 per pixel per object volume probe -float3 SampleBakedGI(PositionInputs posInputs, float3 normalWS, uint renderingLayers, float2 uvStaticLightmap, float2 uvDynamicLightmap) +// bakeDiffuseLighting and backBakeDiffuseLighting are initialized inside this function, so there is no need to initialize them before calling it +void SampleBakedGI( + PositionInputs posInputs, + float3 normalWS, + float3 backNormalWS, + uint renderingLayers, + float2 uvStaticLightmap, + float2 uvDynamicLightmap, + out float3 bakeDiffuseLighting, + out float3 backBakeDiffuseLighting) { float3 positionRWS = posInputs.positionWS; #define SAMPLE_LIGHTMAP (defined(LIGHTMAP_ON) || defined(DYNAMICLIGHTMAP_ON)) #define SAMPLE_PROBEVOLUME (SHADEROPTIONS_PROBE_VOLUMES_EVALUATION_MODE != PROBEVOLUMESEVALUATIONMODES_DISABLED) \ && (!SAMPLE_LIGHTMAP || SHADEROPTIONS_PROBE_VOLUMES_ADDITIVE_BLENDING) -#define SAMPLE_PROBEVOLUME_LEGACY (!SAMPLE_LIGHTMAP && !SAMPLE_PROBEVOLUME) +#define SAMPLE_PROBEVOLUME_BUILTIN (!SAMPLE_LIGHTMAP && !SAMPLE_PROBEVOLUME) - float3 combinedGI = float3(0, 0, 0); + bakeDiffuseLighting = float3(0, 0, 0); + backBakeDiffuseLighting = float3(0, 0, 0); #if SAMPLE_LIGHTMAP - combinedGI += EvaluateLightmap(positionRWS, normalWS, uvStaticLightmap, uvDynamicLightmap); + EvaluateLightmap(positionRWS, normalWS, backNormalWS, uvStaticLightmap, uvDynamicLightmap, bakeDiffuseLighting, backBakeDiffuseLighting); #endif #if SHADEROPTIONS_PROBE_VOLUMES_EVALUATION_MODE == PROBEVOLUMESEVALUATIONMODES_LIGHT_LOOP // If probe volumes are evaluated in the lightloop, we place a sentinel value to detect that no lightmap data is present at the current pixel, // and we can safely overwrite baked data value with value from probe volume evaluation in light loop. 
#if !SAMPLE_LIGHTMAP - return UNINITIALIZED_GI; + bakeDiffuseLighting = UNINITIALIZED_GI; + return; #endif #else // PROBEVOLUMESEVALUATIONMODES_MATERIAL_PASS || PROBEVOLUMESEVALUATIONMODES_DISABLED @@ -162,23 +167,20 @@ float3 SampleBakedGI(PositionInputs posInputs, float3 normalWS, uint renderingLa #else float probeVolumeHierarchyWeight = 0.0f; #endif - combinedGI += EvaluateProbeVolumes(probeVolumeHierarchyWeight, posInputs, normalWS, renderingLayers); + EvaluateProbeVolumes(posInputs, normalWS, backNormalWS, renderingLayers, bakeDiffuseLighting, backBakeDiffuseLighting, probeVolumeHierarchyWeight); #endif -#if SAMPLE_PROBEVOLUME_LEGACY - combinedGI += EvaluateProbeVolumeLegacy(positionRWS, normalWS); +#if SAMPLE_PROBEVOLUME_BUILTIN + EvaluateLightProbeBuiltin(positionRWS, normalWS, backNormalWS, bakeDiffuseLighting, backBakeDiffuseLighting); #endif #endif -return combinedGI; - #undef SAMPLE_LIGHTMAP #undef SAMPLE_PROBEVOLUME -#undef SAMPLE_PROBEVOLUME_LEGACY +#undef SAMPLE_PROBEVOLUME_BUILTIN } -// Function signature of SampleBakedGI changed when probe volumes we added, as they require full PositionInputs. -// This legacy function signature is exposed in a shader graph node, so must continue to be supported. +// This function signature is exposed in a shader graph node and must be kept for backward compatibility float3 SampleBakedGI(float3 positionRWS, float3 normalWS, float2 uvStaticLightmap, float2 uvDynamicLightmap) { // Need PositionInputs for indexing probe volume clusters, but they are not availbile from the current SampleBakedGI() function signature. 
@@ -208,7 +210,12 @@ float3 SampleBakedGI(float3 positionRWS, float3 normalWS, float2 uvStaticLightma #endif // #ifdef SHADERPASS #endif - return SampleBakedGI(posInputs, normalWS, renderingLayers, uvStaticLightmap, uvDynamicLightmap); + const float3 backNormalWSUnused = 0.0; + float3 bakeDiffuseLighting; + float3 backBakeDiffuseLightingUnused; + SampleBakedGI(posInputs, normalWS, backNormalWSUnused, renderingLayers, uvStaticLightmap, uvDynamicLightmap, bakeDiffuseLighting, backBakeDiffuseLightingUnused); + + return bakeDiffuseLighting; } float4 SampleShadowMask(float3 positionRWS, float2 uvStaticLightmap) // normalWS not use for now diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/BuiltinUtilities.hlsl b/com.unity.render-pipelines.high-definition/Runtime/Material/BuiltinUtilities.hlsl index efd7a3a5096..7e380e98bac 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Material/BuiltinUtilities.hlsl +++ b/com.unity.render-pipelines.high-definition/Runtime/Material/BuiltinUtilities.hlsl @@ -40,6 +40,12 @@ void InitBuiltinData(PositionInputs posInput, float alpha, float3 normalWS, floa // Use uniform directly - The float need to be cast to uint (as unity don't support to set a uint as uniform) builtinData.renderingLayers = _EnableLightLayers ? 
asuint(unity_RenderingLayer.x) : DEFAULT_LIGHT_LAYERS; + + // Sample lightmap/probevolume/lightprobe/volume proxy + builtinData.bakeDiffuseLighting = 0.0; + builtinData.backBakeDiffuseLighting = 0.0; + SampleBakedGI( posInput, normalWS, backNormalWS, builtinData.renderingLayers, texCoord1.xy, texCoord2.xy, + builtinData.bakeDiffuseLighting, builtinData.backBakeDiffuseLighting); // We only want to read the screen space buffer that holds the indirect diffuse signal if this is not a transparent surface #if RAYTRACING_ENABLED && ((SHADERPASS == SHADERPASS_GBUFFER) || (SHADERPASS == SHADERPASS_FORWARD)) && !defined(_SURFACE_TYPE_TRANSPARENT) @@ -55,18 +61,7 @@ void InitBuiltinData(PositionInputs posInput, float alpha, float3 normalWS, floa builtinData.bakeDiffuseLighting *= GetInverseCurrentExposureMultiplier(); #endif } - else #endif - { - // Sample lightmap/probevolume/lightprobe/volume proxy - builtinData.bakeDiffuseLighting = SampleBakedGI(posInput, normalWS, builtinData.renderingLayers, texCoord1.xy, texCoord2.xy); - } - - // We also sample the back lighting in case we have transmission. If not use this will be optimize out by the compiler - // For now simply recall the function with inverted normal, the compiler should be able to optimize the lightmap case to not resample the directional lightmap - // however it may not optimize the lightprobe case due to the proxy volume relying on dynamic if (to verify), not a problem for SH9, but a problem for proxy volume. - // TODO: optimize more this code. 
- builtinData.backBakeDiffuseLighting = SampleBakedGI(posInput, backNormalWS, builtinData.renderingLayers, texCoord1.xy, texCoord2.xy); #ifdef SHADOWS_SHADOWMASK float4 shadowMask = SampleShadowMask(posInput.positionWS, texCoord1.xy); diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/Eye/Eye.hlsl b/com.unity.render-pipelines.high-definition/Runtime/Material/Eye/Eye.hlsl index cb7a88e04a3..0cf77d6a9c1 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Material/Eye/Eye.hlsl +++ b/com.unity.render-pipelines.high-definition/Runtime/Material/Eye/Eye.hlsl @@ -787,20 +787,7 @@ IndirectLighting EvaluateBSDF_Env( LightLoopContext lightLoopContext, float3 F = preLightData.specularFGD; - float iblMipLevel; - // TODO: We need to match the PerceptualRoughnessToMipmapLevel formula for planar, so we don't do this test (which is specific to our current lightloop) - // Specific case for Texture2Ds, their convolution is a gaussian one and not a GGX one - So we use another roughness mip mapping. 
- if (IsEnvIndexTexture2D(lightData.envIndex)) - { - // Empirical remapping - iblMipLevel = PlanarPerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness, _ColorPyramidLodCount); - } - else - { - iblMipLevel = PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness); - } - - float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, R, iblMipLevel, lightData.rangeCompressionFactorCompensation); + float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, R, PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness), lightData.rangeCompressionFactorCompensation); weight *= preLD.a; // Used by planar reflection to discard pixel envLighting = F * preLD.rgb; diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/Fabric/Fabric.hlsl b/com.unity.render-pipelines.high-definition/Runtime/Material/Fabric/Fabric.hlsl index 71810177be5..c0b4ee444a8 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Material/Fabric/Fabric.hlsl +++ b/com.unity.render-pipelines.high-definition/Runtime/Material/Fabric/Fabric.hlsl @@ -611,19 +611,6 @@ IndirectLighting EvaluateBSDF_Env( LightLoopContext lightLoopContext, // Note: using influenceShapeType and projectionShapeType instead of (lightData|proxyData).shapeType allow to make compiler optimization in case the type is know (like for sky) EvaluateLight_EnvIntersection(positionWS, bsdfData.normalWS, lightData, influenceShapeType, R, weight); - float iblMipLevel; - // TODO: We need to match the PerceptualRoughnessToMipmapLevel formula for planar, so we don't do this test (which is specific to our current lightloop) - // Specific case for Texture2Ds, their convolution is a gaussian one and not a GGX one - So we use another roughness mip mapping. 
- if (IsEnvIndexTexture2D(lightData.envIndex)) - { - // Empirical remapping - iblMipLevel = PositivePow(preLightData.iblPerceptualRoughness, 0.8) * uint(max(_ColorPyramidLodCount - 1, 0)); - } - else - { - iblMipLevel = PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness); - } - // If it is a silk, we need to use the GGX convolution (slice0), otherwise the charlie convolution (slice1) int sliceIndex = 0; if (HasFlag(bsdfData.materialFeatures, MATERIALFEATUREFLAGS_FABRIC_COTTON_WOOL)) @@ -631,7 +618,7 @@ IndirectLighting EvaluateBSDF_Env( LightLoopContext lightLoopContext, sliceIndex = _EnvSliceSize - 1; } - float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, R, iblMipLevel, lightData.rangeCompressionFactorCompensation, sliceIndex); + float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, R, PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness), lightData.rangeCompressionFactorCompensation, sliceIndex); weight *= preLD.a; // Used by planar reflection to discard pixel envLighting = preLightData.specularFGD * preLD.rgb; diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/Fabric/IBLFilterCharlie.cs b/com.unity.render-pipelines.high-definition/Runtime/Material/Fabric/IBLFilterCharlie.cs index 2c6a500e3f6..297c9743d49 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Material/Fabric/IBLFilterCharlie.cs +++ b/com.unity.render-pipelines.high-definition/Runtime/Material/Fabric/IBLFilterCharlie.cs @@ -85,5 +85,10 @@ override public void FilterCubemap(CommandBuffer cmd, Texture source, RenderText public override void FilterCubemapMIS(CommandBuffer cmd, Texture source, RenderTexture target, RenderTexture conditionalCdf, RenderTexture marginalRowCdf) { } + + override public void FilterPlanarTexture(CommandBuffer cmd, RenderTexture source, ref PlanarTextureFilteringParameters planarTextureFilteringParameters, RenderTexture target) + { + m_MipGenerator.RenderColorGaussianPyramid(cmd, new 
Vector2Int(source.width, source.height), source, target); + } } } diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/GGXConvolution/IBLFilterGGX.cs b/com.unity.render-pipelines.high-definition/Runtime/Material/GGXConvolution/IBLFilterGGX.cs index 5ef402930e4..54dbc40af2b 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Material/GGXConvolution/IBLFilterGGX.cs +++ b/com.unity.render-pipelines.high-definition/Runtime/Material/GGXConvolution/IBLFilterGGX.cs @@ -1,3 +1,5 @@ +using UnityEngine.Experimental.Rendering; + namespace UnityEngine.Rendering.HighDefinition { class IBLFilterGGX : IBLFilterBSDF @@ -13,6 +15,19 @@ class IBLFilterGGX : IBLFilterBSDF int m_ConditionalDensitiesKernel = -1; int m_MarginalRowDensitiesKernel = -1; + // Planar reflection filtering + ComputeShader m_PlanarReflectionFilteringCS; + int m_PlanarReflectionDepthConversionKernel = -1; + int m_PlanarReflectionDownScaleKernel = -1; + int m_PlanarReflectionFilteringKernel = -1; + RTHandle m_PlanarReflectionFilterTex0; + RTHandle m_PlanarReflectionFilterTex1; + RTHandle m_PlanarReflectionFilterDepthTex0; + RTHandle m_PlanarReflectionFilterDepthTex1; + const int k_DefaultPlanarResolution = 512; + // Intermediate variables + Vector4 currentScreenSize = new Vector4(1.0f, 1.0f, 1.0f, 1.0f); + public IBLFilterGGX(RenderPipelineResources renderPipelineResources, MipGenerator mipGenerator) { m_RenderPipelineResources = renderPipelineResources; @@ -58,6 +73,14 @@ public override void Initialize(CommandBuffer cmd) InitializeGgxIblSampleData(cmd); } + if (!m_PlanarReflectionFilteringCS) + { + m_PlanarReflectionFilteringCS = m_RenderPipelineResources.shaders.planarReflectionFilteringCS; + m_PlanarReflectionDepthConversionKernel = m_PlanarReflectionFilteringCS.FindKernel("DepthConversion"); + m_PlanarReflectionDownScaleKernel = m_PlanarReflectionFilteringCS.FindKernel("DownScale"); + m_PlanarReflectionFilteringKernel = 
m_PlanarReflectionFilteringCS.FindKernel("FilterPlanarReflection"); + } + for (int i = 0; i < 6; ++i) { var lookAt = Matrix4x4.LookAt(Vector3.zero, CoreUtils.lookAtList[i], CoreUtils.upVectorList[i]); @@ -75,6 +98,14 @@ public override void Cleanup() { CoreUtils.Destroy(m_convolveMaterial); CoreUtils.Destroy(m_GgxIblSampleData); + RTHandles.Release(m_PlanarReflectionFilterTex0); + m_PlanarReflectionFilterTex0 = null; + RTHandles.Release(m_PlanarReflectionFilterTex1); + m_PlanarReflectionFilterTex1 = null; + RTHandles.Release(m_PlanarReflectionFilterDepthTex0); + m_PlanarReflectionFilterDepthTex0 = null; + RTHandles.Release(m_PlanarReflectionFilterDepthTex1); + m_PlanarReflectionFilterDepthTex1 = null; } void FilterCubemapCommon(CommandBuffer cmd, @@ -153,9 +184,158 @@ override public void FilterCubemapMIS(CommandBuffer cmd, FilterCubemapCommon(cmd, source, target, m_faceWorldToViewMatrixMatrices); } + override public void FilterCubemap(CommandBuffer cmd, Texture source, RenderTexture target) { FilterCubemapCommon(cmd, source, target, m_faceWorldToViewMatrixMatrices); } + + void CheckIntermediateTexturesSize(int texWidth, int texHeight) + { + // If the first texture is not the right size + if (m_PlanarReflectionFilterTex0 == null || m_PlanarReflectionFilterTex0.rt.width < texWidth) + { + // We re-allocate them all + RTHandles.Release(m_PlanarReflectionFilterTex0); + RTHandles.Release(m_PlanarReflectionFilterTex1); + RTHandles.Release(m_PlanarReflectionFilterDepthTex0); + RTHandles.Release(m_PlanarReflectionFilterDepthTex1); + m_PlanarReflectionFilterTex0 = RTHandles.Alloc(texWidth, texHeight, TextureXR.slices, colorFormat: GraphicsFormat.R16G16B16A16_SFloat, enableRandomWrite: true, useDynamicScale: false, useMipMap: true, name: "PlanarReflectionTextureIntermediate0"); + m_PlanarReflectionFilterTex1 = RTHandles.Alloc(texWidth, texHeight, TextureXR.slices, colorFormat: GraphicsFormat.R16G16B16A16_SFloat, enableRandomWrite: true, useDynamicScale: false, useMipMap: 
false, name: "PlanarReflectionTextureIntermediate1"); + m_PlanarReflectionFilterDepthTex0 = RTHandles.Alloc(texWidth, texHeight, TextureXR.slices, colorFormat: GraphicsFormat.R32_SFloat, enableRandomWrite: true, useDynamicScale: false, useMipMap: true, name: "PlanarReflectionTextureIntermediateDepth0"); + m_PlanarReflectionFilterDepthTex1 = RTHandles.Alloc(texWidth, texHeight, TextureXR.slices, colorFormat: GraphicsFormat.R32_SFloat, enableRandomWrite: true, useDynamicScale: false, useMipMap: false, name: "PlanarReflectionTextureIntermediateDepth1"); + } + } + + void BuildColorAndDepthMipChain(CommandBuffer cmd, RenderTexture sourceColor, RenderTexture sourceDepth, ref PlanarTextureFilteringParameters planarTextureFilteringParameters) + { + int currentTexWidth = sourceColor.width; + int currentTexHeight = sourceColor.height; + + // The first color level can be copied straight away in the mip chain, nothing special to be done + cmd.CopyTexture(sourceColor, 0, 0, 0, 0, sourceColor.width, sourceColor.height, m_PlanarReflectionFilterTex0, 0, 0, 0, 0); + + // For depth it is a bit trickier, we want to convert the depth from oblique space to non-oblique space due to the poor interpolation properties of the oblique matrix + cmd.SetComputeVectorParam(m_PlanarReflectionFilteringCS, HDShaderIDs._CaptureCameraPositon, planarTextureFilteringParameters.captureCameraPosition); + cmd.SetComputeMatrixParam(m_PlanarReflectionFilteringCS, HDShaderIDs._CaptureCameraVP_NO, planarTextureFilteringParameters.captureCameraVP_NonOblique); + cmd.SetComputeMatrixParam(m_PlanarReflectionFilteringCS, HDShaderIDs._CaptureCameraIVP, planarTextureFilteringParameters.captureCameraIVP); + currentScreenSize.Set(currentTexWidth, currentTexHeight, 1.0f / currentTexWidth, 1.0f / currentTexHeight); + cmd.SetComputeVectorParam(m_PlanarReflectionFilteringCS, HDShaderIDs._CaptureCurrentScreenSize, currentScreenSize); + cmd.SetComputeFloatParam(m_PlanarReflectionFilteringCS, 
HDShaderIDs._CaptureCameraFarPlane, planarTextureFilteringParameters.captureFarPlane); + + // Input textures + cmd.SetComputeTextureParam(m_PlanarReflectionFilteringCS, m_PlanarReflectionDepthConversionKernel, HDShaderIDs._DepthTextureOblique, sourceDepth); + + // Output textures + cmd.SetComputeTextureParam(m_PlanarReflectionFilteringCS, m_PlanarReflectionDepthConversionKernel, HDShaderIDs._DepthTextureNonOblique, m_PlanarReflectionFilterDepthTex0); + + // Compute the dispatch parameters and evaluate the new mip + int tileSize = 8; + int numTilesXHR = (currentTexWidth + (tileSize - 1)) / tileSize; + int numTilesYHR = (currentTexHeight + (tileSize - 1)) / tileSize; + cmd.DispatchCompute(m_PlanarReflectionFilteringCS, m_PlanarReflectionDepthConversionKernel, numTilesXHR, numTilesYHR, 1); + + // Move to the next mip and build the chain + int currentMipSource = 0; + int texWidthHalf = sourceColor.width >> 1; + int texHeightHalf = sourceColor.height >> 1; + + // Until we have a 2x2 texture, continue + while (texWidthHalf >= 2 && texHeightHalf >= 2) + { + // Constant inputs + cmd.SetComputeIntParam(m_PlanarReflectionFilteringCS, HDShaderIDs._SourceMipIndex, currentMipSource); + + // Input textures + cmd.SetComputeTextureParam(m_PlanarReflectionFilteringCS, m_PlanarReflectionDownScaleKernel, HDShaderIDs._ReflectionColorMipChain, m_PlanarReflectionFilterTex0); + cmd.SetComputeTextureParam(m_PlanarReflectionFilteringCS, m_PlanarReflectionDownScaleKernel, HDShaderIDs._HalfResReflectionBuffer, m_PlanarReflectionFilterTex1); + + // Output textures + cmd.SetComputeTextureParam(m_PlanarReflectionFilteringCS, m_PlanarReflectionDownScaleKernel, HDShaderIDs._DepthTextureMipChain, m_PlanarReflectionFilterDepthTex0); + cmd.SetComputeTextureParam(m_PlanarReflectionFilteringCS, m_PlanarReflectionDownScaleKernel, HDShaderIDs._HalfResDepthBuffer, m_PlanarReflectionFilterDepthTex1); + currentScreenSize.Set(currentTexWidth, currentTexHeight, 1.0f / currentTexWidth, 1.0f / 
currentTexHeight); + cmd.SetComputeVectorParam(m_PlanarReflectionFilteringCS, HDShaderIDs._CaptureCurrentScreenSize, currentScreenSize); + + // Compute the dispatch parameters and evaluate the new mip + int numTilesXHRHalf = (texWidthHalf + (tileSize - 1)) / tileSize; + int numTilesYHRHalf = (texHeightHalf + (tileSize - 1)) / tileSize; + cmd.DispatchCompute(m_PlanarReflectionFilteringCS, m_PlanarReflectionDownScaleKernel, numTilesXHRHalf, numTilesYHRHalf, 1); + + // Given that mip to mip in compute doesn't work, we have to do this :( + cmd.CopyTexture(m_PlanarReflectionFilterTex1, 0, 0, 0, 0, texWidthHalf, texHeightHalf, m_PlanarReflectionFilterTex0, 0, currentMipSource + 1, 0, 0); + cmd.CopyTexture(m_PlanarReflectionFilterDepthTex1, 0, 0, 0, 0, texWidthHalf, texHeightHalf, m_PlanarReflectionFilterDepthTex0, 0, currentMipSource + 1, 0, 0); + + // Update the parameters for the next mip + currentTexWidth = currentTexWidth >> 1; + currentTexHeight = currentTexHeight >> 1; + texWidthHalf = texWidthHalf >> 1; + texHeightHalf = texHeightHalf >> 1; + currentMipSource++; + } + } + + override public void FilterPlanarTexture(CommandBuffer cmd, RenderTexture source, ref PlanarTextureFilteringParameters planarTextureFilteringParameters, RenderTexture target) + { + // First we need to make sure that our intermediate textures are big enough to do our process (these textures are squares) + CheckIntermediateTexturesSize(source.width, source.height); + + // Then we need to build a mip chain (one for color, one for depth) that we will sample later on in the process + BuildColorAndDepthMipChain(cmd, source, planarTextureFilteringParameters.captureCameraDepthBuffer, ref planarTextureFilteringParameters); + + // Init the mip descent + int texWidth = source.width; + int texHeight = source.height; + + // First we need to copy the Mip0 (that matches perfectly smooth surface), no processing to be done on it + cmd.CopyTexture(m_PlanarReflectionFilterTex0, 0, 0, 0, 0, texWidth, 
texHeight, target, 0, 0, 0, 0); + + // Initialize the parameters for the descent + int mipIndex = 1; + int tileSize = 8; + // Based on the initial texture resolution, the number of available mips for us to read from is variable and is based on the maximal texture width + int numMipsChain = (int)(Mathf.Log((float)texWidth, 2.0f) - 1.0f); + float rtScaleFactor = texWidth / (float)m_PlanarReflectionFilterTex0.rt.width; + texWidth = texWidth >> 1; + texHeight = texHeight >> 1; + + // Loop until we have the right amount of mips + while (mipIndex < (int)EnvConstants.ConvolutionMipCount) + { + // Evaluate the dispatch parameters + int numTilesXHR = (texWidth + (tileSize - 1)) / tileSize; + int numTilesYHR = (texHeight + (tileSize - 1)) / tileSize; + + // Set input textures + cmd.SetComputeTextureParam(m_PlanarReflectionFilteringCS, m_PlanarReflectionFilteringKernel, HDShaderIDs._DepthTextureMipChain, m_PlanarReflectionFilterDepthTex0); + cmd.SetComputeTextureParam(m_PlanarReflectionFilteringCS, m_PlanarReflectionFilteringKernel, HDShaderIDs._ReflectionColorMipChain, m_PlanarReflectionFilterTex0); + + // Input constant parameters required + cmd.SetComputeVectorParam(m_PlanarReflectionFilteringCS, HDShaderIDs._CaptureBaseScreenSize, planarTextureFilteringParameters.captureCameraScreenSize); + currentScreenSize.Set(texWidth, texHeight, 1.0f / texWidth, 1.0f / texHeight); + cmd.SetComputeVectorParam(m_PlanarReflectionFilteringCS, HDShaderIDs._CaptureCurrentScreenSize, currentScreenSize); + cmd.SetComputeIntParam(m_PlanarReflectionFilteringCS, HDShaderIDs._SourceMipIndex, mipIndex); + cmd.SetComputeIntParam(m_PlanarReflectionFilteringCS, HDShaderIDs._MaxMipLevels, numMipsChain); + cmd.SetComputeFloatParam(m_PlanarReflectionFilteringCS, HDShaderIDs._RTScaleFactor, rtScaleFactor); + cmd.SetComputeVectorParam(m_PlanarReflectionFilteringCS, HDShaderIDs._ReflectionPlaneNormal, planarTextureFilteringParameters.probeNormal); + cmd.SetComputeVectorParam(m_PlanarReflectionFilteringCS, 
HDShaderIDs._ReflectionPlanePosition, planarTextureFilteringParameters.probePosition); + cmd.SetComputeVectorParam(m_PlanarReflectionFilteringCS, HDShaderIDs._CaptureCameraPositon, planarTextureFilteringParameters.captureCameraPosition); + cmd.SetComputeMatrixParam(m_PlanarReflectionFilteringCS, HDShaderIDs._CaptureCameraIVP_NO, planarTextureFilteringParameters.captureCameraIVP_NonOblique); + cmd.SetComputeFloatParam(m_PlanarReflectionFilteringCS, HDShaderIDs._CaptureCameraFOV, planarTextureFilteringParameters.captureFOV * Mathf.PI / 180.0f); + + // Set output textures + cmd.SetComputeTextureParam(m_PlanarReflectionFilteringCS, m_PlanarReflectionFilteringKernel, HDShaderIDs._FilteredPlanarReflectionBuffer, m_PlanarReflectionFilterTex1); + + // Evaluate the next convolution + cmd.DispatchCompute(m_PlanarReflectionFilteringCS, m_PlanarReflectionFilteringKernel, numTilesXHR, numTilesYHR, 1); + + // Copy the convoluted texture into the next mip and move on + cmd.CopyTexture(m_PlanarReflectionFilterTex1, 0, 0, 0, 0, texWidth, texHeight, target, 0, mipIndex, 0, 0); + + // Move to the next mip + texWidth = texWidth >> 1; + texHeight = texHeight >> 1; + mipIndex++; + } + } } } diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/Hair/Hair.hlsl b/com.unity.render-pipelines.high-definition/Runtime/Material/Hair/Hair.hlsl index a8cfd8479c9..ac0e17be16b 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Material/Hair/Hair.hlsl +++ b/com.unity.render-pipelines.high-definition/Runtime/Material/Hair/Hair.hlsl @@ -555,20 +555,7 @@ IndirectLighting EvaluateBSDF_Env( LightLoopContext lightLoopContext, // Note: using influenceShapeType and projectionShapeType instead of (lightData|proxyData).shapeType allow to make compiler optimization in case the type is know (like for sky) EvaluateLight_EnvIntersection(positionWS, bsdfData.normalWS, lightData, influenceShapeType, R, weight); - float iblMipLevel; - // TODO: We need to match the 
PerceptualRoughnessToMipmapLevel formula for planar, so we don't do this test (which is specific to our current lightloop) - // Specific case for Texture2Ds, their convolution is a gaussian one and not a GGX one - So we use another roughness mip mapping. - if (IsEnvIndexTexture2D(lightData.envIndex)) - { - // Empirical remapping - iblMipLevel = PositivePow(preLightData.iblPerceptualRoughness, 0.8) * uint(max(_ColorPyramidLodCount - 1, 0)); - } - else - { - iblMipLevel = PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness); - } - - float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, R, iblMipLevel, lightData.rangeCompressionFactorCompensation); + float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, R, PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness), lightData.rangeCompressionFactorCompensation); weight *= preLD.a; // Used by planar reflection to discard pixel envLighting = preLightData.specularFGD * preLD.rgb; diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/IBLFilterBSDF.cs b/com.unity.render-pipelines.high-definition/Runtime/Material/IBLFilterBSDF.cs index 990459e6eba..7198cb15810 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Material/IBLFilterBSDF.cs +++ b/com.unity.render-pipelines.high-definition/Runtime/Material/IBLFilterBSDF.cs @@ -19,11 +19,33 @@ abstract class IBLFilterBSDF // Filters MIP map levels (other than 0) with GGX using BRDF importance sampling. 
abstract public void FilterCubemap(CommandBuffer cmd, Texture source, RenderTexture target); - public void FilterPlanarTexture(CommandBuffer cmd, RenderTexture source, RenderTexture target) + internal struct PlanarTextureFilteringParameters { - m_MipGenerator.RenderColorGaussianPyramid(cmd, new Vector2Int(source.width, source.height), source, target); - } + // Depth buffer (oblique) that was produced + public RenderTexture captureCameraDepthBuffer; + // Inverse view projection matrix (oblique) + public Matrix4x4 captureCameraIVP; + // View projection matrix (non oblique) + public Matrix4x4 captureCameraVP_NonOblique; + // Inverse view projection matrix (non oblique) + public Matrix4x4 captureCameraIVP_NonOblique; + // Position of the capture camera + public Vector3 captureCameraPosition; + // Resolution of the capture camera + public Vector4 captureCameraScreenSize; + // Position of the probe + public Vector3 probePosition; + // Normal of the reflection probe + public Vector3 probeNormal; + // FOV of the capture camera + public float captureFOV; + // Near clipping plane of the capture camera + public float captureNearPlane; + // Far clipping plane of the capture camera + public float captureFarPlane; + }; + abstract public void FilterPlanarTexture(CommandBuffer cmd, RenderTexture source, ref PlanarTextureFilteringParameters planarTextureFilteringParameters, RenderTexture target); public abstract void FilterCubemapMIS(CommandBuffer cmd, Texture source, RenderTexture target, RenderTexture conditionalCdf, RenderTexture marginalRowCdf); } } diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/LayeredLit/LayeredLitData.hlsl b/com.unity.render-pipelines.high-definition/Runtime/Material/LayeredLit/LayeredLitData.hlsl index 501d6c8f3a4..9a60fc3c5f7 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Material/LayeredLit/LayeredLitData.hlsl +++ b/com.unity.render-pipelines.high-definition/Runtime/Material/LayeredLit/LayeredLitData.hlsl @@ 
-658,7 +658,8 @@ void GetSurfaceAndBuiltinData(FragInputs input, float3 V, inout PositionInputs p input.texCoord1 = ((_UVMappingMask0.y + _UVMappingMask1.y + _UVMappingMask2.y + _UVMappingMask3.y + _UVDetailsMappingMask0.y + _UVDetailsMappingMask1.y + _UVDetailsMappingMask2.y + _UVDetailsMappingMask3.y) > 0) ? input.texCoord1 : 0; #endif -#ifndef SHADER_STAGE_RAY_TRACING +// Don't dither if displaced tessellation (we're fading out the displacement instead to match the next LOD) +#if !defined(SHADER_STAGE_RAY_TRACING) && !defined(_TESSELLATION_DISPLACEMENT) #ifdef LOD_FADE_CROSSFADE // enable dithering LOD transition if user select CrossFade transition in LOD group LODDitheringTransition(ComputeFadeMaskSeed(V, posInput.positionSS), unity_LODFade.x); #endif diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/LayeredLit/LayeredLitDataDisplacement.hlsl b/com.unity.render-pipelines.high-definition/Runtime/Material/LayeredLit/LayeredLitDataDisplacement.hlsl index b408f25a983..2be0e26e622 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Material/LayeredLit/LayeredLitDataDisplacement.hlsl +++ b/com.unity.render-pipelines.high-definition/Runtime/Material/LayeredLit/LayeredLitDataDisplacement.hlsl @@ -307,6 +307,15 @@ float3 ComputePerVertexDisplacement(LayerTexCoord layerTexCoord, float4 vertexCo float height1 = (SAMPLE_UVMAPPING_TEXTURE2D_LOD(_HeightMap1, SAMPLER_HEIGHTMAP_IDX, layerTexCoord.base1, lod).r - _HeightCenter1) * _HeightAmplitude1; float height2 = (SAMPLE_UVMAPPING_TEXTURE2D_LOD(_HeightMap2, SAMPLER_HEIGHTMAP_IDX, layerTexCoord.base2, lod).r - _HeightCenter2) * _HeightAmplitude2; float height3 = (SAMPLE_UVMAPPING_TEXTURE2D_LOD(_HeightMap3, SAMPLER_HEIGHTMAP_IDX, layerTexCoord.base3, lod).r - _HeightCenter3) * _HeightAmplitude3; + + // Scale by lod factor to ensure tessellated displacement influence is fully removed by the time we transition LODs +#if defined(LOD_FADE_CROSSFADE) && defined(_TESSELLATION_DISPLACEMENT) + height0 
*= unity_LODFade.x; + height1 *= unity_LODFade.x; + height2 *= unity_LODFade.x; + height3 *= unity_LODFade.x; +#endif + // Height is affected by tiling property and by object scale (depends on option). // Apply scaling from tiling properties (TexWorldScale and tiling from BaseColor) ApplyDisplacementTileScale(height0, height1, height2, height3); diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/Lit/Lit.hlsl b/com.unity.render-pipelines.high-definition/Runtime/Material/Lit/Lit.hlsl index 629a99a22b7..89b05cde86c 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Material/Lit/Lit.hlsl +++ b/com.unity.render-pipelines.high-definition/Runtime/Material/Lit/Lit.hlsl @@ -1925,20 +1925,7 @@ IndirectLighting EvaluateBSDF_Env( LightLoopContext lightLoopContext, float3 F = preLightData.specularFGD; - float iblMipLevel; - // TODO: We need to match the PerceptualRoughnessToMipmapLevel formula for planar, so we don't do this test (which is specific to our current lightloop) - // Specific case for Texture2Ds, their convolution is a gaussian one and not a GGX one - So we use another roughness mip mapping. 
- if (IsEnvIndexTexture2D(lightData.envIndex)) - { - // Empirical remapping - iblMipLevel = PlanarPerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness, _ColorPyramidLodCount); - } - else - { - iblMipLevel = PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness); - } - - float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, R, iblMipLevel, lightData.rangeCompressionFactorCompensation); + float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, R, PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness), lightData.rangeCompressionFactorCompensation); weight *= preLD.a; // Used by planar reflection to discard pixel if (GPUImageBasedLightingType == GPUIMAGEBASEDLIGHTINGTYPE_REFLECTION) diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/Lit/LitData.hlsl b/com.unity.render-pipelines.high-definition/Runtime/Material/Lit/LitData.hlsl index d7458ebba82..5d1d048151e 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Material/Lit/LitData.hlsl +++ b/com.unity.render-pipelines.high-definition/Runtime/Material/Lit/LitData.hlsl @@ -186,7 +186,8 @@ void GetSurfaceAndBuiltinData(FragInputs input, float3 V, inout PositionInputs p input.texCoord1 = (_UVMappingMask.y + _UVDetailsMappingMask.y) > 0 ? 
input.texCoord1 : 0; #endif -#if !defined(SHADER_STAGE_RAY_TRACING) +// Don't dither if displaced tessellation (we're fading out the displacement instead to match the next LOD) +#if !defined(SHADER_STAGE_RAY_TRACING) && !defined(_TESSELLATION_DISPLACEMENT) #ifdef LOD_FADE_CROSSFADE // enable dithering LOD transition if user select CrossFade transition in LOD group LODDitheringTransition(ComputeFadeMaskSeed(V, posInput.positionSS), unity_LODFade.x); #endif diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/Lit/LitDataDisplacement.hlsl b/com.unity.render-pipelines.high-definition/Runtime/Material/Lit/LitDataDisplacement.hlsl index d5203b67196..b2ccd968abf 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Material/Lit/LitDataDisplacement.hlsl +++ b/com.unity.render-pipelines.high-definition/Runtime/Material/Lit/LitDataDisplacement.hlsl @@ -202,6 +202,12 @@ float3 ComputePerVertexDisplacement(LayerTexCoord layerTexCoord, float4 vertexCo { #ifdef _HEIGHTMAP float height = (SAMPLE_UVMAPPING_TEXTURE2D_LOD(_HeightMap, sampler_HeightMap, layerTexCoord.base, lod).r - _HeightCenter) * _HeightAmplitude; + + // Scale by lod factor to ensure tessellated displacement influence is fully removed by the time we transition LODs +#if defined(LOD_FADE_CROSSFADE) && defined(_TESSELLATION_DISPLACEMENT) + height *= unity_LODFade.x; +#endif + #else float height = 0.0; #endif diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/Lit/SimpleLit.hlsl b/com.unity.render-pipelines.high-definition/Runtime/Material/Lit/SimpleLit.hlsl index 555893a975e..9144a53b969 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Material/Lit/SimpleLit.hlsl +++ b/com.unity.render-pipelines.high-definition/Runtime/Material/Lit/SimpleLit.hlsl @@ -417,20 +417,7 @@ IndirectLighting EvaluateBSDF_Env( LightLoopContext lightLoopContext, EvaluateLight_EnvIntersection(posInput.positionWS, bsdfData.normalWS, lightData, influenceShapeType, R, weight); - 
float iblMipLevel; - // TODO: We need to match the PerceptualRoughnessToMipmapLevel formula for planar, so we don't do this test (which is specific to our current lightloop) - // Specific case for Texture2Ds, their convolution is a gaussian one and not a GGX one - So we use another roughness mip mapping. - if (IsEnvIndexTexture2D(lightData.envIndex)) - { - // Empirical remapping - iblMipLevel = PlanarPerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness, _ColorPyramidLodCount); - } - else - { - iblMipLevel = PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness); - } - - float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, R, iblMipLevel, lightData.rangeCompressionFactorCompensation); + float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, R, PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness), lightData.rangeCompressionFactorCompensation); weight *= preLD.a; // Used by planar reflection to discard pixel envLighting = F_Schlick(bsdfData.fresnel0, dot(bsdfData.normalWS, V)) * preLD.rgb; diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/StackLit/StackLit.hlsl b/com.unity.render-pipelines.high-definition/Runtime/Material/StackLit/StackLit.hlsl index 6421ead5411..07e07bb258d 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Material/StackLit/StackLit.hlsl +++ b/com.unity.render-pipelines.high-definition/Runtime/Material/StackLit/StackLit.hlsl @@ -4377,20 +4377,8 @@ IndirectLighting EvaluateBSDF_Env( LightLoopContext lightLoopContext, EvaluateLight_EnvIntersection(positionWS, normal, lightData, influenceShapeType, R[i], tempWeight[i]); - float iblMipLevel; - // TODO: We need to match the PerceptualRoughnessToMipmapLevel formula for planar, so we don't do this test (which is specific to our current lightloop) - // Specific case for Texture2Ds, their convolution is a gaussian one and not a GGX one - So we use another roughness mip mapping. 
- if (IsEnvIndexTexture2D(lightData.envIndex)) - { - // Empirical remapping - iblMipLevel = PlanarPerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness[i], _ColorPyramidLodCount); - } - else - { - iblMipLevel = PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness[i]); - } + float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, R[i], PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness[i]), lightData.rangeCompressionFactorCompensation); - float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, R[i], iblMipLevel, lightData.rangeCompressionFactorCompensation); // Used by planar reflection to discard pixel: tempWeight[i] *= preLD.a; diff --git a/com.unity.render-pipelines.high-definition/Runtime/PostProcessing/PostProcessSystem.cs b/com.unity.render-pipelines.high-definition/Runtime/PostProcessing/PostProcessSystem.cs index 624c9db68cd..b3e5e94eb36 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/PostProcessing/PostProcessSystem.cs +++ b/com.unity.render-pipelines.high-definition/Runtime/PostProcessing/PostProcessSystem.cs @@ -2725,7 +2725,11 @@ static void DoColorGrading( in ColorGradingParameters parameters, // Generate the lut // See the note about Metal & Intel in LutBuilder3D.compute - builderCS.GetKernelThreadGroupSizes(builderKernel, out uint threadX, out uint threadY, out uint threadZ); + // GetKernelThreadGroupSizes is currently broken on some binary versions. 
+ //builderCS.GetKernelThreadGroupSizes(builderKernel, out uint threadX, out uint threadY, out uint threadZ); + uint threadX = 4; + uint threadY = 4; + uint threadZ = 4; cmd.DispatchCompute(builderCS, builderKernel, (int)((parameters.lutSize + threadX - 1u) / threadX), (int)((parameters.lutSize + threadY - 1u) / threadY), diff --git a/com.unity.render-pipelines.high-definition/Runtime/PostProcessing/Shaders/LutBuilder3D.compute b/com.unity.render-pipelines.high-definition/Runtime/PostProcessing/Shaders/LutBuilder3D.compute index de68f1e2701..b882684f898 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/PostProcessing/Shaders/LutBuilder3D.compute +++ b/com.unity.render-pipelines.high-definition/Runtime/PostProcessing/Shaders/LutBuilder3D.compute @@ -231,6 +231,7 @@ float3 Tonemap(float3 colorLinear) // allow anything higher than 256 threads. We'll use 4x4x4 then. // Ultimately it would nice to expose `maxTotalThreadsPerThreadgroup` for Metal... // Source: https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf +// It is important to keep this in sync with the group-size declared in PostProcessSystem.cs [numthreads(4,4,4)] void KBuild(uint3 dispatchThreadId : SV_DispatchThreadID) { diff --git a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/HDRenderPipeline.Prepass.cs b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/HDRenderPipeline.Prepass.cs index 05bc44ace30..240e0087e09 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/HDRenderPipeline.Prepass.cs +++ b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/HDRenderPipeline.Prepass.cs @@ -331,12 +331,18 @@ void SetupGBufferTargets(RenderGraph renderGraph, HDCamera hdCamera, GBufferPass passData.depthBuffer = builder.UseDepthBuffer(prepassOutput.depthBuffer, DepthAccess.ReadWrite); passData.gbufferRT[0] = builder.UseColorBuffer(sssBuffer, 0); passData.gbufferRT[1] = builder.UseColorBuffer(prepassOutput.normalBuffer, 
1); + + FastMemoryDesc gbufferFastMemDesc; + gbufferFastMemDesc.inFastMemory = true; + gbufferFastMemDesc.residencyFraction = 1.0f; + gbufferFastMemDesc.flags = FastMemoryFlags.SpillTop; + // If we are in deferred mode and the SSR is enabled, we need to make sure that the second gbuffer is cleared given that we are using that information for clear coat selection bool clearGBuffer2 = clearGBuffer || hdCamera.IsSSREnabled(); passData.gbufferRT[2] = builder.UseColorBuffer(renderGraph.CreateTexture( - new TextureDesc(Vector2.one, true, true) { colorFormat = GraphicsFormat.R8G8B8A8_UNorm, clearBuffer = clearGBuffer2, clearColor = Color.clear, name = "GBuffer2" }, HDShaderIDs._GBufferTexture[2]), 2); + new TextureDesc(Vector2.one, true, true) { colorFormat = GraphicsFormat.R8G8B8A8_UNorm, clearBuffer = clearGBuffer2, clearColor = Color.clear, name = "GBuffer2", fastMemoryDesc = gbufferFastMemDesc }, HDShaderIDs._GBufferTexture[2]), 2); passData.gbufferRT[3] = builder.UseColorBuffer(renderGraph.CreateTexture( - new TextureDesc(Vector2.one, true, true) { colorFormat = Builtin.GetLightingBufferFormat(), clearBuffer = clearGBuffer, clearColor = Color.clear, name = "GBuffer3" }, HDShaderIDs._GBufferTexture[3]), 3); + new TextureDesc(Vector2.one, true, true) { colorFormat = Builtin.GetLightingBufferFormat(), clearBuffer = clearGBuffer, clearColor = Color.clear, name = "GBuffer3", fastMemoryDesc = gbufferFastMemDesc }, HDShaderIDs._GBufferTexture[3]), 3); prepassOutput.gbuffer.lightLayersTextureIndex = -1; int currentIndex = 4; diff --git a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/HDRenderPipeline.RenderGraph.cs b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/HDRenderPipeline.RenderGraph.cs index 2545d5366b8..f1f3a6f4bfa 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/HDRenderPipeline.RenderGraph.cs +++ b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/HDRenderPipeline.RenderGraph.cs 
@@ -982,6 +982,11 @@ void RenderDistortion( RenderGraph renderGraph, TextureHandle CreateColorBuffer(RenderGraph renderGraph, HDCamera hdCamera, bool msaa) { + FastMemoryDesc colorFastMemDesc; + colorFastMemDesc.inFastMemory = true; + colorFastMemDesc.residencyFraction = 1.0f; + colorFastMemDesc.flags = FastMemoryFlags.SpillTop; + return renderGraph.CreateTexture( new TextureDesc(Vector2.one, true, true) { @@ -991,7 +996,8 @@ TextureHandle CreateColorBuffer(RenderGraph renderGraph, HDCamera hdCamera, bool enableMSAA = msaa, clearBuffer = NeedClearColorBuffer(hdCamera), clearColor = GetColorBufferClearColor(hdCamera), - name = msaa ? "CameraColorMSAA" : "CameraColor" + name = msaa ? "CameraColorMSAA" : "CameraColor", + fastMemoryDesc = colorFastMemDesc }); } diff --git a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/HDRenderPipeline.cs b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/HDRenderPipeline.cs index a4f08a4cc44..65fb9f0c4ef 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/HDRenderPipeline.cs +++ b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/HDRenderPipeline.cs @@ -647,13 +647,35 @@ void ValidateResources() #endif - /// - /// Resets the reference size of the internal RTHandle System. - /// This allows users to reduce the memory footprint of render textures after doing a super sampled rendering pass for example. - /// - /// New width of the internal RTHandle System. - /// New height of the internal RTHandle System. 
- public void ResetRTHandleReferenceSize(int width, int height) + internal void SwitchRenderTargetsToFastMem(CommandBuffer cmd, HDCamera camera) + { + // Color and normal buffer will always be in fast memory + m_CameraColorBuffer.SwitchToFastMemory(cmd, residencyFraction: 1.0f, FastMemoryFlags.SpillTop, copyContents: false); + m_SharedRTManager.GetNormalBuffer().SwitchToFastMemory(cmd, residencyFraction: 1.0f, FastMemoryFlags.SpillTop, copyContents: false); + // Following might need to change depending on context... TODO: Do a deep investigation of projects we have to check what is the most beneficial. + RenderPipelineSettings settings = m_Asset.currentPlatformRenderPipelineSettings; + + if (settings.supportedLitShaderMode != RenderPipelineSettings.SupportedLitShaderMode.ForwardOnly) + { + // Switch gbuffers to fast memory when we are in deferred + var buffers = m_GbufferManager.GetBuffers(); + foreach (var buffer in buffers) + { + buffer.SwitchToFastMemory(cmd, residencyFraction: 1.0f, FastMemoryFlags.SpillTop, copyContents: false); + } + } + + // Trying to fit the depth pyramid + m_SharedRTManager.GetDepthTexture().SwitchToFastMemory(cmd, residencyFraction: 1.0f, FastMemoryFlags.SpillTop, false); + } + + /// + /// Resets the reference size of the internal RTHandle System. + /// This allows users to reduce the memory footprint of render textures after doing a super sampled rendering pass for example. + /// + /// New width of the internal RTHandle System. + /// New height of the internal RTHandle System. 
+ public void ResetRTHandleReferenceSize(int width, int height) { RTHandles.ResetReferenceSize(width, height); HDCamera.ResetAllHistoryRTHandleSystems(width, height); @@ -1403,6 +1425,7 @@ public struct Target public RenderTargetIdentifier id; public CubemapFace face; public RenderTexture copyToTarget; + public RenderTexture targetDepth; } public HDCamera hdCamera; public bool clearCameraSettings; @@ -1806,6 +1829,10 @@ ref List renderDatas { visibleProbe.SetTexture(ProbeSettings.Mode.Realtime, HDRenderUtilities.CreatePlanarProbeRenderTarget(desiredPlanarProbeSize)); } + if (visibleProbe.realtimeDepthTexture == null || visibleProbe.realtimeDepthTexture.width != desiredPlanarProbeSize) + { + visibleProbe.SetDepthTexture(ProbeSettings.Mode.Realtime, HDRenderUtilities.CreatePlanarProbeDepthRenderTarget(desiredPlanarProbeSize)); + } // Set the viewer's camera as the default camera anchor for (var i = 0; i < cameraSettings.Count; ++i) { @@ -1936,6 +1963,7 @@ ref _cullingResults request.target = new RenderRequest.Target { id = visibleProbe.realtimeTexture, + targetDepth = visibleProbe.realtimeDepthTexture, face = CubemapFace.Unknown }; } @@ -2176,6 +2204,12 @@ AOVRequestData aovRequest hdCamera.BeginRender(cmd); m_CurrentHDCamera = hdCamera; + // Render graph deals with Fast memory support in an automatic way. 
+ if(!m_EnableRenderGraph) + { + SwitchRenderTargetsToFastMem(cmd, hdCamera); + } + if (m_RayTracingSupported) { // This call need to happen once per camera @@ -2818,11 +2852,15 @@ void Callback(CommandBuffer c, HDCamera cam) using (new ProfilingScope(cmd, ProfilingSampler.Get(HDProfileId.BlitToFinalRTDevBuildOnly))) { - for (int viewIndex = 0; viewIndex < hdCamera.viewCount; ++viewIndex) - { - var finalBlitParams = PrepareFinalBlitParameters(hdCamera, viewIndex); - BlitFinalCameraTexture(finalBlitParams, m_BlitPropertyBlock, m_IntermediateAfterPostProcessBuffer, target.id, cmd); - } + for (int viewIndex = 0; viewIndex < hdCamera.viewCount; ++viewIndex) + { + var finalBlitParams = PrepareFinalBlitParameters(hdCamera, viewIndex); + BlitFinalCameraTexture(finalBlitParams, m_BlitPropertyBlock, m_IntermediateAfterPostProcessBuffer, target.id, cmd); + + // If a depth target is specified, fill it + if (target.targetDepth != null) + BlitFinalCameraTexture(finalBlitParams, m_BlitPropertyBlock, m_SharedRTManager.GetDepthTexture(), target.targetDepth, cmd); + } } aovRequest.PushCameraTexture(cmd, AOVBuffers.Output, hdCamera, m_IntermediateAfterPostProcessBuffer, aovBuffers); diff --git a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/HDStringConstants.cs b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/HDStringConstants.cs index b8c4858fec1..a7a11ebbf88 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/HDStringConstants.cs +++ b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/HDStringConstants.cs @@ -252,6 +252,29 @@ static class HDShaderIDs public static readonly int _CameraFilteringBuffer = Shader.PropertyToID("_CameraFilteringTexture"); public static readonly int _IrradianceSource = Shader.PropertyToID("_IrradianceSource"); + // Planar reflection filtering + public static readonly int _ReflectionColorMipChain = Shader.PropertyToID("_ReflectionColorMipChain"); + public static readonly int 
_DepthTextureMipChain = Shader.PropertyToID("_DepthTextureMipChain"); + public static readonly int _ReflectionPlaneNormal = Shader.PropertyToID("_ReflectionPlaneNormal"); + public static readonly int _ReflectionPlanePosition = Shader.PropertyToID("_ReflectionPlanePosition"); + public static readonly int _FilteredPlanarReflectionBuffer = Shader.PropertyToID("_FilteredPlanarReflectionBuffer"); + public static readonly int _HalfResReflectionBuffer = Shader.PropertyToID("_HalfResReflectionBuffer"); + public static readonly int _HalfResDepthBuffer = Shader.PropertyToID("_HalfResDepthBuffer"); + public static readonly int _CaptureBaseScreenSize = Shader.PropertyToID("_CaptureBaseScreenSize"); + public static readonly int _CaptureCurrentScreenSize = Shader.PropertyToID("_CaptureCurrentScreenSize"); + public static readonly int _CaptureCameraIVP = Shader.PropertyToID("_CaptureCameraIVP"); + public static readonly int _CaptureCameraPositon = Shader.PropertyToID("_CaptureCameraPositon"); + public static readonly int _SourceMipIndex = Shader.PropertyToID("_SourceMipIndex"); + public static readonly int _MaxMipLevels = Shader.PropertyToID("_MaxMipLevels"); + public static readonly int _ThetaValuesTexture = Shader.PropertyToID("_ThetaValuesTexture"); + public static readonly int _CaptureCameraFOV = Shader.PropertyToID("_CaptureCameraFOV"); + public static readonly int _RTScaleFactor = Shader.PropertyToID("_RTScaleFactor"); + public static readonly int _CaptureCameraVP_NO = Shader.PropertyToID("_CaptureCameraVP_NO"); + public static readonly int _CaptureCameraFarPlane = Shader.PropertyToID("_CaptureCameraFarPlane"); + public static readonly int _DepthTextureOblique = Shader.PropertyToID("_DepthTextureOblique"); + public static readonly int _DepthTextureNonOblique = Shader.PropertyToID("_DepthTextureNonOblique"); + public static readonly int _CaptureCameraIVP_NO = Shader.PropertyToID("_CaptureCameraIVP_NO"); + // MSAA shader properties public static readonly int _ColorTextureMS = 
Shader.PropertyToID("_ColorTextureMS"); public static readonly int _DepthTextureMS = Shader.PropertyToID("_DepthTextureMS"); diff --git a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/RenderPipelineResources.cs b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/RenderPipelineResources.cs index f0c7d767cca..87b1b915415 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/RenderPipelineResources.cs +++ b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/RenderPipelineResources.cs @@ -145,7 +145,8 @@ public sealed class ShaderResources public ComputeShader inScatteredRadiancePrecomputationCS; [Reload("Runtime/Sky/PhysicallyBasedSky/PhysicallyBasedSky.shader")] public Shader physicallyBasedSkyPS; - + [Reload("Runtime/Lighting/PlanarReflectionFiltering.compute")] + public ComputeShader planarReflectionFilteringCS; // Material [Reload("Runtime/Material/PreIntegratedFGD/PreIntegratedFGD_GGXDisneyDiffuse.shader")] public Shader preIntegratedFGD_GGXDisneyDiffusePS; diff --git a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/Settings/FrameSettings.cs b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/Settings/FrameSettings.cs index df665026da4..b7c81cc20bf 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/Settings/FrameSettings.cs +++ b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/Settings/FrameSettings.cs @@ -767,6 +767,8 @@ internal static void Sanitize(ref FrameSettings sanitizedFrameSettings, Camera c // Object motion vector are disabled if motion vector are disabled sanitizedFrameSettings.bitDatas[(int)FrameSettingsField.ObjectMotionVectors] &= motionVector && !preview; + sanitizedFrameSettings.bitDatas[(int)FrameSettingsField.TransparentsWriteMotionVector] &= motionVector && !preview; + sanitizedFrameSettings.bitDatas[(int)FrameSettingsField.Decals] &= renderPipelineSettings.supportDecals && !preview; 
sanitizedFrameSettings.bitDatas[(int)FrameSettingsField.TransparentPostpass] &= renderPipelineSettings.supportTransparentDepthPostpass && !preview; sanitizedFrameSettings.bitDatas[(int)FrameSettingsField.Distortion] &= renderPipelineSettings.supportDistortion && !msaa && !preview; diff --git a/com.unity.render-pipelines.high-definition/Runtime/RenderPipelineResources/HDRenderPipelineResources.asset b/com.unity.render-pipelines.high-definition/Runtime/RenderPipelineResources/HDRenderPipelineResources.asset index 4ebc10b45cf..e8f5c832006 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/RenderPipelineResources/HDRenderPipelineResources.asset +++ b/com.unity.render-pipelines.high-definition/Runtime/RenderPipelineResources/HDRenderPipelineResources.asset @@ -108,6 +108,8 @@ MonoBehaviour: type: 3} physicallyBasedSkyPS: {fileID: 4800000, guid: a06934a4863e778498be65d8f865b7a4, type: 3} + planarReflectionFilteringCS: {fileID: 7200000, guid: 9f3f8a01b8caaaa4595591dc96d43dd2, + type: 3} preIntegratedFGD_GGXDisneyDiffusePS: {fileID: 4800000, guid: 123f13d52852ef547b2962de4bd9eaad, type: 3} preIntegratedFGD_CharlieFabricLambertPS: {fileID: 4800000, guid: 3b3bf235775cf8b4baae7f3306787ab0, @@ -309,6 +311,7 @@ MonoBehaviour: SMAASearchTex: {fileID: 2800000, guid: dc95d70472e232b438d0fd38651e7ec2, type: 3} SMAAAreaTex: {fileID: 2800000, guid: 92e0d85ab4eca874098e7fcf6f8f674e, type: 3} defaultHDRISky: {fileID: 8900000, guid: 8253d41e6e8b11a4cbe77a4f8f82934d, type: 3} + ggxConeAngle70pc: {fileID: 2800000, guid: 794081635e7e0fe46b6c3fa0afa70d87, type: 3} assets: defaultDiffusionProfile: {fileID: 11400000, guid: 2b7005ba3a4d8474b8cdc34141ad766e, type: 2} diff --git a/com.unity.render-pipelines.high-definition/Runtime/ShaderLibrary/ShaderVariablesXR.cs b/com.unity.render-pipelines.high-definition/Runtime/ShaderLibrary/ShaderVariablesXR.cs index 6898a2fbcc3..a7c25738f1f 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/ShaderLibrary/ShaderVariablesXR.cs +++ 
b/com.unity.render-pipelines.high-definition/Runtime/ShaderLibrary/ShaderVariablesXR.cs @@ -4,35 +4,35 @@ namespace UnityEngine.Rendering.HighDefinition [GenerateHLSL(needAccessors = false, generateCBuffer = true, constantRegister = (int)ConstantRegister.XR)] unsafe struct ShaderVariablesXR { - [HLSLArray((int)ShaderOptions.XrMaxViews, typeof(Matrix4x4))] - public fixed float _XRViewMatrix[(int)ShaderOptions.XrMaxViews * 16]; - [HLSLArray((int)ShaderOptions.XrMaxViews, typeof(Matrix4x4))] - public fixed float _XRInvViewMatrix[(int)ShaderOptions.XrMaxViews * 16]; - [HLSLArray((int)ShaderOptions.XrMaxViews, typeof(Matrix4x4))] - public fixed float _XRProjMatrix[(int)ShaderOptions.XrMaxViews * 16]; - [HLSLArray((int)ShaderOptions.XrMaxViews, typeof(Matrix4x4))] - public fixed float _XRInvProjMatrix[(int)ShaderOptions.XrMaxViews * 16]; - [HLSLArray((int)ShaderOptions.XrMaxViews, typeof(Matrix4x4))] - public fixed float _XRViewProjMatrix[(int)ShaderOptions.XrMaxViews * 16]; - [HLSLArray((int)ShaderOptions.XrMaxViews, typeof(Matrix4x4))] - public fixed float _XRInvViewProjMatrix[(int)ShaderOptions.XrMaxViews * 16]; - [HLSLArray((int)ShaderOptions.XrMaxViews, typeof(Matrix4x4))] - public fixed float _XRNonJitteredViewProjMatrix[(int)ShaderOptions.XrMaxViews * 16]; - [HLSLArray((int)ShaderOptions.XrMaxViews, typeof(Matrix4x4))] - public fixed float _XRPrevViewProjMatrix[(int)ShaderOptions.XrMaxViews * 16]; - [HLSLArray((int)ShaderOptions.XrMaxViews, typeof(Matrix4x4))] - public fixed float _XRPrevInvViewProjMatrix[(int)ShaderOptions.XrMaxViews * 16]; - [HLSLArray((int)ShaderOptions.XrMaxViews, typeof(Matrix4x4))] - public fixed float _XRPrevViewProjMatrixNoCameraTrans[(int)ShaderOptions.XrMaxViews * 16]; - [HLSLArray((int)ShaderOptions.XrMaxViews, typeof(Matrix4x4))] - public fixed float _XRPixelCoordToViewDirWS[(int)ShaderOptions.XrMaxViews * 16]; + [HLSLArray(ShaderConfig.k_XRMaxViewsForCBuffer, typeof(Matrix4x4))] + public fixed float 
_XRViewMatrix[ShaderConfig.k_XRMaxViewsForCBuffer * 16]; + [HLSLArray(ShaderConfig.k_XRMaxViewsForCBuffer, typeof(Matrix4x4))] + public fixed float _XRInvViewMatrix[ShaderConfig.k_XRMaxViewsForCBuffer * 16]; + [HLSLArray(ShaderConfig.k_XRMaxViewsForCBuffer, typeof(Matrix4x4))] + public fixed float _XRProjMatrix[ShaderConfig.k_XRMaxViewsForCBuffer * 16]; + [HLSLArray(ShaderConfig.k_XRMaxViewsForCBuffer, typeof(Matrix4x4))] + public fixed float _XRInvProjMatrix[ShaderConfig.k_XRMaxViewsForCBuffer * 16]; + [HLSLArray(ShaderConfig.k_XRMaxViewsForCBuffer, typeof(Matrix4x4))] + public fixed float _XRViewProjMatrix[ShaderConfig.k_XRMaxViewsForCBuffer * 16]; + [HLSLArray(ShaderConfig.k_XRMaxViewsForCBuffer, typeof(Matrix4x4))] + public fixed float _XRInvViewProjMatrix[ShaderConfig.k_XRMaxViewsForCBuffer * 16]; + [HLSLArray(ShaderConfig.k_XRMaxViewsForCBuffer, typeof(Matrix4x4))] + public fixed float _XRNonJitteredViewProjMatrix[ShaderConfig.k_XRMaxViewsForCBuffer * 16]; + [HLSLArray(ShaderConfig.k_XRMaxViewsForCBuffer, typeof(Matrix4x4))] + public fixed float _XRPrevViewProjMatrix[ShaderConfig.k_XRMaxViewsForCBuffer * 16]; + [HLSLArray(ShaderConfig.k_XRMaxViewsForCBuffer, typeof(Matrix4x4))] + public fixed float _XRPrevInvViewProjMatrix[ShaderConfig.k_XRMaxViewsForCBuffer * 16]; + [HLSLArray(ShaderConfig.k_XRMaxViewsForCBuffer, typeof(Matrix4x4))] + public fixed float _XRPrevViewProjMatrixNoCameraTrans[ShaderConfig.k_XRMaxViewsForCBuffer * 16]; + [HLSLArray(ShaderConfig.k_XRMaxViewsForCBuffer, typeof(Matrix4x4))] + public fixed float _XRPixelCoordToViewDirWS[ShaderConfig.k_XRMaxViewsForCBuffer * 16]; - [HLSLArray((int)ShaderOptions.XrMaxViews, typeof(Vector4))] - public fixed float _XRWorldSpaceCameraPos[(int)ShaderOptions.XrMaxViews * 4]; - [HLSLArray((int)ShaderOptions.XrMaxViews, typeof(Vector4))] - public fixed float _XRWorldSpaceCameraPosViewOffset[(int)ShaderOptions.XrMaxViews * 4]; - [HLSLArray((int)ShaderOptions.XrMaxViews, typeof(Vector4))] - public fixed float 
_XRPrevWorldSpaceCameraPos[(int)ShaderOptions.XrMaxViews * 4]; + [HLSLArray(ShaderConfig.k_XRMaxViewsForCBuffer, typeof(Vector4))] + public fixed float _XRWorldSpaceCameraPos[ShaderConfig.k_XRMaxViewsForCBuffer * 4]; + [HLSLArray(ShaderConfig.k_XRMaxViewsForCBuffer, typeof(Vector4))] + public fixed float _XRWorldSpaceCameraPosViewOffset[ShaderConfig.k_XRMaxViewsForCBuffer * 4]; + [HLSLArray(ShaderConfig.k_XRMaxViewsForCBuffer, typeof(Vector4))] + public fixed float _XRPrevWorldSpaceCameraPos[ShaderConfig.k_XRMaxViewsForCBuffer * 4]; } } diff --git a/com.unity.render-pipelines.high-definition/Runtime/Utilities/HDRenderUtilities.cs b/com.unity.render-pipelines.high-definition/Runtime/Utilities/HDRenderUtilities.cs index 1cfa26a673e..c6c19aaaa1e 100644 --- a/com.unity.render-pipelines.high-definition/Runtime/Utilities/HDRenderUtilities.cs +++ b/com.unity.render-pipelines.high-definition/Runtime/Utilities/HDRenderUtilities.cs @@ -365,6 +365,17 @@ public static RenderTexture CreatePlanarProbeRenderTarget(int planarSize) }; } + public static RenderTexture CreatePlanarProbeDepthRenderTarget(int planarSize) + { + return new RenderTexture(planarSize, planarSize, 1, GraphicsFormat.R32_SFloat) + { + dimension = TextureDimension.Tex2D, + enableRandomWrite = true, + useMipMap = true, + autoGenerateMips = false + }; + } + /// /// Create the texture target for a baked reflection probe. ///