diff --git a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/LinuxEditor/Vulkan/None/2203_PlanarProbes.png b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/LinuxEditor/Vulkan/None/2203_PlanarProbes.png
index fdb71657a76..468a8bccd4f 100644
--- a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/LinuxEditor/Vulkan/None/2203_PlanarProbes.png
+++ b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/LinuxEditor/Vulkan/None/2203_PlanarProbes.png
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:8b9468ad4eaf403e3d7ece2949564efe9b7bc4b9151cbf75e645abb12b711eb4
-size 287818
+oid sha256:fbb807957524dba90475ee0fd8dadc4482a99bb171fdc141395a4f7a6554c59f
+size 250643
diff --git a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/OSXEditor/Metal/None/2203_PlanarProbes.png b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/OSXEditor/Metal/None/2203_PlanarProbes.png
index 9f3de7e29d3..9d3392e8eb7 100644
--- a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/OSXEditor/Metal/None/2203_PlanarProbes.png
+++ b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/OSXEditor/Metal/None/2203_PlanarProbes.png
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:7b6dc89292419a8e6fa1b16df9d87786737834c5f8205d7e88d028f66f1ba215
-size 249122
+oid sha256:1fa30284b8cfd475c55be0d839fc7d68f58b1222f2297050563d50d500a27988
+size 253523
diff --git a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Direct3D11/None/2203_PlanarProbes.png b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Direct3D11/None/2203_PlanarProbes.png
index 165ef79ea3c..b6fdb8c4797 100644
--- a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Direct3D11/None/2203_PlanarProbes.png
+++ b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Direct3D11/None/2203_PlanarProbes.png
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:60c1060c81f32c243029cdbec19c825ae19b8316d8973d3035b249109f29fcdc
-size 256035
+oid sha256:05fa4c6ce0cd88b7554d0a3b57fe4301d832b8bcb19caeb1e0bfd8eab9ab18da
+size 250503
diff --git a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Direct3D11/None/2501_LightLayers.png b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Direct3D11/None/2501_LightLayers.png
index c4c6d4d2f1d..47463655def 100644
--- a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Direct3D11/None/2501_LightLayers.png
+++ b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Direct3D11/None/2501_LightLayers.png
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:8908b4238dc180c24f4ccdb465034610ca4801a048997ce060fb9690977498a9
-size 120500
+oid sha256:45d38b91f49dfec90d7c2672cc67ccc7ea51baeb146c9122876909ac047e7aeb
+size 120529
diff --git a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Direct3D12/None/2203_PlanarProbes.png b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Direct3D12/None/2203_PlanarProbes.png
index 9381fcaa296..b6fdb8c4797 100644
--- a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Direct3D12/None/2203_PlanarProbes.png
+++ b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Direct3D12/None/2203_PlanarProbes.png
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:9e6e6ea9401358eda14f0242daabfdd89e75005034437fa20435322ecf169b20
-size 287739
+oid sha256:05fa4c6ce0cd88b7554d0a3b57fe4301d832b8bcb19caeb1e0bfd8eab9ab18da
+size 250503
diff --git a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Vulkan/None/2203_PlanarProbes.png b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Vulkan/None/2203_PlanarProbes.png
index 6548a948b05..43ef90b0a39 100644
--- a/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Vulkan/None/2203_PlanarProbes.png
+++ b/TestProjects/HDRP_Tests/Assets/ReferenceImages/Linear/WindowsEditor/Vulkan/None/2203_PlanarProbes.png
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:977995200e0b81238a2c98040f6e2f467dc34a5aa8aa24a5ba929fc370ac3fac
-size 256309
+oid sha256:4321afcd6d2dbdc4b35f1efb6ad1a9fd80bbac1a8c2f7d7cd1c8811703d15cb8
+size 250643
diff --git a/com.unity.render-pipelines.core/ShaderLibrary/GeometricTools.hlsl b/com.unity.render-pipelines.core/ShaderLibrary/GeometricTools.hlsl
index 5868d98babd..a84aeda1098 100644
--- a/com.unity.render-pipelines.core/ShaderLibrary/GeometricTools.hlsl
+++ b/com.unity.render-pipelines.core/ShaderLibrary/GeometricTools.hlsl
@@ -132,6 +132,23 @@ float3 IntersectRayPlane(float3 rayOrigin, float3 rayDirection, float3 planeOrig
return rayOrigin + rayDirection * dist;
}
+// Same as above, but also returns the intersection distance and whether the ray hit (true) or missed (false) the plane
+bool IntersectRayPlane(float3 rayOrigin, float3 rayDirection, float3 planePosition, float3 planeNormal, out float t)
+{
+ bool res = false;
+ t = -1.0;
+
+ float denom = dot(planeNormal, rayDirection);
+ if (abs(denom) > 1e-5)
+ {
+ float3 d = planePosition - rayOrigin;
+ t = dot(d, planeNormal) / denom;
+ res = (t >= 0);
+ }
+
+ return res;
+}
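+// Illustrative usage (variable names are placeholders): the planar reflection filtering below
+// intersects a camera ray with the mirror plane this way:
+//   float t;
+//   if (IntersectRayPlane(capturePosWS, rayDirWS, planePosWS, planeNormalWS, t))
+//       float3 pointOnPlaneWS = capturePosWS + rayDirWS * t;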
+
// Can support cones with an elliptic base: pre-scale 'coneAxisX' and 'coneAxisY' by (h/r_x) and (h/r_y).
// Returns parametric distances 'tEntr' and 'tExit' along the ray,
// subject to constraints 'tMin' and 'tMax'.
diff --git a/com.unity.render-pipelines.core/ShaderLibrary/ImageBasedLighting.hlsl b/com.unity.render-pipelines.core/ShaderLibrary/ImageBasedLighting.hlsl
index 4f19e17fa7e..8f0eaabe5d0 100644
--- a/com.unity.render-pipelines.core/ShaderLibrary/ImageBasedLighting.hlsl
+++ b/com.unity.render-pipelines.core/ShaderLibrary/ImageBasedLighting.hlsl
@@ -32,12 +32,6 @@ real PerceptualRoughnessToMipmapLevel(real perceptualRoughness)
return PerceptualRoughnessToMipmapLevel(perceptualRoughness, UNITY_SPECCUBE_LOD_STEPS);
}
-// Mapping for convolved Texture2D, this is an empirical remapping to match GGX version of cubemap convolution
-real PlanarPerceptualRoughnessToMipmapLevel(real perceptualRoughness, uint mipMapcount)
-{
- return PositivePow(perceptualRoughness, 0.8) * uint(max(mipMapcount - 1, 0));
-}
-
// The *accurate* version of the non-linear remapping. It works by
// approximating the cone of the specular lobe, and then computing the MIP map level
// which (approximately) covers the footprint of the lobe with a single texel.
diff --git a/com.unity.render-pipelines.high-definition/CHANGELOG.md b/com.unity.render-pipelines.high-definition/CHANGELOG.md
index f8283a25bdd..1eb63baeed7 100644
--- a/com.unity.render-pipelines.high-definition/CHANGELOG.md
+++ b/com.unity.render-pipelines.high-definition/CHANGELOG.md
@@ -788,6 +788,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- DXR: Only read the geometric attributes that are required using the share pass info and shader graph defines.
- DXR: Dispatch binned rays in 1D instead of 2D.
- Lit and LayeredLit tessellation cross lod fade don't used dithering anymore between LOD but fade the tessellation height instead. Allow a smoother transition
+- Changed the way planar reflections are filtered in order to be a bit more "physically based".
## [7.1.1] - 2019-09-05
diff --git a/com.unity.render-pipelines.high-definition/Runtime/Lighting/LightLoop/LightLoop.cs b/com.unity.render-pipelines.high-definition/Runtime/Lighting/LightLoop/LightLoop.cs
index 0047970d137..ccfa707d79d 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/Lighting/LightLoop/LightLoop.cs
+++ b/com.unity.render-pipelines.high-definition/Runtime/Lighting/LightLoop/LightLoop.cs
@@ -1788,7 +1788,44 @@ internal bool GetEnvLightData(CommandBuffer cmd, HDCamera hdCamera, in Processed
&& !hdCamera.frameSettings.IsEnabled(FrameSettingsField.PlanarProbe))
break;
- var scaleOffset = m_TextureCaches.reflectionPlanarProbeCache.FetchSlice(cmd, probe.texture, out int fetchIndex);
+ // Grab the render data that was used to render the probe
+ var renderData = planarProbe.renderData;
+ // Grab the world to camera matrix of the capture camera
+ var worldToCameraRHSMatrix = renderData.worldToCameraRHS;
+ // Grab the projection matrix that was used to render
+ var projectionMatrix = renderData.projectionMatrix;
+ // Build an alternative matrix for projection that is not oblique
+ var projectionMatrixNonOblique = Matrix4x4.Perspective(renderData.fieldOfView, probe.texture.width / (float)probe.texture.height, probe.settings.cameraSettings.frustum.nearClipPlaneRaw, probe.settings.cameraSettings.frustum.farClipPlane);
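+ // Note: the capture itself needs the oblique projection (it clips at the mirror plane), but depth
+ // produced by an oblique frustum interpolates poorly, so the filtering also requires a regular frustum
+ // (see PlanarReflectionFiltering.compute, which converts the captured depth to non-oblique depth).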
+
+ // Convert the projection matrices to their GPU version
+ var gpuProj = GL.GetGPUProjectionMatrix(projectionMatrix, true);
+ var gpuProjNonOblique = GL.GetGPUProjectionMatrix(projectionMatrixNonOblique, true);
+
+ // Build the oblique and non oblique view projection matrices
+ var vp = gpuProj * worldToCameraRHSMatrix;
+ var vpNonOblique = gpuProjNonOblique * worldToCameraRHSMatrix;
+
+ // We need to collect the set of parameters required for the filtering
+ IBLFilterBSDF.PlanarTextureFilteringParameters planarTextureFilteringParameters = new IBLFilterBSDF.PlanarTextureFilteringParameters();
+ planarTextureFilteringParameters.probeNormal = Vector3.Normalize(hdCamera.camera.transform.position - renderData.capturePosition);
+ planarTextureFilteringParameters.probePosition = probe.gameObject.transform.position;
+ planarTextureFilteringParameters.captureCameraDepthBuffer = planarProbe.realtimeDepthTexture;
+ planarTextureFilteringParameters.captureCameraScreenSize = new Vector4(probe.texture.width, probe.texture.height, 1.0f / probe.texture.width, 1.0f / probe.texture.height);
+ planarTextureFilteringParameters.captureCameraIVP = vp.inverse;
+ planarTextureFilteringParameters.captureCameraIVP_NonOblique = vpNonOblique.inverse;
+ planarTextureFilteringParameters.captureCameraVP_NonOblique = vpNonOblique;
+ planarTextureFilteringParameters.captureCameraPosition = renderData.capturePosition;
+ planarTextureFilteringParameters.captureFOV = renderData.fieldOfView;
+ planarTextureFilteringParameters.captureNearPlane = probe.settings.cameraSettings.frustum.nearClipPlaneRaw;
+ planarTextureFilteringParameters.captureFarPlane = probe.settings.cameraSettings.frustum.farClipPlane;
+
+ // Fetch the slice and do the filtering
+ var scaleOffset = m_TextureCaches.reflectionPlanarProbeCache.FetchSlice(cmd, probe.texture, ref planarTextureFilteringParameters, out int fetchIndex);
+
+ // We don't need to provide the capture position
+ // It is already encoded in the 'worldToCameraRHSMatrix'
+ capturePosition = Vector3.zero;
+
// Indices start at 1, because -0 == 0, we can know from the bit sign which cache to use
envIndex = scaleOffset == Vector4.zero ? int.MinValue : -(fetchIndex + 1);
@@ -1800,19 +1837,7 @@ internal bool GetEnvLightData(CommandBuffer cmd, HDCamera hdCamera, in Processed
}
atlasScaleOffset = scaleOffset;
-
- var renderData = planarProbe.renderData;
- var worldToCameraRHSMatrix = renderData.worldToCameraRHS;
- var projectionMatrix = renderData.projectionMatrix;
-
- // We don't need to provide the capture position
- // It is already encoded in the 'worldToCameraRHSMatrix'
- capturePosition = Vector3.zero;
-
- // get the device dependent projection matrix
- var gpuProj = GL.GetGPUProjectionMatrix(projectionMatrix, true);
- var gpuView = worldToCameraRHSMatrix;
- var vp = gpuProj * gpuView;
+
m_TextureCaches.env2DAtlasScaleOffset[fetchIndex] = scaleOffset;
m_TextureCaches.env2DCaptureVP[fetchIndex] = vp;
diff --git a/com.unity.render-pipelines.high-definition/Runtime/Lighting/PlanarReflectionFiltering.compute b/com.unity.render-pipelines.high-definition/Runtime/Lighting/PlanarReflectionFiltering.compute
new file mode 100644
index 00000000000..426c8273d03
--- /dev/null
+++ b/com.unity.render-pipelines.high-definition/Runtime/Lighting/PlanarReflectionFiltering.compute
@@ -0,0 +1,194 @@
+#pragma kernel FilterPlanarReflection
+#pragma kernel DownScale
+#pragma kernel DepthConversion
+
+#pragma only_renderers d3d11 playstation xboxone vulkan metal switch
+// #pragma enable_d3d11_debug_symbols
+
+// The process is done in 3 steps. We start by converting the depth from oblique to regular frustum depth.
+// Then we build a mip chain of both the depth and the color. The depth is averaged over 2x2 footprints and the color
+// is filtered over a wider neighborhood (otherwise we get too many artifacts when doing the actual filtering).
+// The filtering estimates the pixel footprint of the blur based on the distance to the occluder, the roughness
+// of the current mip and the distance to the pixel. We then select the input from the matching mip; the idea
+// is to avoid a 128x128 blur for the rougher values.
+
+// HDRP generic includes
+#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Common.hlsl"
+#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/GeometricTools.hlsl"
+#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/ImageBasedLighting.hlsl"
+#include "Packages/com.unity.render-pipelines.high-definition/Runtime/ShaderLibrary/ShaderVariables.hlsl"
+#include "Packages/com.unity.render-pipelines.high-definition/Runtime/Material/Material.hlsl"
+
+// Tile size of this compute
+#define PLANAR_REFLECTION_TILE_SIZE 8
+
+// Mip chain of depth and color
+TEXTURE2D(_DepthTextureMipChain);
+TEXTURE2D(_ReflectionColorMipChain);
+
+CBUFFER_START(ShaderVariablesPlanarReflectionFiltering)
+ // The screen size (width, height, 1.0 / width, 1.0 / height) that is produced by the capture
+ float4 _CaptureBaseScreenSize;
+ // The screen size (width, height, 1.0 / width, 1.0 / height) of the current level processed
+ float4 _CaptureCurrentScreenSize;
+ // Normal of the planar reflection plane
+ float3 _ReflectionPlaneNormal;
+ // World space position of the planar reflection (non camera relative)
+ float3 _ReflectionPlanePosition;
+ // FOV of the capture camera
+ float _CaptureCameraFOV;
+ // World space position of the capture camera (non camera relative)
+ float3 _CaptureCameraPositon;
+ // The mip index of the source data
+ uint _SourceMipIndex;
+ // Inverse view projection of the capture camera (oblique)
+ float4x4 _CaptureCameraIVP;
+ // Inverse view projection of the capture camera (non oblique)
+ float4x4 _CaptureCameraIVP_NO;
+ // View projection of the capture camera (non oblique)
+ float4x4 _CaptureCameraVP_NO;
+ // The texture we write to can sometimes be bigger than the current target, so we need to apply a scale factor before using the sampling intrinsic
+ float _RTScaleFactor;
+ // Far plane of the capture camera
+ float _CaptureCameraFarPlane;
+ // The number of valid mips in the mip chain
+ uint _MaxMipLevels;
+CBUFFER_END
+
+// Output buffer of our filtering code
+RW_TEXTURE2D(float4, _FilteredPlanarReflectionBuffer);
+
+// These angles have been experimentally chosen to match the result of reflection probes. Initially this was a table dependent on both angle and roughness, but every planar
+// probe has a finite number of LODs, those LODs have fixed roughness, and the angle changes the result only slightly, so it was changed to a per-LOD LUT.
+static const float reflectionProbeEquivalentAngles[UNITY_SPECCUBE_LOD_STEPS + 1] = {0.0, 0.04, 0.12, 0.4, 0.9, 1.2, 1.2};
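+// The LUT is indexed by the mip level being filtered (_SourceMipIndex); each entry is the equivalent
+// cone half angle, in radians, assumed to approximate the specular lobe convolved into that mip.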
+
+[numthreads(PLANAR_REFLECTION_TILE_SIZE, PLANAR_REFLECTION_TILE_SIZE, 1)]
+void FilterPlanarReflection(uint3 dispatchThreadId : SV_DispatchThreadID, uint2 groupThreadId : SV_GroupThreadID, uint2 groupId : SV_GroupID)
+{
+ UNITY_XR_ASSIGN_VIEW_INDEX(dispatchThreadId.z);
+
+ // Compute the pixel position to process
+ uint2 currentCoord = (uint2)(groupId * PLANAR_REFLECTION_TILE_SIZE + groupThreadId);
+
+ // Compute the coordinates that shall be used for sampling
+ float2 sampleCoords = (currentCoord << (int)(_SourceMipIndex)) * _CaptureBaseScreenSize.zw * _RTScaleFactor;
+
+ // Fetch the depth value for the current pixel.
+ float centerDepthValue = SAMPLE_TEXTURE2D_LOD(_DepthTextureMipChain, s_trilinear_clamp_sampler, sampleCoords, _SourceMipIndex).x;
+
+ // Compute the world position of the tapped pixel
+ PositionInputs centralPosInput = GetPositionInput(currentCoord, _CaptureCurrentScreenSize.zw, centerDepthValue, _CaptureCameraIVP_NO, 0, 0);
+
+ // Compute the direction to the reflection pixel
+ const float3 rayDirection = normalize(centralPosInput.positionWS - _CaptureCameraPositon);
+
+ // Compute the position on the plane we shall be integrating from
+ float t = -1.0;
+ if (!IntersectRayPlane(_CaptureCameraPositon, rayDirection, _ReflectionPlanePosition, _ReflectionPlaneNormal, t))
+ {
+ // If there is no plane intersection, there is nothing to filter (this is a position that cannot be reflected)
+ _FilteredPlanarReflectionBuffer[currentCoord] = float4(0.0, 0.0, 0.0, 1.0);
+ return;
+ }
+
+ // Compute the integration position (position on the plane)
+ const float3 integrationPositionRWS = _CaptureCameraPositon + rayDirection * t;
+
+ // Evaluate the cone half angle for the filtering
+ const float halfAngle = reflectionProbeEquivalentAngles[_SourceMipIndex];
+
+ // Compute the distances we need for our filtering
+ const float distanceCameraToPlane = length(integrationPositionRWS - _CaptureCameraPositon);
+ const float distancePlaneToObject = length(centralPosInput.positionWS - integrationPositionRWS);
+
+ // Compute the cone footprint on the image reflection plane for this configuration
+ const float brdfConeRadius = tan(halfAngle) * distancePlaneToObject;
+
+ // We need to compute the view cone radius
+ const float viewConeRadius = brdfConeRadius * distanceCameraToPlane / (distancePlaneToObject + distanceCameraToPlane);
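+ // (Similar triangles with the apex at the camera: the footprint of radius 'brdfConeRadius' at the
+ // occluder maps onto the plane scaled by distanceCameraToPlane / (distancePlaneToObject + distanceCameraToPlane).)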
+
+ // Compute the view cone's half angle. This matches the FOV angle required to see exactly half of the cone (the tangent could be precomputed in the table)
+ const float viewConeHalfAngle = FastATanPos(viewConeRadius / distanceCameraToPlane);
+ // Given the camera's fov and pixel resolution convert the viewConeHalfAngle to a number of pixels
+ const float pixelDistance = viewConeHalfAngle / _CaptureCameraFOV * _CaptureCurrentScreenSize.x;
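+ // This assumes the angle-to-pixel mapping is roughly linear across the capture FOV, which holds
+ // well enough for the small cone angles involved.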
+
+ // Convert this to a mip level shift starting from the mip 0
+ const float miplevel = log2(pixelDistance / 2);
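+ // e.g. a footprint of 16 pixels gives log2(16 / 2) = 3 extra mip levels on top of the current source mip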
+
+ // Because of the high level of aliasing that this algorithm causes, especially on the higher mips, we apply a mip bias during the sampling to try to hide it
+ const float mipBias = _SourceMipIndex > 3 ? lerp(0.0, 2.0, (float)(_MaxMipLevels - _SourceMipIndex) / _MaxMipLevels) : 0.0;
+
+ // Read the integration color that we should take
+ const float3 integrationColor = SAMPLE_TEXTURE2D_LOD(_ReflectionColorMipChain, s_trilinear_clamp_sampler, sampleCoords, clamp(miplevel + _SourceMipIndex + mipBias, 0, _MaxMipLevels)).xyz;
+
+ // Write the output ray data
+ _FilteredPlanarReflectionBuffer[currentCoord] = float4(integrationColor, 1.0);
+}
+
+// Half resolution output texture for our mip chain build.
+RW_TEXTURE2D(float4, _HalfResReflectionBuffer);
+RW_TEXTURE2D(float, _HalfResDepthBuffer);
+
+[numthreads(PLANAR_REFLECTION_TILE_SIZE, PLANAR_REFLECTION_TILE_SIZE, 1)]
+void DownScale(uint3 dispatchThreadId : SV_DispatchThreadID, uint2 groupThreadId : SV_GroupThreadID, uint2 groupId : SV_GroupID)
+{
+ UNITY_XR_ASSIGN_VIEW_INDEX(dispatchThreadId.z);
+
+ // Compute the pixel position to process
+ int2 currentCoord = (int2)(groupId * PLANAR_REFLECTION_TILE_SIZE + groupThreadId);
+
+ // Unfortunately, we have to go wider than the simple 2x2 neighborhood or there is too much aliasing
+ float3 averageColor = 0.0;
+ float sumW = 0.0;
+ // In order to avoid a one pixel shift to the right, we need to center our down sample.
+ for (int y = -1; y <= 2; ++y)
+ {
+ for (int x = -1; x <= 2; ++x)
+ {
+ const int2 tapCoord = currentCoord * 2 + int2(x, y);
+ // If the pixel is outside the current screen size, its weight becomes zero
+ float weight = tapCoord.x >= _CaptureCurrentScreenSize.x || tapCoord.x < 0
+ || tapCoord.y >= _CaptureCurrentScreenSize.y || tapCoord.y < 0 ? 0.0 : 1.0;
+ averageColor += LOAD_TEXTURE2D_LOD(_ReflectionColorMipChain, tapCoord, _SourceMipIndex).xyz * weight;
+ sumW += weight;
+ }
+ }
+ // Normalize and output
+ _HalfResReflectionBuffer[currentCoord] = float4(averageColor / sumW, 1.0);
+
+ // We average the 4 depths and move on
+ _HalfResDepthBuffer[currentCoord] = (LOAD_TEXTURE2D_LOD(_DepthTextureMipChain, currentCoord * 2, _SourceMipIndex).x
+ + LOAD_TEXTURE2D_LOD(_DepthTextureMipChain, currentCoord * 2 + uint2(0,1), _SourceMipIndex).x
+ + LOAD_TEXTURE2D_LOD(_DepthTextureMipChain, currentCoord * 2 + uint2(1,0), _SourceMipIndex).x
+ + LOAD_TEXTURE2D_LOD(_DepthTextureMipChain, currentCoord * 2 + uint2(1,1), _SourceMipIndex).x) * 0.25;
+}
+
+// Initial depth buffer (oblique)
+TEXTURE2D(_DepthTextureOblique);
+// Converted depth values (non oblique)
+RW_TEXTURE2D(float, _DepthTextureNonOblique);
+
+[numthreads(PLANAR_REFLECTION_TILE_SIZE, PLANAR_REFLECTION_TILE_SIZE, 1)]
+void DepthConversion(uint3 dispatchThreadId : SV_DispatchThreadID, uint2 groupThreadId : SV_GroupThreadID, uint2 groupId : SV_GroupID)
+{
+ UNITY_XR_ASSIGN_VIEW_INDEX(dispatchThreadId.z);
+
+ // Compute the pixel position to process
+ int2 currentCoord = (int2)(groupId * PLANAR_REFLECTION_TILE_SIZE + groupThreadId);
+
+ // Fetch the depth value for the current pixel. It would be great to use sample instead, but oblique matrices prevent us from doing it.
+ float centerDepthValue = LOAD_TEXTURE2D_LOD(_DepthTextureOblique, currentCoord, 0).x;
+
+ // Compute the world position of the tapped pixel
+ PositionInputs centralPosInput = GetPositionInput(currentCoord, _CaptureCurrentScreenSize.zw, centerDepthValue, _CaptureCameraIVP, 0, 0);
+
+ // For some reason, with oblique matrices, when the point is on the background the reconstructed position ends up behind the camera and at the wrong position
+ float3 rayDirection = normalize(_CaptureCameraPositon - centralPosInput.positionWS);
+ rayDirection = centerDepthValue == 0.0 ? -rayDirection : rayDirection;
+ // Adjust the position
+ centralPosInput.positionWS = centerDepthValue == 0.0 ? _CaptureCameraPositon + rayDirection * _CaptureCameraFarPlane : centralPosInput.positionWS;
+
+ // Re-do the projection, but this time without the oblique part and export it
+ float4 hClip = mul(_CaptureCameraVP_NO, float4(centralPosInput.positionWS, 1.0));
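+ // hClip.z / hClip.w is the normalized device depth in the regular (non oblique) frustum; saturate
+ // keeps the repositioned background points inside the valid [0, 1] depth range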
+ _DepthTextureNonOblique[currentCoord] = saturate(hClip.z / hClip.w);
+}
diff --git a/com.unity.render-pipelines.high-definition/Runtime/Lighting/PlanarReflectionFiltering.compute.meta b/com.unity.render-pipelines.high-definition/Runtime/Lighting/PlanarReflectionFiltering.compute.meta
new file mode 100644
index 00000000000..2277cb2d810
--- /dev/null
+++ b/com.unity.render-pipelines.high-definition/Runtime/Lighting/PlanarReflectionFiltering.compute.meta
@@ -0,0 +1,8 @@
+fileFormatVersion: 2
+guid: 9f3f8a01b8caaaa4595591dc96d43dd2
+ComputeShaderImporter:
+ externalObjects: {}
+ currentAPIMask: 4
+ userData:
+ assetBundleName:
+ assetBundleVariant:
diff --git a/com.unity.render-pipelines.high-definition/Runtime/Lighting/Reflection/HDProbe.cs b/com.unity.render-pipelines.high-definition/Runtime/Lighting/Reflection/HDProbe.cs
index 145981a32b3..c2a1babc366 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/Lighting/Reflection/HDProbe.cs
+++ b/com.unity.render-pipelines.high-definition/Runtime/Lighting/Reflection/HDProbe.cs
@@ -118,6 +118,7 @@ float aspect
// Runtime Data
RenderTexture m_RealtimeTexture;
+ RenderTexture m_RealtimeDepthBuffer;
RenderData m_RealtimeRenderData;
bool m_WasRenderedSinceLastOnDemandRequest = true;
@@ -189,6 +190,12 @@ public RenderTexture realtimeTexture
set => m_RealtimeTexture = value;
}
+ public RenderTexture realtimeDepthTexture
+ {
+ get => m_RealtimeDepthBuffer;
+ set => m_RealtimeDepthBuffer = value;
+ }
+
/// <summary>
/// The texture used during lighting for this probe.
/// </summary>
@@ -231,6 +238,20 @@ public Texture SetTexture(ProbeSettings.Mode targetMode, Texture texture)
}
}
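+
+ /// <summary>
+ /// Set the depth texture produced alongside the color capture for the given mode. Similar to
+ /// SetTexture; the realtime depth buffer is consumed by the planar reflection filtering.
+ /// </summary>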
+ public Texture SetDepthTexture(ProbeSettings.Mode targetMode, Texture texture)
+ {
+ if (targetMode == ProbeSettings.Mode.Realtime && !(texture is RenderTexture))
+ throw new ArgumentException("'texture' must be a RenderTexture for the Realtime mode.");
+
+ switch (targetMode)
+ {
+ case ProbeSettings.Mode.Baked: return m_BakedTexture = texture;
+ case ProbeSettings.Mode.Custom: return m_CustomTexture = texture;
+ case ProbeSettings.Mode.Realtime: return m_RealtimeDepthBuffer = (RenderTexture)texture;
+ default: throw new ArgumentOutOfRangeException();
+ }
+ }
+
/// <summary>
/// The render data of the last bake
/// </summary>
diff --git a/com.unity.render-pipelines.high-definition/Runtime/Lighting/Reflection/PlanarReflectionProbeCache.cs b/com.unity.render-pipelines.high-definition/Runtime/Lighting/Reflection/PlanarReflectionProbeCache.cs
index e9e37cceb4a..1143e6db8fa 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/Lighting/Reflection/PlanarReflectionProbeCache.cs
+++ b/com.unity.render-pipelines.high-definition/Runtime/Lighting/Reflection/PlanarReflectionProbeCache.cs
@@ -109,7 +109,7 @@ void ConvertTexture(CommandBuffer cmd, Texture input, RenderTexture target)
CoreUtils.DrawFullScreen(cmd, m_ConvertTextureMaterial, m_ConvertTextureMPB);
}
- Texture ConvolveProbeTexture(CommandBuffer cmd, Texture texture, out Vector4 sourceScaleOffset)
+ Texture ConvolveProbeTexture(CommandBuffer cmd, Texture texture, ref IBLFilterBSDF.PlanarTextureFilteringParameters planarTextureFilteringParameters, out Vector4 sourceScaleOffset)
{
// Probes can be either Cubemaps (for baked probes) or RenderTextures (for realtime probes)
Texture2D texture2D = texture as Texture2D;
@@ -158,12 +158,12 @@ Texture ConvolveProbeTexture(CommandBuffer cmd, Texture texture, out Vector4 sou
float scaleX = (float)texture.width / m_ConvolutionTargetTexture.width;
float scaleY = (float)texture.height / m_ConvolutionTargetTexture.height;
sourceScaleOffset = new Vector4(scaleX, scaleY, 0, 0);
- m_IBLFilterGGX.FilterPlanarTexture(cmd, convolutionSourceTexture, m_ConvolutionTargetTexture);
+ m_IBLFilterGGX.FilterPlanarTexture(cmd, convolutionSourceTexture, ref planarTextureFilteringParameters, m_ConvolutionTargetTexture);
return m_ConvolutionTargetTexture;
}
- public Vector4 FetchSlice(CommandBuffer cmd, Texture texture, out int fetchIndex)
+ public Vector4 FetchSlice(CommandBuffer cmd, Texture texture, ref IBLFilterBSDF.PlanarTextureFilteringParameters planarTextureFilteringParameters, out int fetchIndex)
{
Vector4 scaleOffset = Vector4.zero;
fetchIndex = m_FrameProbeIndex++;
@@ -172,17 +172,17 @@ public Vector4 FetchSlice(CommandBuffer cmd, Texture texture, out int fetchIndex
{
// If the texture is already in the atlas, we update it only if needed
if (NeedsUpdate(texture) || m_ProbeBakingState[scaleOffset] != ProbeFilteringState.Ready)
- if (!UpdatePlanarTexture(cmd, texture, ref scaleOffset))
+ if (!UpdatePlanarTexture(cmd, texture, ref planarTextureFilteringParameters, ref scaleOffset))
Debug.LogError("Can't convolve or update the planar reflection render target");
}
else // Either we add it to the atlas
- if (!UpdatePlanarTexture(cmd, texture, ref scaleOffset))
+ if (!UpdatePlanarTexture(cmd, texture, ref planarTextureFilteringParameters, ref scaleOffset))
Debug.LogError("No more space in the planar reflection probe atlas. To solve this issue, increase the size of the Planar Reflection Probe Atlas in the HDRP settings.");
return scaleOffset;
}
- bool UpdatePlanarTexture(CommandBuffer cmd, Texture texture, ref Vector4 scaleOffset)
+ bool UpdatePlanarTexture(CommandBuffer cmd, Texture texture, ref IBLFilterBSDF.PlanarTextureFilteringParameters planarTextureFilteringParameters, ref Vector4 scaleOffset)
{
bool success = false;
@@ -192,7 +192,7 @@ bool UpdatePlanarTexture(CommandBuffer cmd, Texture texture, ref Vector4 scaleOf
m_ProbeBakingState[scaleOffset] = ProbeFilteringState.Convolving;
Vector4 sourceScaleOffset;
- Texture convolvedTexture = ConvolveProbeTexture(cmd, texture, out sourceScaleOffset);
+ Texture convolvedTexture = ConvolveProbeTexture(cmd, texture, ref planarTextureFilteringParameters, out sourceScaleOffset);
if (convolvedTexture == null)
return false;
diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxF.hlsl b/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxF.hlsl
index 6163a043f09..bf318142059 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxF.hlsl
+++ b/com.unity.render-pipelines.high-definition/Runtime/Material/AxF/AxF.hlsl
@@ -2374,23 +2374,6 @@ IndirectLighting EvaluateBSDF_ScreenspaceRefraction( LightLoopContext lightLo
//-----------------------------------------------------------------------------
// EvaluateBSDF_Env
// ----------------------------------------------------------------------------
-float GetEnvMipLevel(EnvLightData lightData, float iblPerceptualRoughness)
-{
- float iblMipLevel;
-
- // TODO: We need to match the PerceptualRoughnessToMipmapLevel formula for planar, so we don't do this test (which is specific to our current lightloop)
- // Specific case for Texture2Ds, their convolution is a gaussian one and not a GGX one - So we use another roughness mip mapping.
- if (IsEnvIndexTexture2D(lightData.envIndex))
- {
- // Empirical remapping
- iblMipLevel = PlanarPerceptualRoughnessToMipmapLevel(iblPerceptualRoughness, _ColorPyramidLodCount);
- }
- else
- {
- iblMipLevel = PerceptualRoughnessToMipmapLevel(iblPerceptualRoughness);
- }
- return iblMipLevel;
-}
float3 GetModifiedEnvSamplingDir(EnvLightData lightData, float3 N, float3 iblR, float iblPerceptualRoughness, float clampedNdotV)
{
@@ -2468,11 +2451,8 @@ IndirectLighting EvaluateBSDF_Env( LightLoopContext lightLoopContext,
// bottom reflection is full. Lit doesn't have this problem too much in practice since only GetModifiedEnvSamplingDir
// changes the direction vs the coat.)
- float IBLMipLevel;
- IBLMipLevel = GetEnvMipLevel(lightData, preLightData.iblPerceptualRoughness);
-
// Sample the pre-integrated environment lighting
- float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, envSamplingDirForBottomLayer, IBLMipLevel, lightData.rangeCompressionFactorCompensation);
+ float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, envSamplingDirForBottomLayer, PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness), lightData.rangeCompressionFactorCompensation);
weight *= preLD.w; // Used by planar reflection to discard pixel
envLighting = GetSpecularIndirectDimmer() * preLightData.specularFGD * preLD.xyz;
@@ -2526,11 +2506,8 @@ IndirectLighting EvaluateBSDF_Env( LightLoopContext lightLoopContext,
// Single lobe approach
// We computed an average mip level stored in preLightData.iblPerceptualRoughness that we use for all CT lobes
- float IBLMipLevel;
- IBLMipLevel = GetEnvMipLevel(lightData, preLightData.iblPerceptualRoughness);
-
// Sample the actual environment lighting
- float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, envSamplingDirForBottomLayer, IBLMipLevel, lightData.rangeCompressionFactorCompensation);
+ float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, envSamplingDirForBottomLayer, PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness), lightData.rangeCompressionFactorCompensation);
float3 envLighting;
envLighting = preLightData.specularCTFGDSingleLobe * GetSpecularIndirectDimmer();
diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/Eye/Eye.hlsl b/com.unity.render-pipelines.high-definition/Runtime/Material/Eye/Eye.hlsl
index cb7a88e04a3..0cf77d6a9c1 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/Material/Eye/Eye.hlsl
+++ b/com.unity.render-pipelines.high-definition/Runtime/Material/Eye/Eye.hlsl
@@ -787,20 +787,7 @@ IndirectLighting EvaluateBSDF_Env( LightLoopContext lightLoopContext,
float3 F = preLightData.specularFGD;
- float iblMipLevel;
- // TODO: We need to match the PerceptualRoughnessToMipmapLevel formula for planar, so we don't do this test (which is specific to our current lightloop)
- // Specific case for Texture2Ds, their convolution is a gaussian one and not a GGX one - So we use another roughness mip mapping.
- if (IsEnvIndexTexture2D(lightData.envIndex))
- {
- // Empirical remapping
- iblMipLevel = PlanarPerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness, _ColorPyramidLodCount);
- }
- else
- {
- iblMipLevel = PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness);
- }
-
- float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, R, iblMipLevel, lightData.rangeCompressionFactorCompensation);
+ float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, R, PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness), lightData.rangeCompressionFactorCompensation);
weight *= preLD.a; // Used by planar reflection to discard pixel
envLighting = F * preLD.rgb;
diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/Fabric/Fabric.hlsl b/com.unity.render-pipelines.high-definition/Runtime/Material/Fabric/Fabric.hlsl
index 71810177be5..c0b4ee444a8 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/Material/Fabric/Fabric.hlsl
+++ b/com.unity.render-pipelines.high-definition/Runtime/Material/Fabric/Fabric.hlsl
@@ -611,19 +611,6 @@ IndirectLighting EvaluateBSDF_Env( LightLoopContext lightLoopContext,
// Note: using influenceShapeType and projectionShapeType instead of (lightData|proxyData).shapeType allow to make compiler optimization in case the type is know (like for sky)
EvaluateLight_EnvIntersection(positionWS, bsdfData.normalWS, lightData, influenceShapeType, R, weight);
- float iblMipLevel;
- // TODO: We need to match the PerceptualRoughnessToMipmapLevel formula for planar, so we don't do this test (which is specific to our current lightloop)
- // Specific case for Texture2Ds, their convolution is a gaussian one and not a GGX one - So we use another roughness mip mapping.
- if (IsEnvIndexTexture2D(lightData.envIndex))
- {
- // Empirical remapping
- iblMipLevel = PositivePow(preLightData.iblPerceptualRoughness, 0.8) * uint(max(_ColorPyramidLodCount - 1, 0));
- }
- else
- {
- iblMipLevel = PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness);
- }
-
// If it is a silk, we need to use the GGX convolution (slice0), otherwise the charlie convolution (slice1)
int sliceIndex = 0;
if (HasFlag(bsdfData.materialFeatures, MATERIALFEATUREFLAGS_FABRIC_COTTON_WOOL))
@@ -631,7 +618,7 @@ IndirectLighting EvaluateBSDF_Env( LightLoopContext lightLoopContext,
sliceIndex = _EnvSliceSize - 1;
}
- float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, R, iblMipLevel, lightData.rangeCompressionFactorCompensation, sliceIndex);
+ float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, R, PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness), lightData.rangeCompressionFactorCompensation, sliceIndex);
weight *= preLD.a; // Used by planar reflection to discard pixel
envLighting = preLightData.specularFGD * preLD.rgb;
diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/Fabric/IBLFilterCharlie.cs b/com.unity.render-pipelines.high-definition/Runtime/Material/Fabric/IBLFilterCharlie.cs
index 2c6a500e3f6..297c9743d49 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/Material/Fabric/IBLFilterCharlie.cs
+++ b/com.unity.render-pipelines.high-definition/Runtime/Material/Fabric/IBLFilterCharlie.cs
@@ -85,5 +85,10 @@ override public void FilterCubemap(CommandBuffer cmd, Texture source, RenderText
public override void FilterCubemapMIS(CommandBuffer cmd, Texture source, RenderTexture target, RenderTexture conditionalCdf, RenderTexture marginalRowCdf)
{
}
+
+ override public void FilterPlanarTexture(CommandBuffer cmd, RenderTexture source, ref PlanarTextureFilteringParameters planarTextureFilteringParameters, RenderTexture target)
+ {
+ m_MipGenerator.RenderColorGaussianPyramid(cmd, new Vector2Int(source.width, source.height), source, target);
+ }
}
}
diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/GGXConvolution/IBLFilterGGX.cs b/com.unity.render-pipelines.high-definition/Runtime/Material/GGXConvolution/IBLFilterGGX.cs
index 5ef402930e4..54dbc40af2b 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/Material/GGXConvolution/IBLFilterGGX.cs
+++ b/com.unity.render-pipelines.high-definition/Runtime/Material/GGXConvolution/IBLFilterGGX.cs
@@ -1,3 +1,5 @@
+using UnityEngine.Experimental.Rendering;
+
namespace UnityEngine.Rendering.HighDefinition
{
class IBLFilterGGX : IBLFilterBSDF
@@ -13,6 +15,19 @@ class IBLFilterGGX : IBLFilterBSDF
int m_ConditionalDensitiesKernel = -1;
int m_MarginalRowDensitiesKernel = -1;
+ // Planar reflection filtering
+ ComputeShader m_PlanarReflectionFilteringCS;
+ int m_PlanarReflectionDepthConversionKernel = -1;
+ int m_PlanarReflectionDownScaleKernel = -1;
+ int m_PlanarReflectionFilteringKernel = -1;
+ RTHandle m_PlanarReflectionFilterTex0;
+ RTHandle m_PlanarReflectionFilterTex1;
+ RTHandle m_PlanarReflectionFilterDepthTex0;
+ RTHandle m_PlanarReflectionFilterDepthTex1;
+ const int k_DefaultPlanarResolution = 512;
+ // Intermediate variables
+ Vector4 currentScreenSize = new Vector4(1.0f, 1.0f, 1.0f, 1.0f);
+
public IBLFilterGGX(RenderPipelineResources renderPipelineResources, MipGenerator mipGenerator)
{
m_RenderPipelineResources = renderPipelineResources;
@@ -58,6 +73,14 @@ public override void Initialize(CommandBuffer cmd)
InitializeGgxIblSampleData(cmd);
}
+ if (!m_PlanarReflectionFilteringCS)
+ {
+ m_PlanarReflectionFilteringCS = m_RenderPipelineResources.shaders.planarReflectionFilteringCS;
+ m_PlanarReflectionDepthConversionKernel = m_PlanarReflectionFilteringCS.FindKernel("DepthConversion");
+ m_PlanarReflectionDownScaleKernel = m_PlanarReflectionFilteringCS.FindKernel("DownScale");
+ m_PlanarReflectionFilteringKernel = m_PlanarReflectionFilteringCS.FindKernel("FilterPlanarReflection");
+ }
+
for (int i = 0; i < 6; ++i)
{
var lookAt = Matrix4x4.LookAt(Vector3.zero, CoreUtils.lookAtList[i], CoreUtils.upVectorList[i]);
@@ -75,6 +98,14 @@ public override void Cleanup()
{
CoreUtils.Destroy(m_convolveMaterial);
CoreUtils.Destroy(m_GgxIblSampleData);
+ RTHandles.Release(m_PlanarReflectionFilterTex0);
+ m_PlanarReflectionFilterTex0 = null;
+ RTHandles.Release(m_PlanarReflectionFilterTex1);
+ m_PlanarReflectionFilterTex1 = null;
+ RTHandles.Release(m_PlanarReflectionFilterDepthTex0);
+ m_PlanarReflectionFilterDepthTex0 = null;
+ RTHandles.Release(m_PlanarReflectionFilterDepthTex1);
+ m_PlanarReflectionFilterDepthTex1 = null;
}
void FilterCubemapCommon(CommandBuffer cmd,
@@ -153,9 +184,158 @@ override public void FilterCubemapMIS(CommandBuffer cmd,
FilterCubemapCommon(cmd, source, target, m_faceWorldToViewMatrixMatrices);
}
+
override public void FilterCubemap(CommandBuffer cmd, Texture source, RenderTexture target)
{
FilterCubemapCommon(cmd, source, target, m_faceWorldToViewMatrixMatrices);
}
+
+ void CheckIntermediateTexturesSize(int texWidth, int texHeight)
+ {
+ // If the first texture is missing or too small
+ if (m_PlanarReflectionFilterTex0 == null || m_PlanarReflectionFilterTex0.rt.width < texWidth)
+ {
+ // We re-allocate them all
+ RTHandles.Release(m_PlanarReflectionFilterTex0);
+ RTHandles.Release(m_PlanarReflectionFilterTex1);
+ RTHandles.Release(m_PlanarReflectionFilterDepthTex0);
+ RTHandles.Release(m_PlanarReflectionFilterDepthTex1);
+ m_PlanarReflectionFilterTex0 = RTHandles.Alloc(texWidth, texHeight, TextureXR.slices, colorFormat: GraphicsFormat.R16G16B16A16_SFloat, enableRandomWrite: true, useDynamicScale: false, useMipMap: true, name: "PlanarReflectionTextureIntermediate0");
+ m_PlanarReflectionFilterTex1 = RTHandles.Alloc(texWidth, texHeight, TextureXR.slices, colorFormat: GraphicsFormat.R16G16B16A16_SFloat, enableRandomWrite: true, useDynamicScale: false, useMipMap: false, name: "PlanarReflectionTextureIntermediate1");
+ m_PlanarReflectionFilterDepthTex0 = RTHandles.Alloc(texWidth, texHeight, TextureXR.slices, colorFormat: GraphicsFormat.R32_SFloat, enableRandomWrite: true, useDynamicScale: false, useMipMap: true, name: "PlanarReflectionTextureIntermediateDepth0");
+ m_PlanarReflectionFilterDepthTex1 = RTHandles.Alloc(texWidth, texHeight, TextureXR.slices, colorFormat: GraphicsFormat.R32_SFloat, enableRandomWrite: true, useDynamicScale: false, useMipMap: false, name: "PlanarReflectionTextureIntermediateDepth1");
+ }
+ }
+
+ void BuildColorAndDepthMipChain(CommandBuffer cmd, RenderTexture sourceColor, RenderTexture sourceDepth, ref PlanarTextureFilteringParameters planarTextureFilteringParameters)
+ {
+ int currentTexWidth = sourceColor.width;
+ int currentTexHeight = sourceColor.height;
+
+ // The first color level can be copied straight away in the mip chain, nothing special to be done
+ cmd.CopyTexture(sourceColor, 0, 0, 0, 0, sourceColor.width, sourceColor.height, m_PlanarReflectionFilterTex0, 0, 0, 0, 0);
+
+ // For depth it is a bit trickier, we want to convert the depth from oblique space to non-oblique space due to the poor interpolation properties of the oblique matrix
+ cmd.SetComputeVectorParam(m_PlanarReflectionFilteringCS, HDShaderIDs._CaptureCameraPositon, planarTextureFilteringParameters.captureCameraPosition);
+ cmd.SetComputeMatrixParam(m_PlanarReflectionFilteringCS, HDShaderIDs._CaptureCameraVP_NO, planarTextureFilteringParameters.captureCameraVP_NonOblique);
+ cmd.SetComputeMatrixParam(m_PlanarReflectionFilteringCS, HDShaderIDs._CaptureCameraIVP, planarTextureFilteringParameters.captureCameraIVP);
+ currentScreenSize.Set(currentTexWidth, currentTexHeight, 1.0f / currentTexWidth, 1.0f / currentTexHeight);
+ cmd.SetComputeVectorParam(m_PlanarReflectionFilteringCS, HDShaderIDs._CaptureCurrentScreenSize, currentScreenSize);
+ cmd.SetComputeFloatParam(m_PlanarReflectionFilteringCS, HDShaderIDs._CaptureCameraFarPlane, planarTextureFilteringParameters.captureFarPlane);
+
+ // Input textures
+ cmd.SetComputeTextureParam(m_PlanarReflectionFilteringCS, m_PlanarReflectionDepthConversionKernel, HDShaderIDs._DepthTextureOblique, sourceDepth);
+
+ // Output textures
+ cmd.SetComputeTextureParam(m_PlanarReflectionFilteringCS, m_PlanarReflectionDepthConversionKernel, HDShaderIDs._DepthTextureNonOblique, m_PlanarReflectionFilterDepthTex0);
+
+ // Compute the dispatch parameters and evaluate the new mip
+ int tileSize = 8;
+ int numTilesXHR = (currentTexWidth + (tileSize - 1)) / tileSize;
+ int numTilesYHR = (currentTexHeight + (tileSize - 1)) / tileSize;
+ cmd.DispatchCompute(m_PlanarReflectionFilteringCS, m_PlanarReflectionDepthConversionKernel, numTilesXHR, numTilesYHR, 1);
+
+ // Move to the next mip and build the chain
+ int currentMipSource = 0;
+ int texWidthHalf = sourceColor.width >> 1;
+ int texHeightHalf = sourceColor.height >> 1;
+
+ // Until we have a 2x2 texture, continue
+ while (texWidthHalf >= 2 && texHeightHalf >= 2)
+ {
+ // Constant inputs
+ cmd.SetComputeIntParam(m_PlanarReflectionFilteringCS, HDShaderIDs._SourceMipIndex, currentMipSource);
+
+ // Input textures
+ cmd.SetComputeTextureParam(m_PlanarReflectionFilteringCS, m_PlanarReflectionDownScaleKernel, HDShaderIDs._ReflectionColorMipChain, m_PlanarReflectionFilterTex0);
+ cmd.SetComputeTextureParam(m_PlanarReflectionFilteringCS, m_PlanarReflectionDownScaleKernel, HDShaderIDs._HalfResReflectionBuffer, m_PlanarReflectionFilterTex1);
+
+ // Output textures
+ cmd.SetComputeTextureParam(m_PlanarReflectionFilteringCS, m_PlanarReflectionDownScaleKernel, HDShaderIDs._DepthTextureMipChain, m_PlanarReflectionFilterDepthTex0);
+ cmd.SetComputeTextureParam(m_PlanarReflectionFilteringCS, m_PlanarReflectionDownScaleKernel, HDShaderIDs._HalfResDepthBuffer, m_PlanarReflectionFilterDepthTex1);
+ currentScreenSize.Set(currentTexWidth, currentTexHeight, 1.0f / currentTexWidth, 1.0f / currentTexHeight);
+ cmd.SetComputeVectorParam(m_PlanarReflectionFilteringCS, HDShaderIDs._CaptureCurrentScreenSize, currentScreenSize);
+
+ // Compute the dispatch parameters and evaluate the new mip
+ int numTilesXHRHalf = (texWidthHalf + (tileSize - 1)) / tileSize;
+ int numTilesYHRHalf = (texHeightHalf + (tileSize - 1)) / tileSize;
+ cmd.DispatchCompute(m_PlanarReflectionFilteringCS, m_PlanarReflectionDownScaleKernel, numTilesXHRHalf, numTilesYHRHalf, 1);
+
+ // Given that a compute kernel cannot read one mip and write another mip of the same texture, we have to go through an intermediate copy :(
+ cmd.CopyTexture(m_PlanarReflectionFilterTex1, 0, 0, 0, 0, texWidthHalf, texHeightHalf, m_PlanarReflectionFilterTex0, 0, currentMipSource + 1, 0, 0);
+ cmd.CopyTexture(m_PlanarReflectionFilterDepthTex1, 0, 0, 0, 0, texWidthHalf, texHeightHalf, m_PlanarReflectionFilterDepthTex0, 0, currentMipSource + 1, 0, 0);
+
+ // Update the parameters for the next mip
+ currentTexWidth = currentTexWidth >> 1;
+ currentTexHeight = currentTexHeight >> 1;
+ texWidthHalf = texWidthHalf >> 1;
+ texHeightHalf = texHeightHalf >> 1;
+ currentMipSource++;
+ }
+ }
+
+ override public void FilterPlanarTexture(CommandBuffer cmd, RenderTexture source, ref PlanarTextureFilteringParameters planarTextureFilteringParameters, RenderTexture target)
+ {
+ // First we need to make sure that our intermediate textures are big enough for the process (these textures are square)
+ CheckIntermediateTexturesSize(source.width, source.height);
+
+ // Then we need to build a mip chain (one for color, one for depth) that we will sample later on in the process
+ BuildColorAndDepthMipChain(cmd, source, planarTextureFilteringParameters.captureCameraDepthBuffer, ref planarTextureFilteringParameters);
+
+ // Init the mip descent
+ int texWidth = source.width;
+ int texHeight = source.height;
+
+ // First we copy mip 0 (which matches a perfectly smooth surface); there is no processing to be done on it
+ cmd.CopyTexture(m_PlanarReflectionFilterTex0, 0, 0, 0, 0, texWidth, texHeight, target, 0, 0, 0, 0);
+
+ // Initialize the parameters for the descent
+ int mipIndex = 1;
+ int tileSize = 8;
+ // Based on the initial texture resolution, the number of mips available for us to read from is variable; it is derived from the texture width
+ int numMipsChain = (int)(Mathf.Log((float)texWidth, 2.0f) - 1.0f);
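+ // e.g. for a 512x512 capture this gives log2(512) - 1 = 8 readable mips for the descent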
+ float rtScaleFactor = texWidth / (float)m_PlanarReflectionFilterTex0.rt.width;
+ texWidth = texWidth >> 1;
+ texHeight = texHeight >> 1;
+
+ // Loop until we have the right amount of mips
+ while (mipIndex < (int)EnvConstants.ConvolutionMipCount)
+ {
+ // Evaluate the dispatch parameters
+ int numTilesXHR = (texWidth + (tileSize - 1)) / tileSize;
+ int numTilesYHR = (texHeight + (tileSize - 1)) / tileSize;
+
+ // Set input textures
+ cmd.SetComputeTextureParam(m_PlanarReflectionFilteringCS, m_PlanarReflectionFilteringKernel, HDShaderIDs._DepthTextureMipChain, m_PlanarReflectionFilterDepthTex0);
+ cmd.SetComputeTextureParam(m_PlanarReflectionFilteringCS, m_PlanarReflectionFilteringKernel, HDShaderIDs._ReflectionColorMipChain, m_PlanarReflectionFilterTex0);
+
+ // Input constant parameters required
+ cmd.SetComputeVectorParam(m_PlanarReflectionFilteringCS, HDShaderIDs._CaptureBaseScreenSize, planarTextureFilteringParameters.captureCameraScreenSize);
+ currentScreenSize.Set(texWidth, texHeight, 1.0f / texWidth, 1.0f / texHeight);
+ cmd.SetComputeVectorParam(m_PlanarReflectionFilteringCS, HDShaderIDs._CaptureCurrentScreenSize, currentScreenSize);
+ cmd.SetComputeIntParam(m_PlanarReflectionFilteringCS, HDShaderIDs._SourceMipIndex, mipIndex);
+ cmd.SetComputeIntParam(m_PlanarReflectionFilteringCS, HDShaderIDs._MaxMipLevels, numMipsChain);
+ cmd.SetComputeFloatParam(m_PlanarReflectionFilteringCS, HDShaderIDs._RTScaleFactor, rtScaleFactor);
+ cmd.SetComputeVectorParam(m_PlanarReflectionFilteringCS, HDShaderIDs._ReflectionPlaneNormal, planarTextureFilteringParameters.probeNormal);
+ cmd.SetComputeVectorParam(m_PlanarReflectionFilteringCS, HDShaderIDs._ReflectionPlanePosition, planarTextureFilteringParameters.probePosition);
+ cmd.SetComputeVectorParam(m_PlanarReflectionFilteringCS, HDShaderIDs._CaptureCameraPositon, planarTextureFilteringParameters.captureCameraPosition);
+ cmd.SetComputeMatrixParam(m_PlanarReflectionFilteringCS, HDShaderIDs._CaptureCameraIVP_NO, planarTextureFilteringParameters.captureCameraIVP_NonOblique);
+ cmd.SetComputeFloatParam(m_PlanarReflectionFilteringCS, HDShaderIDs._CaptureCameraFOV, planarTextureFilteringParameters.captureFOV * Mathf.PI / 180.0f);
+
+ // Set output textures
+ cmd.SetComputeTextureParam(m_PlanarReflectionFilteringCS, m_PlanarReflectionFilteringKernel, HDShaderIDs._FilteredPlanarReflectionBuffer, m_PlanarReflectionFilterTex1);
+
+ // Evaluate the next convolution
+ cmd.DispatchCompute(m_PlanarReflectionFilteringCS, m_PlanarReflectionFilteringKernel, numTilesXHR, numTilesYHR, 1);
+
+ // Copy the convoluted texture into the next mip and move on
+ cmd.CopyTexture(m_PlanarReflectionFilterTex1, 0, 0, 0, 0, texWidth, texHeight, target, 0, mipIndex, 0, 0);
+
+ // Move to the next mip
+ texWidth = texWidth >> 1;
+ texHeight = texHeight >> 1;
+ mipIndex++;
+ }
+ }
}
}
diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/Hair/Hair.hlsl b/com.unity.render-pipelines.high-definition/Runtime/Material/Hair/Hair.hlsl
index a8cfd8479c9..ac0e17be16b 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/Material/Hair/Hair.hlsl
+++ b/com.unity.render-pipelines.high-definition/Runtime/Material/Hair/Hair.hlsl
@@ -555,20 +555,7 @@ IndirectLighting EvaluateBSDF_Env( LightLoopContext lightLoopContext,
// Note: using influenceShapeType and projectionShapeType instead of (lightData|proxyData).shapeType allow to make compiler optimization in case the type is know (like for sky)
EvaluateLight_EnvIntersection(positionWS, bsdfData.normalWS, lightData, influenceShapeType, R, weight);
- float iblMipLevel;
- // TODO: We need to match the PerceptualRoughnessToMipmapLevel formula for planar, so we don't do this test (which is specific to our current lightloop)
- // Specific case for Texture2Ds, their convolution is a gaussian one and not a GGX one - So we use another roughness mip mapping.
- if (IsEnvIndexTexture2D(lightData.envIndex))
- {
- // Empirical remapping
- iblMipLevel = PositivePow(preLightData.iblPerceptualRoughness, 0.8) * uint(max(_ColorPyramidLodCount - 1, 0));
- }
- else
- {
- iblMipLevel = PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness);
- }
-
- float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, R, iblMipLevel, lightData.rangeCompressionFactorCompensation);
+ float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, R, PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness), lightData.rangeCompressionFactorCompensation);
weight *= preLD.a; // Used by planar reflection to discard pixel
envLighting = preLightData.specularFGD * preLD.rgb;
diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/IBLFilterBSDF.cs b/com.unity.render-pipelines.high-definition/Runtime/Material/IBLFilterBSDF.cs
index 990459e6eba..7198cb15810 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/Material/IBLFilterBSDF.cs
+++ b/com.unity.render-pipelines.high-definition/Runtime/Material/IBLFilterBSDF.cs
@@ -19,11 +19,33 @@ abstract class IBLFilterBSDF
// Filters MIP map levels (other than 0) with GGX using BRDF importance sampling.
abstract public void FilterCubemap(CommandBuffer cmd, Texture source, RenderTexture target);
- public void FilterPlanarTexture(CommandBuffer cmd, RenderTexture source, RenderTexture target)
+ internal struct PlanarTextureFilteringParameters
{
- m_MipGenerator.RenderColorGaussianPyramid(cmd, new Vector2Int(source.width, source.height), source, target);
- }
+ // Depth buffer (oblique) that was produced
+ public RenderTexture captureCameraDepthBuffer;
+ // Inverse view projection matrix (oblique)
+ public Matrix4x4 captureCameraIVP;
+ // View projection matrix (non oblique)
+ public Matrix4x4 captureCameraVP_NonOblique;
+ // Inverse view projection matrix (non oblique)
+ public Matrix4x4 captureCameraIVP_NonOblique;
+ // Position of the capture camera
+ public Vector3 captureCameraPosition;
+ // Resolution of the capture camera
+ public Vector4 captureCameraScreenSize;
+ // Position of the probe
+ public Vector3 probePosition;
+ // Normal of the reflection probe
+ public Vector3 probeNormal;
+ // FOV of the capture camera
+ public float captureFOV;
+ // Near clipping plane of the capture camera
+ public float captureNearPlane;
+ // Far clipping plane of the capture camera
+ public float captureFarPlane;
+ };
+ abstract public void FilterPlanarTexture(CommandBuffer cmd, RenderTexture source, ref PlanarTextureFilteringParameters planarTextureFilteringParameters, RenderTexture target);
public abstract void FilterCubemapMIS(CommandBuffer cmd, Texture source, RenderTexture target, RenderTexture conditionalCdf, RenderTexture marginalRowCdf);
}
}
diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/Lit/Lit.hlsl b/com.unity.render-pipelines.high-definition/Runtime/Material/Lit/Lit.hlsl
index 629a99a22b7..89b05cde86c 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/Material/Lit/Lit.hlsl
+++ b/com.unity.render-pipelines.high-definition/Runtime/Material/Lit/Lit.hlsl
@@ -1925,20 +1925,7 @@ IndirectLighting EvaluateBSDF_Env( LightLoopContext lightLoopContext,
float3 F = preLightData.specularFGD;
- float iblMipLevel;
- // TODO: We need to match the PerceptualRoughnessToMipmapLevel formula for planar, so we don't do this test (which is specific to our current lightloop)
- // Specific case for Texture2Ds, their convolution is a gaussian one and not a GGX one - So we use another roughness mip mapping.
- if (IsEnvIndexTexture2D(lightData.envIndex))
- {
- // Empirical remapping
- iblMipLevel = PlanarPerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness, _ColorPyramidLodCount);
- }
- else
- {
- iblMipLevel = PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness);
- }
-
- float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, R, iblMipLevel, lightData.rangeCompressionFactorCompensation);
+ float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, R, PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness), lightData.rangeCompressionFactorCompensation);
weight *= preLD.a; // Used by planar reflection to discard pixel
if (GPUImageBasedLightingType == GPUIMAGEBASEDLIGHTINGTYPE_REFLECTION)
diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/Lit/SimpleLit.hlsl b/com.unity.render-pipelines.high-definition/Runtime/Material/Lit/SimpleLit.hlsl
index 555893a975e..9144a53b969 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/Material/Lit/SimpleLit.hlsl
+++ b/com.unity.render-pipelines.high-definition/Runtime/Material/Lit/SimpleLit.hlsl
@@ -417,20 +417,7 @@ IndirectLighting EvaluateBSDF_Env( LightLoopContext lightLoopContext,
EvaluateLight_EnvIntersection(posInput.positionWS, bsdfData.normalWS, lightData, influenceShapeType, R, weight);
- float iblMipLevel;
- // TODO: We need to match the PerceptualRoughnessToMipmapLevel formula for planar, so we don't do this test (which is specific to our current lightloop)
- // Specific case for Texture2Ds, their convolution is a gaussian one and not a GGX one - So we use another roughness mip mapping.
- if (IsEnvIndexTexture2D(lightData.envIndex))
- {
- // Empirical remapping
- iblMipLevel = PlanarPerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness, _ColorPyramidLodCount);
- }
- else
- {
- iblMipLevel = PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness);
- }
-
- float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, R, iblMipLevel, lightData.rangeCompressionFactorCompensation);
+ float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, R, PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness), lightData.rangeCompressionFactorCompensation);
weight *= preLD.a; // Used by planar reflection to discard pixel
envLighting = F_Schlick(bsdfData.fresnel0, dot(bsdfData.normalWS, V)) * preLD.rgb;
diff --git a/com.unity.render-pipelines.high-definition/Runtime/Material/StackLit/StackLit.hlsl b/com.unity.render-pipelines.high-definition/Runtime/Material/StackLit/StackLit.hlsl
index 6421ead5411..07e07bb258d 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/Material/StackLit/StackLit.hlsl
+++ b/com.unity.render-pipelines.high-definition/Runtime/Material/StackLit/StackLit.hlsl
@@ -4377,20 +4377,8 @@ IndirectLighting EvaluateBSDF_Env( LightLoopContext lightLoopContext,
EvaluateLight_EnvIntersection(positionWS, normal, lightData, influenceShapeType, R[i], tempWeight[i]);
- float iblMipLevel;
- // TODO: We need to match the PerceptualRoughnessToMipmapLevel formula for planar, so we don't do this test (which is specific to our current lightloop)
- // Specific case for Texture2Ds, their convolution is a gaussian one and not a GGX one - So we use another roughness mip mapping.
- if (IsEnvIndexTexture2D(lightData.envIndex))
- {
- // Empirical remapping
- iblMipLevel = PlanarPerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness[i], _ColorPyramidLodCount);
- }
- else
- {
- iblMipLevel = PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness[i]);
- }
- float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, R[i], iblMipLevel, lightData.rangeCompressionFactorCompensation);
+ float4 preLD = SampleEnv(lightLoopContext, lightData.envIndex, R[i], PerceptualRoughnessToMipmapLevel(preLightData.iblPerceptualRoughness[i]), lightData.rangeCompressionFactorCompensation);
// Used by planar reflection to discard pixel:
tempWeight[i] *= preLD.a;
diff --git a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/HDRenderPipeline.cs b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/HDRenderPipeline.cs
index a4f08a4cc44..7b12b5d5155 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/HDRenderPipeline.cs
+++ b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/HDRenderPipeline.cs
@@ -1403,6 +1403,8 @@ public struct Target
public RenderTargetIdentifier id;
public CubemapFace face;
public RenderTexture copyToTarget;
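+ // Optional depth target; when set, the camera depth buffer is blitted into it after the final color blit.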
+ public RenderTexture targetDepth;
}
public HDCamera hdCamera;
public bool clearCameraSettings;
@@ -1806,6 +1807,11 @@ ref List renderDatas
{
visibleProbe.SetTexture(ProbeSettings.Mode.Realtime, HDRenderUtilities.CreatePlanarProbeRenderTarget(desiredPlanarProbeSize));
}
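+ // Keep a matching depth target allocated for the realtime probe, resizing it with the color target.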
+ if (visibleProbe.realtimeDepthTexture == null || visibleProbe.realtimeDepthTexture.width != desiredPlanarProbeSize)
+ {
+ visibleProbe.SetDepthTexture(ProbeSettings.Mode.Realtime, HDRenderUtilities.CreatePlanarProbeDepthRenderTarget(desiredPlanarProbeSize));
+ }
// Set the viewer's camera as the default camera anchor
for (var i = 0; i < cameraSettings.Count; ++i)
{
@@ -1936,6 +1941,7 @@ ref _cullingResults
request.target = new RenderRequest.Target
{
id = visibleProbe.realtimeTexture,
+ targetDepth = visibleProbe.realtimeDepthTexture,
face = CubemapFace.Unknown
};
}
@@ -2818,11 +2824,16 @@ void Callback(CommandBuffer c, HDCamera cam)
using (new ProfilingScope(cmd, ProfilingSampler.Get(HDProfileId.BlitToFinalRTDevBuildOnly)))
{
- for (int viewIndex = 0; viewIndex < hdCamera.viewCount; ++viewIndex)
- {
- var finalBlitParams = PrepareFinalBlitParameters(hdCamera, viewIndex);
- BlitFinalCameraTexture(finalBlitParams, m_BlitPropertyBlock, m_IntermediateAfterPostProcessBuffer, target.id, cmd);
- }
+ for (int viewIndex = 0; viewIndex < hdCamera.viewCount; ++viewIndex)
+ {
+ var finalBlitParams = PrepareFinalBlitParameters(hdCamera, viewIndex);
+ BlitFinalCameraTexture(finalBlitParams, m_BlitPropertyBlock, m_IntermediateAfterPostProcessBuffer, target.id, cmd);
+
+ // If a depth target is specified, fill it
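+ // (reusing the same per-view blit parameters, with the shared camera depth texture as the source)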
+ if (target.targetDepth != null)
+ BlitFinalCameraTexture(finalBlitParams, m_BlitPropertyBlock, m_SharedRTManager.GetDepthTexture(), target.targetDepth, cmd);
+ }
}
aovRequest.PushCameraTexture(cmd, AOVBuffers.Output, hdCamera, m_IntermediateAfterPostProcessBuffer, aovBuffers);
diff --git a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/HDStringConstants.cs b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/HDStringConstants.cs
index b8c4858fec1..a7a11ebbf88 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/HDStringConstants.cs
+++ b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/HDStringConstants.cs
@@ -252,6 +252,29 @@ static class HDShaderIDs
public static readonly int _CameraFilteringBuffer = Shader.PropertyToID("_CameraFilteringTexture");
public static readonly int _IrradianceSource = Shader.PropertyToID("_IrradianceSource");
+ // Planar reflection filtering
+ public static readonly int _ReflectionColorMipChain = Shader.PropertyToID("_ReflectionColorMipChain");
+ public static readonly int _DepthTextureMipChain = Shader.PropertyToID("_DepthTextureMipChain");
+ public static readonly int _ReflectionPlaneNormal = Shader.PropertyToID("_ReflectionPlaneNormal");
+ public static readonly int _ReflectionPlanePosition = Shader.PropertyToID("_ReflectionPlanePosition");
+ public static readonly int _FilteredPlanarReflectionBuffer = Shader.PropertyToID("_FilteredPlanarReflectionBuffer");
+ public static readonly int _HalfResReflectionBuffer = Shader.PropertyToID("_HalfResReflectionBuffer");
+ public static readonly int _HalfResDepthBuffer = Shader.PropertyToID("_HalfResDepthBuffer");
+ public static readonly int _CaptureBaseScreenSize = Shader.PropertyToID("_CaptureBaseScreenSize");
+ public static readonly int _CaptureCurrentScreenSize = Shader.PropertyToID("_CaptureCurrentScreenSize");
+ public static readonly int _CaptureCameraIVP = Shader.PropertyToID("_CaptureCameraIVP");
+ public static readonly int _CaptureCameraPositon = Shader.PropertyToID("_CaptureCameraPositon");
+ public static readonly int _SourceMipIndex = Shader.PropertyToID("_SourceMipIndex");
+ public static readonly int _MaxMipLevels = Shader.PropertyToID("_MaxMipLevels");
+ public static readonly int _ThetaValuesTexture = Shader.PropertyToID("_ThetaValuesTexture");
+ public static readonly int _CaptureCameraFOV = Shader.PropertyToID("_CaptureCameraFOV");
+ public static readonly int _RTScaleFactor = Shader.PropertyToID("_RTScaleFactor");
+ public static readonly int _CaptureCameraVP_NO = Shader.PropertyToID("_CaptureCameraVP_NO");
+ public static readonly int _CaptureCameraFarPlane = Shader.PropertyToID("_CaptureCameraFarPlane");
+ public static readonly int _DepthTextureOblique = Shader.PropertyToID("_DepthTextureOblique");
+ public static readonly int _DepthTextureNonOblique = Shader.PropertyToID("_DepthTextureNonOblique");
+ public static readonly int _CaptureCameraIVP_NO = Shader.PropertyToID("_CaptureCameraIVP_NO");
+
// MSAA shader properties
public static readonly int _ColorTextureMS = Shader.PropertyToID("_ColorTextureMS");
public static readonly int _DepthTextureMS = Shader.PropertyToID("_DepthTextureMS");
diff --git a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/RenderPipelineResources.cs b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/RenderPipelineResources.cs
index f0c7d767cca..87b1b915415 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/RenderPipelineResources.cs
+++ b/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/RenderPipelineResources.cs
@@ -145,7 +145,8 @@ public sealed class ShaderResources
public ComputeShader inScatteredRadiancePrecomputationCS;
[Reload("Runtime/Sky/PhysicallyBasedSky/PhysicallyBasedSky.shader")]
public Shader physicallyBasedSkyPS;
-
+ [Reload("Runtime/Lighting/PlanarReflectionFiltering.compute")]
+ public ComputeShader planarReflectionFilteringCS;
// Material
[Reload("Runtime/Material/PreIntegratedFGD/PreIntegratedFGD_GGXDisneyDiffuse.shader")]
public Shader preIntegratedFGD_GGXDisneyDiffusePS;
diff --git a/com.unity.render-pipelines.high-definition/Runtime/RenderPipelineResources/HDRenderPipelineResources.asset b/com.unity.render-pipelines.high-definition/Runtime/RenderPipelineResources/HDRenderPipelineResources.asset
index 4ebc10b45cf..e8f5c832006 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/RenderPipelineResources/HDRenderPipelineResources.asset
+++ b/com.unity.render-pipelines.high-definition/Runtime/RenderPipelineResources/HDRenderPipelineResources.asset
@@ -108,6 +108,8 @@ MonoBehaviour:
type: 3}
physicallyBasedSkyPS: {fileID: 4800000, guid: a06934a4863e778498be65d8f865b7a4,
type: 3}
+ planarReflectionFilteringCS: {fileID: 7200000, guid: 9f3f8a01b8caaaa4595591dc96d43dd2,
+ type: 3}
preIntegratedFGD_GGXDisneyDiffusePS: {fileID: 4800000, guid: 123f13d52852ef547b2962de4bd9eaad,
type: 3}
preIntegratedFGD_CharlieFabricLambertPS: {fileID: 4800000, guid: 3b3bf235775cf8b4baae7f3306787ab0,
@@ -309,6 +311,7 @@ MonoBehaviour:
SMAASearchTex: {fileID: 2800000, guid: dc95d70472e232b438d0fd38651e7ec2, type: 3}
SMAAAreaTex: {fileID: 2800000, guid: 92e0d85ab4eca874098e7fcf6f8f674e, type: 3}
defaultHDRISky: {fileID: 8900000, guid: 8253d41e6e8b11a4cbe77a4f8f82934d, type: 3}
+ ggxConeAngle70pc: {fileID: 2800000, guid: 794081635e7e0fe46b6c3fa0afa70d87, type: 3}
assets:
defaultDiffusionProfile: {fileID: 11400000, guid: 2b7005ba3a4d8474b8cdc34141ad766e,
type: 2}
diff --git a/com.unity.render-pipelines.high-definition/Runtime/Utilities/HDRenderUtilities.cs b/com.unity.render-pipelines.high-definition/Runtime/Utilities/HDRenderUtilities.cs
index 1cfa26a673e..c6c19aaaa1e 100644
--- a/com.unity.render-pipelines.high-definition/Runtime/Utilities/HDRenderUtilities.cs
+++ b/com.unity.render-pipelines.high-definition/Runtime/Utilities/HDRenderUtilities.cs
@@ -365,6 +365,22 @@ public static RenderTexture CreatePlanarProbeRenderTarget(int planarSize)
};
}
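+ /// <summary>
+ /// Create the depth texture target for a realtime planar reflection probe.
+ /// </summary>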
+ public static RenderTexture CreatePlanarProbeDepthRenderTarget(int planarSize)
+ {
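+ // Single-channel 32-bit float target. Random write is enabled so a compute pass can fill it,
+ // and the mip chain is allocated up front but generated manually (autoGenerateMips is false).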
+ return new RenderTexture(planarSize, planarSize, 1, GraphicsFormat.R32_SFloat)
+ {
+ dimension = TextureDimension.Tex2D,
+ enableRandomWrite = true,
+ useMipMap = true,
+ autoGenerateMips = false
+ };
+ }
+
/// <summary>
/// Create the texture target for a baked reflection probe.
/// </summary>