Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions com.unity.render-pipelines.core/CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.

### Fixed
- Fixed ACES tonemapping for Nintendo Switch by forcing some shader color conversion functions to full float precision.
- Fixed a bug in FreeCamera which would only provide a speed boost for the first frame when pressing the Shift key.

## [10.2.0] - 2020-10-19

Expand Down
8 changes: 3 additions & 5 deletions com.unity.render-pipelines.core/Runtime/Camera/FreeCamera.cs
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,7 @@ void UpdateInputs()
inputRotateAxisX += (Input.GetAxis(kRightStickX) * m_LookSpeedController * Time.deltaTime);
inputRotateAxisY += (Input.GetAxis(kRightStickY) * m_LookSpeedController * Time.deltaTime);

leftShift = Input.GetKeyDown(KeyCode.LeftShift);
leftShift = Input.GetKey(KeyCode.LeftShift);
fire1 = Input.GetAxis("Fire1") > 0.0f;

inputChangeSpeed = Input.GetAxis(kSpeedAxis);
Expand Down Expand Up @@ -192,10 +192,8 @@ void Update()
transform.localRotation = Quaternion.Euler(newRotationX, newRotationY, transform.localEulerAngles.z);

float moveSpeed = Time.deltaTime * m_MoveSpeed;
if (leftShiftBoost)
moveSpeed *= leftShift ? m_Turbo : 1.0f;
else
moveSpeed *= fire1 ? m_Turbo : 1.0f;
if (fire1 || leftShiftBoost && leftShift)
moveSpeed *= m_Turbo;
transform.position += transform.forward * moveSpeed * inputVertical;
transform.position += transform.right * moveSpeed * inputHorizontal;
transform.position += Vector3.up * moveSpeed * inputYAxis;
Expand Down
2 changes: 2 additions & 0 deletions com.unity.render-pipelines.high-definition/CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- Fixed computation of geometric normal in path tracing (case 1293029).
- Fixed issues with path-traced volumetric scattering (cases 1295222, 1295234).
- Fixed the default background color for previews to use the original color.
- Fixed an issue with half res ssgi upscale.

### Changed
- Removed the material pass probe volumes evaluation mode.
Expand All @@ -43,6 +44,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- Removed the upscale radius from the RTR.
- Density Volumes can now take a 3D RenderTexture as mask, the mask can use RGBA format for RGB fog.
- Decreased the minimal Fog Distance value in the Density Volume to 0.05.
- Changed the convergence time of SSGI to 16 frames and the preset value.

## [10.3.0] - 2020-12-01

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ bool UsesQualityMode()
/// The thickness of the depth buffer value used for the ray marching step
/// </summary>
[Tooltip("Controls the thickness of the depth buffer used for ray marching.")]
public ClampedFloatParameter depthBufferThickness = new ClampedFloatParameter(0.01f, 0, 1.0f);
public ClampedFloatParameter depthBufferThickness = new ClampedFloatParameter(0.2f, 0.0f, 0.5f);

GlobalIllumination()
{
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -319,14 +319,10 @@ internal class LightLoopTextureCaches
public List<Vector4> env2DCaptureForward { get; private set; }
public List<Vector4> env2DAtlasScaleOffset {get; private set; } = new List<Vector4>();

Material m_CubeToPanoMaterial;

public void Initialize(HDRenderPipelineAsset hdrpAsset, RenderPipelineResources defaultResources, IBLFilterBSDF[] iBLFilterBSDFArray)
{
var lightLoopSettings = hdrpAsset.currentPlatformRenderPipelineSettings.lightLoopSettings;

m_CubeToPanoMaterial = CoreUtils.CreateEngineMaterial(defaultResources.shaders.cubeToPanoPS);

lightCookieManager = new LightCookieManager(hdrpAsset, k_MaxCacheSize);

env2DCaptureVP = new List<Matrix4x4>();
Expand Down Expand Up @@ -365,8 +361,6 @@ public void Cleanup()
reflectionProbeCache.Release();
reflectionPlanarProbeCache.Release();
lightCookieManager.Release();

CoreUtils.Destroy(m_CubeToPanoMaterial);
}

public void NewFrame()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -121,7 +121,7 @@ class ConvertSSGIPassData

TextureHandle ConvertSSGI(RenderGraph renderGraph, HDCamera hdCamera, bool halfResolution, TextureHandle depthPyramid, TextureHandle stencilBuffer, TextureHandle normalBuffer, TextureHandle inoutputBuffer0, TextureHandle inoutputBuffer1)
{
using (var builder = renderGraph.AddRenderPass<ConvertSSGIPassData>("Upscale SSGI", out var passData, ProfilingSampler.Get(HDProfileId.SSGIUpscale)))
using (var builder = renderGraph.AddRenderPass<ConvertSSGIPassData>("Upscale SSGI", out var passData, ProfilingSampler.Get(HDProfileId.SSGIConvert)))
{
builder.EnableAsyncCompute(false);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,8 @@
// Epsilon value used for the computation
#define GI_TRACE_EPS 0.00024414

#define PERCEPTUAL_SPACE

// Input depth pyramid texture
TEXTURE2D_X(_DepthTexture);
// Input texture that holds the offset for every level of the depth pyramid
Expand All @@ -42,6 +44,7 @@ CBUFFER_START(UnityScreenSpaceGlobalIllumination)
int _IndirectDiffuseProbeFallbackBias;
float4 _ColorPyramidUvScaleAndLimitPrevFrame;
int _SsrStencilBit;
int _IndirectDiffuseFrameIndex;
CBUFFER_END

// Output texture that holds the hit point NDC coordinates
Expand All @@ -62,7 +65,7 @@ bool RayMarch(float3 positionWS, float3 sampleDir, float3 normalWS, float2 posit
// We start tracing from the center of the current pixel, and do so up to the far plane.
float3 rayOrigin = float3(positionSS + 0.5, deviceDepth);

float3 sampledPosWS = positionWS + sampleDir * 0.01;
float3 sampledPosWS = positionWS + sampleDir * 0.001;
float3 sampledPosNDC = ComputeNormalizedDeviceCoordinatesWithZ(sampledPosWS, UNITY_MATRIX_VP); // Jittered
float3 sampledPosSS = float3(sampledPosNDC.xy * _ScreenSize.xy, sampledPosNDC.z);

Expand Down Expand Up @@ -206,27 +209,20 @@ void TRACE_GLOBAL_ILLUMINATION(uint3 dispatchThreadId : SV_DispatchThreadID, uin
uint2 currentCoord = dispatchThreadId.xy;

#if HALF_RES
// Fetch the mipoffset for the second mip (half res)
int2 mipOffset = _DepthPyramidMipLevelOffsets[1];
// Grab the depth of the half res pixel (better than grabbing a random one across the 4 candidates or averaging)
float deviceDepth = LOAD_TEXTURE2D_X(_DepthTexture, mipOffset + currentCoord).x;
// Compute the full resolution pixel for the inputs that do not have a pyramid
uint2 fullResCoord = currentCoord * 2;
#else
currentCoord = currentCoord * 2;
#endif

// Read the depth value as early as possible
float deviceDepth = LOAD_TEXTURE2D_X(_DepthTexture, currentCoord).x;
#endif

// Initialize the hitpoint texture to a miss
_IndirectDiffuseHitPointTextureRW[COORD_TEXTURE2D_X(currentCoord)] = float2(99.0, 0.0);
_IndirectDiffuseHitPointTextureRW[COORD_TEXTURE2D_X(dispatchThreadId.xy)] = float2(99.0, 0.0);

// Read the pixel normal
NormalData normalData;
#if HALF_RES
// For half resolution, we take the top left corner (0, 0) in the upscaled 2x2 pixel neighborhood
DecodeFromNormalBuffer(fullResCoord.xy, normalData);
#else
DecodeFromNormalBuffer(currentCoord.xy, normalData);
#endif

// Generate a new direction to follow
float2 newSample;
newSample.x = GetBNDSequenceSample(currentCoord.xy, _RaytracingFrameIndex, 0);
Expand All @@ -241,11 +237,7 @@ void TRACE_GLOBAL_ILLUMINATION(uint3 dispatchThreadId : SV_DispatchThreadID, uin
// If this is a background pixel, we flag the ray as a dead ray (we are also trying to keep the usage of the depth buffer the latest possible)
bool killRay = deviceDepth == UNITY_RAW_FAR_CLIP_VALUE;
// Convert this to a world space position (camera relative)
#if HALF_RES
PositionInputs posInput = GetPositionInput(fullResCoord, _ScreenSize.zw, deviceDepth, UNITY_MATRIX_I_VP, GetWorldToViewMatrix(), 0);
#else
PositionInputs posInput = GetPositionInput(currentCoord, _ScreenSize.zw, deviceDepth, UNITY_MATRIX_I_VP, GetWorldToViewMatrix(), 0);
#endif

// Compute the view direction (world space)
float3 viewWS = GetWorldSpaceNormalizeViewDir(posInput.positionWS);
Expand All @@ -266,7 +258,7 @@ void TRACE_GLOBAL_ILLUMINATION(uint3 dispatchThreadId : SV_DispatchThreadID, uin
// recompute it using the last value of 't', which would result in an overshoot.
// It also needs to be precisely at the center of the pixel to avoid artifacts.
float2 hitPositionNDC = floor(rayPos.xy) * _ScreenSize.zw + (0.5 * _ScreenSize.zw); // Should we precompute the half-texel bias? We seem to use it a lot.
_IndirectDiffuseHitPointTextureRW[COORD_TEXTURE2D_X(currentCoord)] = hitPositionNDC;
_IndirectDiffuseHitPointTextureRW[COORD_TEXTURE2D_X(dispatchThreadId.xy)] = hitPositionNDC;
}
}

Expand Down Expand Up @@ -310,19 +302,14 @@ void REPROJECT_GLOBAL_ILLUMINATION(uint3 dispatchThreadId : SV_DispatchThreadID,
uint2 currentCoord = groupId * INDIRECT_DIFFUSE_TILE_SIZE + groupThreadId;

#if HALF_RES
// Fetch the mipoffset for the second mip (given that we are in half res)
int2 mipOffset = _DepthPyramidMipLevelOffsets[1];
float deviceDepth = LOAD_TEXTURE2D_X(_DepthTexture, mipOffset + currentCoord).x;

// Compute the full resolution pixel for the inputs that do not have a pyramid
uint2 fullResCoord = currentCoord * 2;
#else
// Fetch the depth of the current pixel
float deviceDepth = LOAD_TEXTURE2D_X(_DepthTexture, currentCoord).x;
currentCoord = currentCoord * 2;
#endif

float deviceDepth = LOAD_TEXTURE2D_X(_DepthTexture, currentCoord).x;

// Read the hit point ndc position to fetch
float2 hitPositionNDC = LOAD_TEXTURE2D_X(_IndirectDiffuseHitPointTexture, currentCoord).xy;
float2 hitPositionNDC = LOAD_TEXTURE2D_X(_IndirectDiffuseHitPointTexture, dispatchThreadId.xy).xy;

// Grab the depth of the hit point
float hitPointDepth = LOAD_TEXTURE2D_X(_DepthTexture, hitPositionNDC * _ScreenSize.xy).x;
Expand All @@ -336,7 +323,7 @@ void REPROJECT_GLOBAL_ILLUMINATION(uint3 dispatchThreadId : SV_DispatchThreadID,

// Fetch the motion vector of the current target pixel
float2 motionVectorNDC;
DecodeMotionVector(SAMPLE_TEXTURE2D_X_LOD(_CameraMotionVectorsTexture, s_linear_clamp_sampler, min(hitPositionNDC, 1.0f - 0.5f * _ScreenSize.zw) * _RTHandleScale.xy, 0), motionVectorNDC);
DecodeMotionVector(SAMPLE_TEXTURE2D_X_LOD(_CameraMotionVectorsTexture, s_linear_clamp_sampler, hitPositionNDC, 0), motionVectorNDC);

float2 prevFrameNDC = hitPositionNDC - motionVectorNDC;
float2 prevFrameUV = prevFrameNDC * _ColorPyramidUvScaleAndLimitPrevFrame.xy;
Expand Down Expand Up @@ -366,20 +353,17 @@ void REPROJECT_GLOBAL_ILLUMINATION(uint3 dispatchThreadId : SV_DispatchThreadID,

// We need to recreate the direction that was generated
float2 newSample;
newSample.x = GetBNDSequenceSample(currentCoord.xy, _RaytracingFrameIndex, 0);
newSample.y = GetBNDSequenceSample(currentCoord.xy, _RaytracingFrameIndex, 1);
newSample.x = GetBNDSequenceSample(currentCoord.xy, _IndirectDiffuseFrameIndex, 0);
newSample.y = GetBNDSequenceSample(currentCoord.xy, _IndirectDiffuseFrameIndex, 1);

// Read the pixel normal
NormalData normalData;
#if HALF_RES
// For half resolution, we take the top left corner (0, 0) in the upscaled 2x2 pixel neighborhood
DecodeFromNormalBuffer(fullResCoord.xy, normalData);
#else
DecodeFromNormalBuffer(currentCoord.xy, normalData);
#endif

#ifdef PERCEPTUAL_SPACE
// We tone map the signal. Due to the very small budget for denoising, we need to compress the range of the signal
color = color / (1.0 + color);
#endif

// Re-compute the direction that was used to do the generation
float3 sampleDir = SampleHemisphereCosine(newSample.x, newSample.y, normalData.normalWS);
Expand All @@ -392,8 +376,8 @@ void REPROJECT_GLOBAL_ILLUMINATION(uint3 dispatchThreadId : SV_DispatchThreadID,
// We are simply interested to know if the intersected pixel was moving, so we multiply it by a big number
// TODO: make this process not binary
// Write the output to the target pixel
_IndirectDiffuseTexture0RW[COORD_TEXTURE2D_X(currentCoord)] = float4(outYSH);
_IndirectDiffuseTexture1RW[COORD_TEXTURE2D_X(currentCoord)] = float4(outCoCg, invalid ? 0.0 : 1.0, length(motionVectorNDC * 10000.0f));
_IndirectDiffuseTexture0RW[COORD_TEXTURE2D_X(dispatchThreadId.xy)] = float4(outYSH);
_IndirectDiffuseTexture1RW[COORD_TEXTURE2D_X(dispatchThreadId.xy)] = float4(outCoCg, invalid ? 0.0 : 1.0, length(motionVectorNDC * 10000.0f));
}

void ConvertYCoCgToRGBUtil(float4 inYSH, float2 inCoCg, float3 inNormal, out float3 outColor)
Expand Down Expand Up @@ -423,54 +407,47 @@ void CONVERT_YCOCG_TO_RGB(uint3 dispatchThreadId : SV_DispatchThreadID, uint2 gr
UNITY_XR_ASSIGN_VIEW_INDEX(dispatchThreadId.z);

// Fetch the current pixel coordinate
uint2 currentCoord = groupId * INDIRECT_DIFFUSE_TILE_SIZE + groupThreadId;
uint2 currentCoord = dispatchThreadId.xy;

// If the depth of this pixel is the depth of the background, we can end the process right away
#if HALF_RES
// Fetch the mipoffset for the second mip (given that we are in half res)
int2 mipOffset = _DepthPyramidMipLevelOffsets[1];
float deviceDepth = LOAD_TEXTURE2D_X(_DepthTexture, mipOffset + currentCoord).x;
currentCoord = currentCoord * 2;
#endif

// Compute the full resolution pixel for the inputs that do not have a pyramid
uint2 fullResCoord = currentCoord * 2;
#else
// Fetch the depth of the current pixel
float deviceDepth = LOAD_TEXTURE2D_X(_DepthTexture, currentCoord).x;
#endif

if (deviceDepth == UNITY_RAW_FAR_CLIP_VALUE)
{
_IndirectDiffuseTexture0RW[COORD_TEXTURE2D_X(currentCoord)] = float4(0.0, 0.0, 0.0, 0.0);
_IndirectDiffuseTexture0RW[COORD_TEXTURE2D_X(dispatchThreadId.xy)] = float4(0.0, 0.0, 0.0, 0.0);
return;
}

// Fetch the normal
NormalData normalData;
#if HALF_RES
// For half resolution, we take the top left corner (0, 0) in the upscaled 2x2 pixel neighborhood
DecodeFromNormalBuffer(fullResCoord.xy, normalData);
#else
DecodeFromNormalBuffer(currentCoord.xy, normalData);
#endif

// Convert the signal back to a color
float3 color;
float4 ySH = _IndirectDiffuseTexture0RW[COORD_TEXTURE2D_X(currentCoord)];
float3 cocgB = LOAD_TEXTURE2D_X(_IndirectDiffuseTexture1, currentCoord).xyz;
float4 ySH = _IndirectDiffuseTexture0RW[COORD_TEXTURE2D_X(dispatchThreadId.xy)];
float3 cocgB = LOAD_TEXTURE2D_X(_IndirectDiffuseTexture1, dispatchThreadId.xy).xyz;
ConvertYCoCgToRGBUtil(ySH, cocgB.xy, normalData.normalWS, color);

#ifdef PERCEPTUAL_SPACE
// We invert the tonemap
color = color / (1.0 - color);

// The multiplication is wrong, but with all the approximations that we need to compensate a bit
// the fact that the signal was significantly attenuated (due to blurring in tonemapped space to reduce the blobbyness).
// This has been experimentally tested. However, it needs more testing and potentially reverted if found more harmful than useful
color *= (lerp(2.5, 1.0, cocgB.z));
color *= (lerp(5.0, 1.0, cocgB.z));
#endif

// Does this pixel receive SSGI?
uint stencilValue = GetStencilValue(LOAD_TEXTURE2D_X(_StencilTexture, currentCoord));
if ((stencilValue & _SsrStencilBit) == 0)
cocgB.z = 0.0;

// Output the color as well as the blend factor
_IndirectDiffuseTexture0RW[COORD_TEXTURE2D_X(currentCoord)] = float4(color, cocgB.z);
_IndirectDiffuseTexture0RW[COORD_TEXTURE2D_X(dispatchThreadId.xy)] = float4(color, cocgB.z);
}
Original file line number Diff line number Diff line change
Expand Up @@ -215,8 +215,16 @@ SSGIConvertParameters PrepareSSGIConvertParameters(HDCamera hdCamera, bool halfR
SSGIConvertParameters parameters = new SSGIConvertParameters();

// Set the camera parameters
parameters.texWidth = hdCamera.actualWidth;
parameters.texHeight = hdCamera.actualHeight;
if (!halfResolution)
{
parameters.texWidth = hdCamera.actualWidth;
parameters.texHeight = hdCamera.actualHeight;
}
else
{
parameters.texWidth = hdCamera.actualWidth / 2;
parameters.texHeight = hdCamera.actualHeight / 2;
}
parameters.viewCount = hdCamera.viewCount;

// Grab the right kernel
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ Shader "Hidden/HDRP/DebugDensityVolumeAtlas"

float3 GetUVs(float2 texcoords)
{
return float3(texcoords * float2(1, _TextureSize.x / _TextureSize.y) * _TextureSize, _Slice) + _Offset;
return float3(texcoords * float2(1, _TextureSize.x / _TextureSize.y) * _TextureSize.xy, _Slice) + _Offset;
}

float4 Color(Varyings input) : SV_Target
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -44,15 +44,15 @@ internal GlobalLightingQualitySettings()
SSRMaxRaySteps[(int)ScalableSettingLevelParameter.Level.High] = 64;

/* Screen Space Global Illumination */
SSGIRaySteps[(int)ScalableSettingLevelParameter.Level.Low] = 24;
SSGIRaySteps[(int)ScalableSettingLevelParameter.Level.Medium] = 32;
SSGIRaySteps[(int)ScalableSettingLevelParameter.Level.High] = 64;
SSGIRaySteps[(int)ScalableSettingLevelParameter.Level.Low] = 32;
SSGIRaySteps[(int)ScalableSettingLevelParameter.Level.Medium] = 64;
SSGIRaySteps[(int)ScalableSettingLevelParameter.Level.High] = 96;

SSGIFullResolution[(int)ScalableSettingLevelParameter.Level.Low] = false;
SSGIFullResolution[(int)ScalableSettingLevelParameter.Level.Medium] = true;
SSGIFullResolution[(int)ScalableSettingLevelParameter.Level.High] = true;

SSGIFilterRadius[(int)ScalableSettingLevelParameter.Level.Low] = 2;
SSGIFilterRadius[(int)ScalableSettingLevelParameter.Level.Low] = 3;
SSGIFilterRadius[(int)ScalableSettingLevelParameter.Level.Medium] = 5;
SSGIFilterRadius[(int)ScalableSettingLevelParameter.Level.High] = 7;

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@ internal enum HDProfileId
SSGITrace,
SSGIDenoise,
SSGIUpscale,
SSGIConvert,

ForwardEmissive,
ForwardOpaque,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -196,7 +196,7 @@ void INDIRECT_DIFFUSE_INTEGRATION_UPSCALE(uint3 dispatchThreadId : SV_DispatchTh

// Compute the position of the actual source pixel
#ifdef HALF_RESOLUTION
uint2 actualSourceCoord = ComputeSourceCoordinates(sampleCoord / 2, _RaytracingFrameIndex);
uint2 actualSourceCoord = ComputeSourceCoordinates(sampleCoord * 0.5, _RaytracingFrameIndex);
#else
uint2 actualSourceCoord = sampleCoord;
#endif
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,7 @@ void ReflectionAdjustWeight(uint3 dispatchThreadId : SV_DispatchThreadID, uint2
}

// Fetch the lighting and compute the weight
float3 lighting = LOAD_TEXTURE2D_X(_SsrLightingTextureRW, targetCoord);
float3 lighting = LOAD_TEXTURE2D_X(_SsrLightingTextureRW, targetCoord).rgb;
float weight = ComputeWeightValue(perceptualSmoothness);

// Output the result to the half resolution part of the texture
Expand Down