From c3f94b92a47ad9a71ed063e48c9bb4dc97402cba Mon Sep 17 00:00:00 2001
From: Pavlos Mavridis
Date: Thu, 23 Apr 2020 12:35:39 +0200
Subject: [PATCH 01/11] AOV documentation

---
 .../Documentation~/AOVs.md                | 104 ++++++++++++++++++
 .../Documentation~/Images/aov_example.png |   3 +
 .../Documentation~/TableOfContents.md     |   1 +
 3 files changed, 108 insertions(+)
 create mode 100644 com.unity.render-pipelines.high-definition/Documentation~/AOVs.md
 create mode 100644 com.unity.render-pipelines.high-definition/Documentation~/Images/aov_example.png

diff --git a/com.unity.render-pipelines.high-definition/Documentation~/AOVs.md b/com.unity.render-pipelines.high-definition/Documentation~/AOVs.md
new file mode 100644
index 00000000000..a16f487bce2
--- /dev/null
+++ b/com.unity.render-pipelines.high-definition/Documentation~/AOVs.md
@@ -0,0 +1,104 @@
+# Arbitrary Output Variables
+
+Arbitrary Output Variables (AOVs) are additional images that an HDRP camera can generate. Typically they are used to output additional information per pixel, which can be used later for compositing or additional image processing (such as denoising).
+
+Here is an example of three AOVs, containing from left to right the Albedo, Normal and Object ID of each pixel:
+
+![](Images/aov_example.png)
+
+In HDRP you can access and configure AOVs in the following ways:
+- Using the [HDRP Compositor Tool](Compositor-Main).
+- Using the Unity Recorder and the AOV Recorder Package.
+- Using the scripting API to setup a custom AOV request in any HDRP Camera of your scene.
+
+The first two options offer a limited selection of AOVs in their User Interface, while the third option allows much more flexibility on the nature of data that will be outputted.
+
+## Material Property AOVs
+Here is a list of material properties that can be outputted using the AOV API.
+
+| Material Property | Description |
+|-------------------|---------------------------|
+| Normal | Output the surface normal |
+| Albedo | Output the surface albedo |
+| Smoothness | Output the surface smoothness |
+| Ambient Occlusion | Output the ambient occlusion (N/A for AxF) |
+| Specular | Output the surface specularity |
+| Alpha | Output the surface alpha (pixel coverage) |
+
+## Lighting Selection with AOVs
+AOVs can also be used to output the contribution from a selected list of lights, or they can be used to output only specific components of the lighting.
+
+| Lighting Property | Description |
+|-------------------|---------------------------|
+| DiffuseOnly | Render only diffuse lighting (direct and indirect) |
+| SpecularOnly | Render only specular lighting (direct and indirect) |
+| DirectDiffuseOnly | Render only direct diffuse lighting |
+| DirectSpecularOnly | Render only direct specular lighting |
+| IndirectDiffuseOnly | Render only indirect diffuse lighting |
+| ReflectionOnly | Render only reflections |
+| RefractionOnly | Render only refractions |
+| EmissiveOnly | Render only emissive lighting |
+
+## Custom Pass AOVs
+Finally, AOVs can also be used to output the results of [custom passes](Custom-Pass). In particular, you can output the cumulative results of all custom passes that are active on every custom pass injection point. This can be useful to output arbitrary information that is computed in custom passes, such as the Object ID of the scene objects.
+
+## Scripting API
+Here is a code snippet demonstrating how to setup a simple AOV request for the surface albedo on an HDRP camera.
+```
+// member variables:
+RenderTexture m_AovRT; // Stores the final AOV output
+RTHandle m_TmpRT;      // The RTHandle used to render the AOV
+```
+
+
+```
+// runtime code:
+
+var hdAdditionalCameraData = camera.GetComponent<HDAdditionalCameraData>();
+if (hdAdditionalCameraData != null )
+{
+    // first allocate a render texture to store the resulting AOV output
+    if (m_AovRT == null)
+        m_AovRT = new RenderTexture(camera.pixelWidth, camera.pixelHeight, 0, RenderTextureFormat.ARGBFloat, RenderTextureReadWrite.Default);
+
+    // initialize a new AOV request
+    var aovRequest = AOVRequest.NewDefault();
+
+    AOVBuffers[] aovBuffers = null;
+    CustomPassAOVBuffers[] customPassAovBuffers = null;
+
+    // Allocate the RTHandle that will store the intermediate results
+    var buferAlloc = m_TmpRT ?? (m_TmpRT = RTHandles.Alloc(camera.pixelWidth, camera.pixelHeight));
+
+    // Request an AOV with the surface albedo
+    aovRequest.SetFullscreenOutput(MaterialSharedProperty.Albedo);
+    aovBuffers = new[] { AOVBuffers.Color };
+
+    //
+    var aovRequestBuilder = new AOVRequestBuilder();
+    aovRequestBuilder.Add(aovRequest,
+        bufferId => buferAlloc,
+        null,
+        aovBuffers,
+        customPassAovBuffers,
+        bufferId => buferAlloc,
+        (cmd, textures, customPassTextures, properties) =>
+        {
+            // callback to blit the AOV from the intermediate RTHandle to the final render texture (m_AovRT).
+            if (textures.Count > 0)
+            {
+                cmd.Blit(textures[0], m_AovRT);
+            }
+            else if (customPassTextures.Count > 0)
+            {
+                cmd.Blit(customPassTextures[0], m_AovRT);
+            }
+        });
+
+    // Build the AOV request
+    var aovRequestDataCollection = aovRequestBuilder.Build();
+
+    // Set the request to the camera
+    hdAdditionalCameraData.SetAOVRequests(aovRequestDataCollection);
+}
+```
\ No newline at end of file
diff --git a/com.unity.render-pipelines.high-definition/Documentation~/Images/aov_example.png b/com.unity.render-pipelines.high-definition/Documentation~/Images/aov_example.png
new file mode 100644
index 00000000000..ef3c8384ddf
--- /dev/null
+++ b/com.unity.render-pipelines.high-definition/Documentation~/Images/aov_example.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0c9979af1a6a84124bcf96266f99560991e5ae6dfd26d7e44690f566ae27321e
+size 3072921
diff --git a/com.unity.render-pipelines.high-definition/Documentation~/TableOfContents.md b/com.unity.render-pipelines.high-definition/Documentation~/TableOfContents.md
index bd28a21a393..675edd3d4b7 100644
--- a/com.unity.render-pipelines.high-definition/Documentation~/TableOfContents.md
+++ b/com.unity.render-pipelines.high-definition/Documentation~/TableOfContents.md
@@ -102,6 +102,7 @@
     * [Motion Vectors](Motion-Vectors)
     * [Anti-Aliasing](Anti-Aliasing)
     * [Alpha Output](Alpha-Output)
+    * [Arbitrary Output Variables](AOVs)
   * Post-processing
     * [Post-processing in HDRP](Post-Processing-Main)
     * [Effect Execution Order](Post-Processing-Execution-Order)

From 8630639fb61fb5c09c23852937da6e92aebd7f80 Mon Sep 17 00:00:00 2001
From: Pavlos Mavridis
Date: Thu, 23 Apr 2020 12:40:59 +0200
Subject: [PATCH 02/11] minor change in the comments

---
 .../Documentation~/AOVs.md | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/com.unity.render-pipelines.high-definition/Documentation~/AOVs.md b/com.unity.render-pipelines.high-definition/Documentation~/AOVs.md
index a16f487bce2..7be3c818d6c 100644
--- a/com.unity.render-pipelines.high-definition/Documentation~/AOVs.md
+++ b/com.unity.render-pipelines.high-definition/Documentation~/AOVs.md
@@ -66,15 +66,15 @@ if (hdAdditionalCameraData != null )
     AOVBuffers[] aovBuffers = null;
     CustomPassAOVBuffers[] customPassAovBuffers = null;
-
-    // Allocate the RTHandle that will store the intermediate results
-    var buferAlloc = m_TmpRT ?? (m_TmpRT = RTHandles.Alloc(camera.pixelWidth, camera.pixelHeight));
 
     // Request an AOV with the surface albedo
     aovRequest.SetFullscreenOutput(MaterialSharedProperty.Albedo);
     aovBuffers = new[] { AOVBuffers.Color };
 
-    //
+    // Allocate the RTHandle that will store the intermediate results
+    var buferAlloc = m_TmpRT ?? (m_TmpRT = RTHandles.Alloc(camera.pixelWidth, camera.pixelHeight));
+
+    // Add the reuesto to a new AOVRequestBuilder
     var aovRequestBuilder = new AOVRequestBuilder();
     aovRequestBuilder.Add(aovRequest,
         bufferId => buferAlloc,
         null,
         aovBuffers,
         customPassAovBuffers,
         bufferId => buferAlloc,
         (cmd, textures, customPassTextures, properties) =>
         {
             // callback to blit the AOV from the intermediate RTHandle to the final render texture (m_AovRT).
             if (textures.Count > 0)
             {
                 cmd.Blit(textures[0], m_AovRT);
             }
             else if (customPassTextures.Count > 0)
             {
                 cmd.Blit(customPassTextures[0], m_AovRT);
             }
         });
 
-    // Build the AOV request
+    // Now build the AOV request
     var aovRequestDataCollection = aovRequestBuilder.Build();
 
-    // Set the request to the camera
+    // And finally set the request to the camera
     hdAdditionalCameraData.SetAOVRequests(aovRequestDataCollection);
 }
 ```

From c2effcf89ea77bc3b132e688d524aeaedaa41395 Mon Sep 17 00:00:00 2001
From: Pavlos Mavridis
Date: Thu, 23 Apr 2020 15:49:50 +0200
Subject: [PATCH 03/11] Accumulation documentation

---
 .../Documentation~/Accumulation.md      | 85 +++++++++++++++++++
 .../Images/path_tracing_recording.png   |  3 +
 .../Images/shutter_profiles.png         |  3 +
 .../Documentation~/TableOfContents.md   |  1 +
 4 files changed, 92 insertions(+)
 create mode 100644 com.unity.render-pipelines.high-definition/Documentation~/Accumulation.md
 create mode 100644 com.unity.render-pipelines.high-definition/Documentation~/Images/path_tracing_recording.png
 create mode 100644 com.unity.render-pipelines.high-definition/Documentation~/Images/shutter_profiles.png

diff --git a/com.unity.render-pipelines.high-definition/Documentation~/Accumulation.md b/com.unity.render-pipelines.high-definition/Documentation~/Accumulation.md
new file mode 100644
index 00000000000..49b2eccad56
--- /dev/null
+++ b/com.unity.render-pipelines.high-definition/Documentation~/Accumulation.md
@@ -0,0 +1,85 @@
+## Multiframe Rendering and Accumulation
+
+Some rendering techniques, such as path tracing and accumulation motion blur, create the final "converged" frame by combining information from multiple intermediate sub-frames. Each intermediate sub-frame can correspond to a slightly different point in time, effectively computing physically-based accumulation motion blur, which properly takes into account object rotations, deformations, material or lighting changes, etc.
+
+HDRP provides a scripting API that allows you to control the creation of sub-frames and the convergence of multi-frame rendering effects. In particular, the API allows you to control the number of intermediate sub-frames (samples) and the points in time that correspond to each one of them. Furthermore, the weights of each sub-frame are controlled using shutter profiles that describe how fast was the opening and closing motion of the camera's shutter.
+
+This API is particularly useful when recording path traced movies. Normally, when editing a scene, the convergence of path tracing restarts every time the scene changes, to provide artists an interactive editing workflow that allows them to quickly visualize their changes. However such behavior is not desirable during recording.
+
+The following images shows a rotating object with path tracing and accumulation motion blur, recorded using the multi-frame rendering API.
+
+![](Images/path_tracing_recording.png)
+
+## API Overview
+The recording API is available in the HD Render Pipeline and has only three calls:
+- BeginRecording should be called when starting a multi-frame render.
+- PrepareNewSubFrame should be called before rendering a new subframe.
+- EndRecording which should be called to stop the multi-frame rendering mode.
+
+The script below demonstrates how to use these calls.
+
+## Scripting Example
+The following example demonstrates how to use the multi-frame rendering API in your scripts to properly record converged animation sequences with path tracing and/or accumulation motion blur. To use it, attach the script to the camera of your scene and select the “Start recording” and “stop recording” actions from the context menu. Setting the Shutter Interval parameter to zero will disable motion blur completely.
+
+```
+using UnityEngine;
+using UnityEngine.Rendering;
+using UnityEngine.Rendering.HighDefinition;
+
+public class FrameManager : MonoBehaviour
+{
+    public int samples = 128;
+    public float shutterInterval = 1.0f;
+    public float shutterFullyOpen = 0.25f;
+    public float shutterBeginsClosing = 0.75f;
+
+    bool m_Recording = false;
+    int m_Iteration = 0;
+    int m_RecordedFrames = 0;
+
+    [ContextMenu("Start Recording")]
+    void BeginMultiframeRendering()
+    {
+        RenderPipelineManager.beginFrameRendering += PrepareSubFrameCallBack;
+        HDRenderPipeline renderPipeline = RenderPipelineManager.currentPipeline as HDRenderPipeline;
+        renderPipeline.BeginRecording(samples, shutterInterval, shutterFullyOpen, shutterBeginsClosing);
+        m_Recording = true;
+        m_Iteration = 0;
+        m_RecordedFrames = 0;
+    }
+
+    [ContextMenu("Stop Recording")]
+    void StopMultiframeRendering()
+    {
+        RenderPipelineManager.beginFrameRendering -= PrepareSubFrameCallBack;
+        HDRenderPipeline renderPipeline = RenderPipelineManager.currentPipeline as HDRenderPipeline;
+        renderPipeline.EndRecording();
+        m_Recording = false;
+    }
+
+    void PrepareSubFrameCallBack(ScriptableRenderContext cntx, Camera[] cams)
+    {
+        HDRenderPipeline renderPipeline = RenderPipelineManager.currentPipeline as HDRenderPipeline;
+        if (renderPipeline != null && m_Recording)
+        {
+            renderPipeline.PrepareNewSubFrame();
+            m_Iteration++;
+        }
+
+        if (m_Recording && m_Iteration % samples == 0)
+        {
+            ScreenCapture.CaptureScreenshot($"frame_{m_RecordedFrames++}.png");
+        }
+    }
+}
+```
+
+## Shutter Profiles
+The BeginRecording call allows you to specify how fast the camera shutter is opening and closing. The speed of the camera shutter defines the so called “shutter profile”. The following image demonstrates how different shutter profiles affect the appearance of motion blur on a blue sphere moving from left to right.
+
+![](Images/shutter_profiles.png)
+
+
+In all cases, the speed of the sphere is the same. The only change is the shutter profile. The horizontal axis of the profile diagram corresponds to time, and the vertical axis corresponds to the opening of the shutter.
+
+In this example, we observe that the slow open profile creates a motion trail appearance for the motion blur, which might be more desired for the artists. On the other hand, the smooth open and close profile creates smoother animations than the slow open or uniform profiles.
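To make the mapping between these profiles and the `BeginRecording` parameters concrete, here is a minimal sketch that reuses the call from the `FrameManager` example above. The sample count and shutter interval are placeholder values, and the profile names are descriptive labels from the discussion, not API identifiers; only the last two arguments change between profiles.

```
// A minimal sketch: choosing a shutter profile through the
// (shutterFullyOpen, shutterBeginsClosing) arguments of BeginRecording.
// Assumes the using directives and pipeline lookup from the example above.
HDRenderPipeline renderPipeline = RenderPipelineManager.currentPipeline as HDRenderPipeline;

// Uniform profile: the shutter opens instantly and stays fully open.
renderPipeline.BeginRecording(128, 1.0f, 0.0f, 1.0f);

// Slow open profile: the shutter opens over the whole interval and closes
// instantly, which produces the motion-trail look described above.
//renderPipeline.BeginRecording(128, 1.0f, 1.0f, 1.0f);

// Smooth open and close: fully open at 25% of the interval, closing from 75%.
//renderPipeline.BeginRecording(128, 1.0f, 0.25f, 0.75f);
```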
diff --git a/com.unity.render-pipelines.high-definition/Documentation~/Images/path_tracing_recording.png b/com.unity.render-pipelines.high-definition/Documentation~/Images/path_tracing_recording.png
new file mode 100644
index 00000000000..a96c0a968f1
--- /dev/null
+++ b/com.unity.render-pipelines.high-definition/Documentation~/Images/path_tracing_recording.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7dba84aa5b5f551a7079184312690dd4abc4b3efe5dbef26cd4354795dface44
+size 1731166
diff --git a/com.unity.render-pipelines.high-definition/Documentation~/Images/shutter_profiles.png b/com.unity.render-pipelines.high-definition/Documentation~/Images/shutter_profiles.png
new file mode 100644
index 00000000000..78b0a6a40a5
--- /dev/null
+++ b/com.unity.render-pipelines.high-definition/Documentation~/Images/shutter_profiles.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:00deb98061b54c0892adf169781533e635b05c49b498d657013756ad47010a83
+size 1024825
diff --git a/com.unity.render-pipelines.high-definition/Documentation~/TableOfContents.md b/com.unity.render-pipelines.high-definition/Documentation~/TableOfContents.md
index 675edd3d4b7..3f0906b4913 100644
--- a/com.unity.render-pipelines.high-definition/Documentation~/TableOfContents.md
+++ b/com.unity.render-pipelines.high-definition/Documentation~/TableOfContents.md
@@ -103,6 +103,7 @@
     * [Anti-Aliasing](Anti-Aliasing)
     * [Alpha Output](Alpha-Output)
     * [Arbitrary Output Variables](AOVs)
+    * [Multiframe Rendering and Accumulation](Accumulation)
   * Post-processing
     * [Post-processing in HDRP](Post-Processing-Main)
     * [Effect Execution Order](Post-Processing-Execution-Order)

From 6d2b8b630ca428e26bf883d96356a36b5d5b5717 Mon Sep 17 00:00:00 2001
From: Pavlos Mavridis
Date: Thu, 23 Apr 2020 16:23:35 +0200
Subject: [PATCH 04/11] minor improvements

---
 .../Documentation~/Accumulation.md | 42 +++++++++++++++++++++++++++--------
 1 file changed, 35 insertions(+), 7 deletions(-)

diff --git a/com.unity.render-pipelines.high-definition/Documentation~/Accumulation.md b/com.unity.render-pipelines.high-definition/Documentation~/Accumulation.md
index 49b2eccad56..257460d77a1 100644
--- a/com.unity.render-pipelines.high-definition/Documentation~/Accumulation.md
+++ b/com.unity.render-pipelines.high-definition/Documentation~/Accumulation.md
@@ -1,6 +1,6 @@
 ## Multiframe Rendering and Accumulation
 
-Some rendering techniques, such as path tracing and accumulation motion blur, create the final "converged" frame by combining information from multiple intermediate sub-frames. Each intermediate sub-frame can correspond to a slightly different point in time, effectively computing physically-based accumulation motion blur, which properly takes into account object rotations, deformations, material or lighting changes, etc.
+Some rendering techniques, such as [Path Tracing](Ray-Tracing-Path-Tracing) and accumulation motion blur, create the final "converged" frame by combining information from multiple intermediate sub-frames. Each intermediate sub-frame can correspond to a slightly different point in time, effectively computing physically-based accumulation motion blur, which properly takes into account object rotations, deformations, material or lighting changes, etc.
 
 HDRP provides a scripting API that allows you to control the creation of sub-frames and the convergence of multi-frame rendering effects.
In particular, the API allows you to control the number of intermediate sub-frames (samples) and the points in time that correspond to each one of them. Furthermore, the weights of each sub-frame are controlled using shutter profiles that describe how fast was the opening and closing motion of the camera's shutter.
 
 ## API Overview
 The recording API is available in the HD Render Pipeline and has only three calls:
-- BeginRecording should be called when starting a multi-frame render.
-- PrepareNewSubFrame should be called before rendering a new subframe.
-- EndRecording which should be called to stop the multi-frame rendering mode.
+- **BeginRecording** should be called when starting a multi-frame render.
+- **PrepareNewSubFrame** should be called before rendering a new subframe.
+- **EndRecording** which should be called to stop the multi-frame rendering mode.
 
-The script below demonstrates how to use these calls.
+The only call that takes any parameters is **BeginRecording**. Here is an explenation of the parameters:
+
+| Parameter | Description |
+|-------------------|---------------------------|
+| Samples | The number of sub-frames to accumumate. This parameter overrides the number of path tracing samples in the the volume.|
+| ShutterInterval | The amount of time the shutter is open between two subsequent frames. Zero denotes instant shutter (no motion blur). One denotes there is no (time) gap between two subsequent frames.|
+| ShutterProfile | An animation curve, denoting the shutter position during the shutter interval. Alternatively, the user can also provide the time the shutter was fully open; and when the shutter begins closing.
+
+The script below demonstrates how to use these API calls.
 
 ## Scripting Example
-The following example demonstrates how to use the multi-frame rendering API in your scripts to properly record converged animation sequences with path tracing and/or accumulation motion blur. To use it, attach the script to the camera of your scene and select the “Start recording” and “stop recording” actions from the context menu. Setting the Shutter Interval parameter to zero will disable motion blur completely.
+The following example demonstrates how to use the multi-frame rendering API in your scripts to properly record converged animation sequences with path tracing and/or accumulation motion blur. To use it, attach the script to the camera of your scene and select the “Start recording” and “stop recording” actions from the context menu.
 
 ```
 using UnityEngine;
 using UnityEngine.Rendering;
 using UnityEngine.Rendering.HighDefinition;
 
 public class FrameManager : MonoBehaviour
 {
+    // The number of samples used for accumulation.
     public int samples = 128;
+    [Range(0.0f, 1.0f)]
     public float shutterInterval = 1.0f;
+
+    // The time during shutter interval when the shutter is fully open.
+    [Range(0.0f, 1.0f)]
     public float shutterFullyOpen = 0.25f;
+
+    // The time during shutter interval when the shutter begins closing.
+    [Range(0.0f, 1.0f)]
     public float shutterBeginsClosing = 0.75f;
 
     bool m_Recording = false;
     int m_Iteration = 0;
     int m_RecordedFrames = 0;
@@ -71,6 +87,12 @@ public class FrameManager : MonoBehaviour
             ScreenCapture.CaptureScreenshot($"frame_{m_RecordedFrames++}.png");
         }
     }
+
+    void OnValidate()
+    {
+        // Make sure the shutter will begin closing sometime after it is fully open (and not before)
+        shutterBeginsClosing = Mathf.Max(shutterFullyOpen, shutterBeginsClosing);
+    }
 }
 ```
 
 ## Shutter Profiles
 The BeginRecording call allows you to specify how fast the camera shutter is opening and closing. The speed of the camera shutter defines the so called “shutter profile”. The following image demonstrates how different shutter profiles affect the appearance of motion blur on a blue sphere moving from left to right.
 
 ![](Images/shutter_profiles.png)
 
-
 In all cases, the speed of the sphere is the same. The only change is the shutter profile. The horizontal axis of the profile diagram corresponds to time, and the vertical axis corresponds to the opening of the shutter.
+The first three profiles cqn be eqsilly defined without using an animation vurve by setting the open, close parameters to (0,1), (1,1) and (0.25, 0.75). The last profile requires the use of an animation curve.
 
 In this example, we observe that the slow open profile creates a motion trail appearance for the motion blur, which might be more desired for the artists. On the other hand, the smooth open and close profile creates smoother animations than the slow open or uniform profiles.
+
+## Limitations
+The multi-frame rendering API internally changes the Time.timeScale of the scene. This means that:
+- You cannot have different accumulation motion blur parameters per camera.
+- Projects that already modify this parameter per frame will not be compatible with this technique.
\ No newline at end of file

From 9084ac570e0310ae86e2666a785d8cd04da67106 Mon Sep 17 00:00:00 2001
From: Pavlos Mavridis
Date: Thu, 23 Apr 2020 17:44:31 +0200
Subject: [PATCH 05/11] Rewrite the AOV script. Now it is stand-alone.

---
 .../Documentation~/AOVs.md | 112 ++++++++++--------
 1 file changed, 62 insertions(+), 50 deletions(-)

diff --git a/com.unity.render-pipelines.high-definition/Documentation~/AOVs.md b/com.unity.render-pipelines.high-definition/Documentation~/AOVs.md
index 7be3c818d6c..93a9916da9c 100644
--- a/com.unity.render-pipelines.high-definition/Documentation~/AOVs.md
+++ b/com.unity.render-pipelines.high-definition/Documentation~/AOVs.md
@@ -43,62 +43,74 @@
 ## Scripting API
-Here is a code snippet demonstrating how to setup a simple AOV request for the surface albedo on an HDRP camera.
+When the following example script is attached to an HDRP camera, it will request to output albedo AOVs and will save the resulting frames to disk as a sequence of .png images.
+```
+using UnityEngine;
+using UnityEngine.Rendering;
+using UnityEngine.Rendering.HighDefinition;
+using UnityEngine.Rendering.HighDefinition.Attributes;
+
+public class AovRecorder : MonoBehaviour
+{
+    RTHandle m_TmpRT;   // The RTHandle used to render the AOV
+    Texture2D m_ReadBackTexture;
+
+    int m_Frames = 0;
+
+    // Start is called before the first frame update
+    void Start()
+    {
+        var camera = gameObject.GetComponent<Camera>();
+        if (camera != null)
+        {
+            var hdAdditionalCameraData = gameObject.GetComponent<HDAdditionalCameraData>();
+            if (hdAdditionalCameraData != null)
+            {
+                // initialize a new AOV request
+                var aovRequest = AOVRequest.NewDefault();
+
+                AOVBuffers[] aovBuffers = null;
+                CustomPassAOVBuffers[] customPassAovBuffers = null;
+
+                // Request an AOV with the surface albedo
+                aovRequest.SetFullscreenOutput(MaterialSharedProperty.Albedo);
+                aovBuffers = new[] { AOVBuffers.Color };
+
+                // Allocate the RTHandle that will store the intermediate results
+                m_TmpRT = RTHandles.Alloc(camera.pixelWidth, camera.pixelHeight);
+
+                // Add the reuesto to a new AOVRequestBuilder
+                var aovRequestBuilder = new AOVRequestBuilder();
+                aovRequestBuilder.Add(aovRequest,
+                    bufferId => m_TmpRT,
+                    null,
+                    aovBuffers,
+                    customPassAovBuffers,
+                    bufferId => m_TmpRT,
+                    (cmd, textures, customPassTextures, properties) =>
+                    {
+                        // callback to read back the AOV data and write them to disk
+                        if (textures.Count > 0)
+                        {
+                            m_ReadBackTexture = m_ReadBackTexture ??
+                                new Texture2D(camera.pixelWidth, camera.pixelHeight, TextureFormat.RGBAFloat, false);
+                            RenderTexture.active = textures[0].rt;
+                            m_ReadBackTexture.ReadPixels(new Rect(0, 0, camera.pixelWidth, camera.pixelHeight), 0, 0, false);
+                            m_ReadBackTexture.Apply();
+                            RenderTexture.active = null;
+                            byte[] bytes = m_ReadBackTexture.EncodeToPNG();
+                            System.IO.File.WriteAllBytes($"output_{m_Frames++}.png", bytes);
+                        }
+
+                    });
+
+                // Now build the AOV request
+                var aovRequestDataCollection = aovRequestBuilder.Build();
+
+                // And finally set the request to the camera
+                hdAdditionalCameraData.SetAOVRequests(aovRequestDataCollection);
+            }
+        }
+    }
+}
+
+```
\ No newline at end of file

From b5cea63822aad3183f92286e7fae17631ef2d9a3 Mon Sep 17 00:00:00 2001
From: Pavlos Mavridis
Date: Thu, 23 Apr 2020 17:51:51 +0200
Subject: [PATCH 06/11] Typo

---
 .../Documentation~/AOVs.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/com.unity.render-pipelines.high-definition/Documentation~/AOVs.md b/com.unity.render-pipelines.high-definition/Documentation~/AOVs.md
index 93a9916da9c..9fdc32b4fbf 100644
--- a/com.unity.render-pipelines.high-definition/Documentation~/AOVs.md
+++ b/com.unity.render-pipelines.high-definition/Documentation~/AOVs.md
@@ -79,7 +79,7 @@ public class AovRecorder : MonoBehaviour
                 // Allocate the RTHandle that will store the intermediate results
                 m_TmpRT = RTHandles.Alloc(camera.pixelWidth, camera.pixelHeight);
 
-                // Add the reuesto to a new AOVRequestBuilder
+                // Add the request to a new AOVRequestBuilder
                 var aovRequestBuilder = new AOVRequestBuilder();

From 35c704058599d82d3f3920d9225b66be3c694a73 Mon Sep 17 00:00:00 2001
From: Pavlos Mavridis
Date: Fri, 24 Apr 2020 10:11:43 +0200
Subject: [PATCH 07/11] Add path traced DoF documentation

---
 .../Documentation~/Images/Path-traced-DoF.png        |  3 +++
 .../Documentation~/Post-Processing-Depth-of-Field.md | 12 +++++++++++-
 2 files changed, 14 insertions(+), 1 deletion(-)
 create mode 100644 com.unity.render-pipelines.high-definition/Documentation~/Images/Path-traced-DoF.png

diff --git a/com.unity.render-pipelines.high-definition/Documentation~/Images/Path-traced-DoF.png b/com.unity.render-pipelines.high-definition/Documentation~/Images/Path-traced-DoF.png
new file mode 100644
index 00000000000..699dff4285c
--- /dev/null
+++ b/com.unity.render-pipelines.high-definition/Documentation~/Images/Path-traced-DoF.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:464664685962ce66588795cb8d58929811907f5be7cd127dca7ec4824d3d0a97
+size 4711889
diff --git a/com.unity.render-pipelines.high-definition/Documentation~/Post-Processing-Depth-of-Field.md b/com.unity.render-pipelines.high-definition/Documentation~/Post-Processing-Depth-of-Field.md
index a1e102757f5..2fe4fce2dd3 100644
--- a/com.unity.render-pipelines.high-definition/Documentation~/Post-Processing-Depth-of-Field.md
+++ b/com.unity.render-pipelines.high-definition/Documentation~/Post-Processing-Depth-of-Field.md
@@ -63,4 +63,14 @@ This example shows how the **Blade Count** and **Curvature** properties affect t
 * On the left side, there is a five blade iris that is slightly open; producing a pentagonal bokeh.
 * On the right side, there is a five blade iris that is wide open; producing a circular bokeh.
 
-![](Images/Post-ProcessingDepthofField2.png)
\ No newline at end of file
+![](Images/Post-ProcessingDepthofField2.png)
+
+## Path-Traced Depth of Field
+
+When [Path Tracing](Ray-Tracing-Path-Tracing) is enabled and the Focus Mode in the Depth of Field volume is set to use the physical camera, then depth of field will be computed directly during path tracing and it will not be post processed.
+
+Path-traced depth of field produces images without any artifacts, apart from noise when using insufficient path-tracing samples. To reduce the noise level you have to increase the number of samples from the [Path Tracing](Ray-Tracing-Path-Tracing) settings and/or de-noise the final frame.
+
+The path traced Depth of Field is always computed at full resolution and any quality settings from the volume will be ignored.
+
+![](Images/Path-traced-DoF.png)

From ce1f7b931dc9667f3dbda129fb9d5c330fbaf498 Mon Sep 17 00:00:00 2001
From: Lewis Jordan
Date: Wed, 13 May 2020 11:14:18 +0100
Subject: [PATCH 08/11] Reviewed AOV doc

---
 .../Documentation~/AOVs.md | 58 +++++++++----------
 1 file changed, 29 insertions(+), 29 deletions(-)

diff --git a/com.unity.render-pipelines.high-definition/Documentation~/AOVs.md b/com.unity.render-pipelines.high-definition/Documentation~/AOVs.md
index 9fdc32b4fbf..babe717a8e6 100644
--- a/com.unity.render-pipelines.high-definition/Documentation~/AOVs.md
+++ b/com.unity.render-pipelines.high-definition/Documentation~/AOVs.md
@@ -1,49 +1,49 @@
 # Arbitrary Output Variables
 
-Arbitrary Output Variables (AOVs) are additional images that an HDRP camera can generate. Typically they are used to output additional information per pixel, which can be used later for compositing or additional image processing (such as denoising).
+Arbitrary Output Variables (AOVs) are additional images that an [HDRP Camera](HDRP-Camera.md) can generate. They can output additional information per pixel, which you can use later for compositing or additional image processing (such as denoising).
 
-Here is an example of three AOVs, containing from left to right the Albedo, Normal and Object ID of each pixel:
+Here is an example of three AOVs, containing from left to right the Albedo, Normal, and Object ID of each pixel:
 
 ![](Images/aov_example.png)
 
-In HDRP you can access and configure AOVs in the following ways:
-- Using the [HDRP Compositor Tool](Compositor-Main).
-- Using the Unity Recorder and the AOV Recorder Package.
-- Using the scripting API to setup a custom AOV request in any HDRP Camera of your scene.
+In HDRP, you can access and configure AOVs in the following ways:
+- Using the [HDRP Compositor tool](Compositor-Main).
+- Using the [Unity Recorder](https://docs.unity3d.com/Packages/com.unity.recorder@latest/index.html) and the [AOV Recorder](https://docs.unity3d.com/Packages/com.unity.aovrecorder@latest/index.html) packages.
+- Using the scripting API to set up a custom AOV request in any HDRP Camera in your Scene.
 
-The first two options offer a limited selection of AOVs in their User Interface, while the third option allows much more flexibility on the nature of data that will be outputted.
+The first two options offer a limited selection of AOVs in their User Interface, while the third option allows for much more flexibility on what data an HDRP Camera can output.
 
-## Material Property AOVs
-Here is a list of material properties that can be outputted using the AOV API.
+## Material property AOVs
+Here is the list of Material properties that you can access with the AOV API.
 
+| Material property | Description |
+|-------------------|---------------------------|
+| **Normal** | Outputs the surface normal. |
+| **Albedo** | Outputs the surface albedo. |
+| **Smoothness** | Outputs the surface smoothness. |
+| **Ambient Occlusion** | Outputs the ambient occlusion (N/A for AxF). |
+| **Specular** | Outputs the surface specularity. |
+| **Alpha** | Outputs the surface alpha (pixel coverage). |
 
-## Lighting Selection with AOVs
-AOVs can also be used to output the contribution from a selected list of lights, or they can be used to output only specific components of the lighting.
+## Lighting selection with AOVs
+You can use AOVs to output the contribution from a selected list of [Lights](Light-Component.md), or you can use them to output only specific components of the lighting.
 
+| Lighting property | Description |
+|-------------------|---------------------------|
+| **DiffuseOnly** | Renders only diffuse lighting (direct and indirect). |
+| **SpecularOnly** | Renders only specular lighting (direct and indirect). |
+| **DirectDiffuseOnly** | Renders only direct diffuse lighting. |
+| **DirectSpecularOnly** | Renders only direct specular lighting. |
+| **IndirectDiffuseOnly** | Renders only indirect diffuse lighting. |
+| **ReflectionOnly** | Renders only reflections. |
+| **RefractionOnly** | Renders only refractions. |
+| **EmissiveOnly** | Renders only emissive lighting. |
 
 ## Custom Pass AOVs
-Finally, AOVs can also be used to output the results of [custom passes](Custom-Pass). In particular, you can output the cumulative results of all custom passes that are active on every custom pass injection point. This can be useful to output arbitrary information that is computed in custom passes, such as the Object ID of the scene objects.
+Finally, you can use AOVs to output the results of [custom passes](Custom-Pass.md). In particular, you can output the cumulative results of all custom passes that are active on every custom pass injection point. This can be useful to output arbitrary information that custom passes compute, such as the Object ID of the Scene GameObjects.
 
 ## Scripting API
-When the following example script is attached to an HDRP camera, it will request to output albedo AOVs and will save the resulting frames to disk as a sequence of .png images.
+The following example script outputs albedo AOVs from an HDRP Camera and saves the resulting frames to disk as a sequence of .png images. To use the example script, attach it to an HDRP Camera and enter Play Mode.
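The example below requests the **Albedo** Material property. To capture one of the lighting contributions instead, only the request setup changes. The following is a sketch, and it assumes that `SetFullscreenOutput` also has an overload accepting the `LightingProperty` values from the lighting table above; the example script itself only demonstrates the Material overload.

```
// Sketch: requesting a lighting AOV instead of a Material property.
// Assumption: SetFullscreenOutput accepts the LightingProperty enum
// whose values are listed in the lighting table above.
aovRequest.SetFullscreenOutput(LightingProperty.DirectDiffuseOnly);
aovBuffers = new[] { AOVBuffers.Color };
```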
 ```
 using UnityEngine;
 using UnityEngine.Rendering;
 using UnityEngine.Rendering.HighDefinition;

From 5fd1ec2892399dc9e6736c85dc79de240fe7fb6 Mon Sep 17 00:00:00 2001
From: Lewis Jordan
Date: Wed, 13 May 2020 12:52:25 +0100
Subject: [PATCH 09/11] Reviewed accumulation doc

---
 .../Documentation~/AOVs.md         |  2 +-
 .../Documentation~/Accumulation.md | 46 +++++++++----------
 2 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/com.unity.render-pipelines.high-definition/Documentation~/AOVs.md b/com.unity.render-pipelines.high-definition/Documentation~/AOVs.md
index babe717a8e6..f9486c807a4 100644
--- a/com.unity.render-pipelines.high-definition/Documentation~/AOVs.md
+++ b/com.unity.render-pipelines.high-definition/Documentation~/AOVs.md
@@ -42,7 +42,7 @@ You can use AOVs to output the contribution from a selected list of [Lights](Lig
 ## Custom Pass AOVs
 Finally, you can use AOVs to output the results of [custom passes](Custom-Pass.md). In particular, you can output the cumulative results of all custom passes that are active on every custom pass injection point. This can be useful to output arbitrary information that custom passes compute, such as the Object ID of the Scene GameObjects.
 
-## Scripting API
+## Scripting API example
 The following example script outputs albedo AOVs from an HDRP Camera and saves the resulting frames to disk as a sequence of .png images. To use the example script, attach it to an HDRP Camera and enter Play Mode.

diff --git a/com.unity.render-pipelines.high-definition/Documentation~/Accumulation.md b/com.unity.render-pipelines.high-definition/Documentation~/Accumulation.md
index 257460d77a1..1960836f6f9 100644
--- a/com.unity.render-pipelines.high-definition/Documentation~/Accumulation.md
+++ b/com.unity.render-pipelines.high-definition/Documentation~/Accumulation.md
@@ -1,33 +1,33 @@
-## Multiframe Rendering and Accumulation
+## Multiframe rendering and accumulation
 
-Some rendering techniques, such as [Path Tracing](Ray-Tracing-Path-Tracing) and accumulation motion blur, create the final "converged" frame by combining information from multiple intermediate sub-frames. Each intermediate sub-frame can correspond to a slightly different point in time, effectively computing physically-based accumulation motion blur, which properly takes into account object rotations, deformations, material or lighting changes, etc.
+Some rendering techniques, such as [Path Tracing](Ray-Tracing-Path-Tracing.md) and accumulation motion blur, combine information from multiple intermediate sub-frames to create a final "converged" frame. Each intermediate sub-frame can correspond to a slightly different point in time, effectively computing physically-based accumulation motion blur, which properly takes into account object rotations, deformations, material or lighting changes.
 
-HDRP provides a scripting API that allows you to control the creation of sub-frames and the convergence of multi-frame rendering effects. In particular, the API allows you to control the number of intermediate sub-frames (samples) and the points in time that correspond to each one of them. Furthermore, the weights of each sub-frame are controlled using shutter profiles that describe how fast was the opening and closing motion of the camera's shutter.
+The High Definition Render Pipeline (HDRP) provides a scripting API that allows you to control the creation of sub-frames and the convergence of multi-frame rendering effects. In particular, the API allows you to control the number of intermediate sub-frames (samples) and the points in time that correspond to each one of them. Furthermore, you can use a shutter profile to control the weights of each sub-frame. A shutter profile describes how fast the physical camera opens and closes its shutter.
 
-This API is particularly useful when recording path traced movies. Normally, when editing a scene, the convergence of path tracing restarts every time the scene changes, to provide artists an interactive editing workflow that allows them to quickly visualize their changes. However such behavior is not desirable during recording.
+This API is particularly useful when recording path traced movies. Normally, when editing a Scene, the convergence of path tracing restarts every time the Scene changes, to provide artists an interactive editing workflow that allows them to quickly visualize their changes. However, such behavior is not desirable during recording.
 
-The following images shows a rotating object with path tracing and accumulation motion blur, recorded using the multi-frame rendering API.
+The following image shows a rotating GameObject with path tracing and accumulation motion blur, recorded using the multi-frame recording API.
 
 ![](Images/path_tracing_recording.png)
 
-## API Overview
-The recording API is available in the HD Render Pipeline and has only three calls:
-- **BeginRecording** should be called when starting a multi-frame render.
-- **PrepareNewSubFrame** should be called before rendering a new subframe.
-- **EndRecording** which should be called to stop the multi-frame rendering mode.
+## API overview
+The recording API is available in HDRP and has three calls:
+- **BeginRecording**: Call this when you want to start a multi-frame render.
+- **PrepareNewSubFrame**: Call this before rendering a new subframe.
+- **EndRecording**: Call this when you want to stop the multi-frame render.
 
-The only call that takes any parameters is **BeginRecording**. Here is an explenation of the parameters:
+The only call that takes any parameters is **BeginRecording**. Here is an explanation of the parameters:
 
-| Parameter | Description |
-|-------------------|---------------------------|
-| Samples | The number of sub-frames to accumumate. This parameter overrides the number of path tracing samples in the the volume.|
-| ShutterInterval | The amount of time the shutter is open between two subsequent frames. Zero denotes instant shutter (no motion blur). One denotes there is no (time) gap between two subsequent frames.|
-| ShutterProfile | An animation curve, denoting the shutter position during the shutter interval. Alternatively, the user can also provide the time the shutter was fully open; and when the shutter begins closing.
+| Parameter | Description |
+|-------------------|---------------------------|
+| **Samples** | The number of sub-frames to accumulate. This parameter overrides the number of path tracing samples in the [Volume](Volumes.md). |
+| **ShutterInterval** | The amount of time the shutter is open between two subsequent frames. A value of **0** results in an instant shutter (no motion blur). A value of **1** means there is no (time) gap between two subsequent frames. |
+| **ShutterProfile** | An animation curve that specifies the shutter position during the shutter interval. Alternatively, you can also provide the time the shutter was fully open, and when the shutter begins closing. |
 
-The script below demonstrates how to use these API calls.
+The example script below demonstrates how to use these API calls.
 
-## Scripting Example
-The following example demonstrates how to use the multi-frame rendering API in your scripts to properly record converged animation sequences with path tracing and/or accumulation motion blur. To use it, attach the script to the camera of your scene and select the “Start recording” and “stop recording” actions from the context menu.
+## Scripting API example
+The following example demonstrates how to use the multi-frame rendering API in your scripts to properly record converged animation sequences with path tracing and/or accumulation motion blur. To use it, attach the script to a Camera in your Scene and, in the component's context menu, click the “Start Recording” and “Stop Recording” actions.
@@ -96,18 +96,18 @@ public class FrameManager : MonoBehaviour
 
-## Shutter Profiles
-The BeginRecording call allows you to specify how fast the camera shutter is opening and closing. The speed of the camera shutter defines the so called “shutter profile”. The following image demonstrates how different shutter profiles affect the appearance of motion blur on a blue sphere moving from left to right.
+## Shutter profiles
+The **BeginRecording** call allows you to specify how fast the camera shutter opens and closes. The speed of the camera shutter defines the so-called “shutter profile”. The following image demonstrates how different shutter profiles affect the appearance of motion blur on a blue sphere moving from left to right.
 
 ![](Images/shutter_profiles.png)
 
 In all cases, the speed of the sphere is the same. The only change is the shutter profile. The horizontal axis of the profile diagram corresponds to time, and the vertical axis corresponds to the opening of the shutter.
 
-The first three profiles cqn be eqsilly defined without using an animation vurve by setting the open, close parameters to (0,1), (1,1) and (0.25, 0.75). The last profile requires the use of an animation curve.
+You can easily define the first three profiles without using an animation curve by setting the open, close parameters to (0,1), (1,1), and (0.25, 0.75) respectively. The last profile requires the use of an animation curve.
 
-In this example, we observe that the slow open profile creates a motion trail appearance for the motion blur, which might be more desired for the artists. On the other hand, the smooth open and close profile creates smoother animations than the slow open or uniform profiles.
+In this example, you can see that the slow open profile creates a motion trail appearance for the motion blur, which artists might prefer. On the other hand, the smooth open and close profile creates smoother animations than the slow open or uniform profiles.
 
 ## Limitations
-The multi-frame rendering API internally changes the Time.timeScale of the scene. This means that:
+The multi-frame rendering API internally changes the `Time.timeScale` of the Scene. This means that:
 - You cannot have different accumulation motion blur parameters per camera.
-- Projects that already modify this parameter per frame will not be compatible with this technique.
+- Projects that already modify this parameter per frame are not compatible with this feature.
\ No newline at end of file

From 0b33f08520120c7fb20b9dc72fc69663e386be3a Mon Sep 17 00:00:00 2001
From: Lewis Jordan
Date: Wed, 13 May 2020 13:04:23 +0100
Subject: [PATCH 10/11] Reviewed path-traced dof doc

---
 .../Documentation~/Accumulation.md                   | 4 ++--
 .../Documentation~/Post-Processing-Depth-of-Field.md | 8 ++++----
 .../Documentation~/Ray-Tracing-Light-Cluster.md      | 2 +-
 .../Documentation~/Ray-Tracing-Path-Tracing.md       | 4 ++--
 4 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/com.unity.render-pipelines.high-definition/Documentation~/Accumulation.md b/com.unity.render-pipelines.high-definition/Documentation~/Accumulation.md
index 1960836f6f9..1016cef3a5e 100644
--- a/com.unity.render-pipelines.high-definition/Documentation~/Accumulation.md
+++ b/com.unity.render-pipelines.high-definition/Documentation~/Accumulation.md
@@ -1,10 +1,10 @@
 ## Multiframe rendering and accumulation
 
-Some rendering techniques, such as [Path Tracing](Ray-Tracing-Path-Tracing.md) and accumulation motion blur, combine information from multiple intermediate sub-frames to create a final "converged" frame. Each intermediate sub-frame can correspond to a slightly different point in time, effectively computing physically-based accumulation motion blur, which properly takes into account object rotations, deformations, material or lighting changes.
+Some rendering techniques, such as [path tracing](Ray-Tracing-Path-Tracing.md) and accumulation motion blur, combine information from multiple intermediate sub-frames to create a final "converged" frame. Each intermediate sub-frame can correspond to a slightly different point in time, effectively computing physically-based accumulation motion blur, which properly takes into account object rotations, deformations, material or lighting changes.
 
 The High Definition Render Pipeline (HDRP) provides a scripting API that allows you to control the creation of sub-frames and the convergence of multi-frame rendering effects. In particular, the API allows you to control the number of intermediate sub-frames (samples) and the points in time that correspond to each one of them. Furthermore, you can use a shutter profile to control the weights of each sub-frame. A shutter profile describes how fast the physical camera opens and closes its shutter.
 
-This API is particularly useful when recording path traced movies. Normally, when editing a Scene, the convergence of path tracing restarts every time the Scene changes, to provide artists an interactive editing workflow that allows them to quickly visualize their changes. However, such behavior is not desirable during recording.
+This API is particularly useful when recording path-traced movies. Normally, when editing a Scene, the convergence of path tracing restarts every time the Scene changes, to provide artists an interactive editing workflow that allows them to quickly visualize their changes. However, such behavior is not desirable during recording.
 
 The following image shows a rotating GameObject with path tracing and accumulation motion blur, recorded using the multi-frame recording API.
 
 ![](Images/path_tracing_recording.png)

diff --git a/com.unity.render-pipelines.high-definition/Documentation~/Post-Processing-Depth-of-Field.md b/com.unity.render-pipelines.high-definition/Documentation~/Post-Processing-Depth-of-Field.md
index 2fe4fce2dd3..447004a2a5b 100644
--- a/com.unity.render-pipelines.high-definition/Documentation~/Post-Processing-Depth-of-Field.md
+++ b/com.unity.render-pipelines.high-definition/Documentation~/Post-Processing-Depth-of-Field.md
@@ -65,12 +65,12 @@ This example shows how the **Blade Count** and **Curvature** properties affect t
 
 ![](Images/Post-ProcessingDepthofField2.png)
 
-## Path-Traced Depth of Field
+## Path-traced depth of field
 
-When [Path Tracing](Ray-Tracing-Path-Tracing) is enabled and the Focus Mode in the Depth of Field volume is set to use the physical camera, then depth of field will be computed directly during path tracing and it will not be post processed.
+If you enable [path tracing](Ray-Tracing-Path-Tracing) and set **Focus Mode** to **Use Physical Camera**, HDRP computes depth of field directly during path tracing instead of as a post-processing effect.
 
-Path-traced depth of field produces images without any artifacts, apart from noise when using insufficient path-tracing samples. To reduce the noise level you have to increase the number of samples from the [Path Tracing](Ray-Tracing-Path-Tracing) settings and/or de-noise the final frame.
+Path-traced depth of field produces images without any artifacts, apart from noise when using insufficient path-tracing samples. To reduce the noise level, increase the number of samples from the [Path Tracing](Ray-Tracing-Path-Tracing) settings and/or de-noise the final frame.
 
-The path traced Depth of Field is always computed at full resolution and any quality settings from the volume will be ignored.
+HDRP computes path-traced depth of field at full resolution and ignores any quality settings from the Volume.
 
 ![](Images/Path-traced-DoF.png)

diff --git a/com.unity.render-pipelines.high-definition/Documentation~/Ray-Tracing-Light-Cluster.md b/com.unity.render-pipelines.high-definition/Documentation~/Ray-Tracing-Light-Cluster.md
index f93a6a0f650..dcccec16d01 100644
--- a/com.unity.render-pipelines.high-definition/Documentation~/Ray-Tracing-Light-Cluster.md
+++ b/com.unity.render-pipelines.high-definition/Documentation~/Ray-Tracing-Light-Cluster.md
@@ -1,6 +1,6 @@
 # Light Cluster
 
-To compute light bounces for ray-traced effects, such as [Reflections](Ray-Traced-Reflections.html), [Global Illumination](Ray-Traced-Global-Illumination.html), [Recursive Rendering](Ray-Tracing-Recursive-Rendering.html), or Path Tracing. HDRP uses a structure to store the set of [Lights](Light-Component.html) that affect each region. In rasterization, HDRP uses the tile structure for opaque objects and the cluster structure for transparent objects.
+To compute light bounces for ray-traced effects, such as [Reflections](Ray-Traced-Reflections.html), [Global Illumination](Ray-Traced-Global-Illumination.html), [Recursive Rendering](Ray-Tracing-Recursive-Rendering.html), or path tracing, HDRP uses a structure to store the set of [Lights](Light-Component.html) that affect each region. In rasterization, HDRP uses the tile structure for opaque objects and the cluster structure for transparent objects.
 The main difference between these two structures and this one used for ray tracing is that this one is not based on the Camera frustum.
 For ray tracing, HDRP builds an axis-aligned grid which, in each cell, stores the list of Lights to fetch if an intersection occurs in that cell. Use this [Volume Override](Volume-Components.html) to change how HDRP builds this structure.
 
 ![](Images/RayTracingLightCluster1.png)
diff --git a/com.unity.render-pipelines.high-definition/Documentation~/Ray-Tracing-Path-Tracing.md b/com.unity.render-pipelines.high-definition/Documentation~/Ray-Tracing-Path-Tracing.md
index 700289c68e4..473fce82dc1 100644
--- a/com.unity.render-pipelines.high-definition/Documentation~/Ray-Tracing-Path-Tracing.md
+++ b/com.unity.render-pipelines.high-definition/Documentation~/Ray-Tracing-Path-Tracing.md
@@ -22,11 +22,11 @@ Path tracing shares the general requirements and setup as other ray tracing effe
 
 ## Add path tracing to your Scene
 
-Path Tracing uses the [Volume](Volumes.html) framework, so to enable this feature, and modify its properties, you must add a Path Tracing override to a [Volume](Volumes.html) in your Scene. To do this:
+Path tracing uses the [Volume](Volumes.html) framework, so to enable this feature, and modify its properties, you must add a Path Tracing override to a [Volume](Volumes.html) in your Scene. To do this:
 
 1. In the Scene or Hierarchy view, select a GameObject that contains a Volume component to view it in the Inspector.
 2. In the Inspector, select Add Override > Path Tracing.
-3. In the Inspector for the Path Tracing Volume Override, check the Enable option. If you do not see the Enable option, make sure your HDRP Project supports ray tracing. For information on setting up ray tracing in HDRP, see [getting started with ray tracing](Ray-Tracing-Getting-Started.html). This switches HDRP to path traced rendering and you should initially see a noisy image that converges towards a clean result.
+3. In the Inspector for the Path Tracing Volume Override, check the Enable option. If you do not see the Enable option, make sure your HDRP Project supports ray tracing. For information on setting up ray tracing in HDRP, see [getting started with ray tracing](Ray-Tracing-Getting-Started.html). This switches HDRP to path-traced rendering and you should initially see a noisy image that converges towards a clean result.
 4. If the image does not converge over time, select the drop-down next to the effect toggle and enable Animated Materials.
 
 ![](Images/RayTracingPathTracing3.png)

From 44d6d0839b5bdbbdcd884edb87bacd0d54fdfb23 Mon Sep 17 00:00:00 2001
From: Pavlos Mavridis
Date: Mon, 18 May 2020 13:19:11 +0200
Subject: [PATCH 11/11] Add missing OnDestroy in example script

---
 .../Documentation~/Accumulation.md | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/com.unity.render-pipelines.high-definition/Documentation~/Accumulation.md b/com.unity.render-pipelines.high-definition/Documentation~/Accumulation.md
index 1016cef3a5e..b0c50801f05 100644
--- a/com.unity.render-pipelines.high-definition/Documentation~/Accumulation.md
+++ b/com.unity.render-pipelines.high-definition/Documentation~/Accumulation.md
@@ -88,6 +88,14 @@ public class FrameManager : MonoBehaviour
         }
     }
 
+    void OnDestroy()
+    {
+        if (m_Recording)
+        {
+            StopMultiframeRendering();
+        }
+    }
+
     void OnValidate()
     {
         // Make sure the shutter will begin closing sometime after it is fully open (and not before)