diff --git a/.clang-format b/.clang-format index c94a5ff6c..68053b603 100644 --- a/.clang-format +++ b/.clang-format @@ -3,8 +3,10 @@ BasedOnStyle: LLVM AccessModifierOffset: -4 AllowShortBlocksOnASingleLine: Empty AllowShortCaseLabelsOnASingleLine: false +BinPackParameters: false AllowShortFunctionsOnASingleLine: Empty AllowShortIfStatementsOnASingleLine: Never +AllowAllParametersOfDeclarationOnNextLine: false AllowShortLoopsOnASingleLine: false AlwaysBreakTemplateDeclarations: Yes ColumnLimit: 120 diff --git a/documentation/source/development/debugging/cla.rst b/documentation/source/development/debugging/cla.rst index 049654741..773fdf86d 100644 --- a/documentation/source/development/debugging/cla.rst +++ b/documentation/source/development/debugging/cla.rst @@ -25,14 +25,6 @@ You can start vulkan-renderer with the following command line arguments: .. warning:: You should never disable validation layers because they offer extensive error checks for debugging. -.. option:: --no-vk-debug-markers - - Disables `Vulkan debug markers `__ (even if ``--renderdoc`` is specified). - -.. option:: --renderdoc - - Enables the `RenderDoc `__ debug layer. - .. option:: --vsync .. warning:: Vsync is currently not implemented. The command line argument will be ignored. diff --git a/documentation/source/development/debugging/images/renderdoc/RenderDoc_step_1.jpg b/documentation/source/development/debugging/images/renderdoc/RenderDoc_step_1.jpg deleted file mode 100644 index 70f77d051..000000000 Binary files a/documentation/source/development/debugging/images/renderdoc/RenderDoc_step_1.jpg and /dev/null differ diff --git a/documentation/source/development/debugging/images/renderdoc/RenderDoc_step_2.jpg b/documentation/source/development/debugging/images/renderdoc/RenderDoc_step_2.jpg deleted file mode 100644 index c1d63044e..000000000 Binary files a/documentation/source/development/debugging/images/renderdoc/RenderDoc_step_2.jpg and /dev/null differ diff --git a/documentation/source/development/debugging/images/renderdoc/RenderDoc_step_3.jpg b/documentation/source/development/debugging/images/renderdoc/RenderDoc_step_3.jpg deleted file mode 100644 index 99c4f28ba..000000000 Binary files a/documentation/source/development/debugging/images/renderdoc/RenderDoc_step_3.jpg and /dev/null differ diff --git a/documentation/source/development/debugging/images/renderdoc/RenderDoc_step_4.jpg b/documentation/source/development/debugging/images/renderdoc/RenderDoc_step_4.jpg deleted file mode 100644 index 4848e56ca..000000000 Binary files a/documentation/source/development/debugging/images/renderdoc/RenderDoc_step_4.jpg and /dev/null differ diff --git a/documentation/source/development/debugging/images/renderdoc/RenderDoc_step_5.jpg b/documentation/source/development/debugging/images/renderdoc/RenderDoc_step_5.jpg deleted file mode 100644 index 1198b583d..000000000 Binary files a/documentation/source/development/debugging/images/renderdoc/RenderDoc_step_5.jpg and /dev/null differ diff --git a/documentation/source/development/debugging/images/renderdoc/RenderDoc_step_6.jpg b/documentation/source/development/debugging/images/renderdoc/RenderDoc_step_6.jpg deleted file mode 100644 index 2184f8685..000000000 Binary files a/documentation/source/development/debugging/images/renderdoc/RenderDoc_step_6.jpg and /dev/null differ diff --git a/documentation/source/development/debugging/images/renderdoc/RenderDoc_step_7.jpg b/documentation/source/development/debugging/images/renderdoc/RenderDoc_step_7.jpg deleted file mode 100644 
index bc182957e..000000000 Binary files a/documentation/source/development/debugging/images/renderdoc/RenderDoc_step_7.jpg and /dev/null differ diff --git a/documentation/source/development/debugging/images/renderdoc/RenderDoc_step_8.jpg b/documentation/source/development/debugging/images/renderdoc/RenderDoc_step_8.jpg deleted file mode 100644 index 611dde4c9..000000000 Binary files a/documentation/source/development/debugging/images/renderdoc/RenderDoc_step_8.jpg and /dev/null differ diff --git a/documentation/source/development/debugging/images/renderdoc/RenderDoc_step_9.jpg b/documentation/source/development/debugging/images/renderdoc/RenderDoc_step_9.jpg deleted file mode 100644 index 0783b1b7b..000000000 Binary files a/documentation/source/development/debugging/images/renderdoc/RenderDoc_step_9.jpg and /dev/null differ diff --git a/documentation/source/development/debugging/images/renderdoc/VisualStudioBreakpoint.jpg b/documentation/source/development/debugging/images/renderdoc/VisualStudioBreakpoint.jpg deleted file mode 100644 index 16a56f19f..000000000 Binary files a/documentation/source/development/debugging/images/renderdoc/VisualStudioBreakpoint.jpg and /dev/null differ diff --git a/documentation/source/development/debugging/images/renderdoc/VisualStudioDebugging.jpg b/documentation/source/development/debugging/images/renderdoc/VisualStudioDebugging.jpg deleted file mode 100644 index a4f83a4a7..000000000 Binary files a/documentation/source/development/debugging/images/renderdoc/VisualStudioDebugging.jpg and /dev/null differ diff --git a/documentation/source/development/debugging/renderdoc.rst b/documentation/source/development/debugging/renderdoc.rst index 7d8835854..0fbaf0fd5 100644 --- a/documentation/source/development/debugging/renderdoc.rst +++ b/documentation/source/development/debugging/renderdoc.rst @@ -2,99 +2,5 @@ RenderDoc ========= - `RenderDoc `__ is a free and open source graphics debugger for Vulkan API (and other APIs) developed by `Baldur Karlsson `__. -- It is a very powerful graphics debugging and visualization tool which makes debugging Vulkan application as easy as possible. -- Inexor has full RenderDoc integration. This includes `internal resource naming using Vulkan debug markers `__. -- The following tutorial shows how to debug Inexor using RenderDoc. +- It is a very powerful graphics debugging and visualization tool which simplifies debugging Vulkan application a lot. - You can read up more details in `RenderDoc's documentation `__. - -RenderDoc Tutorial for Windows ------------------------------- - -Step 1: Open Inexor in Visual Studio and add a breakpoint before Vulkan initialization -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -- The best spot would be right after ``main()``: - -.. image:: /development/debugging/images/renderdoc/VisualStudioBreakpoint.jpg - :width: 800 - :alt: A breakpoint after the main function in Visual Studio debugger. - -Step 2: Open RenderDoc. -^^^^^^^^^^^^^^^^^^^^^^^ - -.. image:: /development/debugging/images/renderdoc/RenderDoc_step_1.jpg - :width: 800 - :alt: RenderDoc right after starting it. - -Step 3: Start debugging inexor-vulkan-renderer and halt at the breakpoint -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. image:: /development/debugging/images/renderdoc/VisualStudioDebugging.jpg - :width: 800 - :alt: Visual Studio interrupts the program because of a breakpoint. 
- -Step 4: "Inject into process" inexor-vulkan-renderer.exe using RenderDoc -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. image:: /development/debugging/images/renderdoc/RenderDoc_step_2.jpg - :width: 800 - :alt: "Inject into process" in RenderDoc's menu. - -Step 5: Search for "inexor-vulkan-renderer.exe" and click "inject" -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -- You will see a warning Windows Firewall the first time you do this. -- This is because RenderDoc is reading memory from inexor-vulkan-renderer. -- Accept the Windows Firewall warning to allow RenderDoc to read memory. - -.. image:: /development/debugging/images/renderdoc/RenderDoc_step_3.jpg - :width: 800 - :alt: Injecting into inexor-vulkan-renderer. - -Step 6: Continue debugging in Visual Studio -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -- RenderDoc should now look like this. - -.. image:: /development/debugging/images/renderdoc/RenderDoc_step_4.jpg - :width: 800 - :alt: Injecting into inexor-vulkan-renderer. - -- Press ``F5`` to continue program execution from the breakpoint. -- RenderDoc is now connected to inexor-vulkan-renderer: - -.. image:: /development/debugging/images/renderdoc/RenderDoc_step_5.jpg - :width: 800 - :alt: RenderDoc is connected inexor-vulkan-renderer. - -- You can see RenderDoc's overlay in inexor-vulkan-renderer.exe: - -.. image:: /development/debugging/images/renderdoc/RenderDoc_step_6.jpg - :width: 800 - :alt: Taking a RenderDoc snapshot. - -Step 7: Debug inexor-vulkan-renderer.exe as usual and press F12 to take RenderDoc snapshots -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -- You can take multiple snapshots with either ``PRINT`` or ``F12`` key. - -.. image:: /development/debugging/images/renderdoc/RenderDoc_step_7.jpg - :width: 800 - :alt: Taking a RenderDoc snapshot. - -- You can see the snapshots in RenderDoc right after you took them: - -.. image:: /development/debugging/images/renderdoc/RenderDoc_step_8.jpg - :width: 800 - :alt: Taking a RenderDoc snapshot. - -Step 8: Open a snapshot to analyze the rendering of this frame -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -- Double click on a snapshot to open it: - -.. image:: /development/debugging/images/renderdoc/RenderDoc_step_9.jpg - :width: 800 - :alt: Taking a RenderDoc snapshot. - -- Have fun inspecting! diff --git a/documentation/source/development/getting-started.rst b/documentation/source/development/getting-started.rst index 09c9effbc..9a5754685 100644 --- a/documentation/source/development/getting-started.rst +++ b/documentation/source/development/getting-started.rst @@ -37,7 +37,7 @@ Optional Software Improve your build times with ninja. `RenderDoc `__ - Powerful open source graphics debugger. Inexor has full RenderDoc integration. + A very powerful open source graphics debugger. `Doxygen `__ Required for generating the documentation. 
diff --git a/example-app/include/base/example_app_base.hpp b/example-app/include/base/example_app_base.hpp new file mode 100644 index 000000000..e69de29bb diff --git a/example-app/include/example_app.hpp b/example-app/include/example_app.hpp new file mode 100644 index 000000000..e69de29bb diff --git a/example-app/src/base/example_app_base.cpp b/example-app/src/base/example_app_base.cpp new file mode 100644 index 000000000..e69de29bb diff --git a/example-app/src/example_app.cpp b/example-app/src/example_app.cpp new file mode 100644 index 000000000..e69de29bb diff --git a/example/main.cpp b/example/main.cpp index f85737397..e167c9b1a 100644 --- a/example/main.cpp +++ b/example/main.cpp @@ -1,33 +1,12 @@ #include "inexor/vulkan-renderer/application.hpp" -#include -#include -#include -#include - #include int main(int argc, char *argv[]) { - spdlog::init_thread_pool(8192, 2); - - auto console_sink = std::make_shared(); - auto file_sink = std::make_shared("vulkan-renderer.log", true); - auto vulkan_renderer_log = - std::make_shared("vulkan-renderer", spdlog::sinks_init_list{console_sink, file_sink}, - spdlog::thread_pool(), spdlog::async_overflow_policy::block); - vulkan_renderer_log->set_level(spdlog::level::trace); - vulkan_renderer_log->set_pattern("%Y-%m-%d %T.%f %^%l%$ %5t [%-10n] %v"); - vulkan_renderer_log->flush_on(spdlog::level::debug); // TODO: as long as we don't have a flush on crash - - spdlog::set_default_logger(vulkan_renderer_log); - - spdlog::trace("Inexor vulkan-renderer, BUILD " + std::string(__DATE__) + ", " + __TIME__); - spdlog::trace("Parsing command line arguments"); - - std::unique_ptr renderer; - try { - renderer = std::make_unique(argc, argv); + using inexor::vulkan_renderer::Application; + std::unique_ptr renderer = std::make_unique(argc, argv); + renderer->run(); } catch (const std::runtime_error &exception) { spdlog::critical(exception.what()); return 1; @@ -35,7 +14,5 @@ int main(int argc, char *argv[]) { spdlog::critical(exception.what()); return 1; } - - renderer->run(); - spdlog::trace("Window closed"); + return 0; } diff --git a/include/inexor/vulkan-renderer/application.hpp b/include/inexor/vulkan-renderer/application.hpp index 9542318e6..be7d4eb0d 100644 --- a/include/inexor/vulkan-renderer/application.hpp +++ b/include/inexor/vulkan-renderer/application.hpp @@ -1,9 +1,18 @@ #pragma once +#include "inexor/vulkan-renderer/camera.hpp" +#include "inexor/vulkan-renderer/fps_counter.hpp" #include "inexor/vulkan-renderer/input/keyboard_mouse_data.hpp" -#include "inexor/vulkan-renderer/renderer.hpp" +#include "inexor/vulkan-renderer/octree_gpu_vertex.hpp" +#include "inexor/vulkan-renderer/render-graph/render_graph.hpp" +#include "inexor/vulkan-renderer/renderers/imgui.hpp" +#include "inexor/vulkan-renderer/time_step.hpp" #include "inexor/vulkan-renderer/world/collision_query.hpp" #include "inexor/vulkan-renderer/world/cube.hpp" +#include "inexor/vulkan-renderer/wrapper/instance.hpp" +#include "inexor/vulkan-renderer/wrapper/surface.hpp" +#include "inexor/vulkan-renderer/wrapper/swapchain.hpp" +#include "inexor/vulkan-renderer/wrapper/window.hpp" // Forward declarations namespace inexor::vulkan_renderer::input { @@ -12,16 +21,73 @@ class KeyboardMouseInputData; namespace inexor::vulkan_renderer { -class Application : public VulkanRenderer { - std::vector m_vertex_shader_files; - std::vector m_fragment_shader_files; - std::vector m_texture_files; - std::vector m_gltf_model_files; +// Using declarations +using input::KeyboardMouseInputData; +using wrapper::Device; 
+using wrapper::Instance; +using wrapper::Surface; +using wrapper::Swapchain; +using wrapper::Window; + +class Application { +private: + TimeStep m_stopwatch; + FPSCounter m_fps_counter; + bool m_vsync_enabled{false}; + + PFN_vkDebugUtilsMessengerCallbackEXT m_debug_callbacks{VK_NULL_HANDLE}; + + bool m_debug_report_callback_initialised{false}; + + std::unique_ptr m_camera; + std::unique_ptr m_window; + std::unique_ptr m_instance; + std::unique_ptr m_device; + std::shared_ptr m_swapchain; + std::shared_ptr m_surface; + std::unique_ptr m_imgui_overlay; + + std::vector m_octree_vertices; + std::vector m_octree_indices; + + std::shared_ptr m_render_graph; + std::weak_ptr m_color_attachment; + std::weak_ptr m_depth_attachment; + std::weak_ptr m_index_buffer; + std::weak_ptr m_vertex_buffer; + std::weak_ptr m_uniform_buffer; + std::shared_ptr m_octree_vert; + std::shared_ptr m_octree_frag; + + VkDescriptorSetLayout m_descriptor_set_layout{VK_NULL_HANDLE}; + VkDescriptorSet m_descriptor_set{VK_NULL_HANDLE}; + + std::shared_ptr m_octree_pipeline; + std::weak_ptr m_octree_pass; + struct ModelViewPerspectiveMatrices { + glm::mat4 model{1.0f}; + glm::mat4 view{1.0f}; + glm::mat4 proj{1.0f}; + } m_mvp_matrices; + + void setup_render_graph(); + void generate_octree_indices(); + void recreate_swapchain(); + void render_frame(); + + float m_time_passed{0.0f}; + + std::uint32_t m_wnd_width{0}; + std::uint32_t m_wnd_height{0}; + std::string m_wnd_title; + bool m_wnd_resized{false}; + wrapper::Window::Mode m_wnd_mode{wrapper::Window::Mode::WINDOWED}; + + std::vector m_gltf_model_files; std::unique_ptr m_input_data; bool m_enable_validation_layers{true}; - /// Inexor engine supports a variable number of octrees. std::vector> m_worlds; // If the user specified command line argument "--stop-on-validation-message", the program will call @@ -32,20 +98,32 @@ class Application : public VulkanRenderer { /// @brief file_name The TOML configuration file. /// @note It was collectively decided not to use JSON for configuration files. void load_toml_configuration_file(const std::string &file_name); - void load_textures(); - void load_shaders(); + + void check_octree_collisions(); + void initialize_spdlog(); + /// @param initialize Initialize worlds with a fixed seed, which is useful for benchmarking and testing void load_octree_geometry(bool initialize); - void setup_vulkan_debug_callback(); + + void process_keyboard_input(); + void process_mouse_input(); void setup_window_and_input_callbacks(); void update_imgui_overlay(); - void update_uniform_buffers(); - /// Use the camera's position and view direction vector to check for ray-octree collisions with all octrees. - void check_octree_collisions(); - void process_mouse_input(); public: Application(int argc, char **argv); + Application(const Application &) = delete; + Application(Application &&) = delete; + ~Application(); + + Application &operator=(const Application &) = delete; + Application &operator=(Application &&) = delete; + + /// @brief Call glfwSetCursorPosCallback. + /// @param window The window that received the event. + /// @param x_pos The new x-coordinate, in screen coordinates, of the cursor. + /// @param y_pos The new y-coordinate, in screen coordinates, of the cursor. + void cursor_position_callback(GLFWwindow *window, double x_pos, double y_pos); /// @brief Call glfwSetKeyCallback. /// @param window The window that received the event. 
@@ -55,12 +133,6 @@ class Application : public VulkanRenderer { /// @param mods Bit field describing which modifier keys were held down. void key_callback(GLFWwindow *window, int key, int scancode, int action, int mods); - /// @brief Call glfwSetCursorPosCallback. - /// @param window The window that received the event. - /// @param x_pos The new x-coordinate, in screen coordinates, of the cursor. - /// @param y_pos The new y-coordinate, in screen coordinates, of the cursor. - void cursor_position_callback(GLFWwindow *window, double x_pos, double y_pos); - /// @brief Call glfwSetMouseButtonCallback. /// @param window The window that received the event. /// @param button The mouse button that was pressed or released. diff --git a/include/inexor/vulkan-renderer/exception.hpp b/include/inexor/vulkan-renderer/exception.hpp index b92d917e9..d43361843 100644 --- a/include/inexor/vulkan-renderer/exception.hpp +++ b/include/inexor/vulkan-renderer/exception.hpp @@ -7,16 +7,17 @@ namespace inexor::vulkan_renderer { -/// @brief A custom base class for exceptions +/// A custom base class for exceptions class InexorException : public std::runtime_error { public: // No need to define own constructors. using std::runtime_error::runtime_error; }; -/// @brief InexorException for Vulkan specific things. +/// InexorException for Vulkan specific things. class VulkanException final : public InexorException { public: + /// Default constructor /// @param message The exception message. /// @param result The VkResult value of the Vulkan API call which failed. VulkanException(std::string message, VkResult result); diff --git a/include/inexor/vulkan-renderer/imgui.hpp b/include/inexor/vulkan-renderer/imgui.hpp deleted file mode 100644 index 0ec531814..000000000 --- a/include/inexor/vulkan-renderer/imgui.hpp +++ /dev/null @@ -1,66 +0,0 @@ -#pragma once - -#include "inexor/vulkan-renderer/render_graph.hpp" -#include "inexor/vulkan-renderer/wrapper/descriptor.hpp" -#include "inexor/vulkan-renderer/wrapper/gpu_texture.hpp" -#include "inexor/vulkan-renderer/wrapper/shader.hpp" - -#include -#include -#include - -#include -#include - -// Forward declarations -namespace inexor::vulkan_renderer::wrapper { -class Device; -class Swapchain; -} // namespace inexor::vulkan_renderer::wrapper - -namespace inexor::vulkan_renderer { - -class ImGUIOverlay { - const wrapper::Device &m_device; - const wrapper::Swapchain &m_swapchain; - float m_scale{1.0f}; - - BufferResource *m_index_buffer{nullptr}; - BufferResource *m_vertex_buffer{nullptr}; - GraphicsStage *m_stage{nullptr}; - - std::unique_ptr m_imgui_texture; - std::unique_ptr m_vertex_shader; - std::unique_ptr m_fragment_shader; - std::unique_ptr m_descriptor; - std::vector m_index_data; - std::vector m_vertex_data; - - struct PushConstBlock { - glm::vec2 scale; - glm::vec2 translate; - } m_push_const_block{}; - -public: - /// @brief Construct a new ImGUI overlay. 
- /// @param device A reference to the device wrapper - /// @param swapchain A reference to the swapchain - /// @param render_graph A pointer to the render graph - /// @param back_buffer A pointer to the target of the ImGUI rendering - ImGUIOverlay(const wrapper::Device &device, const wrapper::Swapchain &swapchain, RenderGraph *render_graph, - TextureResource *back_buffer); - ImGUIOverlay(const ImGUIOverlay &) = delete; - ImGUIOverlay(ImGUIOverlay &&) = delete; - ~ImGUIOverlay(); - - ImGUIOverlay &operator=(const ImGUIOverlay &) = delete; - ImGUIOverlay &operator=(ImGUIOverlay &&) = delete; - - void update(); - - [[nodiscard]] float scale() const { - return m_scale; - } -}; - -} // namespace inexor::vulkan_renderer diff --git a/include/inexor/vulkan-renderer/io/exception.hpp b/include/inexor/vulkan-renderer/io/io_exception.hpp similarity index 100% rename from include/inexor/vulkan-renderer/io/exception.hpp rename to include/inexor/vulkan-renderer/io/io_exception.hpp diff --git a/include/inexor/vulkan-renderer/msaa_target.hpp b/include/inexor/vulkan-renderer/msaa_target.hpp deleted file mode 100644 index d6884b43f..000000000 --- a/include/inexor/vulkan-renderer/msaa_target.hpp +++ /dev/null @@ -1,17 +0,0 @@ -#pragma once - -#include "inexor/vulkan-renderer/wrapper/image.hpp" - -#include - -namespace inexor::vulkan_renderer { - -struct MSAATarget { - // The color buffer. - std::unique_ptr m_color; - - // The depth buffer. - std::unique_ptr m_depth; -}; - -} // namespace inexor::vulkan_renderer diff --git a/include/inexor/vulkan-renderer/render-graph/buffer.hpp b/include/inexor/vulkan-renderer/render-graph/buffer.hpp new file mode 100644 index 000000000..677344e06 --- /dev/null +++ b/include/inexor/vulkan-renderer/render-graph/buffer.hpp @@ -0,0 +1,159 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include + +namespace inexor::vulkan_renderer::wrapper { +/// Forward declaration +class Device; +} // namespace inexor::vulkan_renderer::wrapper + +namespace inexor::vulkan_renderer::wrapper::commands { +/// Forward declaration +class CommandBuffer; +} // namespace inexor::vulkan_renderer::wrapper::commands + +namespace inexor::vulkan_renderer::wrapper::descriptors { +/// Forward declaration +class WriteDescriptorSetBuilder; +} // namespace inexor::vulkan_renderer::wrapper::descriptors + +namespace inexor::vulkan_renderer::render_graph { + +/// The buffer type describes the internal usage of the buffer resource inside of the rendergraph +enum class BufferType { + VERTEX_BUFFER, + INDEX_BUFFER, + UNIFORM_BUFFER, +}; + +// Forward declaration +class GraphicsPass; + +// Using declarations +using wrapper::Device; +using wrapper::commands::CommandBuffer; +using wrapper::descriptors::WriteDescriptorSetBuilder; + +// TODO: Store const reference to rendergraph and retrieve the swapchain image index for automatic buffer tripling + +/// RAII wrapper for buffer resources inside of the rendergraph +/// A buffer resource can be a vertex buffer, index buffer, or uniform buffer +class Buffer { + // + friend class RenderGraph; + friend class GraphicsPass; + friend class CommandBuffer; + friend class WriteDescriptorSetBuilder; + +private: + /// The device wrapper + const Device &m_device; + /// The internal debug name of the buffer resource + std::string m_name; + + /// The buffer type will be set depending on which constructor of the Buffer wrapper is called by rendergraph. 
The + /// engine currently supports three different types of buffers in the Buffer wrapper class: vertex buffers, index + /// buffers, and uniform buffers. The instances of the Buffer wrapper class are managed by rendergraph only. One + /// solution to deal with the different buffer types would be to use a BufferBase class and to make three distinct + /// classes VertexBuffer, IndexBuffer, and UniformBuffer. However, we aimed for simplicity and wanted to avoid + /// polymorphism in the rendergraph for performance reasons. We also refrained from using templates for this use + /// case. Therefore, we have chosen to use only one Buffer wrapper class which contains members for all three + /// different buffer types. The type of the buffer will be set depending on which Buffer constructor is called by + /// rendergraph. The actual memory management for the buffers is done by Vulkan Memory Allocator (VMA) internally. + BufferType m_buffer_type; + + /// The buffer update function which is called by rendergraph to update the buffer's data. This update function is + /// called, no matter what the type of the buffer is. With the currently supported buffer types (vertex-, index-, + /// and uniform buffers) there is always a discussion about whether some update lambdas can be made std::optional. + /// For example we could have one vertex buffer with an index buffer and the index buffer is updated together with + /// the vertex buffer in the update function of the vertex buffer. From the design of the engine there is no + /// limitation which buffer is updated in which update function, as long as the handle to that buffer has been + /// created in rendergraph. In our example, the update function of the index buffer could be std::nullopt. In this + /// case, rendergraph could separate all buffers into those which require an update and those who do not. For + /// simplicity however, we made the update function not std::optional. + + // TODO: Rewrite description + std::function m_on_check_for_update; + + /// TODO: Is this is relevant for uniform buffers only? + /// TODO: Maybe buffer updates should be done immediately, and no m_src_data should be stored! + /// It's the responsibility of the programmer to make sure the data m_src_data points to is still valid when + /// update_buffer() is called! 
+ void *m_src_data{nullptr}; + std::size_t m_src_data_size{0}; + bool m_update_requested{false}; + + /// The resources for actual memory management of the buffer + VkBuffer m_buffer{VK_NULL_HANDLE}; + VmaAllocation m_alloc{VK_NULL_HANDLE}; + VmaAllocationInfo m_alloc_info{}; + + /// The descriptor buffer info (required for uniform buffers) + VkDescriptorBufferInfo m_descriptor_buffer_info{}; + + /// The staging buffer (if required) + VkBuffer m_staging_buffer{VK_NULL_HANDLE}; + VmaAllocation m_staging_buffer_alloc{VK_NULL_HANDLE}; + VmaAllocationInfo m_staging_buffer_alloc_info{}; + + /// Create the buffer using Vulkan Memory Allocator (VMA) library + /// @param cmd_buf The command buffer + void create(const CommandBuffer &cmd_buf); + + /// Call destroy_buffer and destroy_staging_buffer + void destroy_all(); + + /// Call vmaDestroyBuffer for the actual buffer + void destroy_buffer(); + + /// Call vmaDestroyBuffer for the staging bufffer + void destroy_staging_buffer(); + +public: + /// Default constructor + /// @param device The device wrapper + /// @param buffer_name The name of the buffer + /// @param buffer_type The type of the buffer + /// @param on_update The buffer update function + Buffer(const Device &device, std::string buffer_name, BufferType buffer_type, std::function on_update); + + Buffer(const Buffer &) = delete; + Buffer(Buffer &&other) noexcept; + + /// Call destroy_buffer + ~Buffer(); + + Buffer &operator=(const Buffer &) = delete; + Buffer &operator=(Buffer &&) = delete; + + /// Request a buffer update + /// @param src_data A pointer to the data to copy the updated data from + /// @warning It is the responsibility of the programmer to make sure src_data still points to valid memory when + /// update_buffer() is called! + /// @param src_data_size The size of the data to copy + void request_update(void *src_data, std::size_t src_data_size); + + /// Request a buffer update + /// @tparam BufferDataType + /// @param data + template + void request_update(BufferDataType &data) { + return request_update(std::addressof(data), sizeof(data)); + } + + /// Request a buffer update + /// @tparam BufferDataType + /// @param data + template + void request_update(std::vector &data) { + return request_update(data.data(), sizeof(BufferDataType) * data.size()); + } +}; + +} // namespace inexor::vulkan_renderer::render_graph diff --git a/include/inexor/vulkan-renderer/render-graph/graphics_pass.hpp b/include/inexor/vulkan-renderer/render-graph/graphics_pass.hpp new file mode 100644 index 000000000..5aa64bbe7 --- /dev/null +++ b/include/inexor/vulkan-renderer/render-graph/graphics_pass.hpp @@ -0,0 +1,108 @@ +#pragma once + +#include + +#include "inexor/vulkan-renderer/render-graph/buffer.hpp" +#include "inexor/vulkan-renderer/render-graph/texture.hpp" +#include "inexor/vulkan-renderer/wrapper/descriptors/descriptor_set_layout.hpp" +#include "inexor/vulkan-renderer/wrapper/device.hpp" +#include "inexor/vulkan-renderer/wrapper/swapchain.hpp" + +#include +#include +#include +#include +#include + +namespace inexor::vulkan_renderer::wrapper::commands { +// Forward declaration +class CommandBuffer; +} // namespace inexor::vulkan_renderer::wrapper::commands + +namespace inexor::vulkan_renderer::render_graph { + +// Forward declaration +class RenderGraph; + +// Using declarations +using wrapper::Swapchain; +using wrapper::descriptors::DescriptorSetLayout; + +/// Using declaration +using OnRecordCommandBufferForPass = std::function; + +/// A wrapper for graphics passes inside of rendergraph +class 
GraphicsPass { + friend class RenderGraph; + +private: + /// The name of the graphics pass + std::string m_name; + + /// The command buffer recording function of the graphics pass + OnRecordCommandBufferForPass m_on_record_cmd_buffer{[](auto &) {}}; + + /// The descriptor set layout of the pass (this will be created by rendergraph) + std::unique_ptr m_descriptor_set_layout; + /// The descriptor set of the pass (this will be created by rendergraph) + VkDescriptorSet m_descriptor_set{VK_NULL_HANDLE}; + + /// The color of the debug label region (visible in graphics debuggers like RenderDoc) + std::array m_debug_label_color; + + /// The extent + VkExtent2D m_extent{0, 0}; + /// The graphics passes this pass reads from + std::vector> m_graphics_pass_reads; + /// A weak pointer to the next graphics pass + std::weak_ptr m_next_pass{}; + + /// The texture attachments of this pass (unified means color, depth, stencil attachment or a swapchain) + std::vector, std::optional>> m_write_attachments{}; + /// The swapchains this graphics pass writes to + std::vector, std::optional>> m_write_swapchains{}; + + // All the data below will be filled and used by rendergraph only + + /// The rendering info will be filled during rendergraph compilation so we don't have to do this while rendering. + /// This means we must make sure that the memory of the attachment infos below is still valid during rendering, + /// which is why we store them as members here. + VkRenderingInfo m_rendering_info{}; + /// The color attachments inside of m_rendering_info + std::vector m_color_attachments{}; + /// Does this graphics pass have any depth attachment? + bool m_has_depth_attachment{false}; + /// The depth attachment inside of m_rendering_info + VkRenderingAttachmentInfo m_depth_attachment{}; + /// Does this graphics pass have any stencil attachment? 
+ bool m_has_stencil_attachment{false}; + /// The stencil attachment inside of m_rendering_info + VkRenderingAttachmentInfo m_stencil_attachment{}; + + /// Reset the rendering info + void reset_rendering_info(); + +public: + /// Default constructor + /// @param name The name of the graphics pass + /// @param on_record_cmd_buffer The command buffer recording function of the graphics pass + /// @param graphics_pass_reads The graphics passes this graphics pass reads from + /// @param write_attachments The attachment this graphics pass writes to + /// @param write_swapchains The swapchains this graphics pass writes to + /// @param pass_debug_label_color The debug label of the pass (visible in graphics debuggers like RenderDoc) + GraphicsPass(std::string name, + OnRecordCommandBufferForPass on_record_cmd_buffer, + std::vector> graphics_pass_reads, + std::vector, std::optional>> write_attachments, + std::vector, std::optional>> write_swapchains, + wrapper::DebugLabelColor pass_debug_label_color); + + GraphicsPass(const GraphicsPass &) = delete; + GraphicsPass(GraphicsPass &&other) noexcept; + ~GraphicsPass() = default; + + GraphicsPass &operator=(const GraphicsPass &) = delete; + GraphicsPass &operator=(GraphicsPass &&) = delete; +}; + +} // namespace inexor::vulkan_renderer::render_graph diff --git a/include/inexor/vulkan-renderer/render-graph/graphics_pass_builder.hpp b/include/inexor/vulkan-renderer/render-graph/graphics_pass_builder.hpp new file mode 100644 index 000000000..dced82bb0 --- /dev/null +++ b/include/inexor/vulkan-renderer/render-graph/graphics_pass_builder.hpp @@ -0,0 +1,84 @@ +#pragma once + +#include "inexor/vulkan-renderer/render-graph/buffer.hpp" +#include "inexor/vulkan-renderer/render-graph/graphics_pass.hpp" +#include "inexor/vulkan-renderer/render-graph/texture.hpp" +#include "inexor/vulkan-renderer/wrapper/make_info.hpp" +#include "inexor/vulkan-renderer/wrapper/swapchain.hpp" + +#include +#include +#include +#include + +namespace inexor::vulkan_renderer::wrapper::commands { +// Forward declaration +class CommandBuffer; +} // namespace inexor::vulkan_renderer::wrapper::commands + +namespace inexor::vulkan_renderer::render_graph { + +// Using declaration +using wrapper::DebugLabelColor; +using wrapper::Swapchain; +using wrapper::commands::CommandBuffer; + +/// A builder class for graphics passes in the rendergraph +class GraphicsPassBuilder { +private: + /// Add members which describe data related to graphics passes here + OnRecordCommandBufferForPass m_on_record_cmd_buffer{}; + /// The graphics passes which are read by this graphics pass + std::vector> m_graphics_pass_reads{}; + /// The texture resources this graphics pass writes to + std::vector, std::optional>> m_write_attachments{}; + /// The swapchain this graphics pass writes to + std::vector, std::optional>> m_write_swapchains{}; + + /// Reset the data of the graphics pass builder + void reset(); + +public: + GraphicsPassBuilder(); + GraphicsPassBuilder(const GraphicsPassBuilder &) = delete; + GraphicsPassBuilder(GraphicsPassBuilder &&) noexcept; + ~GraphicsPassBuilder() = default; + + GraphicsPassBuilder &operator=(const GraphicsPassBuilder &) = delete; + GraphicsPassBuilder &operator=(GraphicsPassBuilder &&) = delete; + + /// Build the graphics pass + /// @param name The name of the graphics pass + /// @param color The debug label color (debug labels are specified per pass and are visible in RenderDoc debugger) + /// @return The graphics pass that was just created + [[nodiscard]] std::shared_ptr build(std::string 
name, DebugLabelColor color); + + /// Specify that this graphics pass A reads from another graphics pass B (if the weak_ptr to B is not expired), + /// meaning B should be rendered before A. It is perfectly valid for 'graphics_pass' to be an invalid pointer, in + /// which case the read is not added. + /// @param condition The condition under which the pass is read from + /// @param graphics_pass The graphics pass (can be an invalid pointer) + /// @return A reference to this builder (allowing method calls to be chained) + [[nodiscard]] GraphicsPassBuilder &conditionally_reads_from(std::weak_ptr graphics_pass, + bool condition); + + /// Specify that this graphics pass A reads from another graphics pass B, meaning B should be rendered before A + /// @param graphics_pass The graphics pass which is read by this graphics pass + /// @return A reference to this builder (allowing method calls to be chained) + [[nodiscard]] GraphicsPassBuilder &reads_from(std::weak_ptr graphics_pass); + + /// Set the function which will be called when the command buffer for rendering of the pass is being recorded + /// @param on_record_cmd_buffer The command buffer recording function + /// @return A reference to this builder (allowing method calls to be chained) + [[nodiscard]] GraphicsPassBuilder &set_on_record(OnRecordCommandBufferForPass on_record_cmd_buffer); + + /// Specify that this graphics pass writes to either a std::weak_ptr or a std::weak_ptr + /// @param write_attachment The attachment (either a std::weak_ptr or a std::weak_ptr) + /// @param clear_value The optional clear value of the attachment (``std::nullopt`` by default) + /// @return A reference to this builder (allowing method calls to be chained) + [[nodiscard]] GraphicsPassBuilder & + writes_to(std::variant, std::weak_ptr> write_attachment, + std::optional clear_value = std::nullopt); +}; + +} // namespace inexor::vulkan_renderer::render_graph diff --git a/include/inexor/vulkan-renderer/render-graph/image.hpp b/include/inexor/vulkan-renderer/render-graph/image.hpp new file mode 100644 index 000000000..3126a5adb --- /dev/null +++ b/include/inexor/vulkan-renderer/render-graph/image.hpp @@ -0,0 +1,89 @@ +#pragma once + +#include + +#include +#include + +namespace inexor::vulkan_renderer::wrapper { +// Forward declaration +class Device; +class Sampler; +} // namespace inexor::vulkan_renderer::wrapper + +namespace inexor::vulkan_renderer::wrapper::commands { +// Forward declaration +class CommandBuffer; +} // namespace inexor::vulkan_renderer::wrapper::commands + +namespace inexor::vulkan_renderer::render_graph { + +// Forward declarations +class RenderGraph; +class Texture; + +// Using declaration +using render_graph::RenderGraph; +using render_graph::Texture; +using wrapper::Device; +using wrapper::Sampler; +using wrapper::commands::CommandBuffer; + +// NOTE: Originally we did not want to have a RAII wrapper for VkImage and VkImageView and put this into the Texture +// wrapper directly, but since the Texture wrapper contains 2 Images depending on whether MSAA is enabled or not, we +// have chosen to use the Image RAII wrapper. 
+ +// TODO: Move this to wrapper/ folder again (it is not really part of rendergraph) + +/// RAII wrapper for VkImage and VkImageView +/// @note Multisample anti-aliasing (MSAA) can be enabled on a per-texture basis +class Image { + friend class CommandBuffer; + friend class RenderGraph; + friend class Texture; + +private: + /// The device wrapper + const Device &m_device; + /// The internal debug name of the image + std::string m_name; + + VkImageCreateInfo m_img_ci{}; + VkImageViewCreateInfo m_img_view_ci{}; + + VkImage m_img{VK_NULL_HANDLE}; + VkImageView m_img_view{VK_NULL_HANDLE}; + VmaAllocation m_alloc{VK_NULL_HANDLE}; + VmaAllocationInfo m_alloc_info{}; + + const VmaAllocationCreateInfo m_alloc_ci{ + .usage = VMA_MEMORY_USAGE_AUTO, + }; + + /// The combined image sampler for the texture + /// This is only relevant if the texture is used as TextureUsage::NORMAL + std::unique_ptr m_sampler; + + /// Create the image and the image view + /// @param img_ci The image create info + /// @param img_view_ci The image view create info + void create(VkImageCreateInfo img_ci, VkImageViewCreateInfo img_view_ci); + + /// Destroy the image view, the image, and the sampler + void destroy(); + +public: + /// Default constructor + /// @param device The device wrapper + /// @param name The name of the Image + Image(const Device &device, std::string name); + + Image(const Image &) = delete; + Image(Image &&) noexcept; + ~Image(); + + Image &operator=(const Image &other) = delete; + Image &operator=(Image &&other) = delete; +}; + +} // namespace inexor::vulkan_renderer::render_graph diff --git a/include/inexor/vulkan-renderer/render-graph/render_graph.hpp b/include/inexor/vulkan-renderer/render-graph/render_graph.hpp new file mode 100644 index 000000000..4cfd6368a --- /dev/null +++ b/include/inexor/vulkan-renderer/render-graph/render_graph.hpp @@ -0,0 +1,380 @@ +#pragma once + +#include "inexor/vulkan-renderer/render-graph/buffer.hpp" +#include "inexor/vulkan-renderer/render-graph/graphics_pass.hpp" +#include "inexor/vulkan-renderer/render-graph/graphics_pass_builder.hpp" +#include "inexor/vulkan-renderer/render-graph/texture.hpp" +#include "inexor/vulkan-renderer/wrapper/descriptors/descriptor_set_allocator.hpp" +#include "inexor/vulkan-renderer/wrapper/descriptors/descriptor_set_layout_builder.hpp" +#include "inexor/vulkan-renderer/wrapper/descriptors/descriptor_set_layout_cache.hpp" +#include "inexor/vulkan-renderer/wrapper/descriptors/write_descriptor_set_builder.hpp" +#include "inexor/vulkan-renderer/wrapper/pipelines/pipeline.hpp" +#include "inexor/vulkan-renderer/wrapper/pipelines/pipeline_builder.hpp" +#include "inexor/vulkan-renderer/wrapper/pipelines/pipeline_layout.hpp" + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +namespace inexor::vulkan_renderer::wrapper { +/// Forward declarations +class Device; +class Swapchain; +} // namespace inexor::vulkan_renderer::wrapper + +namespace inexor::vulkan_renderer::render_graph { + +// Using declarations +using wrapper::Device; +using wrapper::Swapchain; +using wrapper::commands::CommandBuffer; +using wrapper::descriptors::DescriptorSetAllocator; +using wrapper::descriptors::DescriptorSetLayoutBuilder; +using wrapper::descriptors::DescriptorSetLayoutCache; +using wrapper::descriptors::WriteDescriptorSetBuilder; +using wrapper::pipelines::GraphicsPipeline; +using wrapper::pipelines::GraphicsPipelineBuilder; +using wrapper::pipelines::PipelineLayout; + +/// A rendergraph is a generic solution for rendering 
architecture. +/// +/// +/// +/// +/// +/// +class RenderGraph { +private: + /// The device wrapper + Device &m_device; + + // The rendergraph has its own logger + std::shared_ptr m_log{spdlog::default_logger()->clone("render-graph")}; + + /// ----------------------------------------------------------------------------------------------------------------- + /// GRAPHICS PASSES + /// ----------------------------------------------------------------------------------------------------------------- + /// Graphics passes are built inside of graphics pass create functions. Those functions are given to the + /// rendergraph, and they are all called sequentially during rendergraph compilation. Inside of the graphics pass + /// create function, the GraphicsPassBuilder can be used to build the graphics pass. The graphics pass which is + /// created is stored internally inside of the rendergraph. Each graphics pass specifies to which attachments + /// (color, depth, stencil) it writes by using the writes_to method of the GraphicsPassBuilder. The attachments will + /// be used by rendergraph in the VkRenderingInfo in dynamic rendering. Each pass must also specify from which other + /// passes it reads. For example, if we specify B.reads_from(A), it means that graphics pass B is reading from + /// graphics pass A. Rendergraph can then automatically determine the order of all passes during rendergraph + /// compilation using the depth-first search (DFS) algorithm. Rendergraph also makes sure that the graph is acyclic, so + /// there must not be any cycles in it! Note that graphics passes only specify reads_from for previous passes and + /// writes_to for attachments, as a graphics pass does not directly write into another graphics pass, but only into + /// attachments. Why can rendergraph only reason about cycles after creating the graphics passes, at the end of + /// compilation? To check for cycles, we would need to reason about the reads of each graphics pass before the + /// passes have been created. The problem here is that we can't reference a piece of memory unless it has been + /// created, meaning we can't read_from a pass unless its shared_ptr has been allocated. A workaround would be to + /// have a default constructor which already creates the object before rendergraph could call another constructor + /// which actually creates the pass. This is however much more complex than the current solution, and from what we + /// understand it's the fact that we can't reference a piece of memory unless it has been created that makes it very + /// hard to actually make cycles in rendergraph passes! You can only call reads_from for passes which came before + /// the pass you are currently creating, you can't call reads_from for passes which come later. (A usage sketch of + /// GraphicsPassBuilder together with add_graphics_pass is given further below.) + /// ----------------------------------------------------------------------------------------------------------------- + + /// The graphics pass builder of the rendergraph + GraphicsPassBuilder m_graphics_pass_builder{}; + /// The graphics passes used in the rendergraph + std::vector> m_graphics_passes; + + /// ----------------------------------------------------------------------------------------------------------------- + /// SWAPCHAINS + /// ----------------------------------------------------------------------------------------------------------------- + /// Swapchains are not managed by rendergraph and they are not even stored as a std::weak_ptr inside of rendergraph. + /// Instead, if a graphics pass writes to a swapchain, it can be specified by using the writes_to method in + /// GraphicsPassBuilder. To unify textures and swapchains as write attachments, the GraphicsPassBuilder class uses + /// std::variant, std::weak_ptr> as type for write attachments. When creating the + /// graphics pass, the constructor of the GraphicsPass class will automatically sort the write attachments by type + /// (texture or swapchain). The textures and swapchains which are used as write attachments are processed by + /// rendergraph in the fill_graphics_pass_rendering_info method. To fill VkRenderingInfo of a graphics pass, we need + /// to fill VkRenderingInfo for every texture and swapchain that is written to. This means we only need to know the + /// image view of the texture (the current image view in case of a swapchain), the image layout, and the optional + /// clear value for the swapchain or texture. The clear value is stored in the graphics pass as well. + /// + /// Rendergraph automatically performs image layout transitions for swapchains when they are used as write + /// attachments. To avoid unnecessary transitions, rendergraph will check if a swapchain is already in + /// VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL layout before starting dynamic rendering. To avoid more unnecessary + /// image layout transitions of the swapchain image, the layout is changed to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR only if + /// the next pass (if any) is not rendering to the swapchain. + /// ----------------------------------------------------------------------------------------------------------------- + + /// The unique wait semaphores of all swapchains used (This means if one swapchain is used multiple times, it's still + /// only one VkSemaphore in here because the collect_swapchain_image_available_semaphores method will fill this vector) + std::vector m_swapchains_imgs_available; + + /// ----------------------------------------------------------------------------------------------------------------- + /// GRAPHICS PIPELINES + /// ----------------------------------------------------------------------------------------------------------------- + /// Graphics pipelines are created during rendergraph compilation, but the graphics pipeline instances are not + /// stored inside of the rendergraph (unlike textures or buffers for example). The reason for this is that graphics + /// pipelines are also not bound by the rendergraph automatically before calling the on_record command buffer + /// recording function of a graphics pass. This is because while the rendergraph could bind a graphics pipeline just + /// before calling on_record, inside of the on_record function, the use of graphics pipelines could be arbitrarily + /// complex. We also didn't want to have a rendergraph API which exposes binding one graphics pipeline before + /// calling on_record because this could let the programmer think that binding pipelines is fully covered internally + /// in rendergraph, which is not the case. You might wonder why we decided to use create functions for the graphics + /// pipelines if the graphics pipeline is not stored inside of rendergraph. To create a graphics pipeline, you need + /// a pipeline layout. The pipeline layout is part of the GraphicsPipeline wrapper and every graphics pipeline has + /// exactly one pipeline layout. To create the pipeline layout, you need to know the descriptor set layout! To be + /// precise, you need to know the descriptor set layout if the graphics pipeline uses resource descriptors. You + /// also need to know the push constant ranges, but they are relatively easy to specify in GraphicsPipelineBuilder. + /// This complex order of initialization of Vulkan resources must be respected, and one of the advantages of having a + /// rendergraph is that it makes all this very easy. After the descriptor set layout has been created by the + /// rendergraph, the graphics pipelines can be created because we then know about the descriptor set layout which is + /// required for the pipeline layout. + /// ----------------------------------------------------------------------------------------------------------------- + + // TODO: Support compute pipelines + // TODO: Use pipeline cache and write pipeline cache to file, and support loading them + + /// The graphics pipeline builder of the rendergraph + GraphicsPipelineBuilder m_graphics_pipeline_builder; + /// In these callback functions, the graphics pipelines will be created + using OnCreateGraphicsPipeline = std::function; + /// The graphics pipeline create functions which are called by rendergraph to create the graphics pipelines + std::vector m_pipeline_create_functions; + + /// ----------------------------------------------------------------------------------------------------------------- + /// BUFFERS + /// ----------------------------------------------------------------------------------------------------------------- + /// We use Vulkan Memory Allocator (VMA) for memory management of resources under the hood. Buffers are created and + /// stored inside of rendergraph exclusively. To code outside of rendergraph, we hand out std::weak_ptr handles to + /// the Buffer instances we create. This way, the buffers can be used in the on_record function directly without having to pass + /// the buffers as parameters to the on_record function. Also, the use of std::weak_ptr makes the memory ownership + /// of the buffer handle clear. The memory is owned by the rendergraph in a std::shared_ptr, but it can be used + /// outside of rendergraph through the std::weak_ptr. The Buffer wrapper can be a VERTEX_BUFFER, INDEX_BUFFER, or + /// UNIFORM_BUFFER. The rendergraph can be instructed to update a buffer in code outside of the rendergraph using + /// the request_update method, and the update is then carried out by rendergraph. All buffer updates are carried out + /// on a per-frame basis, meaning that all updates will always be coherent with respect to one frame. In the future, + /// we could improve rendergraph so it automatically double or triple buffers all resources (buffers, textures, ...), + /// so it can use one index for rendering while the update on the next index is already being carried out. This would + /// require parallelization using taskflow and proper synchronization. While this would increase memory + /// consumption, it would improve rendering performance by reducing CPU and GPU stalls. Every buffer must have an + /// update function. If a vertex buffer and an index buffer are updated, each buffer should be updated in its own + /// update function rather than updating the index buffer inside the update function of the vertex buffer as well. While this is technically + /// also correct, it makes no sense to do it this way because vertex buffers and index buffers are updated on a + /// per-frame basis coherently anyway, so it doesn't really matter. 
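+ /// As a rough usage sketch (illustrative only; the member names mirror the ones used in the Application class and
+ /// the exact call site is an assumption): code outside of rendergraph requests a buffer handle once and then asks
+ /// for an update whenever its data changes:
+ /// @code
+ /// m_uniform_buffer = m_render_graph->add_buffer("MVP matrices", BufferType::UNIFORM_BUFFER, [&]() {
+ ///     // Copy the current model-view-projection matrices into the uniform buffer for the next frame
+ ///     m_uniform_buffer.lock()->request_update(m_mvp_matrices);
+ /// });
+ /// @endcode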
+ /// ----------------------------------------------------------------------------------------------------------------- + + /// The vertex buffers, index buffers, and uniform buffers + std::vector> m_buffers; + + /// ----------------------------------------------------------------------------------------------------------------- + /// TEXTURES + /// ----------------------------------------------------------------------------------------------------------------- + /// Like buffers, textures are created by rendergraph and stored in the rendergraph as a shared_ptr. This means just + /// like buffers, the rendergraph owns the underlying memory, and all external code must use a std::weak_ptr to it. + /// There are different types of textures, which are listed in TextureUsage. In principle, the Texture wrapper + /// allows each texture to have an on_init and an on_update function. Depending on the texture usage, textures can + /// be created in two different ways: Textures which are used as color attachment (back buffer) or depth attachment + /// (depth buffer) are created inside of rendergraph only. This means code outside of rendergraph is not allowed to + /// modify the data in the texture directly, but instead the external code must specify which attachment is written + /// to by which pass. Rendergraph will then write to the attachments automatically. This means for textures which are + /// of TextureUsage COLOR_ATTACHMENT or DEPTH_ATTACHMENT, both on_init and on_update are std::nullopt (Rendergraph + /// initializes these textures directly rather than specifying an unnecessary on_init function which is then called + /// directly). The other type of texture is TextureUsage::NORMAL, which means this texture is used, for example, in + /// combined image samplers. This could be the font texture of ImGui, for example. Because the data to fill this + /// texture is loaded from a file by the ImGui wrapper, we must specify an on_init function to rendergraph which + /// copies the font texture data into the texture wrapper. TextureUsage NORMAL is the only texture type which is + /// allowed to have an on_update function. The on_update function is called per-frame (if it's not std::nullopt). + /// The on_update function allows for dynamic textures which are updated every frame. Note that so far, our code + /// base does not use this feature yet. + + /// The textures + std::vector> m_textures; + + /// ----------------------------------------------------------------------------------------------------------------- + /// RESOURCE DESCRIPTORS + /// ----------------------------------------------------------------------------------------------------------------- + /// After a lot of discussion we decided not to keep the actual VkDescriptorSet handles inside of + /// rendergraph. Originally we planned to have one descriptor set per pass (or several of them per pass) which are + /// bound using vkCmdBindDescriptorSets before on_record is called. However, the binding of descriptor sets inside of + /// the on_record command buffer recording function of the pass can be much more complex than just binding one + /// descriptor set (or several) before calling on_record. In fact, the descriptor set binding could be arbitrarily + /// complex inside of on_record. Associating the descriptor sets with the pass would introduce another level of + /// unnecessary indirection without any benefits, which we do not want. 
We then thought we could automatically create + /// the descriptor set layout of a pass by analyzing the resources the pass reads from. This in theory would also + /// allow us to allocate the descriptor sets and to update them. However, the descriptor set layout is also required + /// for creating the pipeline layout! There are two problems with this: 1) The programmer would either have to + /// specify the reads of the pass in the order of the descriptor set layout or associate the read of a pass with a + /// binding in the descriptor set layout. Otherwise, the descriptor set layout would be messed up. 2) Because the + /// descriptor set layout is required when creating the graphics pipeline, we would have to associate pipelines with + /// passes somehow. However, we also decided to keep pipelines (instances of the GraphicsPipeline wrapper) out of + /// rendergraph. In summary, keeping VkDescriptorSet handles in rendergraph would complicate the API unnecessarily. + /// Rendergraph now manages resource descriptors as follows: Descriptors need a descriptor set layout, which is + /// created from DescriptorSetLayoutBuilder (the first function). Inside of OnBuildDescriptorSetLayout, with the + /// help of DescriptorSetLayoutBuilder, the programmer simply specifies which descriptors are inside of the + /// descriptor set. The descriptor set is then created by the builder. DescriptorSetLayoutBuilder uses + /// DescriptorSetLayoutCache internally to potentially speed up (re)creation of descriptor set layouts. The + /// VkDescriptorSetLayout created by DescriptorSetLayoutBuilder inside of on_build_descriptor_set_layout must be + /// stored externally, and it is the responsibility of the programmer to make sure the VkDescriptorSetLayout is + /// valid memory when it is used in the on_record function! The descriptor sets are allocated by rendergraph via + /// OnAllocateDescriptorSet functions using the DescriptorSetAllocator of the rendergraph. Descriptor sets are + /// updated in the OnUpdateDescriptorSet functions using the DescriptorSetUpdateBuilder of the rendergraph. + /// + /// TODO: Mention batching to vkUpdateDescriptorSets... 
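+ ///
+ /// A rough sketch of how the three user-defined functions fit together (hypothetical usage; the registration
+ /// function name is assumed from the "Add a descriptor" declaration further below, and the calls on the builders
+ /// are only indicated as comments because they belong to the respective builder headers):
+ /// @code
+ /// render_graph->add_descriptor(
+ ///     [&](DescriptorSetLayoutBuilder &builder) { /* describe the bindings and store the resulting layout */ },
+ ///     [&](DescriptorSetAllocator &allocator) { /* allocate the descriptor set from that layout */ },
+ ///     [&](WriteDescriptorSetBuilder &builder) { /* build and return the write descriptor sets */ });
+ /// @endcode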
+ /// -----------------------------------------------------------------------------------------------------------------
+
+ /// The descriptor set layout builder (a builder pattern for descriptor set layouts)
+ DescriptorSetLayoutBuilder m_descriptor_set_layout_builder;
+ /// The descriptor set allocator
+ DescriptorSetAllocator m_descriptor_set_allocator;
+ /// The write descriptor set builder (a builder pattern for write descriptor sets)
+ WriteDescriptorSetBuilder m_write_descriptor_set_builder;
+
+ /// A user-defined function which creates the descriptor set layout
+ using OnBuildDescriptorSetLayout = std::function;
+ /// A user-defined function which allocates a descriptor set
+ using OnAllocateDescriptorSet = std::function;
+ /// A user-defined function which builds the descriptor set writes for the pass
+ using OnBuildWriteDescriptorSets = std::function(WriteDescriptorSetBuilder &)>;
+
+ /// Resource descriptors are managed by specifying those three functions to the rendergraph
+ /// Rendergraph will then call those functions in the correct order during rendergraph compilation
+ using ResourceDescriptor =
+ std::tuple;
+ /// The resource descriptors of the rendergraph
+ std::vector m_resource_descriptors;
+ /// All write descriptor sets will be stored in here so we can have one batched call to vkUpdateDescriptorSets
+ std::vector m_write_descriptor_sets;
+
+ /// Allocate the descriptor sets
+ void allocate_descriptor_sets();
+
+ /// The rendergraph must not have any cycles in it!
+ /// @exception std::logic_error The rendergraph is not acyclic!
+ void check_for_cycles();
+
+ /// Collect all image available semaphores of all swapchains which are used into one std::vector
+ void collect_swapchain_image_available_semaphores();
+
+ /// Create the descriptor set layouts
+ void create_descriptor_set_layouts();
+
+ /// Create the graphics pipelines
+ void create_graphics_pipelines();
+
+ /// Determine the order of execution of the graphics passes by using the depth first search (DFS) algorithm
+ void determine_pass_order();
+
+ /// Fill the VkRenderingInfo for a graphics pass
+ /// @param pass The graphics pass
+ void fill_graphics_pass_rendering_info(GraphicsPass &pass);
+
+ /// Record the command buffer of a pass. After a lot of discussions about the API design of rendergraph, we came to
+ /// the conclusion that it's the full responsibility of the programmer to manually bind pipelines, descriptor sets,
+ /// and buffers inside of the on_record function instead of attempting to abstract all of this in rendergraph. This
+ /// means rendergraph will not automatically bind pipelines, buffers, or descriptor sets! The reason for this is
+ /// that there could be complex rendering going on inside of the on_record function with an arbitrary number of
+ /// pipelines, descriptor sets, and buffers being bound in a nontrivial order or under certain conditions. We
+ /// therefore refrained from designing a simple API inside of rendergraph which automatically binds one graphics
+ /// pipeline, descriptor set, or a set of buffers at the beginning of rendering before calling on_record, because it
+ /// would cause confusion about the correct API usage for the advanced use cases. Nonetheless, the creation of
+ /// buffers, descriptors, and pipelines is still the full responsibility of the rendergraph, but you need to use them
+ /// manually inside of the on_record function.
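+ ///
+ /// A hedged sketch of what the body of a user-provided on_record function could look like (the chained
+ /// CommandBuffer member functions and the m_* names shown here are assumptions for illustration only, not a
+ /// verified API):
+ /// @code
+ /// [&](const CommandBuffer &cmd_buf) {
+ ///     cmd_buf.bind_pipeline(m_pipeline)
+ ///         .bind_descriptor_sets({m_descriptor_set}, m_pipeline_layout)
+ ///         .bind_vertex_buffers({m_vertex_buffer})
+ ///         .bind_index_buffer(m_index_buffer)
+ ///         .draw_indexed(m_index_count);
+ /// }
+ /// @endcode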
+ /// @param cmd_buf The command buffer to record the pass into
+ /// @param pass The graphics pass to record the command buffer for
+ void record_command_buffer_for_pass(const CommandBuffer &cmd_buf, GraphicsPass &pass);
+
+ /// Update the vertex-, index-, and uniform-buffers
+ /// @note If a uniform buffer has been updated, an update of the associated descriptor set will be performed
+ void update_buffers();
+
+ /// Update dynamic textures
+ void update_textures();
+
+ /// Update the write descriptor sets
+ /// @note This function must only be called once during rendergraph compilation, not for every frame!
+ void update_write_descriptor_sets();
+
+public:
+ /// Default constructor
+ /// @note device is not taken as a const reference because rendergraph needs to modify it
+ /// @param device The device wrapper
+ RenderGraph(Device &device);
+
+ RenderGraph(const RenderGraph &) = delete;
+ // TODO: Implement me!
+ RenderGraph(RenderGraph &&) noexcept;
+ ~RenderGraph() = default;
+
+ RenderGraph &operator=(const RenderGraph &) = delete;
+ RenderGraph &operator=(RenderGraph &&) = delete;
+
+ /// Add a new graphics pass to the rendergraph
+ /// @param pass The graphics pass to add
+ /// @return A std::weak_ptr to the graphics pass that was added
+ [[nodiscard]] std::weak_ptr add_graphics_pass(std::shared_ptr pass);
+
+ /// Add a new graphics pipeline to the rendergraph
+ /// @param on_pipeline_create A function to create the graphics pipeline using GraphicsPipelineBuilder
+ /// @note Move semantics is used to take ownership of on_pipeline_create
+ void add_graphics_pipeline(OnCreateGraphicsPipeline on_pipeline_create);
+
+ /// Add a buffer to rendergraph
+ /// @param name The name of the buffer
+ /// @param type The type of the buffer
+ /// @param on_update The update function of the buffer
+ /// @return A weak pointer to the buffer resource that was created
+ [[nodiscard]] std::weak_ptr add_buffer(std::string name, BufferType type, std::function on_update);
+
+ /// Add a resource descriptor to rendergraph
+ /// @note This function returns void because rendergraph does not store anything that is created in those callback
+ /// functions. As mentioned above, resource descriptors are kept outside of rendergraph.
+ /// @note This function does not perform any error checks when it comes to correct usage of descriptors, because
+ /// this is the job of validation layers. If you give the rendergraph three empty functions, you will not notice
+ /// until you attempt to use a descriptor in on_record which has not been set up correctly.
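+ ///
+ /// A minimal usage sketch (hedged: only the three-callback structure is given by this function's signature; the
+ /// builder and allocator member functions as well as the m_* names used below are assumptions for illustration):
+ /// @code
+ /// render_graph->add_resource_descriptor(
+ ///     [&](DescriptorSetLayoutBuilder &builder) {
+ ///         m_descriptor_set_layout = builder.add_combined_image_sampler(VK_SHADER_STAGE_FRAGMENT_BIT).build("imgui");
+ ///     },
+ ///     [&](DescriptorSetAllocator &allocator) {
+ ///         m_descriptor_set = allocator.allocate("imgui", m_descriptor_set_layout);
+ ///     },
+ ///     [&](WriteDescriptorSetBuilder &builder) {
+ ///         return builder.add_combined_image_sampler_update(m_descriptor_set, m_imgui_texture).build();
+ ///     });
+ /// @endcode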
+ /// @param on_build_descriptor_set_layout The descriptor set layout build function
+ /// @param on_allocate_descriptor_set The descriptor set allocation function
+ /// @param on_update_descriptor_set The descriptor set update function
+ void add_resource_descriptor(OnBuildDescriptorSetLayout on_build_descriptor_set_layout,
+ OnAllocateDescriptorSet on_allocate_descriptor_set,
+ OnBuildWriteDescriptorSets on_update_descriptor_set);
+
+ /// Add a texture which will be initialized externally (not inside of rendergraph)
+ /// @param name The name of the texture
+ /// @param usage The usage of the texture inside of rendergraph
+ /// @param format The format of the texture
+ /// @param width The width of the texture
+ /// @param height The height of the texture
+ /// @param channels The number of texture channels
+ /// @param sample_count The sample count of the texture
+ /// @param m_on_check_for_updates The texture update function (an empty lambda by default)
+ /// @return A weak pointer to the texture that was created
+ [[nodiscard]] std::weak_ptr add_texture(
+ std::string name,
+ TextureUsage usage,
+ VkFormat format,
+ std::uint32_t width,
+ std::uint32_t height,
+ std::uint32_t channels,
+ VkSampleCountFlagBits sample_count = VK_SAMPLE_COUNT_1_BIT,
+ std::function m_on_check_for_updates = []() {});
+
+ /// Compile the rendergraph
+ void compile();
+
+ /// Return the rendergraph's graphics pass builder instance
+ GraphicsPassBuilder &get_graphics_pass_builder() {
+ return m_graphics_pass_builder;
+ }
+
+ /// Render a frame
+ void render();
+
+ /// Reset the entire RenderGraph
+ void reset();
+};
+
+} // namespace inexor::vulkan_renderer::render_graph
diff --git a/include/inexor/vulkan-renderer/render-graph/texture.hpp b/include/inexor/vulkan-renderer/render-graph/texture.hpp
new file mode 100644
index 000000000..c3db7e2ae
--- /dev/null
+++ b/include/inexor/vulkan-renderer/render-graph/texture.hpp
@@ -0,0 +1,156 @@
+#pragma once
+
+#include
+
+#include "inexor/vulkan-renderer/render-graph/image.hpp"
+
+#include
+#include
+#include
+#include
+
+namespace inexor::vulkan_renderer::wrapper {
+// Forward declaration
+class Device;
+} // namespace inexor::vulkan_renderer::wrapper
+
+namespace inexor::vulkan_renderer::wrapper::commands {
+// Forward declaration
+class CommandBuffer;
+} // namespace inexor::vulkan_renderer::wrapper::commands
+
+namespace inexor::vulkan_renderer::wrapper::descriptors {
+/// Forward declaration
+class WriteDescriptorSetBuilder;
+} // namespace inexor::vulkan_renderer::wrapper::descriptors
+
+namespace inexor::vulkan_renderer::render_graph {
+
+/// Specifies the use of the texture
+/// NOTE: All usages which are not TextureUsage::NORMAL are for internal usage inside of rendergraph only
+enum class TextureUsage {
+ NORMAL,
+ COLOR_ATTACHMENT,
+ DEPTH_ATTACHMENT,
+ STENCIL_ATTACHMENT,
+};
+
+// Forward declaration
+class GraphicsPass;
+
+// Using declarations
+using wrapper::Device;
+using wrapper::commands::CommandBuffer;
+using wrapper::descriptors::WriteDescriptorSetBuilder;
+
+/// RAII wrapper for texture resources
+class Texture {
+ // These friend classes are allowed to access the private data of Texture
+ friend class WriteDescriptorSetBuilder;
+ friend class GraphicsPass;
+ friend class RenderGraph;
+
+private:
+ /// The device wrapper
+ const Device &m_device;
+ /// The name of the texture
+ std::string m_name;
+ /// The usage of this texture
+ TextureUsage m_usage;
+ /// The format of the texture
+ VkFormat m_format{VK_FORMAT_UNDEFINED};
+ /// The
width of the texture + std::uint32_t m_width{0}; + /// The height of the texture + std::uint32_t m_height{0}; + /// The channel count of the texture (4 by default) + // TODO: Can we determine the number of channels based on the given format? + std::uint32_t m_channels{4}; + /// The sample count of the MSAA image (if MSAA is enabled) + VkSampleCountFlagBits m_samples{VK_SAMPLE_COUNT_1_BIT}; + + /// The image of the texture + std::shared_ptr m_image; + + /// This is only used internally inside of rendergraph in case this texture used as a back buffer, depth buffer, or + /// stencil buffer and MSAA is enabled. + std::shared_ptr m_msaa_image; + + // This is used for initializing textures and for updating dynamic textures + bool m_update_requested{true}; + void *m_src_texture_data{nullptr}; + std::size_t m_src_texture_data_size{0}; + + /// By definition, if this is not std::nullopt, this is a dynamic texture + std::function m_on_check_for_updates; + + // The staging buffer for updating the texture data + VkBuffer m_staging_buffer{VK_NULL_HANDLE}; + VmaAllocation m_staging_buffer_alloc{VK_NULL_HANDLE}; + VmaAllocationInfo m_staging_buffer_alloc_info{}; + + /// This part of the image wrapper is for external use outside of rendergraph + /// The descriptor image info required for descriptor updates + VkDescriptorImageInfo m_descriptor_img_info{}; + + /// Create the texture (and the MSAA texture if specified) + void create(); + + /// Destroy the texture (and the MSAA texture if specified) + void destroy(); + + /// Destroy the staging buffer used for texture updates + void destroy_staging_buffer(); + + /// Upload the data into the texture + /// @param cmd_buf The command buffer to record the commands into + void update(const CommandBuffer &cmd_buf); + +public: + /// Default constructor + /// @param device The device wrapper + /// @param name The internal debug name of the texture + /// @param usage The usage of the texture inside of rendergraph + /// @param format The format of the texture + /// @param width The width of the texture + /// @param height The height of the texture + /// @param channels The channel count of the texture + /// @param samples The sample count of the texture + /// @param on_check_for_updates The update function of the texture + Texture(const Device &device, + std::string name, + TextureUsage usage, + VkFormat format, + std::uint32_t width, + std::uint32_t height, + std::uint32_t channels, + VkSampleCountFlagBits samples, + std::function on_check_for_updates); + + Texture(const Texture &) = delete; + Texture(Texture &&other) noexcept; + ~Texture(); + + Texture &operator=(const Texture &) = delete; + Texture &operator=(Texture &&) = delete; + + [[nodiscard]] VkFormat format() const { + return m_format; + } + + [[nodiscard]] VkExtent2D extent() const { + return { + .width = m_width, + .height = m_height, + }; + } + + /// Request rendergraph to update the texture + /// @param src_texture_data A pointer to the source data + /// @param src_texture_data_size The size of the source data + /// @note It is the responsibility of the programmer to make sure the memory the pointer points to is still valid + /// when rendergraph is carrying out the update! 
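+ /// A usage sketch (hedged: m_imgui_texture refers to the std::weak_ptr member of the ImGuiRenderer added later in
+ /// this diff, and the weak_ptr locking shown here is an assumption; the ImGui calls are the standard ImGui API):
+ /// @code
+ /// unsigned char *font_pixels{nullptr};
+ /// int font_width{0};
+ /// int font_height{0};
+ /// ImGui::GetIO().Fonts->GetTexDataAsRGBA32(&font_pixels, &font_width, &font_height);
+ /// if (const auto texture = m_imgui_texture.lock()) {
+ ///     texture->request_update(font_pixels, static_cast<std::size_t>(font_width) * font_height * 4);
+ /// }
+ /// @endcode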
+ void request_update(void *src_texture_data, std::size_t src_texture_data_size); +}; + +} // namespace inexor::vulkan_renderer::render_graph diff --git a/include/inexor/vulkan-renderer/render_graph.hpp b/include/inexor/vulkan-renderer/render_graph.hpp deleted file mode 100644 index a036c9ce7..000000000 --- a/include/inexor/vulkan-renderer/render_graph.hpp +++ /dev/null @@ -1,455 +0,0 @@ -#pragma once - -#include "inexor/vulkan-renderer/wrapper/device.hpp" -#include "inexor/vulkan-renderer/wrapper/framebuffer.hpp" -#include "inexor/vulkan-renderer/wrapper/swapchain.hpp" - -#include - -#include -#include -#include -#include -#include -#include - -// TODO: Compute stages. -// TODO: Uniform buffers. - -// Forward declarations -namespace inexor::vulkan_renderer::wrapper { -class CommandBuffer; -class Shader; -}; // namespace inexor::vulkan_renderer::wrapper - -namespace inexor::vulkan_renderer { - -// Forward declarations -class PhysicalResource; -class PhysicalStage; -class RenderGraph; - -/// @brief Base class of all render graph objects (resources and stages). -/// @note This is just for internal use. -struct RenderGraphObject { - RenderGraphObject() = default; - RenderGraphObject(const RenderGraphObject &) = delete; - RenderGraphObject(RenderGraphObject &&) = delete; - virtual ~RenderGraphObject() = default; - - RenderGraphObject &operator=(const RenderGraphObject &) = delete; - RenderGraphObject &operator=(RenderGraphObject &&) = delete; - - /// @brief Casts this object to type `T` - /// @return The object as type `T` or `nullptr` if the cast failed - template - [[nodiscard]] T *as(); - - /// @copydoc as - template - [[nodiscard]] const T *as() const; -}; - -/// @brief A single resource in the render graph. -/// @note May become multiple physical (vulkan) resources during render graph compilation. -class RenderResource : public RenderGraphObject { - friend RenderGraph; - -private: - const std::string m_name; - std::shared_ptr m_physical; - -protected: - explicit RenderResource(std::string name) : m_name(std::move(name)) {} - -public: - RenderResource(const RenderResource &) = delete; - RenderResource(RenderResource &&) = delete; - ~RenderResource() override = default; - - RenderResource &operator=(const RenderResource &) = delete; - RenderResource &operator=(RenderResource &&) = delete; - - [[nodiscard]] const std::string &name() const { - return m_name; - } -}; - -enum class BufferUsage { - /// @brief Specifies that the buffer will be used to input index data. - INDEX_BUFFER, - - /// @brief Specifies that the buffer will be used to input per vertex data to a vertex shader. - VERTEX_BUFFER, -}; - -class BufferResource : public RenderResource { - friend RenderGraph; - -private: - const BufferUsage m_usage; - std::vector m_vertex_attributes; - - // Data to upload during render graph compilation. - const void *m_data{nullptr}; - std::size_t m_data_size{0}; - bool m_data_upload_needed{false}; - std::size_t m_element_size{0}; - -public: - BufferResource(std::string &&name, BufferUsage usage) : RenderResource(name), m_usage(usage) {} - - /// @brief Specifies that element `offset` of this vertex buffer is of format `format`. - /// @note Calling this function is only valid on buffers of type BufferUsage::VERTEX_BUFFER. - void add_vertex_attribute(VkFormat format, std::uint32_t offset); - - /// @brief Specifies the element size of the buffer upfront if data is not to be uploaded immediately. 
- /// @param element_size The element size in bytes - void set_element_size(std::size_t element_size) { - m_element_size = element_size; - } - - /// @brief Specifies the data that should be uploaded to this buffer at the start of the next frame. - /// @param count The number of elements (not bytes) to upload - /// @param data A pointer to a contiguous block of memory that is at least `count * sizeof(T)` bytes long - // TODO: Use std::span when we switch to C++ 20. - template - void upload_data(const T *data, std::size_t count); - - /// @brief @copybrief upload_data(const T *, std::size_t) - /// @note This is equivalent to doing `upload_data(data.data(), data.size() * sizeof(T))` - /// @see upload_data(const T *data, std::size_t count) - template - void upload_data(const std::vector &data); -}; - -enum class TextureUsage { - /// @brief Specifies that this texture is the output of the render graph. - // TODO: Refactor back buffer system more (remove need for BACK_BUFFER texture usage) - BACK_BUFFER, - - /// @brief Specifies that this texture is a combined depth/stencil buffer. - /// @note This may mean that this texture is completely GPU-sided and cannot be accessed by the CPU in any way. - DEPTH_STENCIL_BUFFER, - - /// @brief Specifies that this texture isn't used for any special purpose. - NORMAL, -}; - -class TextureResource : public RenderResource { - friend RenderGraph; - -private: - const TextureUsage m_usage; - VkFormat m_format{VK_FORMAT_UNDEFINED}; - -public: - TextureResource(std::string &&name, TextureUsage usage) : RenderResource(name), m_usage(usage) {} - - /// @brief Specifies the format of this texture that is required when the physical texture is made. - /// @details For TextureUsage::BACK_BUFFER textures, using the swapchain image format is preferable in most cases. - /// For TextureUsage::DEPTH_STENCIL_BUFFER textures, a VK_FORMAT_D* must be used. - void set_format(VkFormat format) { - m_format = format; - } -}; - -/// @brief A single render stage in the render graph. -/// @note Not to be confused with a vulkan render pass. -class RenderStage : public RenderGraphObject { - friend RenderGraph; - -private: - const std::string m_name; - std::unique_ptr m_physical; - std::vector m_writes; - std::vector m_reads; - - std::vector m_descriptor_layouts; - std::vector m_push_constant_ranges; - std::function m_on_record{[](auto &, auto &) {}}; - -protected: - explicit RenderStage(std::string name) : m_name(std::move(name)) {} - -public: - RenderStage(const RenderStage &) = delete; - RenderStage(RenderStage &&) = delete; - ~RenderStage() override = default; - - RenderStage &operator=(const RenderStage &) = delete; - RenderStage &operator=(RenderStage &&) = delete; - - /// @brief Specifies that this stage writes to `resource`. - void writes_to(const RenderResource *resource); - - /// @brief Specifies that this stage reads from `resource`. - void reads_from(const RenderResource *resource); - - /// @brief Binds a descriptor set layout to this render stage. - /// @note This function will be removed in the near future, as we are aiming for users of the API to not have to - /// deal with descriptors at all. - // TODO: Refactor descriptor management in the render graph - void add_descriptor_layout(VkDescriptorSetLayout layout) { - m_descriptor_layouts.push_back(layout); - } - - /// @brief Add a push constant range to this render stage. 
- /// @param range The push constant range - void add_push_constant_range(VkPushConstantRange range) { - m_push_constant_ranges.push_back(range); - } - - [[nodiscard]] const std::string &name() const { - return m_name; - } - - /// @brief Specifies a function that will be called during command buffer recording for this stage - /// @details This function can be used to specify other vulkan commands during command buffer recording. The most - /// common use for this is for draw commands. - void set_on_record(std::function on_record) { - m_on_record = std::move(on_record); - } -}; - -class GraphicsStage : public RenderStage { - friend RenderGraph; - -private: - bool m_clears_screen{false}; - bool m_depth_test{false}; - bool m_depth_write{false}; - VkPipelineColorBlendAttachmentState m_blend_attachment{}; - std::unordered_map m_buffer_bindings; - std::vector m_shaders; - -public: - explicit GraphicsStage(std::string &&name) : RenderStage(name) {} - GraphicsStage(const GraphicsStage &) = delete; - GraphicsStage(GraphicsStage &&) = delete; - ~GraphicsStage() override = default; - - GraphicsStage &operator=(const GraphicsStage &) = delete; - GraphicsStage &operator=(GraphicsStage &&) = delete; - - /// @brief Specifies that this stage should clear the screen before rendering. - void set_clears_screen(bool clears_screen) { - m_clears_screen = clears_screen; - } - - /// @brief Specifies the depth options for this stage. - /// @param depth_test Whether depth testing should be performed - /// @param depth_write Whether depth writing should be performed - void set_depth_options(bool depth_test, bool depth_write) { - m_depth_test = depth_test; - m_depth_write = depth_write; - } - - /// @brief Set the blend attachment for this stage. - /// @param blend_attachment The blend attachment - void set_blend_attachment(VkPipelineColorBlendAttachmentState blend_attachment) { - m_blend_attachment = blend_attachment; - } - - /// @brief Specifies that `buffer` should map to `binding` in the shaders of this stage. - void bind_buffer(const BufferResource *buffer, std::uint32_t binding); - - /// @brief Specifies that `shader` should be used during the pipeline of this stage. - /// @note Binding two shaders of same type (e.g. two vertex shaders) is undefined behaviour. - void uses_shader(const wrapper::Shader &shader); -}; - -// TODO: Add wrapper::Allocation that can be made by doing `device->make(...)`. 
-class PhysicalResource : public RenderGraphObject { - friend RenderGraph; - -protected: - const wrapper::Device &m_device; - VmaAllocation m_allocation{VK_NULL_HANDLE}; - - explicit PhysicalResource(const wrapper::Device &device) : m_device(device) {} - -public: - PhysicalResource(const PhysicalResource &) = delete; - PhysicalResource(PhysicalResource &&) = delete; - ~PhysicalResource() override = default; - - PhysicalResource &operator=(const PhysicalResource &) = delete; - PhysicalResource &operator=(PhysicalResource &&) = delete; -}; - -class PhysicalBuffer : public PhysicalResource { - friend RenderGraph; - -private: - VmaAllocationInfo m_alloc_info{}; - VkBuffer m_buffer{VK_NULL_HANDLE}; - -public: - explicit PhysicalBuffer(const wrapper::Device &device) : PhysicalResource(device) {} - PhysicalBuffer(const PhysicalBuffer &) = delete; - PhysicalBuffer(PhysicalBuffer &&) = delete; - ~PhysicalBuffer() override; - - PhysicalBuffer &operator=(const PhysicalBuffer &) = delete; - PhysicalBuffer &operator=(PhysicalBuffer &&) = delete; -}; - -class PhysicalImage : public PhysicalResource { - friend RenderGraph; - -private: - VkImage m_image{VK_NULL_HANDLE}; - VkImageView m_image_view{VK_NULL_HANDLE}; - -public: - explicit PhysicalImage(const wrapper::Device &device) : PhysicalResource(device) {} - PhysicalImage(const PhysicalImage &) = delete; - PhysicalImage(PhysicalImage &&) = delete; - ~PhysicalImage() override; - - PhysicalImage &operator=(const PhysicalImage &) = delete; - PhysicalImage &operator=(PhysicalImage &&) = delete; -}; - -class PhysicalBackBuffer : public PhysicalResource { - friend RenderGraph; - -private: - const wrapper::Swapchain &m_swapchain; - -public: - PhysicalBackBuffer(const wrapper::Device &device, const wrapper::Swapchain &swapchain) - : PhysicalResource(device), m_swapchain(swapchain) {} - PhysicalBackBuffer(const PhysicalBackBuffer &) = delete; - PhysicalBackBuffer(PhysicalBackBuffer &&) = delete; - ~PhysicalBackBuffer() override = default; - - PhysicalBackBuffer &operator=(const PhysicalBackBuffer &) = delete; - PhysicalBackBuffer &operator=(PhysicalBackBuffer &&) = delete; -}; - -class PhysicalStage : public RenderGraphObject { - friend RenderGraph; - -private: - VkPipeline m_pipeline{VK_NULL_HANDLE}; - VkPipelineLayout m_pipeline_layout{VK_NULL_HANDLE}; - -protected: - const wrapper::Device &m_device; - -public: - explicit PhysicalStage(const wrapper::Device &device) : m_device(device) {} - PhysicalStage(const PhysicalStage &) = delete; - PhysicalStage(PhysicalStage &&) = delete; - ~PhysicalStage() override; - - PhysicalStage &operator=(const PhysicalStage &) = delete; - PhysicalStage &operator=(PhysicalStage &&) = delete; - - /// @brief Retrieve the pipeline layout of this physical stage. - // TODO: This can be removed once descriptors are properly implemented in the render graph. 
- [[nodiscard]] VkPipelineLayout pipeline_layout() const { - return m_pipeline_layout; - } -}; - -class PhysicalGraphicsStage : public PhysicalStage { - friend RenderGraph; - -private: - VkRenderPass m_render_pass{VK_NULL_HANDLE}; - std::vector m_framebuffers; - -public: - explicit PhysicalGraphicsStage(const wrapper::Device &device) : PhysicalStage(device) {} - PhysicalGraphicsStage(const PhysicalGraphicsStage &) = delete; - PhysicalGraphicsStage(PhysicalGraphicsStage &&) = delete; - ~PhysicalGraphicsStage() override; - - PhysicalGraphicsStage &operator=(const PhysicalGraphicsStage &) = delete; - PhysicalGraphicsStage &operator=(PhysicalGraphicsStage &&) = delete; -}; - -class RenderGraph { -private: - wrapper::Device &m_device; - const wrapper::Swapchain &m_swapchain; - std::shared_ptr m_log{spdlog::default_logger()->clone("render-graph")}; - - // Vectors of render resources and stages. - std::vector> m_buffer_resources; - std::vector> m_texture_resources; - std::vector> m_stages; - - // Stage execution order. - std::vector m_stage_stack; - - // Functions for building resource related vulkan objects. - void build_buffer(const BufferResource &, PhysicalBuffer &) const; - void build_image(const TextureResource &, PhysicalImage &, VmaAllocationCreateInfo *) const; - void build_image_view(const TextureResource &, PhysicalImage &) const; - - // Functions for building stage related vulkan objects. - void build_pipeline_layout(const RenderStage *, PhysicalStage &) const; - void record_command_buffer(const RenderStage *, const wrapper::CommandBuffer &cmd_buf, - std::uint32_t image_index) const; - - // Functions for building graphics stage related vulkan objects. - void build_render_pass(const GraphicsStage *, PhysicalGraphicsStage &) const; - void build_graphics_pipeline(const GraphicsStage *, PhysicalGraphicsStage &) const; - -public: - RenderGraph(wrapper::Device &device, const wrapper::Swapchain &swapchain) - : m_device(device), m_swapchain(swapchain) {} - - /// @brief Adds either a render resource or render stage to the render graph. - /// @return A mutable reference to the just-added resource or stage - template - T *add(Args &&...args) { - auto ptr = std::make_unique(std::forward(args)...); - if constexpr (std::is_same_v) { - return static_cast(m_buffer_resources.emplace_back(std::move(ptr)).get()); - } else if constexpr (std::is_same_v) { - return static_cast(m_texture_resources.emplace_back(std::move(ptr)).get()); - } else if constexpr (std::is_base_of_v) { - return static_cast(m_stages.emplace_back(std::move(ptr)).get()); - } else { - static_assert(!std::is_same_v, "T must be a RenderResource or RenderStage"); - } - } - - /// @brief Compiles the render graph resources/stages into physical vulkan objects. - /// @param target The target resource of the render graph (usually the back buffer) - void compile(const RenderResource *target); - - /// @brief Submits the command frame's command buffers for drawing. 
- /// @param image_index The current image index, retrieved from Swapchain::acquire_next_image - /// @param cmd_buf The command buffer - void render(std::uint32_t image_index, const wrapper::CommandBuffer &cmd_buf); -}; - -template -[[nodiscard]] T *RenderGraphObject::as() { - return dynamic_cast(this); -} - -template -[[nodiscard]] const T *RenderGraphObject::as() const { - return dynamic_cast(this); -} - -template -void BufferResource::upload_data(const T *data, std::size_t count) { - m_data = data; - m_data_size = count * (m_element_size = sizeof(T)); - m_data_upload_needed = true; -} - -template -void BufferResource::upload_data(const std::vector &data) { - upload_data(data.data(), data.size()); -} - -} // namespace inexor::vulkan_renderer diff --git a/include/inexor/vulkan-renderer/renderer.hpp b/include/inexor/vulkan-renderer/renderer.hpp index 18c476b5c..5f282702b 100644 --- a/include/inexor/vulkan-renderer/renderer.hpp +++ b/include/inexor/vulkan-renderer/renderer.hpp @@ -1,83 +1 @@ -#pragma once - -#include "inexor/vulkan-renderer/camera.hpp" -#include "inexor/vulkan-renderer/fps_counter.hpp" -#include "inexor/vulkan-renderer/imgui.hpp" -#include "inexor/vulkan-renderer/msaa_target.hpp" -#include "inexor/vulkan-renderer/octree_gpu_vertex.hpp" -#include "inexor/vulkan-renderer/time_step.hpp" -#include "inexor/vulkan-renderer/vk_tools/gpu_info.hpp" -#include "inexor/vulkan-renderer/wrapper/instance.hpp" -#include "inexor/vulkan-renderer/wrapper/uniform_buffer.hpp" -#include "inexor/vulkan-renderer/wrapper/window.hpp" -#include "inexor/vulkan-renderer/wrapper/window_surface.hpp" - -namespace inexor::vulkan_renderer { - -class VulkanRenderer { -protected: - std::vector m_shader_stages; - - VkDebugReportCallbackEXT m_debug_report_callback{VK_NULL_HANDLE}; - - bool m_debug_report_callback_initialised{false}; - - TimeStep m_time_step; - - std::uint32_t m_window_width{0}; - std::uint32_t m_window_height{0}; - wrapper::Window::Mode m_window_mode{wrapper::Window::Mode::WINDOWED}; - - std::string m_window_title; - - FPSCounter m_fps_counter; - - bool m_vsync_enabled{false}; - - std::unique_ptr m_camera; - - std::unique_ptr m_window; - std::unique_ptr m_instance; - std::unique_ptr m_device; - std::unique_ptr m_surface; - std::unique_ptr m_swapchain; - std::unique_ptr m_imgui_overlay; - std::unique_ptr m_render_graph; - - std::vector m_shaders; - std::vector m_textures; - std::vector m_uniform_buffers; - std::vector m_descriptors; - std::vector m_octree_vertices; - std::vector m_octree_indices; - - TextureResource *m_back_buffer{nullptr}; - - // Render graph buffers for octree geometry. - BufferResource *m_index_buffer{nullptr}; - BufferResource *m_vertex_buffer{nullptr}; - - void setup_render_graph(); - void generate_octree_indices(); - void recreate_swapchain(); - void render_frame(); - -public: - VulkanRenderer() = default; - VulkanRenderer(const VulkanRenderer &) = delete; - VulkanRenderer(VulkanRenderer &&) = delete; - ~VulkanRenderer(); - - VulkanRenderer &operator=(const VulkanRenderer &) = delete; - VulkanRenderer &operator=(VulkanRenderer &&) = delete; - - bool m_window_resized{false}; - - /// Necessary for taking into account the relative speed of the system's CPU. 
- float m_time_passed{0.0f}; - - /// - TimeStep m_stopwatch; -}; - -} // namespace inexor::vulkan_renderer + \ No newline at end of file diff --git a/include/inexor/vulkan-renderer/renderers/imgui.hpp b/include/inexor/vulkan-renderer/renderers/imgui.hpp new file mode 100644 index 000000000..2a2c69020 --- /dev/null +++ b/include/inexor/vulkan-renderer/renderers/imgui.hpp @@ -0,0 +1,114 @@ +#pragma once + +#include "inexor/vulkan-renderer/render-graph/render_graph.hpp" + +#include +#include +#include + +#include +#include + +namespace inexor::vulkan_renderer::wrapper { +// Forward declarations +class Device; +class Shader; +class Swapchain; +} // namespace inexor::vulkan_renderer::wrapper + +namespace inexor::vulkan_renderer::pipelines { +// Forward declaration +class GraphicsPipeline; +} // namespace inexor::vulkan_renderer::pipelines + +namespace inexor::vulkan_renderer::render_graph { +// Forward declarations +class Buffer; +class RenderGraph; +class GraphicsPass; +} // namespace inexor::vulkan_renderer::render_graph + +namespace inexor::vulkan_renderer::renderers { + +// Using declarations +using render_graph::Buffer; +using render_graph::GraphicsPass; +using render_graph::RenderGraph; +using render_graph::Texture; +using wrapper::Device; +using wrapper::Shader; +using wrapper::Swapchain; +using wrapper::pipelines::GraphicsPipeline; + +/// A wrapper for an ImGui implementation +class ImGuiRenderer { + std::weak_ptr m_index_buffer; + std::weak_ptr m_vertex_buffer; + std::weak_ptr m_imgui_texture; + std::shared_ptr m_imgui_pipeline; + + std::weak_ptr m_imgui_pass; + + // This is the color attachment we will write to + std::weak_ptr m_swapchain; + // This is the previous pass we read from + std::weak_ptr m_previous_pass; + + std::shared_ptr m_vertex_shader; + std::shared_ptr m_fragment_shader; + + VkDescriptorSetLayout m_descriptor_set_layout{VK_NULL_HANDLE}; + VkDescriptorSet m_descriptor_set{VK_NULL_HANDLE}; + + // We need to collect the vertices and indices generated by ImGui + // because it does not store them in one array, but rather in chunks + std::vector m_index_data; + std::vector m_vertex_data; + + ImGuiContext *m_imgui_context{nullptr}; + + unsigned char *m_font_texture_data{nullptr}; + int m_font_texture_width{0}; + int m_font_texture_height{0}; + int m_font_texture_data_size{0}; + bool m_font_texture_initialized{false}; + + // Neither scale nor translation change + struct PushConstBlock { + glm::vec2 scale{-1.0f}; + glm::vec2 translate{-1.0f}; + } m_push_const_block; + + /// The user's ImGui data will be updated in this function + /// It will be called at the beginning of set_on_update + std::function m_on_update_user_data{[]() {}}; + + void load_font_data_from_file(); + + /// Customize ImGui style like text color for example + void set_imgui_style(); + +public: + /// Default constructor + /// @param device The device wrapper + /// @param render_graph The rendergraph + /// @param previous_pass The previous pass + /// @param swapchain The swapchain to render to + /// @param on_update_user_data The user-specified ImGui update function + ImGuiRenderer(const Device &device, + std::weak_ptr render_graph, + std::weak_ptr previous_pass, + std::weak_ptr swapchain, + std::function on_update_user_data); + + ImGuiRenderer(const ImGuiRenderer &) = delete; + ImGuiRenderer(ImGuiRenderer &&) noexcept; + + /// Call ImGui::DestroyContext + ~ImGuiRenderer(); + + ImGuiRenderer &operator=(const ImGuiRenderer &) = delete; + ImGuiRenderer &operator=(ImGuiRenderer &&) = delete; +}; + +} // namespace 
inexor::vulkan_renderer::renderers
diff --git a/include/inexor/vulkan-renderer/renderers/imgui_renderer.hpp b/include/inexor/vulkan-renderer/renderers/imgui_renderer.hpp
new file mode 100644
index 000000000..83c5fdac2
--- /dev/null
+++ b/include/inexor/vulkan-renderer/renderers/imgui_renderer.hpp
@@ -0,0 +1,54 @@
+#pragma once
+
+#include "inexor/vulkan-renderer/wrapper/shader.hpp"
+
+#include
+#include
+
+// Forward declaration
+namespace inexor::vulkan_renderer::wrapper {
+class Device;
+} // namespace inexor::vulkan_renderer::wrapper
+
+namespace inexor::vulkan_renderer::render_components {
+
+/// A renderer for ImGui
+class ImGuiRenderer {
+private:
+ ImDrawData *imgui_draw_data{nullptr};
+
+ // The vertex shader and fragment shader for ImGui
+ wrapper::Shader m_vertex_shader;
+ wrapper::Shader m_fragment_shader;
+
+ // The vertex buffer and index buffer resource for ImGui
+ render_graph::BufferResource *m_vertex_buffer{nullptr};
+ render_graph::BufferResource *m_index_buffer{nullptr};
+
+ std::vector m_vertex_data;
+ std::vector m_index_data;
+
+ struct PushConstBlock {
+ glm::vec2 scale{};
+ glm::vec2 translate{};
+ } m_push_const_block{};
+
+ void initialize_imgui();
+
+ // TODO: Better name?
+ void update_imgui_windows();
+
+public:
+ /// Default constructor
+ /// @param device The device wrapper
+ /// @param render_graph The render graph
+ explicit ImGuiRenderer(const wrapper::Device &device, render_graph::RenderGraph *render_graph);
+ ImGuiRenderer(const ImGuiRenderer &) = delete;
+ ImGuiRenderer(ImGuiRenderer &&) = delete;
+ ~ImGuiRenderer() = default;
+
+ ImGuiRenderer &operator=(const ImGuiRenderer &) = delete;
+ ImGuiRenderer &operator=(ImGuiRenderer &&) = delete;
+};
+
+} // namespace inexor::vulkan_renderer::render_components
diff --git a/include/inexor/vulkan-renderer/renderers/octree_renderer.hpp b/include/inexor/vulkan-renderer/renderers/octree_renderer.hpp
new file mode 100644
index 000000000..3440b3563
--- /dev/null
+++ b/include/inexor/vulkan-renderer/renderers/octree_renderer.hpp
@@ -0,0 +1,67 @@
+#pragma once
+
+#include "inexor/vulkan-renderer/world/cube.hpp"
+#include "inexor/vulkan-renderer/world/octree_vertex.hpp"
+
+#include
+
+#include
+#include
+#include
+
+// Forward declaration
+namespace inexor::vulkan_renderer::wrapper {
+class Device;
+} // namespace inexor::vulkan_renderer::wrapper
+
+namespace inexor::vulkan_renderer::render_components {
+
+/// Matrices for model, view, and projection
+struct ModelViewProjectionMatrices {
+ glm::mat4 model{1.0f};
+ glm::mat4 view{1.0f};
+ glm::mat4 proj{1.0f};
+};
+
+/// A class for rendering octree geometry
+class OctreeRenderer {
+private:
+ /// The octrees to render
+ std::vector> m_octrees;
+ std::vector update_needed;
+
+ /// The shaders for octree rendering
+ wrapper::Shader m_vertex_shader;
+ wrapper::Shader m_fragment_shader;
+
+ // There is one vector of vertices and indices for each octree
+ std::vector> m_octree_vertices;
+ std::vector> m_octree_indices;
+
+ // There is one vertex buffer and one index buffer for each octree
+ std::vector m_vertex_buffers;
+ std::vector m_index_buffers;
+
+ void generate_octree_vertices(std::size_t octree_index);
+ void generate_octree_indices(std::size_t octree_index);
+
+ void regenerate_all_octree_vertices();
+ void regenerate_all_octree_indices();
+
+public:
+ /// Default constructor
+ /// @param device The device wrapper
+ /// @param render_graph The render graph
+ explicit OctreeRenderer(const wrapper::Device &device, render_graph::RenderGraph *render_graph);
+
OctreeRenderer(const OctreeRenderer &) = delete; + OctreeRenderer(OctreeRenderer &&) = delete; + ~OctreeRenderer() = default; + + OctreeRenderer &operator=(const OctreeRenderer &) = delete; + OctreeRenderer &operator=(OctreeRenderer &&) = delete; + + /// Creates random octree geometry + void generate_random_octree_geometry(); +}; + +} // namespace inexor::vulkan_renderer::render_components diff --git a/include/inexor/vulkan-renderer/tools/file.hpp b/include/inexor/vulkan-renderer/tools/file.hpp deleted file mode 100644 index 33b4461c0..000000000 --- a/include/inexor/vulkan-renderer/tools/file.hpp +++ /dev/null @@ -1,18 +0,0 @@ -#pragma once - -#include -#include - -namespace inexor::vulkan_renderer::tools { - -/// @brief Extract the extension of a file as lowercase string. -/// @param file_name the name of the file. This is allowed to include the relative or complete path -/// @return The extension of the file as lowercase -[[nodiscard]] std::string get_file_extension_lowercase(const std::string &file_name); - -/// @brief Read the data of a file into memory -/// @param file_name The name of the file -/// @return A std::vector of type char which contains the binary data of the file -[[nodiscard]] std::vector read_file_binary_data(const std::string &file_name); - -} // namespace inexor::vulkan_renderer::tools diff --git a/include/inexor/vulkan-renderer/vk_tools/gpu_info.hpp b/include/inexor/vulkan-renderer/vk_tools/gpu_info.hpp deleted file mode 100644 index 5e02a8778..000000000 --- a/include/inexor/vulkan-renderer/vk_tools/gpu_info.hpp +++ /dev/null @@ -1,67 +0,0 @@ -#pragma once - -#include - -namespace inexor::vulkan_renderer::vk_tools { - -/// @brief Print available version of Vulkan API. -/// @note Inexor engine does not use a Vulkan metaloader such as Volk. -void print_driver_vulkan_version(); - -/// @brief Print information about a gpu's device queue families. -/// @param gpu The regarded gpu. -void print_physical_device_queue_families(VkPhysicalDevice gpu); - -/// @brief Print all available Vulkan instance layers. -void print_instance_layers(); - -/// @brief Print all available Vulkan instance extensions. -void print_instance_extensions(); - -// Note that device layers are deprecated. - -/// @brief Print all available Vulkan device extensions. -/// @param gpu The regarded gpu. -void print_device_extensions(VkPhysicalDevice gpu); - -/// @brief Print all supported surface capabilities of a given combination of gpu and surface. -/// @param gpu The regarded gpu. -/// @param surface The regarded surface. -void print_surface_capabilities(VkPhysicalDevice gpu, VkSurfaceKHR surface); - -/// @brief Print all supported surface formats of a given combination of gpu and surface. -/// @param gpu The regarded gpu. -/// @param surface The regarded Vulkan window surface. -void print_supported_surface_formats(VkPhysicalDevice gpu, VkSurfaceKHR surface); - -/// @brief Print all available presentation modes. -/// @param gpu The regarded gpu. -/// @param surface The regarded surface. -void print_presentation_modes(VkPhysicalDevice gpu, VkSurfaceKHR surface); - -/// @brief Print information about the specified gpu. -/// @param gpu The regarded gpu. -void print_physical_device_info(VkPhysicalDevice gpu); - -/// @brief Print information about the limits of the specified gpu. -/// @param gpu The regarded gpu. -void print_physical_device_limits(VkPhysicalDevice gpu); - -/// @brief Print information about the sparse properties of the specified gpu. -/// @param gpu The regarded gpu. 
-void print_physical_device_sparse_properties(VkPhysicalDevice gpu); - -/// @brief Print information about the features of the gpu. -/// @param gpu The regarded gpu. -void print_physical_device_features(VkPhysicalDevice gpu); - -/// @brief Print information about the memory properties of a specified gpu. -/// @param gpu The regarded gpu. -void print_physical_device_memory_properties(VkPhysicalDevice gpu); - -/// @brief List up all available gpus. -/// @param instance The instance of Vulkan. -/// @param surface The regarded Vulkan window surface. -void print_all_physical_devices(VkInstance instance, VkSurfaceKHR surface); - -} // namespace inexor::vulkan_renderer::vk_tools diff --git a/include/inexor/vulkan-renderer/vk_tools/representation.hpp b/include/inexor/vulkan-renderer/vk_tools/representation.hpp index 8d9df60c0..647496976 100644 --- a/include/inexor/vulkan-renderer/vk_tools/representation.hpp +++ b/include/inexor/vulkan-renderer/vk_tools/representation.hpp @@ -7,8 +7,8 @@ namespace inexor::vulkan_renderer::vk_tools { /// @brief This function returns a textual representation of the vulkan object T. -template -[[nodiscard]] std::string_view as_string(T); +template +[[nodiscard]] std::string_view as_string(VulkanObjectType); /// Get a feature description of a ``VkBool32`` value in the ``VkPhysicalDeviceFeatures`` struct by index. /// @param index The index of the ``VkBool32`` value in the ``VkPhysicalDeviceFeatures`` struct. @@ -16,6 +16,14 @@ template /// @return A feature description [[nodiscard]] std::string_view get_device_feature_description(std::size_t index); +/// This template allows us to convert a template parameter name into a VkObjectType +/// @note We have to specify a specialization for every Vulkan object type! +/// As far as we know, there is no other easy way to do this in C++. 
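+///
+/// A sketch of what one such specialization could look like (illustrative only, following the VkFence example given
+/// below; not necessarily how the engine defines it):
+/// @code
+/// template <>
+/// constexpr VkObjectType get_vulkan_object_type(VkFence) {
+///     return VK_OBJECT_TYPE_FENCE;
+/// }
+/// @endcode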
+/// @tparam VulkanObjectType The Vulkan object type as template parameter, for examplke VkFence +/// @return The VkObjectType of the template parameter, for the above mentioned example ``VK_OBJECT_TYPE_FENCE`` +template +[[nodiscard]] constexpr VkObjectType get_vulkan_object_type(VulkanObjectType); + /// @brief Convert a VkResult value into the corresponding error description as std::string_view /// @param result The VkResult to convert /// @return A std::string_view which contains an error description text of the VkResult diff --git a/include/inexor/vulkan-renderer/world/octree_vertex.hpp b/include/inexor/vulkan-renderer/world/octree_vertex.hpp new file mode 100644 index 000000000..8af4b0a77 --- /dev/null +++ b/include/inexor/vulkan-renderer/world/octree_vertex.hpp @@ -0,0 +1,35 @@ +#pragma once + +#include +#include + +namespace inexor::vulkan_renderer { + +/// A struct for octree vertices +struct OctreeVertex { + glm::vec3 position{0.0f, 0.0f, 0.0f}; + glm::vec3 color{0.0f, 0.0f, 0.0f}; +}; + +// Inline to suppress clang-tidy warning +inline bool operator==(const OctreeVertex &lhs, const OctreeVertex &rhs) { + return lhs.position == rhs.position && lhs.color == rhs.color; +} + +} // namespace inexor::vulkan_renderer + +namespace std { + +// Usually it is undefined behavior to declare something in the std namespace +// Specializing templates in the std namespace for user-defined types is an exception to the general rule of not +// modifying the std namespace +template <> +struct hash { + std::size_t operator()(const inexor::vulkan_renderer::OctreeVertex &vertex) const { + const auto h1 = std::hash{}(vertex.position); + const auto h2 = std::hash{}(vertex.color); + return h1 ^ h2; + } +}; + +} // namespace std diff --git a/include/inexor/vulkan-renderer/wrapper/command_buffer.hpp b/include/inexor/vulkan-renderer/wrapper/command_buffer.hpp deleted file mode 100644 index 130b534c4..000000000 --- a/include/inexor/vulkan-renderer/wrapper/command_buffer.hpp +++ /dev/null @@ -1,367 +0,0 @@ -#pragma once - -#include "inexor/vulkan-renderer/wrapper/fence.hpp" -#include "inexor/vulkan-renderer/wrapper/gpu_memory_buffer.hpp" - -#include -#include -#include -#include - -namespace inexor::vulkan_renderer::wrapper { - -// Forward declaration -class Device; - -/// @brief RAII wrapper class for VkCommandBuffer. -/// @todo Make trivially copyable (this class doesn't really "own" the command buffer, more just an OOP wrapper). -class CommandBuffer { - VkCommandBuffer m_command_buffer{VK_NULL_HANDLE}; - const Device &m_device; - std::string m_name; - std::unique_ptr m_wait_fence; - - // The Device wrapper must be able to call begin_command_buffer and end_command_buffer - friend class Device; - - /// The staging buffers which are maybe used in the command buffer - /// This vector of staging buffers will be cleared every time ``begin_command_buffer`` is called - /// @note We are not recycling staging buffers. Once they are used and the command buffer handle has reached the end - /// of its lifetime, the staging bufers will be cleared. We trust Vulkan Memory Allocator (VMA) in managing the - /// memory for staging buffers. 
- mutable std::vector m_staging_bufs; - - friend class CommandPool; - - /// Call vkBeginCommandBuffer - /// @param flags The command buffer usage flags, ``VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT`` by default - const CommandBuffer & // NOLINT - begin_command_buffer(VkCommandBufferUsageFlags flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) const; - - /// Create a new staging buffer which will be stored internally for a copy operation - /// @param data A raw pointer to the data to copy (must not be ``nullptr``) - /// @param data_size The size of the data to copy (must be greater than ``0``) - /// @param name The internal name of the staging buffer (must not be empty) - /// @return A VkBuffer which contains the staging buffer data - [[nodiscard]] VkBuffer create_staging_buffer(const void *data, const VkDeviceSize data_size, - const std::string &name) const { - assert(data); - assert(data_size > 0); - assert(!name.empty()); - - // Create a staging buffer for the copy operation and keep it until the CommandBuffer exceeds its lifetime - m_staging_bufs.emplace_back(m_device, name, data_size, data, data_size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, - VMA_MEMORY_USAGE_CPU_ONLY); - - return m_staging_bufs.back().buffer(); - } - - /// Create a new staging buffer which will be stored internally for a copy operation - /// @tparam The data type of the staging buffer - /// @param data A std::span of the source data - /// @param name The internal name of the staging buffer (must not be empty) - /// @return The staging buffer's VkBuffer - template - [[nodiscard]] VkBuffer create_staging_buffer(const std::span data, const std::string &name) const { - return create_staging_buffer(data.data(), static_cast(sizeof(data) * data.size()), name); - } - - /// Call vkEndCommandBuffer - /// @return A const reference to the this pointer (allowing method calls to be chained) - const CommandBuffer &end_command_buffer() const; // NOLINT - -public: - /// Default constructor - /// @param device A const reference to the device wrapper class - /// @param cmd_pool The command pool from which the command buffer will be allocated - /// @param name The internal debug marker name of the command buffer (must not be empty) - CommandBuffer(const Device &device, VkCommandPool cmd_pool, std::string name); - - CommandBuffer(const CommandBuffer &) = delete; - CommandBuffer(CommandBuffer &&) noexcept; - - ~CommandBuffer() = default; - - CommandBuffer &operator=(const CommandBuffer &) = delete; - CommandBuffer &operator=(CommandBuffer &&) = delete; - - /// Call vkCmdBeginRenderPass - /// @param render_pass_bi The renderpass begin info - /// @param subpass_contents The subpass contents (``VK_SUBPASS_CONTENTS_INLINE`` by default) - /// @return A const reference to the this pointer (allowing method calls to be chained) - const CommandBuffer &begin_render_pass(const VkRenderPassBeginInfo &render_pass_bi, // NOLINT - VkSubpassContents subpass_contents = VK_SUBPASS_CONTENTS_INLINE) const; - - /// Call vkCmdBindDescriptorSets - /// @param desc_sets The descriptor sets to bind - /// @param layout The pipeline layout - /// @param bind_point the pipeline bind point (``VK_PIPELINE_BIND_POINT_GRAPHICS`` by default) - /// @param first_set The first descriptor set (``0`` by default) - /// @param dyn_offsets The dynamic offset values (empty by default) - /// @return A const reference to the this pointer (allowing method calls to be chained) - const CommandBuffer &bind_descriptor_sets(std::span desc_sets, // NOLINT - VkPipelineLayout layout, - 
VkPipelineBindPoint bind_point = VK_PIPELINE_BIND_POINT_GRAPHICS, - std::uint32_t first_set = 0, - std::span dyn_offsets = {}) const; - - /// Call vkCmdBindIndexBuffer - /// @param buf The index buffer to bind - /// @param index_type The index type to use (``VK_INDEX_TYPE_UINT32`` by default) - /// @param offset The offset (``0`` by default) - /// @return A const reference to the this pointer (allowing method calls to be chained) - const CommandBuffer &bind_index_buffer(VkBuffer buf, VkIndexType index_type = VK_INDEX_TYPE_UINT32, // NOLINT - VkDeviceSize offset = 0) const; - - /// Call vkCmdBindPipeline - /// @param pipeline The graphics pipeline to bind - /// @param bind_point The pipeline bind point (``VK_PIPELINE_BIND_POINT_GRAPHICS`` by default) - /// @return A const reference to the this pointer (allowing method calls to be chained) - const CommandBuffer &bind_pipeline(VkPipeline pipeline, // NOLINT - VkPipelineBindPoint bind_point = VK_PIPELINE_BIND_POINT_GRAPHICS) const; - - /// Call vkCmdBindVertexBuffers - /// @param bufs The vertex buffers to bind - /// @param first_binding The first binding (``0`` by default) - /// @param offsets The device offsets (empty by default) - /// @return A const reference to the this pointer (allowing method calls to be chained) - const CommandBuffer &bind_vertex_buffers(std::span bufs, // NOLINT - std::uint32_t first_binding = 0, - std::span offsets = {}) const; - - /// Call vkCmdPipelineBarrier - /// @param image The image - /// @param old_layout The old layout of the image - /// @param new_layout The new layout of the image - /// @note The new layout must be different from the old layout! - /// @param subres_range The image subresource range - /// @param src_mask The source pipeline stage flags (``VK_PIPELINE_STAGE_ALL_COMMANDS_BIT`` by default) - /// @param dst_mask The destination pipeline stage flags (``VK_PIPELINE_STAGE_ALL_COMMANDS_BIT`` by default) - /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) - const CommandBuffer & // NOLINT - change_image_layout(VkImage image, VkImageLayout old_layout, VkImageLayout new_layout, - VkImageSubresourceRange subres_range, - VkPipelineStageFlags src_mask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, - VkPipelineStageFlags dst_mask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) const; - - /// Call vkCmdPipelineBarrier - /// @param image The image - /// @param old_layout The old layout of the image - /// @param new_layout The new layout of the image - /// @param mip_level_count The number of mip levels (The parameter in ``VkImageSubresourceRange``) - /// @param array_layer_count The number of array layers (The parameter in ``VkImageSubresourceRange``) - /// @param base_mip_level The base mip level index (The parameter in ``VkImageSubresourceRange``) - /// @param base_array_layer The base array layer index (The parameter in ``VkImageSubresourceRange``) - /// @param src_mask The source pipeline stage flags (``VK_PIPELINE_STAGE_ALL_COMMANDS_BIT`` by default) - /// @param dst_mask The destination pipeline stage flags (``VK_PIPELINE_STAGE_ALL_COMMANDS_BIT`` by default) - /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) - const CommandBuffer & // NOLINT - change_image_layout(VkImage image, VkImageLayout old_layout, VkImageLayout new_layout, - std::uint32_t mip_level_count = 1, std::uint32_t array_layer_count = 1, - std::uint32_t base_mip_level = 0, std::uint32_t base_array_layer = 0, - VkPipelineStageFlags src_mask 
= VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, - VkPipelineStageFlags dst_mask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) const; - - /// Call vkCmdCopyBuffer - /// @param src_buf The source buffer - /// @param dst_buf The destination buffer - /// @param copy_region A single buffer copy region - /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) - const CommandBuffer ©_buffer(VkBuffer src_buf, VkBuffer dst_buf, // NOLINT - const VkBufferCopy ©_region) const; - - /// Call vkCmdCopyBuffer - /// @param src_buf The source buffer - /// @param dst_buf The destination buffer - /// @param copy_regions A std::span of buffer copy regions - /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) - const CommandBuffer ©_buffer(VkBuffer src_buf, VkBuffer dst_buf, // NOLINT - std::span copy_regions) const; - - /// Call vkCmdCopyBuffer - /// @param src_buf The source buffer - /// @param dst_buf The destination buffer - /// @param src_buf_size The size of the source buffer - /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) - const CommandBuffer ©_buffer(VkBuffer src_buf, VkBuffer dst_buf, // NOLINT - VkDeviceSize src_buf_size) const; - - /// Call vkCmdCopyBufferToImage - /// @param src_buf The source buffer - /// @param dst_img The destination image - /// @note The destination image is always expected to be in layout ``VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL`` - /// @param copy_regions A std::span of buffer image copy regions - /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) - const CommandBuffer ©_buffer_to_image(VkBuffer src_buf, VkImage dst_img, // NOLINT - std::span copy_regions) const; - - /// Call vkCmdCopyBufferToImage - /// copy region - /// @param src_buf The source buffer - /// @param dst_img The destination image - /// @note The destination image is always expected to be in layout ``VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL`` - /// @param copy_region The buffer image copy region - /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) - const CommandBuffer ©_buffer_to_image(VkBuffer src_buf, VkImage dst_img, // NOLINT - const VkBufferImageCopy ©_region) const; - - /// Call vkCmdCopyBuffer - /// @param data A raw pointer to the data to copy - /// @param data_size The size of the data to copy - /// @param dst_img The destination image (must not be ``VK_NULL_HANDLE``) - /// @note The destination image is always expected to be in layout ``VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL`` for the - /// copy operation - /// @param name The internal name of the staging buffer (must not be empty) - /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) - const CommandBuffer ©_buffer_to_image(const void *data, const VkDeviceSize data_size, // NOLINT - VkImage dst_img, const VkBufferImageCopy ©_region, - const std::string &name) const; - - /// Call vkCmdCopyBuffer - /// @param data A std::span of the source data - /// @note A staging buffer for the copy operation will be created automatically from ``data`` - /// @param dst_img The destination image (must not be ``VK_NULL_HANDLE``) - /// @note The destination image is always expected to be in layout ``VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL`` for the - /// copy operation - /// @param name The internal name of the staging buffer (must not be empty) - /// 
@return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) - template - const CommandBuffer ©_buffer_to_image(const std::span data, // NOLINT - VkImage dst_img, const VkBufferImageCopy ©_region, - const std::string &name) const { - return copy_buffer_to_image(create_staging_buffer(data, name), dst_img, - static_cast(sizeof(data) * data.size()), copy_region, name); - } - - /// Call vkCmdDraw - /// @param vert_count The number of vertices to draw - /// @param inst_count The number of instances (``1`` by default) - /// @param first_vert The index of the first vertex (``0`` by default) - /// @param first_inst The instance ID of the first instance to draw (``0`` by default) - /// @return A const reference to the this pointer (allowing method calls to be chained) - const CommandBuffer &draw(std::uint32_t vert_count, std::uint32_t inst_count = 1, // NOLINT - std::uint32_t first_vert = 0, std::uint32_t first_inst = 0) const; - - /// Call vkCmdDrawIndexed - /// @param index_count The number of vertices to draw - /// @param inst_count The number of instances to draw (``1`` by defaul) - /// @param first_index The base index withing the index buffer (``0`` by default) - /// @param vert_offset The value added to the vertex index before indexing into the vertex buffer (``0`` by default) - /// @param first_inst The instance ID of the first instance to draw (``0`` by default) - /// @param index_count The number of indices to draw - /// @return A const reference to the this pointer (allowing method calls to be chained) - const CommandBuffer &draw_indexed(std::uint32_t index_count, std::uint32_t inst_count = 1, // NOLINT - std::uint32_t first_index = 0, std::int32_t vert_offset = 0, - std::uint32_t first_inst = 0) const; - - /// Call vkCmdEndRenderPass - /// @return A const reference to the this pointer (allowing method calls to be chained) - const CommandBuffer &end_render_pass() const; // NOLINT - - [[nodiscard]] VkResult fence_status() const { - return m_wait_fence->status(); - } - - /// Call vkCmdPipelineBarrier - /// @param src_stage_flags The the source stage flags - /// @param dst_stage_flags The destination stage flags - /// @param img_mem_barriers The image memory barriers - /// @note We start with image memory barriers as no-default parameter, since it's the most common use case - /// @param mem_barriers The memory barriers (empty by default) - /// @param buf_mem_barriers The buffer memory barriers (empty by default) - /// @param dep_flags The dependency flags (``0`` by default) - /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) - const CommandBuffer &pipeline_barrier(VkPipelineStageFlags src_stage_flags, // NOLINT - VkPipelineStageFlags dst_stage_flags, - std::span img_mem_barriers, - std::span mem_barriers = {}, - std::span buf_mem_barriers = {}, - VkDependencyFlags dep_flags = 0) const; - - /// Call vkCmdPipelineBarrier - /// @param src_stage_flags The the source stage flags - /// @param dst_stage_flags The destination stage flags - /// @param barrier The image memory barrier - /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) - const CommandBuffer &pipeline_image_memory_barrier(VkPipelineStageFlags src_stage_flags, // NOLINT - VkPipelineStageFlags dst_stage_flags, - const VkImageMemoryBarrier &barrier) const; - - /// Call vkCmdPipelineBarrier - /// @param src_stage_flags The the source stage flags - /// @param dst_stage_flags The 
destination stage flags - /// @param barrier The memory barrier - /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) - const CommandBuffer &pipeline_memory_barrier(VkPipelineStageFlags src_stage_flags, // NOLINT - VkPipelineStageFlags dst_stage_flags, - const VkMemoryBarrier &barrier) const; - - /// Call vkCmdPipelineBarrier to place a full memory barrier - /// @warning You should avoid full barriers since they are not the most performant solution in most cases - const CommandBuffer &full_barrier() const; - - /// Call vkCmdPushConstants - /// @param layout The pipeline layout - /// @param stage The shader stage that will be accepting the push constants - /// @param size The size of the push constant data in bytes - /// @param data A pointer to the push constant data - /// @param offset The offset value (``0`` by default) - /// @return A const reference to the this pointer (allowing method calls to be chained) - const CommandBuffer &push_constants(VkPipelineLayout layout, VkShaderStageFlags stage, // NOLINT - std::uint32_t size, const void *data, VkDeviceSize offset = 0) const; - - /// Call vkCmdPushConstants - /// @tparam T the data type of the push constant - /// @param layout The pipeline layout - /// @param data A const reference to the data - /// @param stage The shader stage that will be accepting the push constants - /// @param offset The offset value (``0`` by default) - /// @return A const reference to the this pointer (allowing method calls to be chained) - template - const CommandBuffer &push_constant(const VkPipelineLayout layout, const T &data, // NOLINT - const VkShaderStageFlags stage, const VkDeviceSize offset = 0) const { - return push_constants(layout, stage, sizeof(data), &data, offset); - } - - // Graphics commands - // TODO(): Switch to taking in OOP wrappers when we have them (e.g. 
bind_vertex_buffers takes in a VertexBuffer) - - [[nodiscard]] VkCommandBuffer get() const { - return m_command_buffer; - } - - [[nodiscard]] const Fence &get_wait_fence() const { - return *m_wait_fence; - } - - [[nodiscard]] const VkCommandBuffer *ptr() const { - return &m_command_buffer; - } - - /// Call the reset method of the Fence member - const CommandBuffer &reset_fence() const; - - /// Call vkQueueSubmit - /// @param submit_infos The submit infos - const CommandBuffer &submit(std::span submit_infos) const; // NOLINT - - /// Call vkQueueSubmit - /// @param submit_info The submit info - const CommandBuffer &submit(VkSubmitInfo submit_infos) const; // NOLINT - - /// Call vkQueueSubmit - const CommandBuffer &submit() const; // NOLINT - - /// Call vkQueueSubmit and use a fence to wait for command buffer submission and execution to complete - /// @param submit_infos The submit infos - const CommandBuffer &submit_and_wait(std::span submit_infos) const; // NOLINT - - /// Call vkQueueSubmit and use a fence to wait for command buffer submission and execution to complete - /// @param submit_info The submit info - const CommandBuffer &submit_and_wait(VkSubmitInfo submit_info) const; // NOLINT - - /// Call vkQueueSubmit and use a fence to wait for command buffer submission and execution to complete - const CommandBuffer &submit_and_wait() const; // NOLINT -}; - -} // namespace inexor::vulkan_renderer::wrapper diff --git a/include/inexor/vulkan-renderer/wrapper/commands/command_buffer.hpp b/include/inexor/vulkan-renderer/wrapper/commands/command_buffer.hpp new file mode 100644 index 000000000..8edd63dc9 --- /dev/null +++ b/include/inexor/vulkan-renderer/wrapper/commands/command_buffer.hpp @@ -0,0 +1,440 @@ +#pragma once + +#include "inexor/vulkan-renderer/render-graph/buffer.hpp" +#include "inexor/vulkan-renderer/render-graph/image.hpp" +#include "inexor/vulkan-renderer/wrapper/synchronization/fence.hpp" + +#include +#include +#include +#include +#include + +namespace inexor::vulkan_renderer::wrapper { +// Forward declaration +class Device; +} // namespace inexor::vulkan_renderer::wrapper + +namespace inexor::vulkan_renderer::render_graph { +// Forward declarations +class RenderGraph; +} // namespace inexor::vulkan_renderer::render_graph + +namespace inexor::vulkan_renderer::wrapper::pipelines { +// Forward declaration +class GraphicsPipeline; +} // namespace inexor::vulkan_renderer::wrapper::pipelines + +namespace inexor::vulkan_renderer::wrapper::synchronization { +// Forward declaration +class Fence; +} // namespace inexor::vulkan_renderer::wrapper::synchronization + +namespace inexor::vulkan_renderer::wrapper::commands { + +// Using declaration +using render_graph::Buffer; +using render_graph::BufferType; +using render_graph::Image; +using render_graph::RenderGraph; +using wrapper::pipelines::GraphicsPipeline; +using wrapper::synchronization::Fence; + +/// RAII wrapper class for VkCommandBuffer +/// @todo Make trivially copyable (this class doesn't really "own" the command buffer, more just an OOP wrapper). 
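+/// A minimal usage sketch; the handles ``cmd_buf``, ``rendering_info``, ``pipeline`` and ``vertex_buffer`` are
+/// assumed to exist and are not part of this header. Since every command method returns a const reference to the
+/// wrapper, recording can be chained:
+/// @code
+/// cmd_buf.begin_rendering(rendering_info)
+///     .bind_pipeline(pipeline)
+///     .bind_vertex_buffer(vertex_buffer)
+///     .draw(3)
+///     .end_rendering();
+/// @endcode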
+class CommandBuffer { + friend class Device; + friend class RenderGraph; + friend class CommandPool; + +private: + VkCommandBuffer m_cmd_buf{VK_NULL_HANDLE}; + const Device &m_device; + std::string m_name; + std::unique_ptr m_cmd_buf_execution_completed; + + /// Call vkBeginCommandBuffer + /// @param flags The command buffer usage flags, ``VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT`` by default + const CommandBuffer & // NOLINT + begin_command_buffer(VkCommandBufferUsageFlags flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) const; + + /// Call vkEndCommandBuffer + /// @return A const reference to the this pointer (allowing method calls to be chained) + const CommandBuffer &end_command_buffer() const; // NOLINT + + /// Call vkQueueSubmit + /// @param queue_type The queue type to submit the command buffer to + /// @param wait_semaphores The semaphores to wait for + /// @param signal_semaphores The semaphores to signal + void submit_and_wait(VkQueueFlagBits queue_type, + std::span wait_semaphores = {}, + std::span signal_semaphores = {}) const; + +public: + /// Default constructor + /// @param device A const reference to the device wrapper class + /// @param cmd_pool The command pool from which the command buffer will be allocated + /// @param name The internal debug marker name of the command buffer (must not be empty) + CommandBuffer(const wrapper::Device &device, VkCommandPool cmd_pool, std::string name); + + CommandBuffer(const CommandBuffer &) = delete; + CommandBuffer(CommandBuffer &&) noexcept; + + /// @note Command buffers are allocated from a command pool, meaning the memory required for this will be + /// freed if the corresponding command pool is destroyed. Command buffers are not freed in the Destructor. + ~CommandBuffer() = default; + + CommandBuffer &operator=(const CommandBuffer &) = delete; + CommandBuffer &operator=(CommandBuffer &&) = delete; + + /// Call vkCmdBeginDebugUtilsLabelEXT + /// @param name The name of the debug label + /// @param color The color of the debug label + /// @return A const reference to the this pointer (allowing method calls to be chained) + const CommandBuffer &begin_debug_label_region(std::string name, std::array color) const; + + /// Call vkCmdBeginRendering + /// @note We don't need to call it ``vkCmdBeginRenderingKHR`` anymore since it's part of Vulkan 1.3's core + /// @note ``begin_render_pass`` has been deprecated because of dynamic rendering (``VK_KHR_dynamic_rendering``) + /// @param rendering_info The info for dynamic rendering + /// @return A const reference to the this pointer (allowing method calls to be chained) + const CommandBuffer &begin_rendering(const VkRenderingInfo &rendering_info) const; + + /// Call vkCmdBindDescriptorSets to bind one single descriptor set + /// @note Binding multiple descriptor sets would require implementing bind_descriptor_sets, which is not required + /// for now. 
+ /// @param desc_set The descriptor set to bind + /// @param pipeline The graphics pipeline whose pipeline layout will be used + /// @return A const reference to the this pointer (allowing method calls to be chained) + const CommandBuffer &bind_descriptor_set(VkDescriptorSet desc_set, std::weak_ptr pipeline) const; + + // TODO: Implement more parameters of vkCmdBindDescriptorSets in bind_descriptor_set if necessary + // TODO: Implement bind_descriptor_sets if binding multiple descriptor sets is necessary + + /// Call vkCmdBindIndexBuffer + /// @param buffer The index buffer to bind + /// @param index_type The index type to use (``VK_INDEX_TYPE_UINT32`` by default) + /// @param offset The offset (``0`` by default) + /// @return A const reference to the this pointer (allowing method calls to be chained) + const CommandBuffer &bind_index_buffer(std::weak_ptr buffer, + VkIndexType index_type = VK_INDEX_TYPE_UINT32, // NOLINT + VkDeviceSize offset = 0) const; + + /// Call vkCmdBindPipeline + /// @param pipeline The graphics pipeline to bind + /// @param bind_point The pipeline bind point (``VK_PIPELINE_BIND_POINT_GRAPHICS`` by default) + /// @return A const reference to the this pointer (allowing method calls to be chained) + const CommandBuffer &bind_pipeline(std::weak_ptr pipeline, // NOLINT + VkPipelineBindPoint bind_point = VK_PIPELINE_BIND_POINT_GRAPHICS) const; + + /// Call vkCmdBindVertexBuffers + /// @note When binding only a single vertex buffer, the parameters ``firstBinding``, ``bindingCount`` and + /// ``pOffsets`` of ``vkCmdBindVertexBuffers`` do not need to be exposed. + /// @param buffer The vertex buffer to bind + /// @return A const reference to the this pointer (allowing method calls to be chained) + const CommandBuffer &bind_vertex_buffer(std::weak_ptr buffer) const; + + /// Call vkCmdPipelineBarrier + /// @param img The image + /// @param old_layout The old layout of the image + /// @param new_layout The new layout of the image + /// @note The new layout must be different from the old layout!
+ /// @param subres_range The image subresource range + /// @param src_mask The source pipeline stage flags (``VK_PIPELINE_STAGE_ALL_COMMANDS_BIT`` by default) + /// @param dst_mask The destination pipeline stage flags (``VK_PIPELINE_STAGE_ALL_COMMANDS_BIT`` by default) + /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) + const CommandBuffer & // NOLINT + change_image_layout(VkImage img, + VkImageLayout old_layout, + VkImageLayout new_layout, + VkImageSubresourceRange subres_range, + VkPipelineStageFlags src_mask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, + VkPipelineStageFlags dst_mask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) const; + + /// Call vkCmdPipelineBarrier + /// @param img The image + /// @param old_layout The old layout of the image + /// @param new_layout The new layout of the image + /// @param mip_level_count The number of mip levels (The parameter in ``VkImageSubresourceRange``) + /// @param array_layer_count The number of array layers (The parameter in ``VkImageSubresourceRange``) + /// @param base_mip_level The base mip level index (The parameter in ``VkImageSubresourceRange``) + /// @param base_array_layer The base array layer index (The parameter in ``VkImageSubresourceRange``) + /// @param src_mask The source pipeline stage flags (``VK_PIPELINE_STAGE_ALL_COMMANDS_BIT`` by default) + /// @param dst_mask The destination pipeline stage flags (``VK_PIPELINE_STAGE_ALL_COMMANDS_BIT`` by default) + /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) + const CommandBuffer & // NOLINT + change_image_layout(VkImage imgs, + VkImageLayout old_layout, + VkImageLayout new_layout, + std::uint32_t mip_level_count = 1, + std::uint32_t array_layer_count = 1, + std::uint32_t base_mip_level = 0, + std::uint32_t base_array_layer = 0, + VkPipelineStageFlags src_mask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, + VkPipelineStageFlags dst_mask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) const; + + /// Call vkCmdCopyBuffer + /// @param src_buf The source buffer + /// @param dst_buf The destination buffer + /// @param copy_region A single buffer copy region + /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) + const CommandBuffer ©_buffer(VkBuffer src_buf, + VkBuffer dst_buf, // NOLINT + const VkBufferCopy ©_region) const; + + /// Call vkCmdCopyBuffer + /// @param src_buf The source buffer + /// @param dst_buf The destination buffer + /// @param copy_regions A std::span of buffer copy regions + /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) + const CommandBuffer ©_buffer(VkBuffer src_buf, + VkBuffer dst_buf, // NOLINT + std::span copy_regions) const; + + /// Call vkCmdCopyBuffer + /// @param src_buf The source buffer + /// @param dst_buf The destination buffer + /// @param src_buf_size The size of the source buffer + /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) + const CommandBuffer ©_buffer(VkBuffer src_buf, + VkBuffer dst_buf, // NOLINT + VkDeviceSize src_buf_size) const; + + /// Call vkCmdCopyBufferToImage + /// @param src_buf The source buffer + /// @param dst_img The destination image + /// @note The destination image is always expected to be in layout ``VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL`` + /// @param copy_region The buffer image copy region + /// @return A const reference to the dereferenced ``this`` pointer 
(allowing for method calls to be chained) + const CommandBuffer &copy_buffer_to_image(VkBuffer src_buf, + VkImage dst_img, // NOLINT + const VkBufferImageCopy &copy_region) const; + + /// Call vkCmdCopyBufferToImage + /// @param src_buffer The source buffer + /// @param dst_img The image to copy the buffer into + /// @param extent The extent of the image + /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) + const CommandBuffer &copy_buffer_to_image(VkBuffer src_buffer, VkImage dst_img, VkExtent3D extent) const; + + /// Call vkCmdCopyBufferToImage + /// @param src_buffer The source buffer + /// @param img The image to copy the buffer into + /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) + const CommandBuffer &copy_buffer_to_image(VkBuffer src_buffer, std::weak_ptr img) const; + + /// Call vkCmdDraw + /// @param vert_count The number of vertices to draw + /// @param inst_count The number of instances (``1`` by default) + /// @param first_vert The index of the first vertex (``0`` by default) + /// @param first_inst The instance ID of the first instance to draw (``0`` by default) + /// @return A const reference to the this pointer (allowing method calls to be chained) + const CommandBuffer &draw(std::uint32_t vert_count, + std::uint32_t inst_count = 1, + std::uint32_t first_vert = 0, + std::uint32_t first_inst = 0) const; + + /// Call vkCmdDrawIndexed + /// @param index_count The number of indices to draw + /// @param inst_count The number of instances to draw (``1`` by default) + /// @param first_index The base index within the index buffer (``0`` by default) + /// @param vert_offset The value added to the vertex index before indexing into the vertex buffer (``0`` by default) + /// @param first_inst The instance ID of the first instance to draw (``0`` by default) + /// @return A const reference to the this pointer (allowing method calls to be chained) + const CommandBuffer &draw_indexed(std::uint32_t index_count, + std::uint32_t inst_count = 1, + std::uint32_t first_index = 0, + std::int32_t vert_offset = 0, + std::uint32_t first_inst = 0) const; + + /// Call vkCmdEndDebugUtilsLabelEXT + /// @return A const reference to the this pointer (allowing method calls to be chained) + const CommandBuffer &end_debug_label_region() const; + + /// Call vkCmdEndRendering + /// @note We don't need to call it ``vkCmdEndRenderingKHR`` anymore since it's part of Vulkan 1.3's core + /// @note ``end_render_pass`` has been deprecated because of dynamic rendering (``VK_KHR_dynamic_rendering``) + /// @return A const reference to the this pointer (allowing method calls to be chained) + const CommandBuffer &end_rendering() const; + + /// Call vkCmdPipelineBarrier + /// @param src_stage_flags The source stage flags + /// @param dst_stage_flags The destination stage flags + /// @param img_mem_barriers The image memory barriers + /// @note We start with image memory barriers as the non-default parameter, since it's the most common use case + /// @param mem_barriers The memory barriers (empty by default) + /// @param buf_mem_barriers The buffer memory barriers (empty by default) + /// @param dep_flags The dependency flags (``0`` by default) + /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) + const CommandBuffer &pipeline_barrier(VkPipelineStageFlags src_stage_flags, // NOLINT + VkPipelineStageFlags
dst_stage_flags, + std::span img_mem_barriers, + std::span mem_barriers = {}, + std::span buf_mem_barriers = {}, + VkDependencyFlags dep_flags = 0) const; + + /// Call vkCmdPipelineBarrier + /// @param src_stage_flags The source stage flags + /// @param dst_stage_flags The destination stage flags + /// @param buffer_mem_barrier The buffer memory barrier + /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) + const CommandBuffer &pipeline_buffer_memory_barrier(VkPipelineStageFlags src_stage_flags, + VkPipelineStageFlags dst_stage_flags, + const VkBufferMemoryBarrier &buffer_mem_barrier) const; + + /// Call vkCmdPipelineBarrier + /// @param src_stage_flags The source stage flags + /// @param dst_stage_flags The destination stage flags + /// @param src_access_flags The source access flags + /// @param dst_access_flags The destination access flags + /// @param buffer The affected buffer + /// @param size The size of the affected buffer region (``VK_WHOLE_SIZE`` by default) + /// @param offset The offset within the buffer (``0`` by default) + /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) + const CommandBuffer &pipeline_buffer_memory_barrier(VkPipelineStageFlags src_stage_flags, + VkPipelineStageFlags dst_stage_flags, + VkAccessFlags src_access_flags, + VkAccessFlags dst_access_flags, + VkBuffer buffer, + VkDeviceSize size = VK_WHOLE_SIZE, + VkDeviceSize offset = 0) const; + + /// Call vkCmdPipelineBarrier + /// @param src_stage_flags The source stage flags + /// @param dst_stage_flags The destination stage flags + /// @param buffer_mem_barriers The buffer memory barriers + /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) + const CommandBuffer & + pipeline_buffer_memory_barriers(VkPipelineStageFlags src_stage_flags, + VkPipelineStageFlags dst_stage_flags, + std::span buffer_mem_barriers) const; + + /// Place a buffer memory pipeline barrier before a vkCmdCopyBuffer command + /// @param buffer The affected buffer + /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) + const CommandBuffer &pipeline_buffer_memory_barrier_before_copy_buffer(VkBuffer buffer) const; + + /// Place a buffer memory pipeline barrier after a vkCmdCopyBuffer command + /// @param buffer The affected buffer + /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) + const CommandBuffer &pipeline_buffer_memory_barrier_after_copy_buffer(VkBuffer buffer) const; + + /// Call vkCmdPipelineBarrier + /// @param src_stage_flags The source stage flags + /// @param dst_stage_flags The destination stage flags + /// @param barrier The image memory barrier + /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) + const CommandBuffer &pipeline_image_memory_barrier(VkPipelineStageFlags src_stage_flags, // NOLINT + VkPipelineStageFlags dst_stage_flags, + const VkImageMemoryBarrier &barrier) const; + + /// Call vkCmdPipelineBarrier + /// @param src_stage_flags The source stage flags + /// @param dst_stage_flags The destination stage flags + /// @param src_access_flags The source access flags + /// @param dst_access_flags The destination access flags + /// @param old_img_layout The old image layout + /// @param new_img_layout The new image layout + /// @param img The image + /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) + const CommandBuffer &pipeline_image_memory_barrier(VkPipelineStageFlags src_stage_flags, + VkPipelineStageFlags dst_stage_flags, + VkAccessFlags src_access_flags, + VkAccessFlags dst_access_flags, +
VkImageLayout old_img_layout, + VkImageLayout new_img_layout, + VkImage img) const; + + /// Call vkCmdPipelineBarrier + /// @param src_stage_flags The source stage flags + /// @param dst_stage_flags The destination stage flags + /// @param barriers The image memory barriers + /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) + const CommandBuffer &pipeline_image_memory_barriers(VkPipelineStageFlags src_stage_flags, // NOLINT + VkPipelineStageFlags dst_stage_flags, + std::span barriers) const; + + /// Place an image memory pipeline barrier after a vkCmdCopyBufferToImage command + /// @param img The affected image + /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) + const CommandBuffer &pipeline_image_memory_barrier_after_copy_buffer_to_image(VkImage img) const; + + /// Place an image memory pipeline barrier before a vkCmdCopyBufferToImage command + /// @param img The affected image + /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) + const CommandBuffer &pipeline_image_memory_barrier_before_copy_buffer_to_image(VkImage img) const; + + /// Call vkCmdPipelineBarrier + /// @param src_stage_flags The source stage flags + /// @param dst_stage_flags The destination stage flags + /// @param barrier The memory barrier + /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) + const CommandBuffer &pipeline_memory_barrier(VkPipelineStageFlags src_stage_flags, // NOLINT + VkPipelineStageFlags dst_stage_flags, + const VkMemoryBarrier &barrier) const; + + /// Call vkCmdPipelineBarrier + /// @param src_stage_flags The source stage flags + /// @param dst_stage_flags The destination stage flags + /// @param barriers The memory barriers + /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) + const CommandBuffer &pipeline_memory_barriers(VkPipelineStageFlags src_stage_flags, // NOLINT + VkPipelineStageFlags dst_stage_flags, + const std::span barriers) const; + + /// Call vkCmdPipelineBarrier to place a full memory barrier + /// @warning You should avoid full barriers since they are not the most performant solution in most cases + const CommandBuffer &full_barrier() const; + + /// Call vkCmdInsertDebugUtilsLabelEXT + /// @param name The name of the debug label to insert + /// @param color The color of the debug label + /// @return A const reference to the dereferenced ``this`` pointer (allowing for method calls to be chained) + const CommandBuffer &insert_debug_label(std::string name, std::array color) const; + + /// Call vkCmdPushConstants + /// @param layout The pipeline layout + /// @param stage The shader stage that will be accepting the push constants + /// @param size The size of the push constant data in bytes + /// @param data A pointer to the push constant data + /// @param offset The offset value (``0`` by default) + /// @return A const reference to the this pointer (allowing method calls to be chained) + const CommandBuffer &push_constants(VkPipelineLayout layout, + VkShaderStageFlags stage, // NOLINT + std::uint32_t size, + const void *data, + VkDeviceSize offset = 0) const; + + /// Call vkCmdPushConstants + /// @tparam T The data type of the push constant + /// @param pipeline The graphics pipeline + /// @param data A const reference to the data + /// @param stage The shader stage that will be accepting the push constants + /// @param offset The offset value (``0`` by default) + /// @return A const reference to the this pointer (allowing method calls to be chained) + template + const CommandBuffer &push_constant(const std::weak_ptr pipeline, + const T &data, // NOLINT + const VkShaderStageFlags stage, + const VkDeviceSize offset = 0) const { + return push_constants(pipeline.lock()->m_pipeline_layout->m_pipeline_layout, stage,
sizeof(data), &data, + offset); + } + + /// Set the scissor + /// @param scissor The scissor + /// @return A const reference to the this pointer (allowing method calls to be chained) + const CommandBuffer &set_scissor(VkRect2D scissor) const; + + /// Set the name of a command buffer during recording of a specific command in the current command buffer + /// @param name The name of the suboperation + /// @return A const reference to the this pointer (allowing method calls to be chained) + const CommandBuffer &set_suboperation_debug_name(std::string name) const; + + /// Set the viewport + /// @param viewport The viewport + /// @return A const reference to the this pointer (allowing method calls to be chained) + const CommandBuffer &set_viewport(VkViewport viewport) const; +}; + +} // namespace inexor::vulkan_renderer::wrapper::commands diff --git a/include/inexor/vulkan-renderer/wrapper/command_pool.hpp b/include/inexor/vulkan-renderer/wrapper/commands/command_pool.hpp similarity index 64% rename from include/inexor/vulkan-renderer/wrapper/command_pool.hpp rename to include/inexor/vulkan-renderer/wrapper/commands/command_pool.hpp index c088290e0..769365f91 100644 --- a/include/inexor/vulkan-renderer/wrapper/command_pool.hpp +++ b/include/inexor/vulkan-renderer/wrapper/commands/command_pool.hpp @@ -3,50 +3,46 @@ #include #include -#include "inexor/vulkan-renderer/wrapper/command_buffer.hpp" - -#include +#include "inexor/vulkan-renderer/wrapper/commands/command_buffer.hpp" namespace inexor::vulkan_renderer::wrapper { - // Forward declaration class Device; +} // namespace inexor::vulkan_renderer::wrapper + +namespace inexor::vulkan_renderer::wrapper::commands { + +// Using declaration +using wrapper::Device; -/// @brief RAII wrapper class for VkCommandPool. 
+/// RAII wrapper class for VkCommandPool class CommandPool { std::string m_name; const Device &m_device; VkCommandPool m_cmd_pool{VK_NULL_HANDLE}; + VkQueueFlagBits m_queue_type; /// The command buffers which can be requested by the current thread - std::vector> m_cmd_bufs; + mutable std::vector> m_cmd_bufs; public: /// Default constructor /// @param device The device wrapper instance + /// @param queue_type The queue type /// @param name The internal debug marker name which will be assigned to this command pool - CommandPool(const Device &device, std::string name); + CommandPool(const Device &device, VkQueueFlagBits queue_type, std::string name); CommandPool(const CommandPool &) = delete; CommandPool(CommandPool &&) noexcept; - ~CommandPool(); CommandPool &operator=(const CommandPool &) = delete; CommandPool &operator=(CommandPool &&) = delete; - [[nodiscard]] VkCommandPool get() const { - return m_cmd_pool; - } - - [[nodiscard]] const VkCommandPool *ptr() const { - return &m_cmd_pool; - } - /// Request a command buffer /// @param name The internal debug name which will be assigned to this command buffer (must not be empty) /// @return A command buffer handle instance which allows access to the requested command buffer - [[nodiscard]] const CommandBuffer &request_command_buffer(const std::string &name); + [[nodiscard]] CommandBuffer &request_command_buffer(const std::string &name) const; }; -} // namespace inexor::vulkan_renderer::wrapper +} // namespace inexor::vulkan_renderer::wrapper::commands diff --git a/include/inexor/vulkan-renderer/wrapper/cpu_texture.hpp b/include/inexor/vulkan-renderer/wrapper/cpu_texture.hpp deleted file mode 100644 index 43ad0e6fd..000000000 --- a/include/inexor/vulkan-renderer/wrapper/cpu_texture.hpp +++ /dev/null @@ -1,72 +0,0 @@ -#pragma once - -#include - -#include - -namespace inexor::vulkan_renderer::wrapper { - -/// @brief RAII wrapper class for texture data. -/// @todo Scan asset directory automatically. -class CpuTexture { - std::string m_name; - - int m_texture_width{0}; - int m_texture_height{0}; - int m_texture_channels{0}; - int m_mip_levels{0}; - - stbi_uc *m_texture_data{nullptr}; - - /// @brief Generate a chessboard color pattern which will be used as error texture. - void generate_error_texture_data(); - -public: - /// @brief Create a CpuTexture instance with a default texture. - CpuTexture(); - - /// @brief Read a texture from a file. - /// @param file_name The file name of the texture. - /// @param name The internal debug marker name of the command buffer. This must not be an empty string. - CpuTexture(const std::string &file_name, std::string name); - - CpuTexture(const CpuTexture &) = delete; - CpuTexture(CpuTexture &&) noexcept; - - ~CpuTexture(); - - CpuTexture &operator=(const CpuTexture &) = delete; - CpuTexture &operator=(CpuTexture &&) = default; - - [[nodiscard]] const std::string &name() const { - return m_name; - } - - [[nodiscard]] int width() const { - return m_texture_width; - } - - [[nodiscard]] int height() const { - return m_texture_height; - } - - [[nodiscard]] int channels() const { - return m_texture_channels; - } - - [[nodiscard]] int mip_levels() const { - return m_mip_levels; - } - - [[nodiscard]] stbi_uc *data() const { - return m_texture_data; - } - - [[nodiscard]] std::size_t data_size() const { - // TODO: We will need to update this once we fully support mip levels. 
- return static_cast(m_texture_width) * static_cast(m_texture_height) * - static_cast(m_texture_channels); - } -}; - -} // namespace inexor::vulkan_renderer::wrapper diff --git a/include/inexor/vulkan-renderer/wrapper/descriptor.hpp b/include/inexor/vulkan-renderer/wrapper/descriptor.hpp deleted file mode 100644 index 2dff130c9..000000000 --- a/include/inexor/vulkan-renderer/wrapper/descriptor.hpp +++ /dev/null @@ -1,52 +0,0 @@ -#pragma once - -#include - -#include -#include - -namespace inexor::vulkan_renderer::wrapper { - -// Forward declaration -class Device; - -/// @brief RAII wrapper class for resource descriptors. -class ResourceDescriptor { - std::string m_name; - const Device &m_device; - VkDescriptorPool m_descriptor_pool{VK_NULL_HANDLE}; - VkDescriptorSetLayout m_descriptor_set_layout{VK_NULL_HANDLE}; - std::vector m_descriptor_set_layout_bindings; - std::vector m_write_descriptor_sets; - std::vector m_descriptor_sets; - -public: - /// @brief Default constructor. - /// @param device The const reference to a device RAII wrapper instance. - /// @param layout_bindings The descriptor layout bindings. - /// @param descriptor_writes The write descriptor sets. - /// @param name The internal debug marker name of the resource descriptor. - ResourceDescriptor(const Device &device, std::vector &&layout_bindings, - std::vector &&descriptor_writes, std::string &&name); - - ResourceDescriptor(const ResourceDescriptor &) = delete; - ResourceDescriptor(ResourceDescriptor &&) noexcept; - ~ResourceDescriptor(); - - ResourceDescriptor &operator=(const ResourceDescriptor &) = delete; - ResourceDescriptor &operator=(ResourceDescriptor &&) = delete; - - [[nodiscard]] const auto &descriptor_sets() const { - return m_descriptor_sets; - } - - [[nodiscard]] auto descriptor_set_layout() const { - return m_descriptor_set_layout; - } - - [[nodiscard]] const auto &descriptor_set_layout_bindings() const { - return m_descriptor_set_layout_bindings; - } -}; - -} // namespace inexor::vulkan_renderer::wrapper diff --git a/include/inexor/vulkan-renderer/wrapper/descriptor_builder.hpp b/include/inexor/vulkan-renderer/wrapper/descriptor_builder.hpp deleted file mode 100644 index c0b53f034..000000000 --- a/include/inexor/vulkan-renderer/wrapper/descriptor_builder.hpp +++ /dev/null @@ -1,99 +0,0 @@ -#pragma once - -#include "inexor/vulkan-renderer/wrapper/make_info.hpp" - -#include - -#include -#include -#include -#include - -namespace inexor::vulkan_renderer::wrapper { - -// Forward declarations -class Device; -class ResourceDescriptor; - -class DescriptorBuilder { - const Device &m_device; - - std::vector m_layout_bindings; - std::vector m_write_sets; - std::vector m_descriptor_buffer_infos; - std::vector m_descriptor_image_infos; - -public: - /// @brief Constructs the descriptor builder. - /// @param device The const reference to a device RAII wrapper instance. - explicit DescriptorBuilder(const Device &device); - - DescriptorBuilder(const DescriptorBuilder &) = delete; - DescriptorBuilder(DescriptorBuilder &&) = delete; - ~DescriptorBuilder() = default; - - DescriptorBuilder &operator=(const DescriptorBuilder &) = delete; - DescriptorBuilder &operator=(DescriptorBuilder &&) = delete; - - // TODO: Implement more descriptor types than just uniform buffers and combined image samplers. - // TODO: Support uniform buffer offset in VkDescriptorBufferInfo. - // TODO: Offer overloaded methods which expose more fields of the structures. - - /// @brief Adds a uniform buffer to the descriptor container. 
- /// @tparam T The type of the uniform buffer. - /// @param uniform_buffer The uniform buffer which contains the data which will be accessed by the shader. - /// @param binding The binding index which will be used in the SPIR-V shader. - /// @param shader_stage The shader stage the uniform buffer will be used in, most likely the vertex shader. - /// @return A const reference to this DescriptorBuilder instance. - template - DescriptorBuilder &add_uniform_buffer(VkBuffer uniform_buffer, std::uint32_t binding, - VkShaderStageFlagBits shader_stage = VK_SHADER_STAGE_VERTEX_BIT); - - /// @brief Adds a combined image sampler to the descriptor container. - /// @param image_sampler The pointer to the combined image sampler. - /// @param image_view The pointer to the image view. - /// @param binding The binding index which will be used in the SPIR-V shader. - /// @param shader_stage The shader stage the uniform buffer will be used in, most likely the fragment shader. - /// @return A const reference to this DescriptorBuilder instance. - DescriptorBuilder &add_combined_image_sampler(VkSampler image_sampler, VkImageView image_view, - std::uint32_t binding, - VkShaderStageFlagBits shader_stage = VK_SHADER_STAGE_FRAGMENT_BIT); - - /// @brief Builds the resource descriptor. - /// @param name The internal name of the resource descriptor. - /// @return The resource descriptor which was created by the builder. - [[nodiscard]] ResourceDescriptor build(std::string name); -}; - -template -DescriptorBuilder &DescriptorBuilder::add_uniform_buffer(const VkBuffer uniform_buffer, const std::uint32_t binding, - const VkShaderStageFlagBits shader_stage) { - assert(uniform_buffer); - - m_layout_bindings.push_back({ - .binding = binding, - .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, - .descriptorCount = 1, - .stageFlags = static_cast(shader_stage), - .pImmutableSamplers = nullptr, - }); - - m_descriptor_buffer_infos.push_back({ - .buffer = uniform_buffer, - .offset = 0, - .range = sizeof(T), - }); - - m_write_sets.push_back(make_info({ - .dstSet = nullptr, - .dstBinding = binding, - .dstArrayElement = 0, - .descriptorCount = 1, - .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, - .pBufferInfo = &m_descriptor_buffer_infos.back(), - })); - - return *this; -} - -} // namespace inexor::vulkan_renderer::wrapper diff --git a/include/inexor/vulkan-renderer/wrapper/descriptors/descriptor_pool.hpp b/include/inexor/vulkan-renderer/wrapper/descriptors/descriptor_pool.hpp new file mode 100644 index 000000000..2770de621 --- /dev/null +++ b/include/inexor/vulkan-renderer/wrapper/descriptors/descriptor_pool.hpp @@ -0,0 +1,53 @@ +#pragma once + +#include + +#include +#include + +namespace inexor::vulkan_renderer::wrapper { +// Forward declaration +class Device; +} // namespace inexor::vulkan_renderer::wrapper + +namespace inexor::vulkan_renderer::wrapper::descriptors { + +// Forward declaration +class DescriptorPoolAllocator; + +/// RAII wrapper for VkDescriptorPool +/// For internal use inside of rendergraph only! +class DescriptorPool { +private: + const Device &m_device; + std::string m_name; + VkDescriptorPool m_descriptor_pool{VK_NULL_HANDLE}; + std::vector m_pool_sizes; + +public: + /// Default constructor is private so only DescriptorPoolAllocator can access it + /// @param device The device wrapper + /// @param pool_sizes The descriptor pool sizes (must not be empty!) + /// @param max_sets The max descriptor set count + /// @param name The internal debug name of this descriptor pool (must not be empty!) 
+ /// @exception std::invalid_argument Internal debug name for descriptor pool must not be empty + /// @exception std::invalid_argument Descriptor pool sizes must not be empty + /// @exception VulkanException vkCreateDescriptorPool call failed + DescriptorPool(const Device &device, std::vector pool_sizes, std::uint32_t max_sets, + std::string name); + + DescriptorPool(const DescriptorPool &) = delete; + DescriptorPool(DescriptorPool &&) noexcept; + + /// Call vkDestroyDescriptorPool + ~DescriptorPool(); + + DescriptorPool &operator=(const DescriptorPool &) = delete; + DescriptorPool &operator=(DescriptorPool &&) = delete; + + [[nodiscard]] auto descriptor_pool() const noexcept { + return m_descriptor_pool; + } +}; + +} // namespace inexor::vulkan_renderer::wrapper::descriptors diff --git a/include/inexor/vulkan-renderer/wrapper/descriptors/descriptor_pool_allocator.hpp b/include/inexor/vulkan-renderer/wrapper/descriptors/descriptor_pool_allocator.hpp new file mode 100644 index 000000000..5115be977 --- /dev/null +++ b/include/inexor/vulkan-renderer/wrapper/descriptors/descriptor_pool_allocator.hpp @@ -0,0 +1,47 @@ +#pragma once + +#include "inexor/vulkan-renderer/wrapper/descriptors/descriptor_pool.hpp" + +#include + +#include + +// Forward declaration +namespace inexor::vulkan_renderer::wrapper { +class Device; +} + +namespace inexor::vulkan_renderer::wrapper::descriptors { + +// Forward declaration +class DescriptorSetAllocator; + +/// Allocator for DescriptorPool +class DescriptorPoolAllocator { + friend DescriptorSetAllocator; + +private: + /// The device wrapper + const Device &m_device; + /// The descriptor pools + std::vector m_pools; + + /// Default constructor + /// @param device The device wrapper + explicit DescriptorPoolAllocator(const Device &device); + + /// Return a descriptor pool from ``m_pools`` and in case all pools are used up, create a new one + /// @note If we run out of descriptor pools, we simply create one new descriptor pool (not multiple ones!) + /// @return A new descriptor pool that has not been used yet + [[nodiscard]] VkDescriptorPool request_new_descriptor_pool(); + +public: + DescriptorPoolAllocator(const DescriptorPoolAllocator &) = delete; + DescriptorPoolAllocator(DescriptorPoolAllocator &&) noexcept; + ~DescriptorPoolAllocator() = default; + + DescriptorPoolAllocator &operator=(const DescriptorPoolAllocator &) = delete; + DescriptorPoolAllocator &operator=(DescriptorPoolAllocator &&) noexcept; +}; + +} // namespace inexor::vulkan_renderer::wrapper::descriptors diff --git a/include/inexor/vulkan-renderer/wrapper/descriptors/descriptor_set_allocator.hpp b/include/inexor/vulkan-renderer/wrapper/descriptors/descriptor_set_allocator.hpp new file mode 100644 index 000000000..ba2273913 --- /dev/null +++ b/include/inexor/vulkan-renderer/wrapper/descriptors/descriptor_set_allocator.hpp @@ -0,0 +1,57 @@ +#pragma once + +#include "inexor/vulkan-renderer/wrapper/descriptors/descriptor_pool_allocator.hpp" + +#include + +#include +#include + +// Forward declaration +namespace inexor::vulkan_renderer::wrapper { +class Device; +} + +namespace inexor::vulkan_renderer::wrapper::descriptors { + +// Forward declaration +class DescriptorBuilder; + +/// This classes manages descriptors by allocating VkDescriptorPools and VkDescriptorSetLayouts. +/// It is also responsible for caching VkDescriptorSetLayouts, meaning we do not create duplicates +/// For internal use inside of rendergraph only! 
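+/// A minimal usage sketch; ``set_allocator`` and ``descriptor_set_layout`` are assumed to exist and are not part of
+/// this header:
+/// @code
+/// // Allocates one descriptor set from the descriptor pool that is currently in use
+/// const VkDescriptorSet descriptor_set = set_allocator.allocate("per_frame_data", descriptor_set_layout);
+/// @endcode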
+class DescriptorSetAllocator { + friend class DescriptorBuilder; + +private: + /// The device wrapper + const Device &m_device; + // The descriptor pool currently in use (handled by a DescriptorPool instance) + VkDescriptorPool m_current_pool{VK_NULL_HANDLE}; + /// The descriptor pool allocator + DescriptorPoolAllocator m_descriptor_pool_allocator; + +public: + /// Default constructor + /// @note Descriptor set allocators are for internal use inside of rendergraph only! + /// @param device The device wrapper + explicit DescriptorSetAllocator(const Device &device); + + /// Allocate a new descriptor set + /// @note We are currently not batching calls to vkAllocateDescriptorSets, which would allow multiple descriptor sets + /// to be allocated in one vkAllocateDescriptorSets call. The problem is that batching could lead to running out of + /// memory in the VkDescriptorPool, so a new descriptor pool would be created. + /// @param name The name of the descriptor set layout + /// @param descriptor_set_layout The descriptor set layout to allocate the descriptor set with + /// @return The descriptor set which was allocated + [[nodiscard]] VkDescriptorSet allocate(const std::string &name, VkDescriptorSetLayout descriptor_set_layout); + + DescriptorSetAllocator(const DescriptorSetAllocator &) = delete; + DescriptorSetAllocator(DescriptorSetAllocator &&) noexcept; + ~DescriptorSetAllocator() = default; + + DescriptorSetAllocator &operator=(const DescriptorSetAllocator &) = delete; + DescriptorSetAllocator &operator=(DescriptorSetAllocator &&) = delete; +}; + +} // namespace inexor::vulkan_renderer::wrapper::descriptors diff --git a/include/inexor/vulkan-renderer/wrapper/descriptors/descriptor_set_layout.hpp b/include/inexor/vulkan-renderer/wrapper/descriptors/descriptor_set_layout.hpp new file mode 100644 index 000000000..70c0705b7 --- /dev/null +++ b/include/inexor/vulkan-renderer/wrapper/descriptors/descriptor_set_layout.hpp @@ -0,0 +1,48 @@ +#pragma once + +#include + +#include + +// Forward declaration +namespace inexor::vulkan_renderer::wrapper { +class Device; +} // namespace inexor::vulkan_renderer::wrapper + +namespace inexor::vulkan_renderer::wrapper::descriptors { + +// Forward declaration +class DescriptorSetLayoutCache; + +/// RAII wrapper for VkDescriptorSetLayout +/// For internal use inside of rendergraph only! +class DescriptorSetLayout { + friend DescriptorSetLayoutCache; + // TODO: Make RenderGraph friend + +private: + const Device &m_device; + std::string m_name; + VkDescriptorSetLayout m_descriptor_set_layout{VK_NULL_HANDLE}; + +public: + // TODO: Move me into private again and make rendergraph a friend!
+ + /// Default constructor + /// @param device The device wrapper + /// @param descriptor_set_layout_ci The descriptor set layout create info + /// @param name The internal debug name of the descriptor set layout + DescriptorSetLayout(const Device &device, VkDescriptorSetLayoutCreateInfo descriptor_set_layout_ci, + std::string name); + + DescriptorSetLayout(const DescriptorSetLayout &) = delete; + DescriptorSetLayout(DescriptorSetLayout &&) noexcept; + + /// Call vkDestroyDescriptorSetLayout + ~DescriptorSetLayout(); + + DescriptorSetLayout &operator=(const DescriptorSetLayout &) = delete; + DescriptorSetLayout &operator=(DescriptorSetLayout &&) = delete; +}; + +} // namespace inexor::vulkan_renderer::wrapper::descriptors diff --git a/include/inexor/vulkan-renderer/wrapper/descriptors/descriptor_set_layout_builder.hpp b/include/inexor/vulkan-renderer/wrapper/descriptors/descriptor_set_layout_builder.hpp new file mode 100644 index 000000000..5e6b7da87 --- /dev/null +++ b/include/inexor/vulkan-renderer/wrapper/descriptors/descriptor_set_layout_builder.hpp @@ -0,0 +1,66 @@ +#pragma once + +#include "inexor/vulkan-renderer/wrapper/descriptors/descriptor_set_layout_cache.hpp" + +#include + +#include +#include +#include + +namespace inexor::vulkan_renderer { +// Forward declaration +class RenderGraph; +} // namespace inexor::vulkan_renderer + +namespace inexor::vulkan_renderer::wrapper { +// Forward declaration +class Device; +} // namespace inexor::vulkan_renderer::wrapper + +namespace inexor::vulkan_renderer::wrapper::descriptors { + +/// A builder for descriptors +class DescriptorSetLayoutBuilder { + friend class RenderGraph; + +private: + const Device &m_device; + /// All instances of DescriptorSetLayoutBuilder have the same DescriptorSetLayoutCache instance! + DescriptorSetLayoutCache m_descriptor_set_layout_cache; + std::vector m_bindings; + std::uint32_t m_binding{0}; + +public: + /// Default constructor + /// @param device The device wrapper + DescriptorSetLayoutBuilder(const Device &device); + + DescriptorSetLayoutBuilder(const DescriptorSetLayoutBuilder &) = delete; + DescriptorSetLayoutBuilder(DescriptorSetLayoutBuilder &&) noexcept; + ~DescriptorSetLayoutBuilder() = default; + + DescriptorSetLayoutBuilder &operator=(const DescriptorSetLayoutBuilder &) = delete; + DescriptorSetLayoutBuilder &operator=(DescriptorSetLayoutBuilder &&) = delete; + + // TODO: Support other descriptor types besides uniform buffers and combined image samplers! 
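+    // A minimal usage sketch; the ``builder`` instance and the shader stages are assumptions for illustration only.
+    // The add_* methods below return a reference to the builder, and the internal ``m_binding`` counter suggests
+    // that bindings are assigned in the order of the add_* calls, so a layout could be built as:
+    //     const VkDescriptorSetLayout layout = builder.add_uniform_buffer(VK_SHADER_STAGE_VERTEX_BIT)
+    //                                                 .add_combined_image_sampler(VK_SHADER_STAGE_FRAGMENT_BIT)
+    //                                                 .build("pbr_pass");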
+ + /// Add a combined image sampler to the descriptor set + /// @param shader_stage The shader stage + /// @param count The number of combined image samplers + /// @return The dereferenced this pointer + DescriptorSetLayoutBuilder &add_combined_image_sampler(VkShaderStageFlags shader_stage, std::uint32_t count = 1); + + /// Add a uniform buffer to the descriptor set + /// @param shader_stage The shader stage + /// @param count The number of uniform buffers + /// @return The dereferenced this pointer + DescriptorSetLayoutBuilder &add_uniform_buffer(VkShaderStageFlags shader_stage, std::uint32_t count = 1); + + /// Build the descriptor set layout + /// @param name The name of the descriptor set layout + /// @return The descriptor set layout that was created + [[nodiscard]] VkDescriptorSetLayout build(std::string name); +}; + +} // namespace inexor::vulkan_renderer::wrapper::descriptors diff --git a/include/inexor/vulkan-renderer/wrapper/descriptors/descriptor_set_layout_cache.hpp b/include/inexor/vulkan-renderer/wrapper/descriptors/descriptor_set_layout_cache.hpp new file mode 100644 index 000000000..e255f23c5 --- /dev/null +++ b/include/inexor/vulkan-renderer/wrapper/descriptors/descriptor_set_layout_cache.hpp @@ -0,0 +1,77 @@ +#pragma once + +#include "inexor/vulkan-renderer/wrapper/descriptors/descriptor_set_layout.hpp" + +#include + +#include +#include +#include +#include + +namespace inexor::vulkan_renderer::wrapper { +// Forward declaration +class Device; +} // namespace inexor::vulkan_renderer::wrapper + +namespace inexor::vulkan_renderer::render_graph { +// Forward declaration +class RenderGraph; +} // namespace inexor::vulkan_renderer::render_graph + +namespace inexor::vulkan_renderer::wrapper::descriptors { + +// Forward declaration +class DescriptorBuilder; + +/// A metadata struct for information on descriptor set layouts +struct DescriptorSetLayoutInfo { + std::vector bindings; + [[nodiscard]] bool operator==(const DescriptorSetLayoutInfo &other) const; + [[nodiscard]] std::size_t hash() const; +}; + +/// A hash object for descriptor set layouts +struct DescriptorSetLayoutHash { + std::size_t operator()(const DescriptorSetLayoutInfo &k) const { + return k.hash(); + } +}; + +/// A class for caching VkDescriptorSetLayouts with the help of std::unordered_map and a hashing function +/// For internal use inside of rendergraph only! 
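+/// A minimal usage sketch; ``layout_cache`` and ``layout_ci`` are assumed to exist and are not part of this header:
+/// @code
+/// // A second call with an identical create info is expected to return the cached layout instead of a duplicate
+/// const VkDescriptorSetLayout layout = layout_cache.create_descriptor_set_layout(layout_ci, "octree_pass");
+/// @endcode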
+class DescriptorSetLayoutCache { + friend DescriptorBuilder; + friend render_graph::RenderGraph; + +private: + /// The device wrapper + const Device &m_device; + + /// The actual descriptor set layout cache + /// Note that std::unordered_map can accept a third template parameter which is the hash function + /// Also note that the lifetime of the VkDescriptorSetLayout objects is bound to the lifetime of this unordered_map + /// The destructor of the DescriptorSetLayout wrapper instances will be called when DescriptorSetLayoutCache's + /// destructor is called + std::unordered_map m_cache; + +public: + /// Default constructor + /// @param device The device wrapper + DescriptorSetLayoutCache(const Device &device); + DescriptorSetLayoutCache(const DescriptorSetLayoutCache &) = delete; + DescriptorSetLayoutCache(DescriptorSetLayoutCache &&) noexcept; + ~DescriptorSetLayoutCache() = default; + + DescriptorSetLayoutCache &operator=(const DescriptorSetLayoutCache &) = delete; + DescriptorSetLayoutCache &operator=(DescriptorSetLayoutCache &&) = delete; + + /// Create a descriptor set layout with the help of the cache + /// @param descriptor_set_layout_ci The descriptor set layout create info + /// @param name The name of the descriptor set layout + /// @return The descriptor set layout that was created + [[nodiscard]] VkDescriptorSetLayout + create_descriptor_set_layout(VkDescriptorSetLayoutCreateInfo descriptor_set_layout_ci, std::string name); +}; + +} // namespace inexor::vulkan_renderer::wrapper::descriptors diff --git a/include/inexor/vulkan-renderer/wrapper/descriptors/write_descriptor_set_builder.hpp b/include/inexor/vulkan-renderer/wrapper/descriptors/write_descriptor_set_builder.hpp new file mode 100644 index 000000000..9eca39f06 --- /dev/null +++ b/include/inexor/vulkan-renderer/wrapper/descriptors/write_descriptor_set_builder.hpp @@ -0,0 +1,67 @@ +#pragma once + +#include + +#include +#include + +namespace inexor::vulkan_renderer::wrapper { +// Forward declaration +class Device; +} // namespace inexor::vulkan_renderer::wrapper + +namespace inexor::vulkan_renderer::render_graph { +// Forward declaration +class Buffer; +enum class BufferType; +class Texture; +} // namespace inexor::vulkan_renderer::render_graph + +namespace inexor::vulkan_renderer::wrapper::descriptors { + +// Using declarations +using render_graph::Buffer; +using render_graph::BufferType; +using render_graph::Texture; + +/// A wrapper class for batching calls to vkUpdateDescriptorSets +class WriteDescriptorSetBuilder { +private: + const Device &m_device; + std::vector m_write_descriptor_sets; + std::uint32_t m_binding{0}; + + /// Reset the data of the builder so it can be re-used + void reset(); + +public: + /// Default constructor + /// @param device The device wrapper + explicit WriteDescriptorSetBuilder(const Device &device); + + WriteDescriptorSetBuilder(const WriteDescriptorSetBuilder &) = default; + WriteDescriptorSetBuilder(WriteDescriptorSetBuilder &&) noexcept; + ~WriteDescriptorSetBuilder() = default; + + WriteDescriptorSetBuilder &operator=(const WriteDescriptorSetBuilder &) = default; + WriteDescriptorSetBuilder &operator=(WriteDescriptorSetBuilder &&) = delete; + + /// Add a write descriptor set for a uniform buffer + /// @param descriptor_set The destination descriptor set + /// @param uniform_buffer The rendergraph uniform buffer + /// @return A reference to the dereferenced ``this`` pointer + WriteDescriptorSetBuilder &add_uniform_buffer_update(VkDescriptorSet descriptor_set, + std::weak_ptr 
uniform_buffer); + + /// Add a write descriptor set for a combined image sampler + /// @param descriptor_set The destination descriptor set + /// @param texture_image The rendergraph texture + /// @return A reference to the dereferenced ``this`` pointer + WriteDescriptorSetBuilder &add_combined_image_sampler_update(VkDescriptorSet descriptor_set, + std::weak_ptr texture_image); + /// Return the write descriptor sets and reset the builder + /// @return A std::vector of VkWriteDescriptorSet + [[nodiscard]] std::vector build(); +}; + +} // namespace inexor::vulkan_renderer::wrapper::descriptors diff --git a/include/inexor/vulkan-renderer/wrapper/device.hpp b/include/inexor/vulkan-renderer/wrapper/device.hpp index b58cf75d3..c27fad61c 100644 --- a/include/inexor/vulkan-renderer/wrapper/device.hpp +++ b/include/inexor/vulkan-renderer/wrapper/device.hpp @@ -1,6 +1,7 @@ #pragma once -#include "inexor/vulkan-renderer/wrapper/command_pool.hpp" +#include "inexor/vulkan-renderer/vk_tools/representation.hpp" +#include "inexor/vulkan-renderer/wrapper/commands/command_pool.hpp" #include #include @@ -9,6 +10,48 @@ #include namespace inexor::vulkan_renderer::wrapper { +// Forward declarations +class Swapchain; +} // namespace inexor::vulkan_renderer::wrapper + +namespace inexor::vulkan_renderer::wrapper::commands { +// Forward declarations +class CommandBuffer; +class CommandPool; +} // namespace inexor::vulkan_renderer::wrapper::commands + +namespace inexor::vulkan_renderer::wrapper { + +/// The debug label colors for vkCmdBeginDebugUtilsLabelEXT +enum class DebugLabelColor { + RED, + BLUE, + GREEN, + YELLOW, + PURPLE, + ORANGE, + MAGENTA, + CYAN, + BROWN, + PINK, + LIME, + TURQUOISE, + BEIGE, + MAROON, + OLIVE, + NAVY, + TEAL, +}; + +/// Convert a DebugLabelColor to an array of RGBA float values to pass to vkCmdBeginDebugUtilsLabelEXT +/// @param color The DebugLabelColor +/// @return An array of RGBA float values to be passed into vkCmdBeginDebugUtilsLabelEXT +[[nodiscard]] std::array get_debug_label_color(const DebugLabelColor color); + +// Using declarations +using commands::CommandBuffer; +using commands::CommandPool; +using wrapper::Swapchain; // Forward declaration class Instance; @@ -26,116 +69,129 @@ struct DeviceInfo { }; /// A RAII wrapper class for VkDevice, VkPhysicalDevice and VkQueues -/// @note There is no method ``is_layer_supported`` in this wrapper class because device layers are deprecated. 
+/// @note There is no method ``is_layer_supported`` in this wrapper class because device layers are deprecated class Device { + friend class CommandBuffer; + friend class CommandPool; + friend class Swapchain; + +private: VkDevice m_device{VK_NULL_HANDLE}; VkPhysicalDevice m_physical_device{VK_NULL_HANDLE}; VmaAllocator m_allocator{VK_NULL_HANDLE}; std::string m_gpu_name; + VkPhysicalDeviceFeatures m_enabled_features{}; + VkPhysicalDeviceProperties m_properties{}; + VkSampleCountFlagBits m_max_available_sample_count{VK_SAMPLE_COUNT_1_BIT}; + VkQueue m_compute_queue{VK_NULL_HANDLE}; VkQueue m_graphics_queue{VK_NULL_HANDLE}; VkQueue m_present_queue{VK_NULL_HANDLE}; VkQueue m_transfer_queue{VK_NULL_HANDLE}; + VkQueue m_sprase_binding_queue{VK_NULL_HANDLE}; std::uint32_t m_present_queue_family_index{0}; std::uint32_t m_graphics_queue_family_index{0}; std::uint32_t m_transfer_queue_family_index{0}; + std::uint32_t m_compute_queue_family_index{0}; + std::uint32_t m_sparse_binding_queue_family{0}; /// According to NVidia, we should aim for one command pool per thread /// https://developer.nvidia.com/blog/vulkan-dos-donts/ mutable std::vector> m_cmd_pools; mutable std::mutex m_mutex; - // The debug marker extension is not part of the core, so function pointers need to be loaded manually - PFN_vkDebugMarkerSetObjectTagEXT m_vk_debug_marker_set_object_tag{nullptr}; - PFN_vkDebugMarkerSetObjectNameEXT m_vk_debug_marker_set_object_name{nullptr}; - PFN_vkCmdDebugMarkerBeginEXT m_vk_cmd_debug_marker_begin{nullptr}; - PFN_vkCmdDebugMarkerEndEXT m_vk_cmd_debug_marker_end{nullptr}; - PFN_vkCmdDebugMarkerInsertEXT m_vk_cmd_debug_marker_insert{nullptr}; - PFN_vkSetDebugUtilsObjectNameEXT m_vk_set_debug_utils_object_name{nullptr}; + /// Set the debug name of a Vulkan object using debug utils extension (VK_EXT_debug_utils) + /// @note We thought about overloading this method several times so the obj_type is set automatically depending on + /// the type of the obj_handle you pass in, but it would make the code larger while being a little harder to + /// understand what's really going on. + /// @param obj_type The Vulkan object type + /// @param obj_handle The Vulkan object handle (must not be nullptr!) 
+ /// @param name the internal debug name of the Vulkan object + void set_debug_utils_object_name(VkObjectType obj_type, std::uint64_t obj_handle, const std::string &name) const; - /// Get the thread_local command pool + /// Get the thread_local compute pool for transfer commands /// @note This method will create a command pool for the thread if it doesn't already exist - CommandPool &thread_graphics_pool() const; + const CommandPool &thread_local_command_pool(VkQueueFlagBits queue_type) const; public: - /// Pick the best physical device automatically - /// @param physical_device_infos The data of the physical devices - /// @param required_features The required device features - /// @param required_extensions The required device extensions - /// @exception std::runtime_error There are no physical devices are available at all - /// @exception std::runtime_error No suitable physical device could be determined - /// @return The chosen physical device which is most suitable - static VkPhysicalDevice pick_best_physical_device(std::vector &&physical_device_infos, - const VkPhysicalDeviceFeatures &required_features, - std::span required_extensions); - - /// Pick the best physical device automatically - /// @param inst The Vulkan instance - /// @param surface The window surface - /// @param required_features The required device features - /// @param required_extensions The required device extensions - /// @return The chosen physical device which is most suitable - static VkPhysicalDevice pick_best_physical_device(const Instance &inst, VkSurfaceKHR surface, - const VkPhysicalDeviceFeatures &required_features, - std::span required_extensions); - /// Default constructor - /// @param inst The Vulkan instance - /// @param surface The window surface - /// @param prefer_distinct_transfer_queue Specifies if a distinct transfer queue will be preferred - /// @param physical_device The physical device - /// @param required_extensions The required device extensions - /// @param required_features The required device features which the physical device must all support - /// @param optional_features The optional device features which do not necessarily have to be present - /// @exception std::runtime_error The physical device is not suitable - /// @exception std::runtime_error No graphics queue could be found - /// @exception std::runtime_error No presentation queue could be found - /// @exception VulkanException vkCreateDevice call failed - /// @exception VulkanException vmaCreateAllocator call failed - /// @note The creation of the physical device will not fail if one of the optional device features is not available - Device(const Instance &inst, VkSurfaceKHR surface, bool prefer_distinct_transfer_queue, - VkPhysicalDevice physical_device, std::span required_extensions, - const VkPhysicalDeviceFeatures &required_features, const VkPhysicalDeviceFeatures &optional_features = {}); + /// @param inst The Vulkan instance wrapper + /// @param surface The surface + /// @param physical_device The physical device to choose + /// @param required_extensions The required extensions + /// @note If any of the required extensions is not available, an exception is thrown! + /// @param required_features The required features + /// @note If any of the required features is not available, an exception is thrown! 
+ /// @param optional_extensions The optional extensions (empty by default) + /// @param optional_features The optional features (``std::nullopt`` by default) + /// @param on_optional_extension_unavailable A callback function to call in case an optional extension is not + /// available. The callback function can return true, in which case constructor continue, or return false, in which + /// case an exception is thrown. + /// @param on_optional_feature_unavailable A callback function to call in case an optional feature is not available. + /// The callback function can return true, in which case constructor continue, or return false, in which case an + /// exception is thrown. + Device(const Instance &inst, + VkSurfaceKHR surface, + VkPhysicalDevice physical_device, + std::span required_extensions, + const VkPhysicalDeviceFeatures &required_features, + std::span optional_extensions = {}, + std::optional optional_features = std::nullopt, + std::optional> on_optional_extension_unavailable = + std::nullopt, + std::optional> on_optional_feature_unavailable = + std::nullopt); Device(const Device &) = delete; + // TODO: Implement me! Device(Device &&) noexcept; - ~Device(); Device &operator=(const Device &) = delete; - Device &operator=(Device &&) = delete; + // TODO: Implement me! + Device &operator=(Device &&) noexcept; + + [[nodiscard]] VmaAllocator allocator() const { + return m_allocator; + } [[nodiscard]] VkDevice device() const { return m_device; } - /// Call vkGetPhysicalDeviceSurfaceCapabilitiesKHR - /// @param surface The window surface - /// @exception VulkanException vkGetPhysicalDeviceSurfaceCapabilitiesKHR call failed - /// @return The surface capabilities - [[nodiscard]] VkSurfaceCapabilitiesKHR get_surface_capabilities(VkSurfaceKHR surface) const; - - /// Check if a format supports a feature for images created with ``VK_IMAGE_TILING_OPTIMAL`` - /// @param format The format - /// @param feature The requested format feature - /// @return ``true`` if the format feature is supported - [[nodiscard]] bool format_supports_feature(VkFormat format, VkFormatFeatureFlagBits feature) const; - - /// Call vkGetPhysicalDeviceSurfaceSupportKHR - /// @param surface The window surface - /// @param queue_family_index The queue family index - /// @exception VulkanException vkGetPhysicalDeviceSurfaceSupportKHR call failed - /// @return ``true`` if presentation is supported - [[nodiscard]] bool is_presentation_supported(VkSurfaceKHR surface, std::uint32_t queue_family_index) const; - /// A wrapper method for beginning, ending and submitting command buffers. This method calls the request method for - /// the given command pool, begins the command buffer, executes the lambda, ends recording the command buffer, - /// submits it and waits for it. + /// the given command pool, begins the command buffer, invokes the recording function, ends recording the command + /// buffer, submits it on the specified queue and waits for it. Using this execute method is the preferred way of + /// using command buffers in the engine. There is no need to request a command buffer manually, which is why this + /// method in CommandPool is not public. 
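For the new ``Device`` constructor above, here is a hedged usage sketch. The ``std::string`` parameter type of the two unavailability callbacks is an assumption (the template arguments are not visible in this diff), and the feature/extension choices are placeholders; ``pick_best_physical_device`` and the wrapper headers come from the engine.

.. code-block:: cpp

   // Hypothetical call site: the lambdas decide whether a missing optional extension or
   // feature is acceptable (return true = continue) or fatal (return false = throw).
   #include <optional>
   #include <string>
   #include <vector>

   #include <vulkan/vulkan.h>

   #include "inexor/vulkan-renderer/wrapper/device.hpp"
   #include "inexor/vulkan-renderer/wrapper/instance.hpp"

   using inexor::vulkan_renderer::wrapper::Device;
   using inexor::vulkan_renderer::wrapper::Instance;

   Device make_device(const Instance &instance, VkSurfaceKHR surface) {
       const std::vector<const char *> required_extensions{VK_KHR_SWAPCHAIN_EXTENSION_NAME};

       VkPhysicalDeviceFeatures required_features{};
       required_features.samplerAnisotropy = VK_TRUE;

       return Device(instance, surface,
                     Device::pick_best_physical_device(instance, surface, required_features, required_extensions),
                     required_extensions, required_features,
                     /*optional_extensions=*/{},
                     /*optional_features=*/std::nullopt,
                     // A missing optional extension is tolerated and construction continues
                     [](const std::string & /*extension_name*/) { return true; },
                     // A missing optional feature is treated as fatal in this example
                     [](const std::string & /*feature_name*/) { return false; });
   }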
+ /// + /// @code{.cpp} + /// m_device.execute("Upload Data", VK_QUEUE_TRANSFER_BIT, DebugLabelColor::RED, + /// [](const CommandBuffer &cmd_buf) { /*Do you vkCmd calls in here*/ } + /// /*Both could be a std::vector, an std::array, or std::span of VkSemaphore + /// It's also possible to submit one VkSemaphore as a std::span using {&wait_semaphore, 1}*/ + /// wait_semaphores, signal_semaphores) + /// @endcode + /// /// @param name The internal debug name of the command buffer (must not be empty) - /// @param cmd_lambda The command lambda to execute - void execute(const std::string &name, const std::function &cmd_lambda) const; + /// @param queue_type The queue type to submit the command buffer to + /// @param dbg_label_color The color of the debug label when calling ``begin_debug_label_region`` + /// @note Debug label colors are only visible in graphics debuggers such as RenderDoc + /// @param cmd_buf_recording_func The command buffer recording function to invoke after starting recording + /// @note It's technically allowed that the command buffer recording function is empty or a function which does not + /// do any vkCmd command calls, but this makes no real sense because an empty command buffer will be submitted. It + /// will not be checked if any commands have been recorded into the command buffer, although this could be + /// implemented using CommandBuffer wrapper. However, this would be a case for validation layers though. + /// @param wait_semaphores The semaphores to wait on before starting command buffer execution (empty by default) + /// @param signal_semaphores The semaphores to signal once command buffer execution will finish (empty by default) + void execute(const std::string &name, + VkQueueFlagBits queue_type, + DebugLabelColor dbg_label_color, + const std::function &cmd_buf_recording_func, + std::span wait_semaphores = {}, + std::span signal_semaphores = {}) const; /// Find a queue family index that suits a specific criteria /// @param criteria_lambda The lambda to sort out unsuitable queue families @@ -143,190 +199,64 @@ class Device { std::optional find_queue_family_index_if( const std::function &criteria_lambda); - [[nodiscard]] VkPhysicalDevice physical_device() const { - return m_physical_device; - } - - [[nodiscard]] VmaAllocator allocator() const { - return m_allocator; - } - - /// @note Enabled features = required features + optional features which are supported - [[nodiscard]] const VkPhysicalDeviceFeatures &enabled_device_features() const { - return m_enabled_features; - } - + /// Get the name of the physical device that was created [[nodiscard]] const std::string &gpu_name() const { return m_gpu_name; } - [[nodiscard]] VkQueue graphics_queue() const { - return m_graphics_queue; - } - - [[nodiscard]] VkQueue present_queue() const { - return m_present_queue; - } + /// Call vkGetPhysicalDeviceSurfaceSupportKHR + /// @param surface The window surface + /// @param queue_family_index The queue family index + /// @exception VulkanException vkGetPhysicalDeviceSurfaceSupportKHR call failed + /// @return ``true`` if presentation is supported + [[nodiscard]] bool is_presentation_supported(VkSurfaceKHR surface, std::uint32_t queue_family_index) const; - [[nodiscard]] VkQueue transfer_queue() const { - return m_transfer_queue; + /// Return the maximum sample count that is available + [[nodiscard]] VkSampleCountFlagBits max_available_sample_count() const { + return m_max_available_sample_count; } - [[nodiscard]] std::uint32_t graphics_queue_family_index() const { - return 
m_graphics_queue_family_index; - } + /// Pick the best physical device automatically + /// @param physical_device_infos The data of the physical devices + /// @param required_features The required device features + /// @param required_extensions The required device extensions + /// @exception std::runtime_error There are no physical devices are available at all + /// @exception std::runtime_error No suitable physical device could be determined + /// @return The chosen physical device which is most suitable + static VkPhysicalDevice pick_best_physical_device(std::vector &&physical_device_infos, + const VkPhysicalDeviceFeatures &required_features, + std::span required_extensions); - [[nodiscard]] std::uint32_t present_queue_family_index() const { - return m_present_queue_family_index; - } + /// Pick the best physical device automatically + /// @param inst The Vulkan instance + /// @param surface The window surface + /// @param required_features The required device features + /// @param required_extensions The required device extensions + /// @return The chosen physical device which is most suitable + static VkPhysicalDevice pick_best_physical_device(const Instance &inst, + VkSurfaceKHR surface, + const VkPhysicalDeviceFeatures &required_features, + std::span required_extensions); - [[nodiscard]] std::uint32_t transfer_queue_family_index() const { - return m_transfer_queue_family_index; + /// Automatically detect the type of a Vulkan object and set the internal debug name to it + /// @tparam VulkanObjectType The Vulkan object type. This template parameter will be automatically translated into + /// the matching ``VkObjectType`` using ``vk_tools::get_vulkan_object_type(vk_object)``. This is the most advanced + /// abstraction that we found and it's really easy to use set_debug_name now because it's not possible to make a + /// mistake because you don't have to specify the VkObjectType manually when naming a Vulkan object. + /// @param vk_object The Vulkan object to assign a name to + /// @param name The internal debug name of the Vulkan object (must not be empty!) + template + void set_debug_name(const VulkanObjectType vk_object, const std::string &name) const { + // The get_vulkan_object_type template allows us to convert the template parameter into a VK_OBJECT_TYPE + // There is no other trivial way in C++ to do this as far as we know + return set_debug_utils_object_name(vk_tools::get_vulkan_object_type(vk_object), + reinterpret_cast(vk_object), name); } - /// Assign an internal Vulkan debug marker name to a Vulkan object. - /// This internal name can be seen in external debuggers like RenderDoc. - /// @note This method is only available in debug mode with ``VK_EXT_debug_marker`` device extension enabled. - /// @param object The Vulkan object - /// @param object_type The Vulkan debug report object type - /// @param name The internal name of the Vulkan object - void set_debug_marker_name(void *object, VkDebugReportObjectTypeEXT object_type, const std::string &name) const; - - /// Assigns a block of memory to a Vulkan resource. - /// This memory block can be seen in external debuggers like RenderDoc. - /// @note This method is only available in debug mode with ``VK_EXT_debug_marker`` device extension enabled. 
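``set_debug_name`` above relies on ``vk_tools::get_vulkan_object_type`` to map a handle type to its ``VkObjectType``. A reduced, hypothetical sketch of how such a mapping can be written follows; the engine's actual implementation covers all object types and may be structured differently.

.. code-block:: cpp

   // Reduced to three handle types for illustration. Note: this overload-based approach
   // assumes a 64-bit build, where non-dispatchable handles are distinct pointer types;
   // on 32-bit targets they all collapse to std::uint64_t.
   #include <vulkan/vulkan.h>

   [[nodiscard]] constexpr VkObjectType get_vulkan_object_type(VkBuffer) {
       return VK_OBJECT_TYPE_BUFFER;
   }

   [[nodiscard]] constexpr VkObjectType get_vulkan_object_type(VkImage) {
       return VK_OBJECT_TYPE_IMAGE;
   }

   [[nodiscard]] constexpr VkObjectType get_vulkan_object_type(VkCommandBuffer) {
       return VK_OBJECT_TYPE_COMMAND_BUFFER;
   }

With such a mapping in place, ``set_debug_name(vk_object, name)`` can forward the deduced object type together with the handle cast to ``std::uint64_t``, as shown in the template above.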
- /// @param object The Vulkan object - /// @param object_type The Vulkan debug report object type - /// @param name The name of the memory block which will be connected to this object - /// @param memory_size The size of the memory block in bytes - /// @param memory_block The memory block to read from - void set_memory_block_attachment(void *object, VkDebugReportObjectTypeEXT object_type, std::uint64_t name, - std::size_t memory_size, const void *memory_block) const; - - /// Annotate a rendering region in Vulkan debug markers. - /// The rendering region will be visible in external debuggers like RenderDoc. - /// @param command_buffer The command buffer - /// @param name The name of the rendering region - /// @param color The rgba color of the rendering region - void bind_debug_region(VkCommandBuffer command_buffer, const std::string &name, std::array color) const; - - /// Insert a debug markers into the current renderpass using vkCmdDebugMarkerInsertEXT. - /// This debug markers can be seen in external debuggers like RenderDoc. - /// @param command_buffer The command buffer which is associated to the debug marker - /// @param name The name of the debug marker - /// @param color An array of red, green, blue and alpha values for the debug region's color - void insert_debug_marker(VkCommandBuffer command_buffer, const std::string &name, std::array color) const; - - /// End the debug region of the current renderpass using vkCmdDebugMarkerEndEXT. - /// @param command_buffer The command buffer which is associated to the debug marker - void end_debug_region(VkCommandBuffer command_buffer) const; - - /// Call vkCreateCommandPool - /// @param command_pool_ci The command pool create info structure - /// @param command_pool The command pool to create - /// @param name The internal debug marker name which will be assigned to this command pool - void create_command_pool(const VkCommandPoolCreateInfo &command_pool_ci, VkCommandPool *command_pool, - const std::string &name) const; - - /// Call vkCreateDescriptorPool - /// @param descriptor_pool_ci The descriptor pool create info structure - /// @param descriptor_pool The descriptor pool to create - /// @param name The internal debug marker name which will be assigned to this command pool - void create_descriptor_pool(const VkDescriptorPoolCreateInfo &descriptor_pool_ci, VkDescriptorPool *descriptor_pool, - const std::string &name) const; - - /// Call vkCreateDescriptorSetLayout - /// @param descriptor_set_layout_ci The descriptor set layout create info structure - /// @param descriptor_set_layout The descriptor set layout to create - /// @param name The internal debug marker name which will be assigned to this descriptor set layout - void create_descriptor_set_layout(const VkDescriptorSetLayoutCreateInfo &descriptor_set_layout_ci, - VkDescriptorSetLayout *descriptor_set_layout, const std::string &name) const; - - /// Call vkCreateFence - /// @param fence_ci The fence create info structure - /// @param fence The fence to create - /// @param name The internal debug marker name which will be assigned to this fence - void create_fence(const VkFenceCreateInfo &fence_ci, VkFence *fence, const std::string &name) const; - - /// Call vkCreateFramebuffer - /// @param framebuffer_ci The framebuffer create info structure - /// @param framebuffer The Vulkan framebuffer to create - /// @param name The internal debug marker name which will be assigned to this framebuffer - void create_framebuffer(const VkFramebufferCreateInfo &framebuffer_ci, VkFramebuffer 
*framebuffer, - const std::string &name) const; - - /// Call vkCreateGraphicsPipelines - /// @param pipeline_ci The graphics pipeline create info structure - /// @param pipeline The graphics pipeline to create - /// @param name The internal debug marker name which will be assigned to this pipeline - // TODO: Offer parameter for Vulkan pipeline caches! - // TODO: Use std::span to offer a more general method (creating multiple pipelines with one call) - // TODO: We might want to use std::span> - void create_graphics_pipeline(const VkGraphicsPipelineCreateInfo &pipeline_ci, VkPipeline *pipeline, - const std::string &name) const; - - /// Call vkCreateImageView - /// @param image_view_ci The image view create info structure - /// @param image_view The image view to create - /// @param name The internal debug marker name which will be assigned to this image view - void create_image_view(const VkImageViewCreateInfo &image_view_ci, VkImageView *image_view, - const std::string &name) const; - - /// Call vkCreatePipelineLayout - /// @param pipeline_layout_ci The pipeline layout create info structure - /// @param pipeline_layout The pipeline layout to create - /// @param name The internal debug marker name which will be assigned to this pipeline layout - void create_pipeline_layout(const VkPipelineLayoutCreateInfo &pipeline_layout_ci, VkPipelineLayout *pipeline_layout, - const std::string &name) const; - - /// Call vkCreateRenderPass - /// @param render_pass_ci The render pass create info structure - /// @param render_pass The render pass to create - /// @param name The internal debug marker name which will be assigned to this render pass - void create_render_pass(const VkRenderPassCreateInfo &render_pass_ci, VkRenderPass *render_pass, - const std::string &name) const; - - /// Call vkCreateSampler - /// @param sampler_ci The sampler create info structure - /// @param sampler The sampler to create - /// @param name The internal debug marker name which will be assigned to this sampler - void create_sampler(const VkSamplerCreateInfo &sampler_ci, VkSampler *sampler, const std::string &name) const; - - /// Call vkCreateSemaphore - /// @param semaphore_ci The semaphore create info structure - /// @param semaphore The semaphore to create - /// @param name The internal debug marker name which will be assigned to this semaphore - void create_semaphore(const VkSemaphoreCreateInfo &semaphore_ci, VkSemaphore *semaphore, - const std::string &name) const; - - /// Call vkCreateShaderModule - /// @param shader_module_ci The shader module create info structure - /// @param shader_module The shader module to create - /// @param name The internal debug marker name which will be assigned to this shader module - void create_shader_module(const VkShaderModuleCreateInfo &shader_module_ci, VkShaderModule *shader_module, - const std::string &name) const; - - /// Call vkCreateSwapchainKHR - /// @param swapchain_ci The swapchain_ci create info structure - /// @param swapchain The swapchain to create - /// @param name The internal debug marker name which will be assigned to this swapchain - void create_swapchain(const VkSwapchainCreateInfoKHR &swapchain_ci, VkSwapchainKHR *swapchain, - const std::string &name) const; - - /// Request a command buffer from the thread_local command pool - /// @param name The name which will be assigned to the command buffer - /// @return A command buffer from the thread_local command pool - [[nodiscard]] const CommandBuffer &request_command_buffer(const std::string &name); - - /// Check if a 
surface supports a certain image usage - /// @param surface The window surface - /// @param usage The requested image usage - /// @return ``true`` if the format feature is supported - [[nodiscard]] bool surface_supports_usage(VkSurfaceKHR surface, VkImageUsageFlagBits usage) const; - - /// Call vkDeviceWaitIdle + /// Call vkDeviceWaitIdle or vkQueueWaitIdle if a VkQueue is specified as parameter + /// @param A queue to wait on (``VK_NULL_HANDLE`` by default) /// @exception VulkanException vkDeviceWaitIdle call failed - void wait_idle() const; + void wait_idle(VkQueue queue = VK_NULL_HANDLE) const; }; } // namespace inexor::vulkan_renderer::wrapper diff --git a/include/inexor/vulkan-renderer/wrapper/fence.hpp b/include/inexor/vulkan-renderer/wrapper/fence.hpp deleted file mode 100644 index 6791a16ed..000000000 --- a/include/inexor/vulkan-renderer/wrapper/fence.hpp +++ /dev/null @@ -1,52 +0,0 @@ -#pragma once - -#include - -#include -#include -#include - -namespace inexor::vulkan_renderer::wrapper { - -// Forward declaration -class Device; - -/// @brief A RAII wrapper for VkFences. -class Fence { - const Device &m_device; - std::string m_name; - VkFence m_fence{VK_NULL_HANDLE}; - -public: - /// @brief Default constructor. - /// @param device The const reference to a device RAII wrapper instance. - /// @param name The internal debug marker name of the VkFence. - /// @param in_signaled_state True if the VkFence will be constructed in signaled state, false otherwise. - /// @warning Make sure to specify in_signaled_state correctly as needed, otherwise synchronization problems occur. - Fence(const Device &device, const std::string &name, bool in_signaled_state); - - Fence(const Fence &) = delete; - Fence(Fence &&) noexcept; - - ~Fence(); - - Fence &operator=(const Fence &) = delete; - Fence &operator=(Fence &&) = delete; - - [[nodiscard]] VkFence get() const { - return m_fence; - } - - /// @brief Block fence by calling vkWaitForFences and wait until fence condition is fulfilled. - /// @param timeout_limit The time to wait in milliseconds. If no time is specified, the numeric maximum value - /// is used. - void block(std::uint64_t timeout_limit = std::numeric_limits::max()) const; - - /// @brief Call vkResetFences. - void reset() const; - - /// Call vkGetFenceStatus - [[nodiscard]] VkResult status() const; -}; - -} // namespace inexor::vulkan_renderer::wrapper diff --git a/include/inexor/vulkan-renderer/wrapper/framebuffer.hpp b/include/inexor/vulkan-renderer/wrapper/framebuffer.hpp deleted file mode 100644 index f4c8dd5da..000000000 --- a/include/inexor/vulkan-renderer/wrapper/framebuffer.hpp +++ /dev/null @@ -1,43 +0,0 @@ -#pragma once - -#include - -#include -#include - -namespace inexor::vulkan_renderer::wrapper { - -// Forward declarations -class Device; -class Swapchain; - -/// @brief RAII wrapper class for VkFramebuffer. -class Framebuffer { - const Device &m_device; - VkFramebuffer m_framebuffer{VK_NULL_HANDLE}; - std::string m_name; - -public: - /// @brief Default constructor. - /// @param device The const reference to a device RAII wrapper instance. - /// @param render_pass The renderpass which is associated with the framebuffer. - /// @param attachments The attachments to use. - /// @param swapchain The associated swapchain. - /// @param name The internal debug marker name of the VkFramebuffer. 
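The ``Device`` wrapper above keeps one command pool per thread (following the NVidia guidance cited in its members) and hands them out through ``thread_local_command_pool``. The following is a hypothetical illustration of that pattern, not the engine's actual implementation.

.. code-block:: cpp

   // One command pool per thread: a thread_local pointer caches the pool for the calling
   // thread, while a mutex-guarded vector keeps ownership so the pools outlive the threads.
   #include <memory>
   #include <mutex>
   #include <vector>

   struct CommandPool {}; // stand-in for the engine's command pool wrapper

   class PerThreadCommandPools {
       mutable std::vector<std::unique_ptr<CommandPool>> m_cmd_pools;
       mutable std::mutex m_mutex;

   public:
       const CommandPool &thread_local_command_pool() const {
           // Created lazily on first use by each thread. Because the thread_local cache is
           // per function (not per instance), this sketch assumes a single device instance.
           thread_local CommandPool *pool = nullptr;
           if (pool == nullptr) {
               auto new_pool = std::make_unique<CommandPool>();
               pool = new_pool.get();
               std::scoped_lock lock(m_mutex);
               m_cmd_pools.push_back(std::move(new_pool));
           }
           return *pool;
       }
   };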
- Framebuffer(const Device &device, VkRenderPass render_pass, const std::vector &attachments, - const Swapchain &swapchain, std::string name); - - Framebuffer(const Framebuffer &) = delete; - Framebuffer(Framebuffer &&) noexcept; - - ~Framebuffer(); - - Framebuffer &operator=(const Framebuffer &) = delete; - Framebuffer &operator=(Framebuffer &&) = delete; - - [[nodiscard]] VkFramebuffer get() const { - return m_framebuffer; - } -}; - -} // namespace inexor::vulkan_renderer::wrapper diff --git a/include/inexor/vulkan-renderer/wrapper/gpu_memory_buffer.hpp b/include/inexor/vulkan-renderer/wrapper/gpu_memory_buffer.hpp deleted file mode 100644 index 0dc4095b5..000000000 --- a/include/inexor/vulkan-renderer/wrapper/gpu_memory_buffer.hpp +++ /dev/null @@ -1,70 +0,0 @@ -#pragma once - -#include - -#include - -namespace inexor::vulkan_renderer::wrapper { - -// Forward declaration -class Device; - -/// @brief RAII wrapper class for GPU Memory buffers. -/// Uniform buffers or vertex/index buffers use this as a base class. -/// @note The core of Inexor's memory management is Vulkan Memory Allocator library (VMA). -class GPUMemoryBuffer { -protected: - std::string m_name; - const Device &m_device; - VkBuffer m_buffer{VK_NULL_HANDLE}; - VkDeviceSize m_buffer_size{0}; - VmaAllocation m_allocation{VK_NULL_HANDLE}; - VmaAllocationInfo m_allocation_info{}; - -public: - /// @brief Construct the GPU memory buffer without specifying the actual data to fill in, only the memory size. - /// @param device The const reference to a device RAII wrapper instance. - /// @param name The internal debug marker name of the GPU memory buffer. - /// @param buffer_size The size of the memory buffer in bytes. - /// @param buffer_usage The buffer usage flags. - /// @param memory_usage The VMA memory usage flags which specify the required memory allocation. - GPUMemoryBuffer(const Device &device, const std::string &name, const VkDeviceSize &size, - const VkBufferUsageFlags &buffer_usage, const VmaMemoryUsage &memory_usage); - - /// @brief Construct the GPU memory buffer and specifies the actual data to fill it in. - /// @param device The const reference to a device RAII wrapper instance. - /// @param name The internal debug marker name of the GPU memory buffer. - /// @param buffer_size The size of the memory buffer in bytes. - /// @param data A pointer to the data to fill the GPU memory buffer with. - /// @param data_size The size of the memory to copy from data pointer. - /// @param buffer_usage The buffer usage flags. - /// @param memory_usage The VMA memory usage flags which specify the required memory allocation. 
- GPUMemoryBuffer(const Device &device, const std::string &name, const VkDeviceSize &buffer_size, const void *data, - std::size_t data_size, const VkBufferUsageFlags &buffer_usage, const VmaMemoryUsage &memory_usage); - - GPUMemoryBuffer(const GPUMemoryBuffer &) = delete; - GPUMemoryBuffer(GPUMemoryBuffer &&) noexcept; - - virtual ~GPUMemoryBuffer(); - - GPUMemoryBuffer &operator=(const GPUMemoryBuffer &) = delete; - GPUMemoryBuffer &operator=(GPUMemoryBuffer &&) = delete; - - [[nodiscard]] const std::string &name() const { - return m_name; - } - - [[nodiscard]] VkBuffer buffer() const { - return m_buffer; - } - - [[nodiscard]] VmaAllocation allocation() const { - return m_allocation; - } - - [[nodiscard]] VmaAllocationInfo allocation_info() const { - return m_allocation_info; - } -}; - -} // namespace inexor::vulkan_renderer::wrapper diff --git a/include/inexor/vulkan-renderer/wrapper/gpu_texture.hpp b/include/inexor/vulkan-renderer/wrapper/gpu_texture.hpp deleted file mode 100644 index 99b5b09dd..000000000 --- a/include/inexor/vulkan-renderer/wrapper/gpu_texture.hpp +++ /dev/null @@ -1,89 +0,0 @@ -#pragma once - -#include "inexor/vulkan-renderer/wrapper/cpu_texture.hpp" -#include "inexor/vulkan-renderer/wrapper/device.hpp" -#include "inexor/vulkan-renderer/wrapper/gpu_memory_buffer.hpp" -#include "inexor/vulkan-renderer/wrapper/image.hpp" - -#include - -namespace inexor::vulkan_renderer::wrapper { - -// Forward declarations -class Device; -class GPUMemoryBuffer; - -/// @note The code which loads textures from files is wrapped in CpuTexture. -/// @brief RAII wrapper class for textures which are stored in GPU memory. -/// @todo Support 3D textures and cube maps (implement new and separate wrappers though). -class GpuTexture { - std::unique_ptr m_texture_image; - VkSampler m_sampler{VK_NULL_HANDLE}; - - int m_texture_width{0}; - int m_texture_height{0}; - int m_texture_channels{0}; - int m_mip_levels{0}; - - std::string m_name; - const Device &m_device; - const VkFormat m_texture_image_format{VK_FORMAT_R8G8B8A8_UNORM}; - - /// @brief Create the texture. - /// @param texture_data A pointer to the texture data. - /// @param texture_size The size of the texture. - void create_texture(void *texture_data, std::size_t texture_size); - - /// @brief Transform the image layout. - /// @param image The image. - /// @param old_layout The old image layout. - /// @param new_layout The new image layout. - void transition_image_layout(VkImage image, VkImageLayout old_layout, VkImageLayout new_layout); - - /// @brief Create the texture sampler. - void create_texture_sampler(); - -public: - /// @brief Construct a texture from a file. - /// @param device The const reference to a device RAII wrapper instance. - /// @param file_name The name of the texture file. - /// @param name The internal debug marker name of the texture. - GpuTexture(const Device &device, const CpuTexture &cpu_texture); - - /// @brief Construct a texture from a block of memory. - /// @param device The const reference to a device RAII wrapper instance. - /// @param device The const reference to a device RAII wrapper instance. - /// @param texture_data A pointer to the texture data. - /// @param texture_width The width of the texture. - /// @param texture_height The height of the texture. - /// @param texture_size The size of the texture. - /// @param name The internal debug marker name of the texture. 
- GpuTexture(const Device &device, void *data, std::size_t data_size, int texture_width, int texture_height, - int texture_channels, int mip_levels, std::string name); - - GpuTexture(const GpuTexture &) = delete; - GpuTexture(GpuTexture &&) noexcept; - - ~GpuTexture(); - - GpuTexture &operator=(const GpuTexture &) = delete; - GpuTexture &operator=(GpuTexture &&) = delete; - - [[nodiscard]] const std::string &name() const { - return m_name; - } - - [[nodiscard]] VkImage image() const { - return m_texture_image->get(); - } - - [[nodiscard]] VkImageView image_view() const { - return m_texture_image->image_view(); - } - - [[nodiscard]] VkSampler sampler() const { - return m_sampler; - } -}; - -} // namespace inexor::vulkan_renderer::wrapper diff --git a/include/inexor/vulkan-renderer/wrapper/image.hpp b/include/inexor/vulkan-renderer/wrapper/image.hpp deleted file mode 100644 index e77200c4a..000000000 --- a/include/inexor/vulkan-renderer/wrapper/image.hpp +++ /dev/null @@ -1,55 +0,0 @@ -#pragma once - -#include - -#include - -namespace inexor::vulkan_renderer::wrapper { - -// Forward declaration -class Device; - -/// @brief RAII wrapper class for VkImage. -class Image { - const Device &m_device; - VmaAllocation m_allocation{VK_NULL_HANDLE}; - VmaAllocationInfo m_allocation_info{}; - VkImage m_image{VK_NULL_HANDLE}; - VkFormat m_format{VK_FORMAT_UNDEFINED}; - VkImageView m_image_view{VK_NULL_HANDLE}; - std::string m_name; - -public: - /// @brief Default constructor. - /// @param device The const reference to a device RAII wrapper instance. - /// @param format The color format. - /// @param image_usage The image usage flags. - /// @param aspect_flags The aspect flags. - /// @param sample_count The sample count. - /// @param name The internal debug marker name of the VkImage. - /// @param image_extent The width and height of the image. - Image(const Device &device, VkFormat format, VkImageUsageFlags image_usage, VkImageAspectFlags aspect_flags, - VkSampleCountFlagBits sample_count, const std::string &name, VkExtent2D image_extent); - - Image(const Image &) = delete; - Image(Image &&) noexcept; - - ~Image(); - - Image &operator=(const Image &) = delete; - Image &operator=(Image &&) = delete; - - [[nodiscard]] VkFormat image_format() const { - return m_format; - } - - [[nodiscard]] VkImageView image_view() const { - return m_image_view; - } - - [[nodiscard]] VkImage get() const { - return m_image; - } -}; - -} // namespace inexor::vulkan_renderer::wrapper diff --git a/include/inexor/vulkan-renderer/wrapper/instance.hpp b/include/inexor/vulkan-renderer/wrapper/instance.hpp index ee3e9cc0b..78be17e61 100644 --- a/include/inexor/vulkan-renderer/wrapper/instance.hpp +++ b/include/inexor/vulkan-renderer/wrapper/instance.hpp @@ -8,58 +8,54 @@ namespace inexor::vulkan_renderer::wrapper { -/// @brief RAII wrapper class for VkInstances. +/// RAII wrapper class for VkInstance and VkDebugUtilsMessengerEXT class Instance { private: VkInstance m_instance{VK_NULL_HANDLE}; - static constexpr std::uint32_t REQUIRED_VK_API_VERSION{VK_API_VERSION_1_2}; + VkDebugUtilsMessengerEXT m_debug_callback{VK_NULL_HANDLE}; public: - /// @brief Check if a certain instance layer is available on the system. - /// @param layer_name The name of the instance layer. - /// @return ``true`` if the instance layer is supported. - [[nodiscard]] static bool is_layer_supported(const std::string &layer_name); - - /// @brief Check if a certain instance extension is supported on the system. 
- /// @param extension_name The name of the instance extension. - /// @return ``true`` if the instance extension is supported. - [[nodiscard]] static bool is_extension_supported(const std::string &extension_name); - - /// @brief Construct the Vulkan instance and specify the requested instance layers and instance extensions. - /// @param application_name The Vulkan application's internal application name - /// @param engine_name The Vulkan application's internal engine name - /// @param application_version The Vulkan application's internal version - /// @param engine_version The Vulkan application's internal engine version - /// @param enable_validation_layers True if validation layers should be enabled - /// @param enable_renderdoc_layer True if renderdoc layer should be enabled - /// @param requested_instance_extensions The instance extensions which are requested - /// @param requested_instance_layers The instance layers which are requested - Instance(const std::string &application_name, const std::string &engine_name, std::uint32_t application_version, - std::uint32_t engine_version, bool enable_validation_layers, bool enable_renderdoc_layer, - const std::vector &requested_instance_extensions, - const std::vector &requested_instance_layers); - - /// @brief Construct the Vulkan instance without the requested instance layers and instance extensions. + /// Construct the Vulkan instance and specify the requested instance layers and instance extensions. /// @param application_name The Vulkan application's internal application name /// @param engine_name The Vulkan application's internal engine name /// @param application_version The Vulkan application's internal version /// @param engine_version The Vulkan application's internal engine version - /// @param enable_validation_layers True if validation layers should be enabled, false otherwise - /// @param enable_renderdoc_layer True if renderdoc layer should be enabled, false otherwise - Instance(const std::string &application_name, const std::string &engine_name, std::uint32_t application_version, - std::uint32_t engine_version, bool enable_validation_layers, bool enable_renderdoc_layer); + /// @param debug_callback The debug utils messenger callback (VK_EXT_debug_utils) + /// @param requested_instance_extensions The instance extensions which are requested (empty by default) + /// @param requested_instance_layers The instance layers which are requested (empty by default) + Instance(const std::string &application_name, + const std::string &engine_name, + std::uint32_t application_version, + std::uint32_t engine_version, + PFN_vkDebugUtilsMessengerCallbackEXT debug_callback, + const std::vector &requested_instance_extensions = {}, + const std::vector &requested_instance_layers = {}); Instance(const Instance &) = delete; Instance(Instance &&) noexcept; + /// Call vkDestroyDebugUtilsMessengerEXT and vkDestroyInstance ~Instance(); Instance &operator=(const Instance &) = delete; Instance &operator=(Instance &&) = default; + // TODO: Remove get methods and use access to private members via friend declarations! 
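The ``Instance`` constructor above now takes a ``PFN_vkDebugUtilsMessengerCallbackEXT`` directly. A minimal sketch of a callback with the expected signature is shown below; it is illustrative and not the engine's actual callback.

.. code-block:: cpp

   // Minimal callback matching PFN_vkDebugUtilsMessengerCallbackEXT (VK_EXT_debug_utils).
   #include <iostream>

   #include <vulkan/vulkan.h>

   VKAPI_ATTR VkBool32 VKAPI_CALL validation_callback(VkDebugUtilsMessageSeverityFlagBitsEXT severity,
                                                      VkDebugUtilsMessageTypeFlagsEXT /*type*/,
                                                      const VkDebugUtilsMessengerCallbackDataEXT *data,
                                                      void * /*user_data*/) {
       if (severity >= VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT) {
           std::cerr << "[validation] " << data->pMessage << '\n';
       }
       // Returning VK_FALSE means the Vulkan call which triggered the message is not aborted
       return VK_FALSE;
   }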
[[nodiscard]] VkInstance instance() const { return m_instance; } + + /// Check if a certain instance layer is available on the system + /// @param layer_name The name of the instance layer + /// @return ``true`` if the instance layer is supported + [[nodiscard]] static bool is_layer_supported(const std::string &layer_name); + + /// Check if a certain instance extension is supported on the system + /// @param extension_name The name of the instance extension + /// @return ``true`` if the instance extension is supported + [[nodiscard]] static bool is_extension_supported(const std::string &extension_name); + + static constexpr std::uint32_t REQUIRED_VK_API_VERSION{VK_API_VERSION_1_3}; }; } // namespace inexor::vulkan_renderer::wrapper diff --git a/include/inexor/vulkan-renderer/wrapper/pipelines/pipeline.hpp b/include/inexor/vulkan-renderer/wrapper/pipelines/pipeline.hpp new file mode 100644 index 000000000..00cb36442 --- /dev/null +++ b/include/inexor/vulkan-renderer/wrapper/pipelines/pipeline.hpp @@ -0,0 +1,63 @@ +#pragma once + +#include "inexor/vulkan-renderer/wrapper/pipelines/pipeline_layout.hpp" + +#include + +#include +#include + +namespace inexor::vulkan_renderer::wrapper { +// Forward declarations +class Device; +} // namespace inexor::vulkan_renderer::wrapper + +namespace inexor::vulkan_renderer::wrapper::commands { +// Forward declarations +class CommandBuffer; +} // namespace inexor::vulkan_renderer::wrapper::commands + +namespace inexor::vulkan_renderer::render_graph { +// Forward declaration +class RenderGraph; +} // namespace inexor::vulkan_renderer::render_graph + +namespace inexor::vulkan_renderer::wrapper::pipelines { + +// TODO: Implement compute pipelines + +/// RAII wrapper for graphics pipelines +class GraphicsPipeline { + friend class commands::CommandBuffer; + friend class render_graph::RenderGraph; + +private: + const Device &m_device; + std::unique_ptr m_pipeline_layout{nullptr}; + VkPipeline m_pipeline{VK_NULL_HANDLE}; + std::string m_name; + +public: + /// Default constructor + /// @param device The device wrapper + /// @param descriptor_set_layouts The descriptor set layouts in the pipeline layout + /// @param push_constant_ranges The push constant ranges in the pipeline layout + /// @param pipeline_ci The pipeline create info + /// @param name The internal debug name of the graphics pipeline + GraphicsPipeline(const Device &device, + std::vector descriptor_set_layouts, + std::vector push_constant_ranges, + VkGraphicsPipelineCreateInfo pipeline_ci, + std::string name); + + GraphicsPipeline(const GraphicsPipeline &) = delete; + GraphicsPipeline(GraphicsPipeline &&) noexcept; + + /// Call vkDestroyPipeline + ~GraphicsPipeline(); + + GraphicsPipeline &operator=(const GraphicsPipeline &) = delete; + GraphicsPipeline &operator=(GraphicsPipeline &&) = delete; +}; + +} // namespace inexor::vulkan_renderer::wrapper::pipelines diff --git a/include/inexor/vulkan-renderer/wrapper/pipelines/pipeline_builder.hpp b/include/inexor/vulkan-renderer/wrapper/pipelines/pipeline_builder.hpp new file mode 100644 index 000000000..4d2379637 --- /dev/null +++ b/include/inexor/vulkan-renderer/wrapper/pipelines/pipeline_builder.hpp @@ -0,0 +1,287 @@ +#pragma once + +#include + +#include "inexor/vulkan-renderer/wrapper/make_info.hpp" +#include "inexor/vulkan-renderer/wrapper/pipelines/pipeline.hpp" +#include "inexor/vulkan-renderer/wrapper/shader.hpp" + +#include + +#include +#include +#include +#include +#include + +namespace inexor::vulkan_renderer::wrapper { +// Forward declarations 
+class Device; +class Shader; +} // namespace inexor::vulkan_renderer::wrapper + +namespace inexor::vulkan_renderer::render_graph { +// Forward declaration +class RenderGraph; +} // namespace inexor::vulkan_renderer::render_graph + +namespace inexor::vulkan_renderer::wrapper::pipelines { + +// Forward declaration +class render_graph::RenderGraph; +class wrapper::Shader; + +// TODO: ComputePipelineBuilder + +/// Builder class for VkPipelineCreateInfo for graphics pipelines which use dynamic rendering +/// @note This builder pattern does not perform any checks which are already covered by validation layers. +/// This means if you forget to specify viewport for example, creation of the graphics pipeline will fail. +/// It is the reponsibility of the programmer to use validation layers to check for problems. +class GraphicsPipelineBuilder { + friend class RenderGraph; + +private: + /// The device wrapper reference + const Device &m_device; + + // We are not using member initializers here: + // Note that all members are initialized in the reset() method + // This method is also called after the graphics pipeline has been created, + // allowing one instance of GraphicsPipelineBuilder to be reused + + // With the builder we can either call add_shader or set_shaders + std::vector m_shader_stages{}; + + std::vector m_vertex_input_binding_descriptions{}; + std::vector m_vertex_input_attribute_descriptions{}; + // With the builder we can fill vertex binding descriptions and vertex attribute descriptions in here + VkPipelineVertexInputStateCreateInfo m_vertex_input_sci{}; + + // With the builder we can set topology in here + VkPipelineInputAssemblyStateCreateInfo m_input_assembly_sci{}; + + // With the builder we can set the patch control point count in here + VkPipelineTessellationStateCreateInfo m_tesselation_sci{}; + + std::vector m_viewports{}; + std::vector m_scissors{}; + // With the builder we can set viewport(s) and scissor(s) in here + VkPipelineViewportStateCreateInfo m_viewport_sci{}; + + // With the builder we can set polygon mode, cull mode, front face, and line width + // TODO: Implement methods to enable depth bias and for setting the depth bias parameters + VkPipelineRasterizationStateCreateInfo m_rasterization_sci{}; + + // With the builder we can't set individial fields of this struct, + // since it's easier to specify an entire VkPipelineDepthStencilStateCreateInfo struct to the builder instead + VkPipelineDepthStencilStateCreateInfo m_depth_stencil_sci{}; + + /// This is used for dynamic rendering + VkFormat m_depth_attachment_format{}; + VkFormat m_stencil_attachment_format{}; + std::vector m_color_attachments{}; + + VkPipelineRenderingCreateInfo m_pipeline_rendering_ci{}; + + // With the builder we can set rasterization samples and min sample shading + // TODO: Expose more multisampling parameters if desired + VkPipelineMultisampleStateCreateInfo m_multisample_sci{}; + + // With the builder we can't set individial fields of this struct, + // since it's easier to specify an entire VkPipelineColorBlendStateCreateInfo struct to the builder instead + VkPipelineColorBlendStateCreateInfo m_color_blend_sci{}; + + std::vector m_dynamic_states{}; + // This will be filled in the build() method + VkPipelineDynamicStateCreateInfo m_dynamic_states_sci{}; + + /// The layout of the graphics pipeline + VkPipelineLayout m_pipeline_layout{VK_NULL_HANDLE}; + + // With the builder we can either call add_color_blend_attachment or set_color_blend_attachments + std::vector 
m_color_blend_attachment_states{}; + + /// The push constant ranges of the graphics pass + std::vector m_push_constant_ranges{}; + + VkDescriptorSetLayout m_descriptor_set_layout{VK_NULL_HANDLE}; + + /// Reset all data in this class so the builder can be re-used + /// @note This is called by the constructor + void reset(); + + /// Default constructor is private, so only rendergraph can access it + /// @param device The device wrapper + explicit GraphicsPipelineBuilder(const Device &device); + +public: + GraphicsPipelineBuilder(const GraphicsPipelineBuilder &) = delete; + GraphicsPipelineBuilder(GraphicsPipelineBuilder &&other) noexcept; + ~GraphicsPipelineBuilder() = default; + + GraphicsPipelineBuilder &operator=(const GraphicsPipelineBuilder &) = delete; + GraphicsPipelineBuilder &operator=(GraphicsPipelineBuilder &&) = delete; + + /// Adds a color attachment + /// @param format The format of the color attachment + /// @return A reference to the dereferenced this pointer (allows method calls to be chained) + [[nodiscard]] GraphicsPipelineBuilder &add_color_attachment_format(VkFormat format); + + /// Add a color blend attachment + /// @param attachment The color blend attachment + /// @return A reference to the dereferenced this pointer (allows method calls to be chained) + [[nodiscard]] GraphicsPipelineBuilder & + add_color_blend_attachment(const VkPipelineColorBlendAttachmentState &attachment); + + /// Add the default color blend attachment + /// @return A reference to the dereferenced this pointer (allows method calls to be chained) + [[nodiscard]] GraphicsPipelineBuilder &add_default_color_blend_attachment(); + + /// Add a push constant range to the graphics pass + /// @param shader_stage The shader stage for the push constant range + /// @param size The size of the push constant + /// @param offset The offset in the push constant range + /// @return A const reference to the this pointer (allowing method calls to be chained) + [[nodiscard]] GraphicsPipelineBuilder & + add_push_constant_range(VkShaderStageFlags shader_stage, std::uint32_t size, std::uint32_t offset = 0); + + /// Add a shader to the graphics pipeline + /// @param shader The shader + /// @return A reference to the dereferenced this pointer (allows method calls to be chained) + [[nodiscard]] GraphicsPipelineBuilder &add_shader(std::weak_ptr shader); + + /// Build the graphics pipeline with specified pipeline create flags + /// @param name The debug name of the graphics pipeline + /// @return The unique pointer instance of ``GraphicsPipeline`` that was created + [[nodiscard]] std::shared_ptr build(std::string name); + + /// Set the color blend manually + /// @param color_blend The color blend + /// @return A reference to the dereferenced this pointer (allows method calls to be chained) + [[nodiscard]] GraphicsPipelineBuilder &set_color_blend(const VkPipelineColorBlendStateCreateInfo &color_blend); + + /// Set all color blend attachments manually + /// @note You should prefer to use ``add_color_blend_attachment`` instead + /// @param attachments The color blend attachments + /// @return A reference to the dereferenced this pointer (allows method calls to be chained) + [[nodiscard]] GraphicsPipelineBuilder & + set_color_blend_attachments(const std::vector &attachments); + + /// Enable or disable culling + /// @warning Disabling culling will have a significant performance impact + /// @param culling_enabled ``true`` if culling is enabled + /// @return A reference to the dereferenced this pointer (allows method calls to be 
chained) + [[nodiscard]] GraphicsPipelineBuilder &set_culling_mode(VkBool32 culling_enabled); + + /// Set the deptch attachment format + /// @param format The format of the depth attachment + /// @return A const reference to the this pointer (allowing method calls to be chained) + [[nodiscard]] GraphicsPipelineBuilder &set_depth_attachment_format(VkFormat format); + + /// Set the descriptor set layout + /// @param descriptor_set_layout The descriptor set layout + /// @return A reference to the dereferenced this pointer (allows method calls to be chained) + [[nodiscard]] GraphicsPipelineBuilder &set_descriptor_set_layout(VkDescriptorSetLayout descriptor_set_layout); + + /// Set the depth stencil + /// @warning Disabling culling can have performance impacts! + /// @param depth_stencil The depth stencil + /// @return A reference to the dereferenced this pointer (allows method calls to be chained) + [[nodiscard]] GraphicsPipelineBuilder & + set_depth_stencil(const VkPipelineDepthStencilStateCreateInfo &depth_stencil); + + /// Set the dynamic states + /// @param dynamic_states The dynamic states + /// @return A reference to the dereferenced this pointer (allows method calls to be chained) + [[nodiscard]] GraphicsPipelineBuilder &set_dynamic_states(const std::vector &dynamic_states); + + /// Set the stencil attachment format + /// @param format The format of the stencil attachment + /// @return A const reference to the this pointer (allowing method calls to be chained) + [[nodiscard]] GraphicsPipelineBuilder &set_stencil_attachment_format(VkFormat format); + + /// Set the input assembly state create info + /// @note If you just want to set the triangle topology, call ``set_triangle_topology`` instead, because this is the + /// most powerful method of this method in case you really need to overwrite it + /// @param input_assembly The pipeline input state create info + /// @return A reference to the dereferenced this pointer (allows method calls to be chained) + [[nodiscard]] GraphicsPipelineBuilder & + set_input_assembly(const VkPipelineInputAssemblyStateCreateInfo &input_assembly); + + /// Set the line width of rasterization + /// @param line_width The line width used in rasterization + /// @return A reference to the dereferenced this pointer (allows method calls to be chained) + [[nodiscard]] GraphicsPipelineBuilder &set_line_width(float width); + + /// Set the most important MSAA settings + /// @param sample_count The number of samples used in rasterization + /// @param min_sample_shading A minimum fraction of sample shading + /// @return A reference to the dereferenced this pointer (allows method calls to be chained) + [[nodiscard]] GraphicsPipelineBuilder &set_multisampling(VkSampleCountFlagBits sample_count, + std::optional min_sample_shading); + + /// Store the pipeline layout + /// @param layout The pipeline layout + /// @return A reference to the dereferenced this pointer (allows method calls to be chained) + [[nodiscard]] GraphicsPipelineBuilder &set_pipeline_layout(VkPipelineLayout layout); + + /// Set the triangle topology + /// @param topology the primitive topology + /// @return A reference to the dereferenced this pointer (allows method calls to be chained) + [[nodiscard]] GraphicsPipelineBuilder &set_primitive_topology(VkPrimitiveTopology topology); + + /// Set the rasterization state of the graphics pipeline manually + /// @param rasterization The rasterization state + /// @return A reference to the dereferenced this pointer (allows method calls to be chained) + [[nodiscard]] 
GraphicsPipelineBuilder & + set_rasterization(const VkPipelineRasterizationStateCreateInfo &rasterization); + + /// Set the scissor data in VkPipelineViewportStateCreateInfo + /// There is another method called set_scissors in case multiple scissors will be used + /// @param scissors The scissors in in VkPipelineViewportStateCreateInfo + /// @return A reference to the dereferenced this pointer (allows method calls to be chained) + [[nodiscard]] GraphicsPipelineBuilder &set_scissor(const VkRect2D &scissor); + + /// Set the scissor data in VkPipelineViewportStateCreateInfo (convert VkExtent2D to VkRect2D) + /// @param extent The extent of the scissor + /// @return A reference to the dereferenced this pointer (allows method calls to be chained) + [[nodiscard]] GraphicsPipelineBuilder &set_scissor(const VkExtent2D &extent); + + /// Set the tesselation control point count + /// @note This is not used in the code so far, because we are not using tesselation + /// @param control_point_count The tesselation control point count + /// @return A reference to the dereferenced this pointer (allows method calls to be chained) + [[nodiscard]] GraphicsPipelineBuilder &set_tesselation_control_point_count(std::uint32_t control_point_count); + + /// Set the vertex input attribute descriptions manually + /// @note As of C++23, there is no mechanism to do so called reflection in C++, meaning we can't get any information + /// about the members of a struct, which would allow us to determine vertex input attributes automatically. + /// @param descriptions The vertex input attribute descriptions + /// @return A reference to the dereferenced this pointer (allows method calls to be chained) + [[nodiscard]] GraphicsPipelineBuilder & + set_vertex_input_attributes(const std::vector &descriptions); + + /// Set the vertex input binding descriptions manually + /// @param descriptions The vertex input binding descriptions + /// @return A reference to the dereferenced this pointer (allows method calls to be chained) + [[nodiscard]] GraphicsPipelineBuilder & + set_vertex_input_bindings(const std::vector &descriptions); + + /// Set the viewport in VkPipelineViewportStateCreateInfo + /// There is another method called set_viewports in case multiple viewports will be used + /// @param viewport The viewport in VkPipelineViewportStateCreateInfo + /// @return A reference to the dereferenced this pointer (allows method calls to be chained) + [[nodiscard]] GraphicsPipelineBuilder &set_viewport(const VkViewport &viewport); + + /// Set the viewport in VkPipelineViewportStateCreateInfo (convert VkExtent2D to VkViewport) + /// @param extent The extent of the viewport + /// @return A reference to the dereferenced this pointer (allows method calls to be chained) + [[nodiscard]] GraphicsPipelineBuilder &set_viewport(const VkExtent2D &extent); + + /// Set the wireframe mode + /// @param wireframe ``true`` if wireframe is enabled + /// @return A reference to the dereferenced this pointer (allows method calls to be chained) + [[nodiscard]] GraphicsPipelineBuilder &set_wireframe(VkBool32 wireframe); +}; + +} // namespace inexor::vulkan_renderer::wrapper::pipelines diff --git a/include/inexor/vulkan-renderer/wrapper/pipelines/pipeline_layout.hpp b/include/inexor/vulkan-renderer/wrapper/pipelines/pipeline_layout.hpp new file mode 100644 index 000000000..8192c14f1 --- /dev/null +++ b/include/inexor/vulkan-renderer/wrapper/pipelines/pipeline_layout.hpp @@ -0,0 +1,61 @@ +#pragma once + +#include "inexor/vulkan-renderer/wrapper/device.hpp" + 
+#include + +namespace inexor::vulkan_renderer::render_graph { +// Forward declaration +class RenderGraph; +} // namespace inexor::vulkan_renderer::render_graph + +namespace inexor::vulkan_renderer::wrapper::commands { +// Forward declaration +class CommandBuffer; +} // namespace inexor::vulkan_renderer::wrapper::commands + +namespace inexor::vulkan_renderer::wrapper::pipelines { + +// Forward declaration +class GraphicsPipeline; + +// Using declaration +using commands::CommandBuffer; +using render_graph::RenderGraph; + +/// RAII wrapper class for VkPipelineLayout +class PipelineLayout { + friend class RenderGraph; + friend class GraphicsPipeline; + friend class CommandBuffer; + +private: + const Device &m_device; + std::string m_name; + + // There is no get method for this because only rendergraph needs to access it through friend class + VkPipelineLayout m_pipeline_layout{VK_NULL_HANDLE}; + +public: + /// Call vkCreatePipelineLayout + /// @note The constructor is private because only friend class RenderGraph needs access to it + /// @param device The device wrapper + /// @param name The name of the pipeline layout + /// @param descriptor_set_layouts The descriptor set layouts of the pipeline layout + /// @param push_constant_ranges The push constant ranges of the pipeline layout + PipelineLayout(const Device &device, + std::string name, + std::span descriptor_set_layouts, + std::span push_constant_ranges); + + PipelineLayout(const PipelineLayout &) = delete; + PipelineLayout(PipelineLayout &&) noexcept; + + /// Call vkDestroyPipelineLayout + ~PipelineLayout(); + + PipelineLayout &operator=(const PipelineLayout &) = delete; + PipelineLayout &operator=(PipelineLayout &&other) noexcept = delete; +}; + +} // namespace inexor::vulkan_renderer::wrapper::pipelines diff --git a/include/inexor/vulkan-renderer/wrapper/sampler.hpp b/include/inexor/vulkan-renderer/wrapper/sampler.hpp new file mode 100644 index 000000000..8b0f5b119 --- /dev/null +++ b/include/inexor/vulkan-renderer/wrapper/sampler.hpp @@ -0,0 +1,68 @@ +#pragma once + +#include "inexor/vulkan-renderer/wrapper/make_info.hpp" + +#include + +#include + +namespace inexor::vulkan_renderer::render_graph { +// Forward declaration +class RenderGraph; +class Texture; +} // namespace inexor::vulkan_renderer::render_graph + +namespace inexor::vulkan_renderer::wrapper { + +// Forward declaration +class Device; + +// Using declaration +using render_graph::RenderGraph; +using render_graph::Texture; + +/// RAII wrapper class for VkSampler +class Sampler { + friend class RenderGraph; + friend class Texture; + +private: + const Device &m_device; + VkSampler m_sampler{VK_NULL_HANDLE}; + std::string m_name; + +public: + /// Default constructor + /// @param device The device wrapper + /// @param name The internal debug name of the sampler + /// @param sampler_ci The sampler create info + Sampler(const Device &device, + std::string name, + const VkSamplerCreateInfo &sampler_ci = make_info({ + // NOTE: These are the default sampler settings + .magFilter = VK_FILTER_LINEAR, + .minFilter = VK_FILTER_LINEAR, + .mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR, + .addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT, + .addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT, + .addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT, + .mipLodBias = 0.0f, + .anisotropyEnable = VK_FALSE, + .maxAnisotropy = 1.0f, + .compareEnable = VK_FALSE, + .compareOp = VK_COMPARE_OP_ALWAYS, + .minLod = 0.0f, + .maxLod = 0.0f, + .borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK, + .unnormalizedCoordinates = 
VK_FALSE, + })); + + Sampler(const Sampler &) = delete; + Sampler(Sampler &&) noexcept; + ~Sampler(); + + Sampler &operator=(const Sampler &) = delete; + Sampler &operator=(Sampler &&) = delete; +}; + +} // namespace inexor::vulkan_renderer::wrapper diff --git a/include/inexor/vulkan-renderer/wrapper/shader.hpp b/include/inexor/vulkan-renderer/wrapper/shader.hpp index 46b9caf5e..0f5ab8321 100644 --- a/include/inexor/vulkan-renderer/wrapper/shader.hpp +++ b/include/inexor/vulkan-renderer/wrapper/shader.hpp @@ -6,61 +6,56 @@ #include namespace inexor::vulkan_renderer::wrapper { - // Forward declaration class Device; +} // namespace inexor::vulkan_renderer::wrapper + +namespace inexor::vulkan_renderer::wrapper::pipelines { +// Forward declaration +class GraphicsPipelineBuilder; +} // namespace inexor::vulkan_renderer::wrapper::pipelines + +namespace inexor::vulkan_renderer::wrapper { + +// Forward declaration +class RenderGraph; -/// @brief RAII wrapper class for VkShaderModules. +// TODO: Support shader specialization constants + +// We don't want to type that out all the time +using inexor::vulkan_renderer::wrapper::Device; +using inexor::vulkan_renderer::wrapper::pipelines::GraphicsPipelineBuilder; + +/// RAII wrapper class for VkShaderModule class Shader { +private: + friend class RenderGraph; + friend class GraphicsPipelineBuilder; + const Device &m_device; std::string m_name; - std::string m_entry_point; - VkShaderStageFlagBits m_type; + std::string m_file_name; + VkShaderStageFlagBits m_shader_stage; VkShaderModule m_shader_module{VK_NULL_HANDLE}; + // TODO: Use a SPIR-V library like spirv-cross to deduce shader type automatically using shader reflection! + public: - /// @brief Construct a shader module from a block of SPIR-V memory. - /// @param device The const reference to a device RAII wrapper instance. - /// @param type The shader type. - /// @param name The internal debug marker name of the VkShaderModule. - /// @param code The memory block of the SPIR-V shader. - /// @param entry_point The name of the entry point, "main" by default. - Shader(const Device &m_device, VkShaderStageFlagBits type, const std::string &name, const std::vector &code, - const std::string &entry_point = "main"); - - /// @brief Construct a shader module from a SPIR-V file. - /// This constructor loads the file content and just calls the other constructor. - /// @param device The const reference to a device RAII wrapper instance. - /// @param type The shader type. - /// @param name The internal debug marker name of the VkShaderModule. - /// @param file_name The name of the SPIR-V shader file to load. - /// @param entry_point The name of the entry point, "main" by default. 
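A hypothetical usage sketch for the Sampler wrapper introduced above; "device" is assumed to be an existing wrapper::Device instance, and make_info<VkSamplerCreateInfo> is assumed to be the project's templated create-info helper from make_info.hpp:

    using namespace inexor::vulkan_renderer::wrapper;

    // Uses the defaulted create info shown above: linear filtering, repeat addressing, no anisotropy
    Sampler default_sampler(device, "Default sampler");

    // Overrides the defaults, e.g. nearest-neighbour filtering with clamped addressing
    Sampler pixel_sampler(device, "Pixel-art sampler",
                          make_info<VkSamplerCreateInfo>({
                              .magFilter = VK_FILTER_NEAREST,
                              .minFilter = VK_FILTER_NEAREST,
                              .addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
                              .addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
                              .addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
                          }));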
- Shader(const Device &m_device, VkShaderStageFlagBits type, const std::string &name, const std::string &file_name, - const std::string &entry_point = "main"); + /// Load the shader file and call vkCreateShaderModule + /// @param device The device wrapper + /// @param name The internal debug name of the shader (not necessarily the file name) + /// @param type The shader type + /// @param file_name The shader file name + Shader(const Device &device, std::string name, VkShaderStageFlagBits type, std::string file_name); Shader(const Shader &) = delete; Shader(Shader &&) noexcept; + /// Call vkDestroyShaderModule ~Shader(); Shader &operator=(const Shader &) = delete; Shader &operator=(Shader &&) = delete; - - [[nodiscard]] const std::string &name() const { - return m_name; - } - - [[nodiscard]] const std::string &entry_point() const { - return m_entry_point; - } - - [[nodiscard]] VkShaderStageFlagBits type() const { - return m_type; - } - - [[nodiscard]] VkShaderModule module() const { - return m_shader_module; - } }; } // namespace inexor::vulkan_renderer::wrapper diff --git a/include/inexor/vulkan-renderer/wrapper/surface.hpp b/include/inexor/vulkan-renderer/wrapper/surface.hpp new file mode 100644 index 000000000..567602129 --- /dev/null +++ b/include/inexor/vulkan-renderer/wrapper/surface.hpp @@ -0,0 +1,35 @@ +#pragma once + +#include +#include + +namespace inexor::vulkan_renderer::wrapper { + +/// RAII wrapper class for VkSurfaceKHR +class Surface { + +private: + const VkInstance m_instance{VK_NULL_HANDLE}; + GLFWwindow *m_window{nullptr}; + VkSurfaceKHR m_surface{VK_NULL_HANDLE}; + +public: + /// Create a GLFW surface + /// @param instance The Vulkan instance + /// @param window The GLFW window to create the surface with + Surface(VkInstance instance, GLFWwindow *window); + Surface(const Surface &) = delete; + // TODO: Implement me! + Surface(Surface &&) noexcept; + ~Surface(); + + Surface &operator=(const Surface &) = delete; + // TODO: Implement me! 
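The surface.cpp implementation is not part of this hunk; a plausible sketch of the constructor and destructor declared in the new Surface wrapper, assuming they simply wrap glfwCreateWindowSurface/vkDestroySurfaceKHR and reuse the project's VulkanException:

    // Sketch only; error type, message and includes are assumptions, not taken from this diff
    #define GLFW_INCLUDE_VULKAN
    #include <GLFW/glfw3.h>

    #include "inexor/vulkan-renderer/exception.hpp"
    #include "inexor/vulkan-renderer/wrapper/surface.hpp"

    namespace inexor::vulkan_renderer::wrapper {

    Surface::Surface(const VkInstance instance, GLFWwindow *window) : m_instance(instance), m_window(window) {
        // glfwCreateWindowSurface selects the correct platform-specific WSI extension internally
        if (const auto result = glfwCreateWindowSurface(instance, window, nullptr, &m_surface);
            result != VK_SUCCESS) {
            throw VulkanException("Error: glfwCreateWindowSurface failed!", result);
        }
    }

    Surface::~Surface() {
        vkDestroySurfaceKHR(m_instance, m_surface, nullptr);
    }

    } // namespace inexor::vulkan_renderer::wrapper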
+ Surface &operator=(Surface &&) noexcept; + + [[nodiscard]] VkSurfaceKHR surface() const { + return m_surface; + } +}; + +} // namespace inexor::vulkan_renderer::wrapper diff --git a/include/inexor/vulkan-renderer/wrapper/swapchain.hpp b/include/inexor/vulkan-renderer/wrapper/swapchain.hpp index 55e1a574e..0e6a2f6d0 100644 --- a/include/inexor/vulkan-renderer/wrapper/swapchain.hpp +++ b/include/inexor/vulkan-renderer/wrapper/swapchain.hpp @@ -1,6 +1,6 @@ #pragma once -#include "inexor/vulkan-renderer/wrapper/semaphore.hpp" +#include "inexor/vulkan-renderer/wrapper/synchronization/semaphore.hpp" #include #include @@ -8,24 +8,57 @@ #include #include +namespace inexor::vulkan_renderer::wrapper::synchronization { +// Forward declaration +class Semaphore; +} // namespace inexor::vulkan_renderer::wrapper::synchronization + +namespace inexor::vulkan_renderer::render_graph { +// Forward declaration +class RenderGraph; +class GraphicsPass; +class GraphicsPassBuilder; +} // namespace inexor::vulkan_renderer::render_graph + +namespace inexor::vulkan_renderer::wrapper::commands { +// Forward declaration +class CommandBuffer; +} // namespace inexor::vulkan_renderer::wrapper::commands + namespace inexor::vulkan_renderer::wrapper { -// Forward declarations +// Forward declaration class Device; -class Semaphore; +class Window; + +// Using declarations +using commands::CommandBuffer; +using render_graph::GraphicsPass; +using render_graph::GraphicsPassBuilder; +using render_graph::RenderGraph; /// RAII wrapper class for swapchains class Swapchain { + // + friend class RenderGraph; + friend class GraphicsPassBuilder; + friend class GraphicsPass; + private: Device &m_device; + std::string m_name; VkSwapchainKHR m_swapchain{VK_NULL_HANDLE}; VkSurfaceKHR m_surface{VK_NULL_HANDLE}; std::optional m_surface_format{}; std::vector m_imgs; std::vector m_img_views; VkExtent2D m_extent{}; - std::unique_ptr m_img_available; + std::unique_ptr m_img_available; bool m_vsync_enabled{false}; + std::uint32_t m_img_index; + VkImage m_current_swapchain_img{VK_NULL_HANDLE}; + VkImageView m_current_swapchain_img_view{VK_NULL_HANDLE}; + bool m_prepared_for_rendering{false}; /// Call vkGetSwapchainImagesKHR /// @exception inexor::vulkan_renderer::VulkanException vkGetSwapchainImagesKHR call failed @@ -35,11 +68,12 @@ class Swapchain { public: /// Default constructor /// @param device The device wrapper + /// @param name The name of the swapchain /// @param surface The surface /// @param width The swapchain image width /// @param height The swapchain image height /// @param vsync_enabled ``true`` if vertical synchronization is enabled - Swapchain(Device &device, VkSurfaceKHR surface, std::uint32_t width, std::uint32_t height, bool vsync_enabled); + Swapchain(Device &device, std::string name, const VkSurfaceKHR surface, const Window &wnd, bool vsync_enabled); Swapchain(const Swapchain &) = delete; Swapchain(Swapchain &&) noexcept; @@ -52,9 +86,7 @@ class Swapchain { /// Call vkAcquireNextImageKHR /// @param timeout (``std::numeric_limits::max()`` by default) /// @exception VulkanException vkAcquireNextImageKHR call failed - /// @return The index of the next image - [[nodiscard]] std::uint32_t - acquire_next_image_index(std::uint64_t timeout = std::numeric_limits::max()); + void acquire_next_image_index(std::uint64_t timeout = std::numeric_limits::max()); /// Choose the composite alpha /// @param request_composite_alpha requested compositing flag @@ -72,7 +104,8 @@ class Swapchain { /// @param current_extent The current extent /// 
@return The chosen swapchain image extent [[nodiscard]] static VkExtent2D choose_image_extent(const VkExtent2D &requested_extent, - const VkExtent2D &min_extent, const VkExtent2D &max_extent, + const VkExtent2D &min_extent, + const VkExtent2D &max_extent, const VkExtent2D ¤t_extent); /// Choose the present mode @@ -84,7 +117,8 @@ class Swapchain { /// @note If none of the ``present_mode_priority_list`` are supported, ``VK_PRESENT_MODE_FIFO_KHR`` will be returned [[nodiscard]] static VkPresentModeKHR choose_present_mode(const std::vector &available_present_modes, - const std::vector &present_mode_priority_list, bool vsync_enabled); + const std::vector &present_mode_priority_list, + bool vsync_enabled); /// Choose a surface format /// @param available_formats The available surface formats @@ -99,10 +133,6 @@ class Swapchain { return m_extent; } - [[nodiscard]] const VkSemaphore *image_available_semaphore() const { - return m_img_available->semaphore(); - } - [[nodiscard]] std::uint32_t image_count() const { return static_cast(m_imgs.size()); } @@ -111,14 +141,17 @@ class Swapchain { return m_surface_format.value().format; } - [[nodiscard]] const std::vector &image_views() const { - return m_img_views; - } + /// Change the image layout with a pipeline barrier to prepare for rendering + /// @param cmd_buf The command buffer used for recording + void change_image_layout_to_prepare_for_rendering(const CommandBuffer &cmd_buf); - /// Call vkQueuePresentKHR - /// @param img_index The image index + /// Change the image layout with a pipeline barrier to prepare to call vkQueuePresentKHR + /// @param cmd_buf The command buffer used for recording + void change_image_layout_to_prepare_for_presenting(const CommandBuffer &cmd_buf); + + /// Call vkQueuePresentKHR with the current image index /// @exception VulkanException vkQueuePresentKHR call failed - void present(std::uint32_t img_index); + void present(); /// Setup the swapchain /// @param width The width of the swapchain images @@ -126,11 +159,7 @@ class Swapchain { /// @param vsync_enabled ``true`` if vertical synchronization is enabled /// @exception VulkanException vkCreateSwapchainKHR call failed /// @exception VulkanException vkGetPhysicalDeviceSurfaceSupportKHR call failed - void setup_swapchain(std::uint32_t width, std::uint32_t height, bool vsync_enabled); - - [[nodiscard]] const VkSwapchainKHR *swapchain() const { - return &m_swapchain; - } + void setup(std::uint32_t width, std::uint32_t height, bool vsync_enabled); }; } // namespace inexor::vulkan_renderer::wrapper diff --git a/include/inexor/vulkan-renderer/wrapper/synchronization/fence.hpp b/include/inexor/vulkan-renderer/wrapper/synchronization/fence.hpp new file mode 100644 index 000000000..85d2e5582 --- /dev/null +++ b/include/inexor/vulkan-renderer/wrapper/synchronization/fence.hpp @@ -0,0 +1,62 @@ +#pragma once + +#include + +#include +#include +#include + +namespace inexor::vulkan_renderer::wrapper { +// Forward declaration +class Device; +} // namespace inexor::vulkan_renderer::wrapper + +namespace inexor::vulkan_renderer::wrapper::commands { +// Forward declaration +class CommandBuffer; +} // namespace inexor::vulkan_renderer::wrapper::commands + +namespace inexor::vulkan_renderer::wrapper::synchronization { + +// Using declaration +using wrapper::Device; +using wrapper::commands::CommandBuffer; + +/// A RAII wrapper for VkFence +class Fence { + friend class CommandBuffer; + +private: + const Device &m_device; + std::string m_name; + VkFence m_fence{VK_NULL_HANDLE}; + +public: + 
/// Default constructor + /// @param device The device wrapper + /// @param name The internal debug name of the Vulkan object + /// @param in_signaled_state ``true`` if the VkFence will be constructed in signaled state + /// @warning Make sure to specify in_signaled_state correctly as needed to avoid synchronization problems! + Fence(const Device &device, const std::string &name, bool in_signaled_state); + + Fence(const Fence &) = delete; + Fence(Fence &&) noexcept; + ~Fence(); + + Fence &operator=(const Fence &) = delete; + Fence &operator=(Fence &&) = delete; + + /// Call vkCmdWaitForFences + /// @param timeout_limit The time to wait in milliseconds (numeric limit by default) + void wait(std::uint64_t timeout_limit = std::numeric_limits::max()) const; + + /// Call vkResetFences + /// @note This is deliberately called ``reset_fences`` and not ``reset`` because ``reset`` is very easy to confuse + /// this with the reset method a smart pointer itself, which could end up in horrible bugs. + void reset_fence() const; + + /// Call vkGetFenceStatus + [[nodiscard]] VkResult status() const; +}; + +} // namespace inexor::vulkan_renderer::wrapper::synchronization diff --git a/include/inexor/vulkan-renderer/wrapper/semaphore.hpp b/include/inexor/vulkan-renderer/wrapper/synchronization/semaphore.hpp similarity index 62% rename from include/inexor/vulkan-renderer/wrapper/semaphore.hpp rename to include/inexor/vulkan-renderer/wrapper/synchronization/semaphore.hpp index daf0b5434..326faacb2 100644 --- a/include/inexor/vulkan-renderer/wrapper/semaphore.hpp +++ b/include/inexor/vulkan-renderer/wrapper/synchronization/semaphore.hpp @@ -5,20 +5,36 @@ #include namespace inexor::vulkan_renderer::wrapper { - // Forward declaration class Device; +class Swapchain; +} // namespace inexor::vulkan_renderer::wrapper + +namespace inexor::vulkan_renderer::render_graph { +// Forward declaration +class RenderGraph; +} // namespace inexor::vulkan_renderer::render_graph + +namespace inexor::vulkan_renderer::wrapper::synchronization { + +// Using declaration +using render_graph::RenderGraph; +using wrapper::Swapchain; /// RAII wrapper class for VkSemaphore class Semaphore { + friend class RenderGraph; + friend class Swapchain; + +private: const Device &m_device; VkSemaphore m_semaphore{VK_NULL_HANDLE}; std::string m_name; public: /// Default constructor - /// @param device The const reference to a device RAII wrapper instance. - /// @param name The internal debug marker name of the VkSemaphore. 
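A hypothetical usage sketch for the Fence wrapper above; "device" is assumed to be an existing Device wrapper, and note that the VkFence handle stays private, so only the CommandBuffer friend class actually submits with it:

    using inexor::vulkan_renderer::wrapper::synchronization::Fence;

    Fence upload_finished(device, "Octree upload finished", false); // deliberately created in unsignaled state

    // ... a CommandBuffer submits work that signals this fence ...

    upload_finished.wait();            // blocks until the fence is signaled (timeout defaults to the numeric limit)
    if (upload_finished.status() == VK_SUCCESS) { // VK_SUCCESS means the fence is signaled
        upload_finished.reset_fence(); // back to unsignaled state so it can be reused next frame
    }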
+ /// @param device The const reference to a device RAII wrapper instance + /// @param name The internal debug marker name of the VkSemaphore Semaphore(const Device &device, const std::string &name); Semaphore(const Semaphore &) = delete; Semaphore(Semaphore &&) noexcept; @@ -26,10 +42,6 @@ class Semaphore { Semaphore &operator=(const Semaphore &) = delete; Semaphore &operator=(Semaphore &&) = delete; - - [[nodiscard]] const VkSemaphore *semaphore() const { - return &m_semaphore; - } }; -} // namespace inexor::vulkan_renderer::wrapper +} // namespace inexor::vulkan_renderer::wrapper::synchronization diff --git a/include/inexor/vulkan-renderer/wrapper/uniform_buffer.hpp b/include/inexor/vulkan-renderer/wrapper/uniform_buffer.hpp deleted file mode 100644 index 71f635ff5..000000000 --- a/include/inexor/vulkan-renderer/wrapper/uniform_buffer.hpp +++ /dev/null @@ -1,34 +0,0 @@ -#pragma once - -#include "inexor/vulkan-renderer/wrapper/gpu_memory_buffer.hpp" - -namespace inexor::vulkan_renderer::wrapper { - -// Forward declaration -class Device; - -/// @brief RAII wrapper class for uniform buffers. -class UniformBuffer : public GPUMemoryBuffer { -public: - /// @brief Default constructor. - /// @param device The const reference to a device RAII wrapper instance. - /// @param name The internal debug marker name of the uniform buffer. - /// @param size The size of the uniform buffer. - /// @todo Add overloaded constructor which directly accepts the uniform buffer data. - UniformBuffer(const Device &device, const std::string &name, const VkDeviceSize &size); - - UniformBuffer(const UniformBuffer &) = delete; - UniformBuffer(UniformBuffer &&) noexcept; - - ~UniformBuffer() override = default; - - UniformBuffer &operator=(const UniformBuffer &) = delete; - UniformBuffer &operator=(UniformBuffer &&) = delete; - - /// @brief Update uniform buffer data. - /// @param data A pointer to the uniform buffer data. - /// @param size The size of the uniform buffer memory to copy. - void update(void *data, std::size_t size); -}; - -} // namespace inexor::vulkan_renderer::wrapper diff --git a/include/inexor/vulkan-renderer/wrapper/window.hpp b/include/inexor/vulkan-renderer/wrapper/window.hpp index 1bec6eba8..f4d951efc 100644 --- a/include/inexor/vulkan-renderer/wrapper/window.hpp +++ b/include/inexor/vulkan-renderer/wrapper/window.hpp @@ -2,12 +2,14 @@ #include +#include + #include #include namespace inexor::vulkan_renderer::wrapper { -/// @brief RAII wrapper class for GLFW windows. +/// RAII wrapper class for GLFW windows and VkSurfaceKHR class Window { public: enum class Mode { WINDOWED, FULLSCREEN, WINDOWED_FULLSCREEN }; @@ -19,14 +21,19 @@ class Window { GLFWwindow *m_window{nullptr}; public: - /// @brief Default constructor. + /// Default constructor /// @param title The title of the window. This will be displayed in the window bar. /// @param width The width of the window. /// @param height The height of the window. /// @param visible True if the window is visible after creation, false otherwise. /// @param resizable True if the window should be resizable, false otherwise. 
- Window(const std::string &title, std::uint32_t width, std::uint32_t height, bool visible, bool resizable, + Window(const std::string &title, + std::uint32_t width, + std::uint32_t height, + bool visible, + bool resizable, Mode mode); + Window(const Window &) = delete; Window(Window &&) = delete; ~Window(); @@ -34,51 +41,52 @@ class Window { Window &operator=(const Window &) = delete; Window &operator=(Window &&) = delete; - /// @brief In case the window has been minimized, process events until it has been restored. - void wait_for_focus(); + /// Get the framebuffer size + /// @param width [out] The width of the framebuffer + /// @param height [out] The height of the framebuffer + void get_framebuffer_size(int *width, int *height); - /// @brief Change the window title. + /// Change the window title /// @param title The new title of the window. void set_title(const std::string &title); - /// @brief Set the GLFW window user pointer. + /// Set the GLFW window user pointer /// @param user_ptr The window user pointer. // @note Since GLFW is a C-style API, we can't use a class method as callback for window resize. void set_user_ptr(void *user_ptr); - /// @brief Set up the window resize callback. + /// Set up the window resize callback /// @param frame_buffer_resize_callback The window resize callback. void set_resize_callback(GLFWframebuffersizefun frame_buffer_resize_callback); - /// @brief Call glfwSetKeyCallback. + /// Call glfwSetKeyCallback /// @param key_input_callback The keyboard input callback. void set_keyboard_button_callback(GLFWkeyfun keyboard_button_callback); - /// @brief Call glfwSetCursorPosCallback. + /// Call glfwSetCursorPosCallback /// @param cursor_pos_callback They cursor position callback. void set_cursor_position_callback(GLFWcursorposfun cursor_pos_callback); - /// @brief Call glfwSetMouseButtonCallback. + /// Call glfwSetMouseButtonCallback /// @param mouse_button_callback The mouse button callback. void set_mouse_button_callback(GLFWmousebuttonfun mouse_button_callback); - /// @brief Call glfwSetScrollCallback. + /// Call glfwSetScrollCallback /// @param mouse_scroll_callback The mouse scroll callback. void set_mouse_scroll_callback(GLFWscrollfun mouse_scroll_callback); - /// @brief Call glfwShowWindow. + /// Call glfwShowWindow void show(); - /// @brief Call glfwPollEvents. + /// Call glfwPollEvents static void poll(); - /// @brief Check if the window is about to close. + /// Check if the window is about to close. /// @return ``true`` if the window will be closed. bool should_close(); - [[nodiscard]] GLFWwindow *get() const { - return m_window; - } + /// In case the window has been minimized, process events until it has been restored. + void wait_for_focus(); [[nodiscard]] std::uint32_t width() const { return m_width; @@ -91,6 +99,10 @@ class Window { [[nodiscard]] Mode mode() const { return m_mode; } + + [[nodiscard]] GLFWwindow *window() const { + return m_window; + } }; } // namespace inexor::vulkan_renderer::wrapper diff --git a/include/inexor/vulkan-renderer/wrapper/window_surface.hpp b/include/inexor/vulkan-renderer/wrapper/window_surface.hpp deleted file mode 100644 index c724058ff..000000000 --- a/include/inexor/vulkan-renderer/wrapper/window_surface.hpp +++ /dev/null @@ -1,32 +0,0 @@ -#pragma once - -#include -#include - -namespace inexor::vulkan_renderer::wrapper { - -/// @brief RAII wrapper class for VkSurfaceKHR. 
-class WindowSurface { - VkInstance m_instance{VK_NULL_HANDLE}; - VkSurfaceKHR m_surface{VK_NULL_HANDLE}; - -public: - /// @brief Default constructor. - /// @param instance The Vulkan instance which will be associated with this surface. - /// @param window The window which will be associated with this surface. - WindowSurface(VkInstance instance, GLFWwindow *window); - - WindowSurface(const WindowSurface &) = delete; - WindowSurface(WindowSurface &&) noexcept; - - ~WindowSurface(); - - WindowSurface &operator=(const WindowSurface &) = delete; - WindowSurface &operator=(WindowSurface &&) = default; - - [[nodiscard]] VkSurfaceKHR get() const { - return m_surface; - } -}; - -} // namespace inexor::vulkan_renderer::wrapper diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index c9b853113..cc4ed0ff5 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -4,43 +4,56 @@ set(INEXOR_SOURCE_FILES vulkan-renderer/camera.cpp vulkan-renderer/exception.cpp vulkan-renderer/fps_counter.cpp - vulkan-renderer/imgui.cpp - vulkan-renderer/render_graph.cpp - vulkan-renderer/renderer.cpp vulkan-renderer/time_step.cpp vulkan-renderer/input/keyboard_mouse_data.cpp vulkan-renderer/io/byte_stream.cpp + vulkan-renderer/io/io_exception.cpp vulkan-renderer/io/nxoc_parser.cpp + vulkan-renderer/io/octree_parser.cpp + + vulkan-renderer/renderers/imgui.cpp + + vulkan-renderer/render-graph/buffer.cpp + vulkan-renderer/render-graph/graphics_pass.cpp + vulkan-renderer/render-graph/graphics_pass_builder.cpp + vulkan-renderer/render-graph/image.cpp + vulkan-renderer/render-graph/render_graph.cpp + vulkan-renderer/render-graph/texture.cpp vulkan-renderer/tools/cla_parser.cpp - vulkan-renderer/tools/file.cpp vulkan-renderer/vk_tools/device_info.cpp vulkan-renderer/vk_tools/enumerate.cpp - vulkan-renderer/vk_tools/gpu_info.cpp vulkan-renderer/vk_tools/representation.cpp - vulkan-renderer/wrapper/command_buffer.cpp - vulkan-renderer/wrapper/command_pool.cpp - vulkan-renderer/wrapper/cpu_texture.cpp - vulkan-renderer/wrapper/descriptor.cpp - vulkan-renderer/wrapper/descriptor_builder.cpp vulkan-renderer/wrapper/device.cpp - vulkan-renderer/wrapper/fence.cpp - vulkan-renderer/wrapper/framebuffer.cpp - vulkan-renderer/wrapper/gpu_memory_buffer.cpp - vulkan-renderer/wrapper/gpu_texture.cpp - vulkan-renderer/wrapper/image.cpp vulkan-renderer/wrapper/instance.cpp vulkan-renderer/wrapper/make_info.cpp - vulkan-renderer/wrapper/semaphore.cpp + vulkan-renderer/wrapper/sampler.cpp vulkan-renderer/wrapper/shader.cpp vulkan-renderer/wrapper/swapchain.cpp - vulkan-renderer/wrapper/uniform_buffer.cpp + vulkan-renderer/wrapper/surface.cpp vulkan-renderer/wrapper/window.cpp - vulkan-renderer/wrapper/window_surface.cpp + + vulkan-renderer/wrapper/commands/command_buffer.cpp + vulkan-renderer/wrapper/commands/command_pool.cpp + + vulkan-renderer/wrapper/descriptors/descriptor_pool.cpp + vulkan-renderer/wrapper/descriptors/descriptor_pool_allocator.cpp + vulkan-renderer/wrapper/descriptors/descriptor_set_allocator.cpp + vulkan-renderer/wrapper/descriptors/descriptor_set_layout.cpp + vulkan-renderer/wrapper/descriptors/descriptor_set_layout_builder.cpp + vulkan-renderer/wrapper/descriptors/descriptor_set_layout_cache.cpp + vulkan-renderer/wrapper/descriptors/write_descriptor_set_builder.cpp + + vulkan-renderer/wrapper/pipelines/pipeline.cpp + vulkan-renderer/wrapper/pipelines/pipeline_builder.cpp + vulkan-renderer/wrapper/pipelines/pipeline_layout.cpp + + vulkan-renderer/wrapper/synchronization/fence.cpp + 
vulkan-renderer/wrapper/synchronization/semaphore.cpp vulkan-renderer/world/collision.cpp vulkan-renderer/world/collision_query.cpp diff --git a/src/vulkan-renderer/application.cpp b/src/vulkan-renderer/application.cpp index 26e2dbfd9..7414b080c 100644 --- a/src/vulkan-renderer/application.cpp +++ b/src/vulkan-renderer/application.cpp @@ -3,15 +3,20 @@ #include "inexor/vulkan-renderer/exception.hpp" #include "inexor/vulkan-renderer/meta.hpp" #include "inexor/vulkan-renderer/octree_gpu_vertex.hpp" +#include "inexor/vulkan-renderer/render-graph/graphics_pass_builder.hpp" #include "inexor/vulkan-renderer/standard_ubo.hpp" #include "inexor/vulkan-renderer/tools/cla_parser.hpp" #include "inexor/vulkan-renderer/vk_tools/enumerate.hpp" #include "inexor/vulkan-renderer/world/collision.hpp" -#include "inexor/vulkan-renderer/wrapper/cpu_texture.hpp" -#include "inexor/vulkan-renderer/wrapper/descriptor_builder.hpp" +#include "inexor/vulkan-renderer/wrapper/descriptors/descriptor_set_layout_builder.hpp" #include "inexor/vulkan-renderer/wrapper/instance.hpp" +#include "inexor/vulkan-renderer/wrapper/pipelines/pipeline_layout.hpp" #include +#include +#include +#include +#include #include #include @@ -19,20 +24,145 @@ namespace inexor::vulkan_renderer { -void Application::key_callback(GLFWwindow * /*window*/, int key, int, int action, int /*mods*/) { - if (key < 0 || key > GLFW_KEY_LAST) { - return; +VKAPI_ATTR VkBool32 VKAPI_CALL debug_messenger_callback(const VkDebugUtilsMessageSeverityFlagBitsEXT severity, + const VkDebugUtilsMessageTypeFlagsEXT type, + const VkDebugUtilsMessengerCallbackDataEXT *data, + void *user_data) { + // Validation layers get their own logger + std::shared_ptr m_validation_log{spdlog::default_logger()->clone("validation")}; + + if (severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT) { + m_validation_log->trace("{}", data->pMessage); + } else if (severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT) { + m_validation_log->info("{}", data->pMessage); + } else if (severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT) { + m_validation_log->warn("{}", data->pMessage); + } else if (severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) { + m_validation_log->critical("{}", data->pMessage); + } + // TODO: Implement stop on validation error + return false; +} + +Application::Application(int argc, char **argv) { + initialize_spdlog(); + + spdlog::trace("Initialising vulkan-renderer"); + + tools::CommandLineArgumentParser cla_parser; + cla_parser.parse_args(argc, argv); + + spdlog::trace("Application version: {}.{}.{}", APP_VERSION[0], APP_VERSION[1], APP_VERSION[2]); + spdlog::trace("Engine version: {}.{}.{}", ENGINE_VERSION[0], ENGINE_VERSION[1], ENGINE_VERSION[2]); + + load_toml_configuration_file("configuration/renderer.toml"); + + spdlog::trace("Creating Vulkan instance"); + + m_window = std::make_unique(m_wnd_title, m_wnd_width, m_wnd_height, true, true, m_wnd_mode); + + // Management of VkDebugUtilsMessengerCallbackDataEXT is part of the instance wrapper class + m_instance = std::make_unique( + APP_NAME, ENGINE_NAME, VK_MAKE_API_VERSION(0, APP_VERSION[0], APP_VERSION[1], APP_VERSION[2]), + VK_MAKE_API_VERSION(0, ENGINE_VERSION[0], ENGINE_VERSION[1], ENGINE_VERSION[2]), debug_messenger_callback); + + m_surface = std::make_shared(m_instance->instance(), m_window->window()); + + m_input_data = std::make_unique(); + + setup_window_and_input_callbacks(); + + spdlog::trace("Creating window surface"); + + // The user can specify with "--gpu " which graphics card to 
prefer (index starts from 0) + auto preferred_graphics_card = cla_parser.arg("--gpu"); + if (preferred_graphics_card) { + spdlog::trace("Preferential gpu index {} specified", *preferred_graphics_card); + } else { + spdlog::trace("No user preferred gpu index specified"); } - switch (action) { - case GLFW_PRESS: - m_input_data->press_key(key); - break; - case GLFW_RELEASE: - m_input_data->release_key(key); - break; - default: - break; + // If the user specified command line argument "--vsync", the presentation engine waits + // for the next vertical blanking period to update the current image. + const auto enable_vertical_synchronisation = cla_parser.arg("--vsync"); + if (enable_vertical_synchronisation.value_or(false)) { + spdlog::trace("V-sync enabled!"); + m_vsync_enabled = true; + } else { + spdlog::trace("V-sync disabled!"); + } + + const auto physical_devices = vk_tools::get_physical_devices(m_instance->instance()); + if (preferred_graphics_card && *preferred_graphics_card >= physical_devices.size()) { + spdlog::critical("GPU index {} out of range!", *preferred_graphics_card); + + // TODO: Do not throw an exception + throw std::runtime_error("Invalid GPU index"); + } + + const VkPhysicalDeviceFeatures required_features{ + // Add required features here + }; + + std::vector required_extensions{ + VK_KHR_SWAPCHAIN_EXTENSION_NAME, // VK_KHR_swapchain + }; + + const VkPhysicalDeviceFeatures optional_features{ + .sampleRateShading = VK_TRUE, + .samplerAnisotropy = VK_TRUE, + }; + + std::vector optional_extensions{ + // Add desired optional extensions here! + }; + + const VkPhysicalDevice physical_device = + preferred_graphics_card ? physical_devices[*preferred_graphics_card] + : Device::pick_best_physical_device(*m_instance, m_surface->surface(), + required_features, required_extensions); + + // TODO: Implement on_extension_unavailable and on_feature_unavailable callback + m_device = std::make_unique(*m_instance, m_surface->surface(), physical_device, required_extensions, + required_features, optional_extensions, optional_features); + + m_swapchain = + std::make_unique(*m_device, "Default Swapchain", m_surface->surface(), *m_window, m_vsync_enabled); + + load_octree_geometry(true); + generate_octree_indices(); + + m_camera = std::make_unique(glm::vec3(6.0f, 10.0f, 2.0f), 180.0f, 0.0f, + static_cast(m_window->width()), static_cast(m_window->height())); + + m_camera->set_movement_speed(5.0f); + m_camera->set_rotation_speed(0.5f); + m_window->show(); + recreate_swapchain(); +} + +Application::~Application() { + spdlog::trace("Shutting down vulkan renderer"); +} + +void Application::check_octree_collisions() { + // Check for collision between camera ray and every octree + for (const auto &world : m_worlds) { + const auto collision = ray_cube_collision_check(*world, m_camera->position(), m_camera->front()); + + if (collision) { + const auto intersection = collision.value().intersection(); + const auto face_normal = collision.value().face(); + const auto corner = collision.value().corner(); + const auto edge = collision.value().edge(); + + spdlog::trace("pos {} {} {} | face {} {} {} | corner {} {} {} | edge {} {} {}", intersection.x, + intersection.y, intersection.z, face_normal.x, face_normal.y, face_normal.z, corner.x, + corner.y, corner.z, edge.x, edge.y, edge.z); + + // Break after one collision. 
+ break; + } } } @@ -40,27 +170,59 @@ void Application::cursor_position_callback(GLFWwindow * /*window*/, double x_pos m_input_data->set_cursor_pos(x_pos, y_pos); } -void Application::mouse_button_callback(GLFWwindow * /*window*/, int button, int action, int /*mods*/) { - if (button < 0 || button > GLFW_MOUSE_BUTTON_LAST) { +void Application::generate_octree_indices() { + auto old_vertices = std::move(m_octree_vertices); + m_octree_indices.clear(); + m_octree_vertices.clear(); + std::unordered_map vertex_map; + for (auto &vertex : old_vertices) { + // TODO: Use std::unordered_map::contains() when we switch to C++ 20. + if (vertex_map.count(vertex) == 0) { + assert(vertex_map.size() < std::numeric_limits::max() && "Octree too big!"); + vertex_map.emplace(vertex, static_cast(vertex_map.size())); + m_octree_vertices.push_back(vertex); + } + m_octree_indices.push_back(vertex_map.at(vertex)); + } + spdlog::trace("Reduced octree by {} vertices (from {} to {})", old_vertices.size() - m_octree_vertices.size(), + old_vertices.size(), m_octree_vertices.size()); + spdlog::trace("Total indices {} ", m_octree_indices.size()); +} + +void Application::initialize_spdlog() { + spdlog::init_thread_pool(8192, 2); + + auto console_sink = std::make_shared(); + auto file_sink = std::make_shared("vulkan-renderer.log", true); + auto vulkan_renderer_log = + std::make_shared("vulkan-renderer", spdlog::sinks_init_list{console_sink, file_sink}, + spdlog::thread_pool(), spdlog::async_overflow_policy::block); + vulkan_renderer_log->set_level(spdlog::level::trace); + vulkan_renderer_log->set_pattern("%Y-%m-%d %T.%f %^%l%$ %5t [%-10n] %v"); + vulkan_renderer_log->flush_on(spdlog::level::trace); + + spdlog::set_default_logger(vulkan_renderer_log); + + spdlog::trace("Inexor vulkan-renderer, BUILD " + std::string(__DATE__) + ", " + __TIME__); +} + +void Application::key_callback(GLFWwindow * /*window*/, int key, int, int action, int /*mods*/) { + if (key < 0 || key > GLFW_KEY_LAST) { return; } switch (action) { case GLFW_PRESS: - m_input_data->press_mouse_button(button); + m_input_data->press_key(key); break; case GLFW_RELEASE: - m_input_data->release_mouse_button(button); + m_input_data->release_key(key); break; default: break; } } -void Application::mouse_scroll_callback(GLFWwindow * /*window*/, double /*x_offset*/, double y_offset) { - m_camera->change_zoom(static_cast(y_offset)); -} - void Application::load_toml_configuration_file(const std::string &file_name) { spdlog::trace("Loading TOML configuration file: {}", file_name); @@ -83,28 +245,20 @@ void Application::load_toml_configuration_file(const std::string &file_name) { using WindowMode = ::inexor::vulkan_renderer::wrapper::Window::Mode; const auto &wmodestr = toml::find(renderer_configuration, "application", "window", "mode"); if (wmodestr == "windowed") { - m_window_mode = WindowMode::WINDOWED; + m_wnd_mode = WindowMode::WINDOWED; } else if (wmodestr == "windowed_fullscreen") { - m_window_mode = WindowMode::WINDOWED_FULLSCREEN; + m_wnd_mode = WindowMode::WINDOWED_FULLSCREEN; } else if (wmodestr == "fullscreen") { - m_window_mode = WindowMode::FULLSCREEN; + m_wnd_mode = WindowMode::FULLSCREEN; } else { spdlog::warn("Invalid application window mode: {}", wmodestr); - m_window_mode = WindowMode::WINDOWED; + m_wnd_mode = WindowMode::WINDOWED; } - m_window_width = toml::find(renderer_configuration, "application", "window", "width"); - m_window_height = toml::find(renderer_configuration, "application", "window", "height"); - m_window_title = 
toml::find(renderer_configuration, "application", "window", "name"); - spdlog::trace("Window: {}, {} x {}", m_window_title, m_window_width, m_window_height); - - m_texture_files = toml::find>(renderer_configuration, "textures", "files"); - - spdlog::trace("Textures:"); - - for (const auto &texture_file : m_texture_files) { - spdlog::trace(" - {}", texture_file); - } + m_wnd_width = toml::find(renderer_configuration, "application", "window", "width"); + m_wnd_height = toml::find(renderer_configuration, "application", "window", "height"); + m_wnd_title = toml::find(renderer_configuration, "application", "window", "name"); + spdlog::trace("Window: {}, {} x {}", m_wnd_title, m_wnd_width, m_wnd_height); m_gltf_model_files = toml::find>(renderer_configuration, "glTFmodels", "files"); @@ -113,108 +267,269 @@ void Application::load_toml_configuration_file(const std::string &file_name) { for (const auto &gltf_model_file : m_gltf_model_files) { spdlog::trace(" - {}", gltf_model_file); } +} - m_vertex_shader_files = toml::find>(renderer_configuration, "shaders", "vertex", "files"); +void Application::load_octree_geometry(bool initialize) { + spdlog::trace("Creating octree geometry"); - spdlog::trace("Vertex shaders:"); + // 4: 23 012 | 5: 184352 | 6: 1474162 | 7: 11792978 cubes, DO NOT USE 7! + m_worlds.clear(); + m_worlds.push_back( + world::create_random_world(2, {0.0f, 0.0f, 0.0f}, initialize ? std::optional(42) : std::nullopt)); + m_worlds.push_back( + world::create_random_world(2, {10.0f, 0.0f, 0.0f}, initialize ? std::optional(60) : std::nullopt)); - for (const auto &vertex_shader_file : m_vertex_shader_files) { - spdlog::trace(" - {}", vertex_shader_file); + m_octree_vertices.clear(); + for (const auto &world : m_worlds) { + for (const auto &polygons : world->polygons(true)) { + for (const auto &triangle : *polygons) { + for (const auto &vertex : triangle) { + glm::vec3 color = { + static_cast(rand()) / static_cast(RAND_MAX), + static_cast(rand()) / static_cast(RAND_MAX), + static_cast(rand()) / static_cast(RAND_MAX), + }; + m_octree_vertices.emplace_back(vertex, color); + } + } + } } +} - m_fragment_shader_files = - toml::find>(renderer_configuration, "shaders", "fragment", "files"); - - spdlog::trace("Fragment shaders:"); - - for (const auto &fragment_shader_file : m_fragment_shader_files) { - spdlog::trace(" - {}", fragment_shader_file); +void Application::mouse_button_callback(GLFWwindow * /*window*/, int button, int action, int /*mods*/) { + if (button < 0 || button > GLFW_MOUSE_BUTTON_LAST) { + return; + } + switch (action) { + case GLFW_PRESS: + m_input_data->press_mouse_button(button); + break; + case GLFW_RELEASE: + m_input_data->release_mouse_button(button); + break; + default: + break; } - - // TODO: Load more info from TOML file. } -void Application::load_textures() { - assert(m_device->device()); - assert(m_device->physical_device()); - assert(m_device->allocator()); - - // Insert the new texture into the list of textures. 
- std::string texture_name = "unnamed texture"; +void Application::mouse_scroll_callback(GLFWwindow * /*window*/, double /*x_offset*/, double y_offset) { + m_camera->change_zoom(static_cast(y_offset)); +} - spdlog::trace("Loading texture files:"); +void Application::process_keyboard_input() {} - for (const auto &texture_file : m_texture_files) { - spdlog::trace(" - {}", texture_file); +void Application::process_mouse_input() { + const auto cursor_pos_delta = m_input_data->calculate_cursor_position_delta(); - wrapper::CpuTexture cpu_texture(texture_file, texture_name); - m_textures.emplace_back(*m_device, cpu_texture); + if (m_camera->type() == CameraType::LOOK_AT && m_input_data->is_mouse_button_pressed(GLFW_MOUSE_BUTTON_LEFT)) { + m_camera->rotate(static_cast(cursor_pos_delta[0]), -static_cast(cursor_pos_delta[1])); } + + m_camera->set_movement_state(CameraMovement::FORWARD, m_input_data->is_key_pressed(GLFW_KEY_W)); + m_camera->set_movement_state(CameraMovement::LEFT, m_input_data->is_key_pressed(GLFW_KEY_A)); + m_camera->set_movement_state(CameraMovement::BACKWARD, m_input_data->is_key_pressed(GLFW_KEY_S)); + m_camera->set_movement_state(CameraMovement::RIGHT, m_input_data->is_key_pressed(GLFW_KEY_D)); } -void Application::load_shaders() { - assert(m_device->device()); +void Application::recreate_swapchain() { + m_window->wait_for_focus(); + m_device->wait_idle(); - spdlog::trace("Loading vertex shaders:"); + // Query the framebuffer size here again although the window width is set during framebuffer resize callback + // The reason for this is that the framebuffer size could already be different again because we missed a poll + // This seems to be an issue on Linux only though + int wnd_width = 0; + int wnd_height = 0; + m_window->get_framebuffer_size(&wnd_width, &wnd_height); - if (m_vertex_shader_files.empty()) { - spdlog::error("No vertex shaders to load!"); - } + m_swapchain->setup(wnd_width, wnd_height, m_vsync_enabled); + m_render_graph = std::make_unique(*m_device); + setup_render_graph(); - // Loop through the list of vertex shaders and initialise all of them. - for (const auto &vertex_shader_file : m_vertex_shader_files) { - spdlog::trace(" - {}", vertex_shader_file); + m_camera->set_aspect_ratio(wnd_width, wnd_height); +} - // Insert the new shader into the list of shaders. - m_shaders.emplace_back(*m_device, VK_SHADER_STAGE_VERTEX_BIT, "unnamed vertex shader", vertex_shader_file); +void Application::render_frame() { + if (m_wnd_resized) { + m_wnd_resized = false; + recreate_swapchain(); + return; } - spdlog::trace("Loading fragment shaders:"); + m_swapchain->acquire_next_image_index(); - if (m_fragment_shader_files.empty()) { - spdlog::error("No fragment shaders to load!"); - } + m_render_graph->render(); - // Loop through the list of fragment shaders and initialise all of them. - for (const auto &fragment_shader_file : m_fragment_shader_files) { - spdlog::trace(" - {}", fragment_shader_file); + m_swapchain->present(); - // Insert the new shader into the list of shaders. 
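For context, the per-frame sequence that render_frame() above and the reworked Swapchain API imply, as a sketch using the method names from swapchain.hpp; where exactly the layout transitions are recorded is an assumption here, since the render graph presumably issues them internally while recording:

    swapchain.acquire_next_image_index();                              // image index is now stored inside the wrapper
    // ... command buffer recording starts ...
    swapchain.change_image_layout_to_prepare_for_rendering(cmd_buf);   // barrier into a color attachment layout
    // ... graphics passes draw into the swapchain image ...
    swapchain.change_image_layout_to_prepare_for_presenting(cmd_buf);  // barrier into a presentable layout
    // ... command buffer is submitted ...
    swapchain.present();                                               // vkQueuePresentKHR with the stored index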
- m_shaders.emplace_back(*m_device, VK_SHADER_STAGE_FRAGMENT_BIT, "unnamed fragment shader", - fragment_shader_file); + if (auto fps_value = m_fps_counter.update()) { + m_window->set_title("Inexor Vulkan API renderer demo - " + std::to_string(*fps_value) + " FPS"); + spdlog::trace("FPS: {}, window size: {} x {}", *fps_value, m_window->width(), m_window->height()); } +} + +void Application::run() { + spdlog::trace("Running Application"); - spdlog::trace("Loading shaders finished"); + while (!m_window->should_close()) { + m_window->poll(); + process_keyboard_input(); + process_mouse_input(); + m_camera->update(m_time_passed); + m_time_passed = m_stopwatch.time_step(); + check_octree_collisions(); + render_frame(); + } } -void Application::load_octree_geometry(bool initialize) { - spdlog::trace("Creating octree geometry"); +void Application::setup_render_graph() { + const auto swapchain_extent = m_swapchain->extent(); - // 4: 23 012 | 5: 184352 | 6: 1474162 | 7: 11792978 cubes, DO NOT USE 7! - m_worlds.clear(); - m_worlds.push_back( - world::create_random_world(2, {0.0f, 0.0f, 0.0f}, initialize ? std::optional(42) : std::nullopt)); - m_worlds.push_back( - world::create_random_world(2, {10.0f, 0.0f, 0.0f}, initialize ? std::optional(60) : std::nullopt)); + m_color_attachment = m_render_graph->add_texture( + "Color", render_graph::TextureUsage::COLOR_ATTACHMENT, m_swapchain->image_format(), swapchain_extent.width, + swapchain_extent.height, 4 /*, m_device->get_max_usable_sample_count() */); - m_octree_vertices.clear(); - for (const auto &world : m_worlds) { - for (const auto &polygons : world->polygons(true)) { - for (const auto &triangle : *polygons) { - for (const auto &vertex : triangle) { - glm::vec3 color = { - static_cast(rand()) / static_cast(RAND_MAX), - static_cast(rand()) / static_cast(RAND_MAX), - static_cast(rand()) / static_cast(RAND_MAX), - }; - m_octree_vertices.emplace_back(vertex, color); - } - } + m_depth_attachment = m_render_graph->add_texture( + "Depth", render_graph::TextureUsage::DEPTH_ATTACHMENT, VK_FORMAT_D32_SFLOAT_S8_UINT, swapchain_extent.width, + swapchain_extent.height, 4 /*, m_device->get_max_usable_sample_count()*/); + + m_vertex_buffer = m_render_graph->add_buffer("Octree|Vertex", render_graph::BufferType::VERTEX_BUFFER, [&]() { + // If the key N was pressed once, generate a new octree + if (m_input_data->was_key_pressed_once(GLFW_KEY_N)) { + load_octree_geometry(false); + generate_octree_indices(); } - } + // Request update of the octree vertex buffer + m_vertex_buffer.lock()->request_update(m_octree_vertices); + }); + + m_octree_vert = std::make_shared(*m_device, "Octree|Vert", VK_SHADER_STAGE_VERTEX_BIT, + "shaders/main.vert.spv"); + m_octree_frag = std::make_shared(*m_device, "Octree|Frag", VK_SHADER_STAGE_FRAGMENT_BIT, + "shaders/main.frag.spv"); + + // Note that the index buffer is updated together with the vertex buffer to keep data consistent + // This means for m_index_buffer, on_init and on_update are defaulted to std::nullopt here! + m_index_buffer = m_render_graph->add_buffer("Octree|Index", render_graph::BufferType::INDEX_BUFFER, [&]() { + // Request update of the octree index buffer + m_index_buffer.lock()->request_update(m_octree_indices); + }); + + // TODO: This must be in the init() method of some OctreeRenderer class in the future! 
+ /// Initialize octree vertices and indices here + load_octree_geometry(false); + generate_octree_indices(); + m_vertex_buffer.lock()->request_update(m_octree_vertices); + m_index_buffer.lock()->request_update(m_octree_indices); + + m_uniform_buffer = m_render_graph->add_buffer("Octree|Uniform", render_graph::BufferType::UNIFORM_BUFFER, [&]() { + m_mvp_matrices.view = m_camera->view_matrix(); + m_mvp_matrices.proj = m_camera->perspective_matrix(); + m_mvp_matrices.proj[1][1] *= -1; + m_uniform_buffer.lock()->request_update(m_mvp_matrices); + }); + + m_render_graph->add_resource_descriptor( + [&](wrapper::descriptors::DescriptorSetLayoutBuilder &builder) { + m_descriptor_set_layout = builder.add_uniform_buffer(VK_SHADER_STAGE_VERTEX_BIT).build("Octree"); + }, + [&](wrapper::descriptors::DescriptorSetAllocator &allocator) { + m_descriptor_set = allocator.allocate("Octree", m_descriptor_set_layout); + }, + [&](wrapper::descriptors::WriteDescriptorSetBuilder &builder) { + return builder.add_uniform_buffer_update(m_descriptor_set, m_uniform_buffer).build(); + }); + + m_render_graph->add_graphics_pipeline([&](wrapper::pipelines::GraphicsPipelineBuilder &builder) { + m_octree_pipeline = builder + .set_vertex_input_bindings({ + { + .binding = 0, + .stride = sizeof(OctreeGpuVertex), + .inputRate = VK_VERTEX_INPUT_RATE_VERTEX, + }, + }) + // TODO: Fix me! + //.set_multisampling(m_device->get_max_usable_sample_count(), 0.25f) + .add_default_color_blend_attachment() + .add_color_attachment_format(m_swapchain->image_format()) + // TODO: Implement m_device->get_available_depth_format() + .set_depth_attachment_format(m_depth_attachment.lock()->format()) + .set_depth_stencil({.depthTestEnable = VK_TRUE, + .depthWriteEnable = VK_TRUE, + .depthCompareOp = VK_COMPARE_OP_LESS_OR_EQUAL, + .back{ + .compareOp = VK_COMPARE_OP_ALWAYS, + }}) + .set_vertex_input_attributes({ + { + .location = 0, + .binding = 0, + .format = VK_FORMAT_R32G32B32_SFLOAT, + .offset = offsetof(OctreeGpuVertex, position), + }, + { + .location = 1, + .binding = 0, + .format = VK_FORMAT_R32G32B32_SFLOAT, + .offset = offsetof(OctreeGpuVertex, color), + }, + }) + .set_viewport(m_swapchain->extent()) + .set_scissor(m_swapchain->extent()) + .set_descriptor_set_layout(m_descriptor_set_layout) + .add_shader(m_octree_vert) + .add_shader(m_octree_frag) + .build("Octree"); + return m_octree_pipeline; + }); + + m_octree_pass = m_render_graph->add_graphics_pass( + m_render_graph + ->get_graphics_pass_builder() + // TODO: Helper function for clear values + .writes_to(m_swapchain, + VkClearValue{ + .color = {1.0f, 1.0f, 1.0f, 1.0f}, + }) + // TODO: Helper function for clear values + .writes_to(m_depth_attachment, + VkClearValue{ + .depthStencil = + VkClearDepthStencilValue{ + .depth = 1.0f, + }, + }) + .set_on_record([&](const wrapper::commands::CommandBuffer &cmd_buf) { + cmd_buf.bind_pipeline(m_octree_pipeline) + .bind_descriptor_set(m_descriptor_set, m_octree_pipeline) + .bind_vertex_buffer(m_vertex_buffer) + .bind_index_buffer(m_index_buffer) + .draw_indexed(static_cast(m_octree_indices.size())); + }) + .build("Octree", render_graph::DebugLabelColor::RED)); + + // TODO: We don't need to recreate the imgui overlay when swapchain is recreated, use a .recreate() method instead? 
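For reference, the vertex layout implied by the offsetof() calls and the two VK_FORMAT_R32G32B32_SFLOAT attributes configured above; the authoritative definition lives in octree_gpu_vertex.hpp, which is not part of this diff and may contain more members:

    #include <glm/vec3.hpp>

    struct OctreeGpuVertex {
        glm::vec3 position; // attribute location 0, VK_FORMAT_R32G32B32_SFLOAT
        glm::vec3 color;    // attribute location 1, VK_FORMAT_R32G32B32_SFLOAT
    };
    // This also matches m_octree_vertices.emplace_back(vertex, color) in load_octree_geometry().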
+ // TODO: Decouple ImGuiRenderer form ImGuiLoader + m_imgui_overlay = std::make_unique(*m_device, m_render_graph, m_octree_pass, m_swapchain, + [&]() { update_imgui_overlay(); }); + + m_render_graph->compile(); } void Application::setup_window_and_input_callbacks() { + // The following code requires some explanation + // Because glfw is a C-style API, we can't use a pointer to non-static class methods as window or input callbacks. + // For example, we can't use Application::key_callback in glfwSetKeyCallback as key callback directly. + // A good explanation can be found on Stack Overflow: + // https://stackoverflow.com/questions/7676971/pointing-to-a-function-that-is-a-class-member-glfw-setkeycallback + // In order to fix this, we can pass a lambda to glfwSetKeyCallback, which calls Application::key_callback + // internally. But there is another problem: Inside of the template, we need to call Application::Key_callback. In + // order to do so, we need to have access to the this-pointer. Unfortunately, the this-pointer can't be captured + // in the lambda capture like [this](){}, because the glfw would not accept the lambda then. To work around this + // problem, we store the this pointer using glfwSetWindowUserPointer. Inside of these lambdas, we then cast the + // pointer to Application* again, allowing us to finally use the callbacks. + m_window->set_user_ptr(this); spdlog::trace("Setting up window callback:"); @@ -222,7 +537,7 @@ void Application::setup_window_and_input_callbacks() { auto lambda_frame_buffer_resize_callback = [](GLFWwindow *window, int width, int height) { auto *app = static_cast(glfwGetWindowUserPointer(window)); spdlog::trace("Frame buffer resize callback called. window width: {}, height: {}", width, height); - app->m_window_resized = true; + app->m_wnd_resized = true; }; m_window->set_resize_callback(lambda_frame_buffer_resize_callback); @@ -264,270 +579,10 @@ void Application::setup_window_and_input_callbacks() { m_window->set_mouse_scroll_callback(lambda_mouse_scroll_callback); } -void Application::setup_vulkan_debug_callback() { - // Check if validation is enabled check for availability of VK_EXT_debug_utils. - if (m_enable_validation_layers) { - spdlog::trace("Khronos validation layer is enabled"); - - if (wrapper::Instance::is_extension_supported(VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) { - auto debug_report_ci = wrapper::make_info(); - debug_report_ci.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT | // NOLINT - VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT | // NOLINT - VK_DEBUG_REPORT_ERROR_BIT_EXT; // NOLINT - - // We use this user data pointer to signal the callback if "" is specified. - // All other solutions to this problem either involve duplicated versions of the lambda - // or global variables. - debug_report_ci.pUserData = reinterpret_cast(&m_stop_on_validation_message); // NOLINT - - debug_report_ci.pfnCallback = reinterpret_cast( // NOLINT - +[](VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT, std::uint64_t, std::size_t, std::int32_t, - const char *, const char *message, void *user_data) { - if ((flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT) != 0) { - spdlog::info(message); - } else if ((flags & VK_DEBUG_REPORT_DEBUG_BIT_EXT) != 0) { - spdlog::debug(message); - } else if ((flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) != 0) { - spdlog::error(message); - } else { - // This also deals with VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT. - spdlog::warn(message); - } - - // Check if --stop-on-validation-message is enabled. 
- if (user_data != nullptr) { - // This feature stops command lines from overflowing with messages in case many validation - // layer messages are reported in a short amount of time. - spdlog::critical("Command line argument --stop-on-validation-message is enabled"); - spdlog::critical("Application will cause a break point now!"); - - // Wait for spdlog to shut down before aborting. - spdlog::shutdown(); - std::abort(); - } - return VK_FALSE; - }); - - // We have to explicitly load this function. - auto vkCreateDebugReportCallbackEXT = reinterpret_cast( // NOLINT - vkGetInstanceProcAddr(m_instance->instance(), "vkCreateDebugReportCallbackEXT")); - - if (vkCreateDebugReportCallbackEXT != nullptr) { - if (const auto result = vkCreateDebugReportCallbackEXT(m_instance->instance(), &debug_report_ci, - nullptr, &m_debug_report_callback); - result != VK_SUCCESS) { - throw VulkanException("Error: vkCreateDebugReportCallbackEXT failed!", result); - } - spdlog::trace("Creating Vulkan debug callback"); - m_debug_report_callback_initialised = true; - } else { - spdlog::error("vkCreateDebugReportCallbackEXT is a null-pointer! Function not available"); - } - } else { - spdlog::warn("Khronos validation layer is not available!"); - } - } else { - spdlog::warn("Khronos validation layer is DISABLED"); - } -} - -Application::Application(int argc, char **argv) { - spdlog::trace("Initialising vulkan-renderer"); - - tools::CommandLineArgumentParser cla_parser; - cla_parser.parse_args(argc, argv); - - spdlog::trace("Application version: {}.{}.{}", APP_VERSION[0], APP_VERSION[1], APP_VERSION[2]); - spdlog::trace("Engine version: {}.{}.{}", ENGINE_VERSION[0], ENGINE_VERSION[1], ENGINE_VERSION[2]); - - // Load the configuration from the TOML file. - load_toml_configuration_file("configuration/renderer.toml"); - - bool enable_renderdoc_instance_layer = false; - - auto enable_renderdoc = cla_parser.arg("--renderdoc"); - if (enable_renderdoc) { -#ifdef NDEBUG - spdlog::warn("You can't use --renderdoc command line argument in release mode. You have to download the code " - "and compile it yourself in debug mode"); -#else - if (*enable_renderdoc) { - spdlog::trace("--renderdoc specified, enabling renderdoc instance layer"); - enable_renderdoc_instance_layer = true; - } -#endif - } - - // If the user specified command line argument "--no-validation", the Khronos validation instance layer will be - // disabled. For debug builds, this is not advisable! Always use validation layers during development! - const auto disable_validation = cla_parser.arg("--no-validation"); - if (disable_validation.value_or(false)) { - spdlog::warn("--no-validation specified, disabling validation layers"); - m_enable_validation_layers = false; - } - - spdlog::trace("Creating Vulkan instance"); - - m_window = - std::make_unique(m_window_title, m_window_width, m_window_height, true, true, m_window_mode); - - m_instance = std::make_unique( - APP_NAME, ENGINE_NAME, VK_MAKE_API_VERSION(0, APP_VERSION[0], APP_VERSION[1], APP_VERSION[2]), - VK_MAKE_API_VERSION(0, ENGINE_VERSION[0], ENGINE_VERSION[1], ENGINE_VERSION[2]), m_enable_validation_layers, - enable_renderdoc_instance_layer); - - vk_tools::print_driver_vulkan_version(); - - m_input_data = std::make_unique(); - - m_surface = std::make_unique(m_instance->instance(), m_window->get()); - - setup_window_and_input_callbacks(); - -#ifndef NDEBUG - if (cla_parser.arg("--stop-on-validation-message").value_or(false)) { - spdlog::warn("--stop-on-validation-message specified. 
Application will call a breakpoint after reporting a " - "validation layer message"); - m_stop_on_validation_message = true; - } - - setup_vulkan_debug_callback(); -#endif - - spdlog::trace("Creating window surface"); - - // The user can specify with "--gpu " which graphics card to prefer. - auto preferred_graphics_card = cla_parser.arg("--gpu"); - if (preferred_graphics_card) { - spdlog::trace("Preferential graphics card index {} specified", *preferred_graphics_card); - } - - bool display_graphics_card_info = true; - - // If the user specified command line argument "--nostats", no information will be - // displayed about all the graphics cards which are available on the system. - const auto hide_gpu_stats = cla_parser.arg("--no-stats"); - if (hide_gpu_stats.value_or(false)) { - spdlog::trace("--no-stats specified, no extended information about graphics cards will be shown"); - display_graphics_card_info = false; - } - - // If the user specified command line argument "--vsync", the presentation engine waits - // for the next vertical blanking period to update the current image. - const auto enable_vertical_synchronisation = cla_parser.arg("--vsync"); - if (enable_vertical_synchronisation.value_or(false)) { - spdlog::trace("V-sync enabled!"); - m_vsync_enabled = true; - } else { - spdlog::trace("V-sync disabled!"); - m_vsync_enabled = false; - } - - if (display_graphics_card_info) { - vk_tools::print_all_physical_devices(m_instance->instance(), m_surface->get()); - } - - bool use_distinct_data_transfer_queue = true; - - // Ignore distinct data transfer queue - const auto forbid_distinct_data_transfer_queue = cla_parser.arg("--no-separate-data-queue"); - if (forbid_distinct_data_transfer_queue.value_or(false)) { - spdlog::warn("Command line argument --no-separate-data-queue specified"); - spdlog::warn("This will force the application to avoid using a distinct queue for data transfer to GPU"); - spdlog::warn("Performance loss might be a result of this!"); - use_distinct_data_transfer_queue = false; - } - - bool enable_debug_marker_device_extension = true; - - if (!enable_renderdoc_instance_layer) { - // Debug markers are only available if RenderDoc is enabled. - enable_debug_marker_device_extension = false; - } - - // Check if Vulkan debug markers should be disabled. - // Those are only available if RenderDoc instance layer is enabled! - const auto no_vulkan_debug_markers = cla_parser.arg("--no-vk-debug-markers"); - if (no_vulkan_debug_markers.value_or(false)) { - spdlog::warn("--no-vk-debug-markers specified, disabling useful debug markers!"); - enable_debug_marker_device_extension = false; - } - - const auto physical_devices = vk_tools::get_physical_devices(m_instance->instance()); - if (preferred_graphics_card && *preferred_graphics_card >= physical_devices.size()) { - spdlog::critical("GPU index {} out of range!", *preferred_graphics_card); - throw std::runtime_error("Invalid GPU index"); - } - - const VkPhysicalDeviceFeatures required_features{ - // Add required physical device features here - }; - - const VkPhysicalDeviceFeatures optional_features{ - // Add optional physical device features here - }; - - std::vector required_extensions{ - // Since we want to draw on a window, we need the swapchain extension - VK_KHR_SWAPCHAIN_EXTENSION_NAME, - }; - -#ifndef NDEBUG - if (enable_debug_marker_device_extension) { - required_extensions.push_back(VK_EXT_DEBUG_MARKER_EXTENSION_NAME); - } -#endif - - const VkPhysicalDevice physical_device = - preferred_graphics_card ? 
physical_devices[*preferred_graphics_card] - : wrapper::Device::pick_best_physical_device(*m_instance, m_surface->get(), - required_features, required_extensions); - - m_device = - std::make_unique(*m_instance, m_surface->get(), use_distinct_data_transfer_queue, - physical_device, required_extensions, required_features, optional_features); - - m_swapchain = std::make_unique(*m_device, m_surface->get(), m_window->width(), - m_window->height(), m_vsync_enabled); - - load_textures(); - load_shaders(); - - m_uniform_buffers.emplace_back(*m_device, "matrices uniform buffer", sizeof(UniformBufferObject)); - - // Create an instance of the resource descriptor builder. - // This allows us to make resource descriptors with the help of a builder pattern. - wrapper::DescriptorBuilder descriptor_builder(*m_device); - - // Make use of the builder to create a resource descriptor for the uniform buffer. - m_descriptors.emplace_back( - descriptor_builder.add_uniform_buffer(m_uniform_buffers[0].buffer(), 0) - .build("Default uniform buffer")); - - load_octree_geometry(true); - generate_octree_indices(); - - m_window->show(); - recreate_swapchain(); -} - -void Application::update_uniform_buffers() { - UniformBufferObject ubo{}; - - ubo.model = glm::mat4(1.0f); - ubo.view = m_camera->view_matrix(); - ubo.proj = m_camera->perspective_matrix(); - ubo.proj[1][1] *= -1; - - // TODO: Embed this into the render graph. - m_uniform_buffers[0].update(&ubo, sizeof(ubo)); -} - void Application::update_imgui_overlay() { - auto cursor_pos = m_input_data->get_cursor_pos(); - ImGuiIO &io = ImGui::GetIO(); - io.DeltaTime = m_time_passed; + io.DeltaTime = m_time_passed + 0.00001f; + auto cursor_pos = m_input_data->get_cursor_pos(); io.MousePos = ImVec2(static_cast(cursor_pos[0]), static_cast(cursor_pos[1])); io.MouseDown[0] = m_input_data->is_mouse_button_pressed(GLFW_MOUSE_BUTTON_LEFT); io.MouseDown[1] = m_input_data->is_mouse_button_pressed(GLFW_MOUSE_BUTTON_RIGHT); @@ -543,83 +598,28 @@ void Application::update_imgui_overlay() { ImGui::Text("%s", m_device->gpu_name().c_str()); ImGui::Text("Engine version %d.%d.%d (Git sha %s)", ENGINE_VERSION[0], ENGINE_VERSION[1], ENGINE_VERSION[2], BUILD_GIT); - ImGui::Text("Vulkan API %d.%d.%d", VK_API_VERSION_MAJOR(VK_API_VERSION_1_2), - VK_API_VERSION_MINOR(VK_API_VERSION_1_2), VK_API_VERSION_PATCH(VK_API_VERSION_1_2)); - const auto cam_pos = m_camera->position(); + ImGui::Text("Vulkan API %d.%d.%d", VK_API_VERSION_MAJOR(wrapper::Instance::REQUIRED_VK_API_VERSION), + VK_API_VERSION_MINOR(wrapper::Instance::REQUIRED_VK_API_VERSION), + VK_API_VERSION_PATCH(wrapper::Instance::REQUIRED_VK_API_VERSION)); + const auto &cam_pos = m_camera->position(); ImGui::Text("Camera position (%.2f, %.2f, %.2f)", cam_pos.x, cam_pos.y, cam_pos.z); - const auto cam_rot = m_camera->rotation(); + const auto &cam_rot = m_camera->rotation(); ImGui::Text("Camera rotation: (%.2f, %.2f, %.2f)", cam_rot.x, cam_rot.y, cam_rot.z); - const auto cam_front = m_camera->front(); + const auto &cam_front = m_camera->front(); ImGui::Text("Camera vector front: (%.2f, %.2f, %.2f)", cam_front.x, cam_front.y, cam_front.z); - const auto cam_right = m_camera->right(); + const auto &cam_right = m_camera->right(); ImGui::Text("Camera vector right: (%.2f, %.2f, %.2f)", cam_right.x, cam_right.y, cam_right.z); - const auto cam_up = m_camera->up(); + const auto &cam_up = m_camera->up(); ImGui::Text("Camera vector up (%.2f, %.2f, %.2f)", cam_up.x, cam_up.y, cam_up.z); ImGui::Text("Yaw: %.2f pitch: %.2f roll: %.2f", m_camera->yaw(), 
m_camera->pitch(), m_camera->roll()); const auto cam_fov = m_camera->fov(); ImGui::Text("Field of view: %d", static_cast(cam_fov)); - ImGui::PushItemWidth(150.0f * m_imgui_overlay->scale()); + ImGui::PushItemWidth(150.0f); ImGui::PopItemWidth(); - ImGui::End(); ImGui::PopStyleVar(); + ImGui::End(); + ImGui::EndFrame(); ImGui::Render(); - - m_imgui_overlay->update(); -} - -void Application::process_mouse_input() { - const auto cursor_pos_delta = m_input_data->calculate_cursor_position_delta(); - - if (m_camera->type() == CameraType::LOOK_AT && m_input_data->is_mouse_button_pressed(GLFW_MOUSE_BUTTON_LEFT)) { - m_camera->rotate(static_cast(cursor_pos_delta[0]), -static_cast(cursor_pos_delta[1])); - } - - m_camera->set_movement_state(CameraMovement::FORWARD, m_input_data->is_key_pressed(GLFW_KEY_W)); - m_camera->set_movement_state(CameraMovement::LEFT, m_input_data->is_key_pressed(GLFW_KEY_A)); - m_camera->set_movement_state(CameraMovement::BACKWARD, m_input_data->is_key_pressed(GLFW_KEY_S)); - m_camera->set_movement_state(CameraMovement::RIGHT, m_input_data->is_key_pressed(GLFW_KEY_D)); -} - -void Application::check_octree_collisions() { - // Check for collision between camera ray and every octree - for (const auto &world : m_worlds) { - const auto collision = ray_cube_collision_check(*world, m_camera->position(), m_camera->front()); - - if (collision) { - const auto intersection = collision.value().intersection(); - const auto face_normal = collision.value().face(); - const auto corner = collision.value().corner(); - const auto edge = collision.value().edge(); - - spdlog::trace("pos {} {} {} | face {} {} {} | corner {} {} {} | edge {} {} {}", intersection.x, - intersection.y, intersection.z, face_normal.x, face_normal.y, face_normal.z, corner.x, - corner.y, corner.z, edge.x, edge.y, edge.z); - - // Break after one collision. 
- break; - } - } -} - -void Application::run() { - spdlog::trace("Running Application"); - - while (!m_window->should_close()) { - m_window->poll(); - update_uniform_buffers(); - update_imgui_overlay(); - render_frame(); - process_mouse_input(); - if (m_input_data->was_key_pressed_once(GLFW_KEY_N)) { - load_octree_geometry(false); - generate_octree_indices(); - m_index_buffer->upload_data(m_octree_indices); - m_vertex_buffer->upload_data(m_octree_vertices); - } - m_camera->update(m_time_passed); - m_time_passed = m_stopwatch.time_step(); - check_octree_collisions(); - } } } // namespace inexor::vulkan_renderer diff --git a/src/vulkan-renderer/imgui.cpp b/src/vulkan-renderer/imgui.cpp deleted file mode 100644 index eeee2e05a..000000000 --- a/src/vulkan-renderer/imgui.cpp +++ /dev/null @@ -1,194 +0,0 @@ -#include "inexor/vulkan-renderer/imgui.hpp" - -#include "inexor/vulkan-renderer/wrapper/cpu_texture.hpp" -#include "inexor/vulkan-renderer/wrapper/descriptor_builder.hpp" -#include "inexor/vulkan-renderer/wrapper/make_info.hpp" - -#include -#include - -namespace inexor::vulkan_renderer { - -ImGUIOverlay::ImGUIOverlay(const wrapper::Device &device, const wrapper::Swapchain &swapchain, - RenderGraph *render_graph, TextureResource *back_buffer) - : m_device(device), m_swapchain(swapchain) { - spdlog::trace("Creating ImGUI context"); - ImGui::CreateContext(); - - ImGuiStyle &style = ImGui::GetStyle(); - style.Colors[ImGuiCol_TitleBg] = ImVec4(1.0f, 0.0f, 0.0f, 1.0f); - style.Colors[ImGuiCol_TitleBgActive] = ImVec4(1.0f, 0.0f, 0.0f, 1.0f); - style.Colors[ImGuiCol_TitleBgCollapsed] = ImVec4(1.0f, 0.0f, 0.0f, 0.1f); - style.Colors[ImGuiCol_MenuBarBg] = ImVec4(1.0f, 0.0f, 0.0f, 0.4f); - style.Colors[ImGuiCol_Header] = ImVec4(0.8f, 0.0f, 0.0f, 0.4f); - style.Colors[ImGuiCol_HeaderActive] = ImVec4(1.0f, 0.0f, 0.0f, 0.4f); - style.Colors[ImGuiCol_HeaderHovered] = ImVec4(1.0f, 0.0f, 0.0f, 0.4f); - style.Colors[ImGuiCol_FrameBg] = ImVec4(0.0f, 0.0f, 0.0f, 0.8f); - style.Colors[ImGuiCol_CheckMark] = ImVec4(1.0f, 0.0f, 0.0f, 0.8f); - style.Colors[ImGuiCol_SliderGrab] = ImVec4(1.0f, 0.0f, 0.0f, 0.4f); - style.Colors[ImGuiCol_SliderGrabActive] = ImVec4(1.0f, 0.0f, 0.0f, 0.8f); - style.Colors[ImGuiCol_FrameBgHovered] = ImVec4(1.0f, 1.0f, 1.0f, 0.1f); - style.Colors[ImGuiCol_FrameBgActive] = ImVec4(1.0f, 1.0f, 1.0f, 0.2f); - style.Colors[ImGuiCol_Button] = ImVec4(1.0f, 0.0f, 0.0f, 0.4f); - style.Colors[ImGuiCol_ButtonHovered] = ImVec4(1.0f, 0.0f, 0.0f, 0.6f); - style.Colors[ImGuiCol_ButtonActive] = ImVec4(1.0f, 0.0f, 0.0f, 0.8f); - - ImGuiIO &io = ImGui::GetIO(); - io.FontGlobalScale = m_scale; - - spdlog::trace("Loading ImGUI shaders"); - m_vertex_shader = std::make_unique(m_device, VK_SHADER_STAGE_VERTEX_BIT, "ImGUI vertex shader", - "shaders/ui.vert.spv"); - m_fragment_shader = std::make_unique(m_device, VK_SHADER_STAGE_FRAGMENT_BIT, - "ImGUI fragment shader", "shaders/ui.frag.spv"); - - // Load font texture - - // TODO: Move this data into a container class; have container class also support bold and italic. 
- constexpr const char *FONT_FILE_PATH = "assets/fonts/NotoSans-Bold.ttf"; - constexpr float FONT_SIZE = 18.0f; - - spdlog::trace("Loading front {}", FONT_FILE_PATH); - - ImFont *font = io.Fonts->AddFontFromFileTTF(FONT_FILE_PATH, FONT_SIZE); - - unsigned char *font_texture_data{}; - int font_texture_width{0}; - int font_texture_height{0}; - io.Fonts->GetTexDataAsRGBA32(&font_texture_data, &font_texture_width, &font_texture_height); - - if (font == nullptr || font_texture_data == nullptr) { - spdlog::error("Unable to load font {}. Falling back to error texture", FONT_FILE_PATH); - m_imgui_texture = std::make_unique(m_device, wrapper::CpuTexture()); - } else { - spdlog::trace("Creating ImGUI font texture"); - - // Our font textures always have 4 channels and a single mip level by definition. - constexpr int FONT_TEXTURE_CHANNELS{4}; - constexpr int FONT_MIP_LEVELS{1}; - - VkDeviceSize upload_size = static_cast(font_texture_width) * - static_cast(font_texture_height) * - static_cast(FONT_TEXTURE_CHANNELS); - - m_imgui_texture = std::make_unique( - m_device, font_texture_data, upload_size, font_texture_width, font_texture_height, FONT_TEXTURE_CHANNELS, - FONT_MIP_LEVELS, "ImGUI font texture"); - } - - // Create an instance of the resource descriptor builder. - // This allows us to make resource descriptors with the help of a builder pattern. - wrapper::DescriptorBuilder descriptor_builder(m_device); - - // Make use of the builder to create a resource descriptor for the combined image sampler. - m_descriptor = std::make_unique( - descriptor_builder.add_combined_image_sampler(m_imgui_texture->sampler(), m_imgui_texture->image_view(), 0) - .build("ImGUI")); - - m_index_buffer = render_graph->add("imgui index buffer", BufferUsage::INDEX_BUFFER); - m_vertex_buffer = render_graph->add("imgui vertex buffer", BufferUsage::VERTEX_BUFFER); - m_vertex_buffer->add_vertex_attribute(VK_FORMAT_R32G32_SFLOAT, offsetof(ImDrawVert, pos)); - m_vertex_buffer->add_vertex_attribute(VK_FORMAT_R32G32_SFLOAT, offsetof(ImDrawVert, uv)); - m_vertex_buffer->add_vertex_attribute(VK_FORMAT_R8G8B8A8_UNORM, offsetof(ImDrawVert, col)); - m_vertex_buffer->set_element_size(sizeof(ImDrawVert)); - - m_stage = render_graph->add("imgui stage"); - m_stage->writes_to(back_buffer); - m_stage->reads_from(m_index_buffer); - m_stage->reads_from(m_vertex_buffer); - m_stage->bind_buffer(m_vertex_buffer, 0); - m_stage->uses_shader(*m_vertex_shader); - m_stage->uses_shader(*m_fragment_shader); - - m_stage->add_descriptor_layout(m_descriptor->descriptor_set_layout()); - - // Setup push constant range for global translation and scale. - m_stage->add_push_constant_range({ - .stageFlags = VK_SHADER_STAGE_VERTEX_BIT, - .offset = 0, - .size = sizeof(PushConstBlock), - }); - - // Setup blend attachment. 
- m_stage->set_blend_attachment({ - .blendEnable = VK_TRUE, - .srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA, - .dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA, - .colorBlendOp = VK_BLEND_OP_ADD, - .srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE, - .dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO, - .alphaBlendOp = VK_BLEND_OP_ADD, - }); -} - -ImGUIOverlay::~ImGUIOverlay() { - ImGui::DestroyContext(); -} - -void ImGUIOverlay::update() { - ImDrawData *imgui_draw_data = ImGui::GetDrawData(); - if (imgui_draw_data == nullptr) { - return; - } - - if (imgui_draw_data->TotalIdxCount == 0 || imgui_draw_data->TotalVtxCount == 0) { - return; - } - - bool should_update = false; - if (m_index_data.size() != imgui_draw_data->TotalIdxCount) { - m_index_data.clear(); - for (std::size_t i = 0; i < imgui_draw_data->CmdListsCount; i++) { - const ImDrawList *cmd_list = imgui_draw_data->CmdLists[i]; // NOLINT - for (std::size_t j = 0; j < cmd_list->IdxBuffer.Size; j++) { - m_index_data.push_back(cmd_list->IdxBuffer.Data[j]); // NOLINT - } - } - m_index_buffer->upload_data(m_index_data); - should_update = true; - } - - if (m_vertex_data.size() != imgui_draw_data->TotalVtxCount) { - m_vertex_data.clear(); - for (std::size_t i = 0; i < imgui_draw_data->CmdListsCount; i++) { - const ImDrawList *cmd_list = imgui_draw_data->CmdLists[i]; // NOLINT - for (std::size_t j = 0; j < cmd_list->VtxBuffer.Size; j++) { - m_vertex_data.push_back(cmd_list->VtxBuffer.Data[j]); // NOLINT - } - } - m_vertex_buffer->upload_data(m_vertex_data); - should_update = true; - } - - if (!should_update) { - return; - } - - m_stage->set_on_record([this](const PhysicalStage &physical, const wrapper::CommandBuffer &cmd_buf) { - ImDrawData *imgui_draw_data = ImGui::GetDrawData(); - if (imgui_draw_data == nullptr) { - return; - } - - const ImGuiIO &io = ImGui::GetIO(); - m_push_const_block.scale = glm::vec2(2.0f / io.DisplaySize.x, 2.0f / io.DisplaySize.y); - m_push_const_block.translate = glm::vec2(-1.0f); - cmd_buf.bind_descriptor_sets(m_descriptor->descriptor_sets(), physical.pipeline_layout()); - cmd_buf.push_constants(physical.pipeline_layout(), VK_SHADER_STAGE_VERTEX_BIT, sizeof(PushConstBlock), - &m_push_const_block); - - std::uint32_t index_offset = 0; - std::int32_t vertex_offset = 0; - for (std::size_t i = 0; i < imgui_draw_data->CmdListsCount; i++) { - const ImDrawList *cmd_list = imgui_draw_data->CmdLists[i]; // NOLINT - for (std::int32_t j = 0; j < cmd_list->CmdBuffer.Size; j++) { - const ImDrawCmd &draw_cmd = cmd_list->CmdBuffer[j]; - vkCmdDrawIndexed(cmd_buf.get(), draw_cmd.ElemCount, 1, index_offset, vertex_offset, 0); - index_offset += draw_cmd.ElemCount; - } - vertex_offset += cmd_list->VtxBuffer.Size; - } - }); -} - -} // namespace inexor::vulkan_renderer diff --git a/src/vulkan-renderer/io/io_exception.cpp b/src/vulkan-renderer/io/io_exception.cpp new file mode 100644 index 000000000..7494205ea --- /dev/null +++ b/src/vulkan-renderer/io/io_exception.cpp @@ -0,0 +1 @@ +#include "inexor/vulkan-renderer/io/io_exception.hpp" diff --git a/src/vulkan-renderer/io/nxoc_parser.cpp b/src/vulkan-renderer/io/nxoc_parser.cpp index 45ce9a12f..572c7e4f9 100644 --- a/src/vulkan-renderer/io/nxoc_parser.cpp +++ b/src/vulkan-renderer/io/nxoc_parser.cpp @@ -1,7 +1,7 @@ #include "inexor/vulkan-renderer/io/nxoc_parser.hpp" #include "inexor/vulkan-renderer/io/byte_stream.hpp" -#include "inexor/vulkan-renderer/io/exception.hpp" +#include "inexor/vulkan-renderer/io/io_exception.hpp" #include "inexor/vulkan-renderer/world/cube.hpp" #include diff 
--git a/src/vulkan-renderer/io/octree_parser.cpp b/src/vulkan-renderer/io/octree_parser.cpp new file mode 100644 index 000000000..aeb47fcb3 --- /dev/null +++ b/src/vulkan-renderer/io/octree_parser.cpp @@ -0,0 +1 @@ +#include "inexor/vulkan-renderer/io/octree_parser.hpp" diff --git a/src/vulkan-renderer/render-graph/buffer.cpp b/src/vulkan-renderer/render-graph/buffer.cpp new file mode 100644 index 000000000..0800d3363 --- /dev/null +++ b/src/vulkan-renderer/render-graph/buffer.cpp @@ -0,0 +1,197 @@ +#include "inexor/vulkan-renderer/render-graph/buffer.hpp" + +#include "inexor/vulkan-renderer/exception.hpp" +#include "inexor/vulkan-renderer/render-graph/buffer.hpp" +#include "inexor/vulkan-renderer/wrapper/commands/command_buffer.hpp" +#include "inexor/vulkan-renderer/wrapper/device.hpp" +#include "inexor/vulkan-renderer/wrapper/make_info.hpp" + +#include + +namespace inexor::vulkan_renderer::render_graph { + +Buffer::Buffer(const Device &device, std::string buffer_name, BufferType buffer_type, std::function on_update) + : m_device(device), m_name(std::move(buffer_name)), m_on_check_for_update(std::move(on_update)), + m_buffer_type(buffer_type) { + if (m_name.empty()) { + throw std::invalid_argument("[Buffer::Buffer] Error: Parameter 'name' is empty!"); + } +} + +Buffer::Buffer(Buffer &&other) noexcept : m_device(other.m_device) { + // TODO: Check me! + m_name = std::move(other.m_name); + m_buffer_type = other.m_buffer_type; + m_on_check_for_update = std::move(other.m_on_check_for_update); + m_buffer = std::exchange(other.m_buffer, VK_NULL_HANDLE); + m_alloc = std::exchange(other.m_alloc, VK_NULL_HANDLE); + m_alloc_info = other.m_alloc_info; + m_src_data = std::exchange(other.m_src_data, m_src_data); + m_src_data_size = other.m_src_data_size; + m_staging_buffer = std::exchange(other.m_staging_buffer, VK_NULL_HANDLE); + m_staging_buffer_alloc = std::exchange(other.m_staging_buffer_alloc, VK_NULL_HANDLE); + m_staging_buffer_alloc_info = std::move(other.m_staging_buffer_alloc_info); + m_update_requested = other.m_update_requested; +} + +Buffer::~Buffer() { + destroy_all(); +} + +void Buffer::create(const CommandBuffer &cmd_buf) { + if (m_src_data_size == 0) { + spdlog::warn("[Buffer::create_buffer] Warning: Can't create buffer of size 0!"); + return; + } + + // This helps us to find the correct VkBufferUsageFlags depending on the BufferType + const std::unordered_map BUFFER_USAGE{ + {BufferType::UNIFORM_BUFFER, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT}, + {BufferType::VERTEX_BUFFER, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT}, + {BufferType::INDEX_BUFFER, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT}, + }; + const auto buffer_ci = wrapper::make_info({ + .size = m_src_data_size, + .usage = BUFFER_USAGE.at(m_buffer_type), + .sharingMode = VK_SHARING_MODE_EXCLUSIVE, + }); + // This helps us to find the correct VmaMemoryUsage depending on the BufferType + const std::unordered_map MEMORY_USAGE{ + {BufferType::UNIFORM_BUFFER, VMA_MEMORY_USAGE_AUTO_PREFER_HOST}, + {BufferType::VERTEX_BUFFER, VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE}, + {BufferType::INDEX_BUFFER, VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE}, + }; + const VmaAllocationCreateInfo alloc_ci{ + .flags = (m_buffer_type == BufferType::UNIFORM_BUFFER) + ? 
static_cast(VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | + VMA_ALLOCATION_CREATE_MAPPED_BIT) + : 0, + .usage = VMA_MEMORY_USAGE_AUTO, + }; + + // The memory for the buffer we would like to create can end up in mappable memory, which means we can simply use + // std::memcpy to copy the source date into it, or it ends up in non-mappable memory, which means we will need to + // use a staging buffer and a transfer operation (a copy command) to upload the data to gpu memory. Which memory is + // chosen by Vulkan Memory Allocator depends on the available memory and current memory usage. + if (const auto result = + vmaCreateBuffer(m_device.allocator(), &buffer_ci, &alloc_ci, &m_buffer, &m_alloc, &m_alloc_info); + result != VK_SUCCESS) { + throw VulkanException("Error: vmaCreateBuffer failed for buffer " + m_name + " !", result); + } + + // Set the buffer's internal debug name in Vulkan Memory Allocator (VMA) + vmaSetAllocationName(m_device.allocator(), m_alloc, m_name.c_str()); + // Set the buffer's internal debug name through Vulkan debug utils + m_device.set_debug_name(m_buffer, m_name); + + // Query memory property flags + VkMemoryPropertyFlags mem_prop_flags{}; + vmaGetAllocationMemoryProperties(m_device.allocator(), m_alloc, &mem_prop_flags); + + // Check if the allocation made by VMA ended up in mappable memory + if (mem_prop_flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) { + // The allocation ended up in mappable memory and it is already mapped + // This means we can simply use std::memcpy to copy the data from the source into it + std::memcpy(m_alloc_info.pMappedData, m_src_data, m_src_data_size); + + // After copying the data, we need to flush caches + // NOTE: vmaFlushAllocation checks internally if the memory is host coherent, in which case it don't flush + if (const auto result = vmaFlushAllocation(m_device.allocator(), m_alloc, 0, VK_WHOLE_SIZE); + result != VK_SUCCESS) { + throw VulkanException("Error: vmaFlushAllocation failed for buffer " + m_name + " !", result); + } + } else { + // Make sure to destroy the previous staging buffer + if (m_staging_buffer != VK_NULL_HANDLE) { + destroy_staging_buffer(); + } + // The allocation ended up in non-mappable memory and we need a staging buffer and a copy command to upload data + const auto staging_buf_ci = wrapper::make_info({ + // The size of the staging buffer must be the size of the actual buffer + .size = m_src_data_size, + // This is the buffer usage bit for staging buffers + .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT, + }); + + const VmaAllocationCreateInfo staging_buf_alloc_ci{ + .flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT, + .usage = VMA_MEMORY_USAGE_AUTO, + }; + + // Create the staging buffer which is used for the transfer of data into the actual buffer + if (const auto result = + vmaCreateBuffer(m_device.allocator(), &staging_buf_ci, &staging_buf_alloc_ci, &m_staging_buffer, + &m_staging_buffer_alloc, &m_staging_buffer_alloc_info); + result != VK_SUCCESS) { + throw VulkanException("Error: vmaCreateBuffer failed for staging buffer " + m_name + " !", result); + } + + const std::string staging_buf_name = "staging:" + m_name; + // Set the staging buffer's internal debug name in Vulkan Memory Allocator (VMA) + vmaSetAllocationName(m_device.allocator(), m_staging_buffer_alloc, staging_buf_name.c_str()); + // Set the staging buffer's internal debug name through Vulkan debug utils + m_device.set_debug_name(m_staging_buffer, staging_buf_name); + + // Copy the memory into the 
staging buffer + std::memcpy(m_staging_buffer_alloc_info.pMappedData, m_src_data, m_src_data_size); + + // NOTE: vmaFlushAllocation checks internally if the memory is host coherent, in which case it don't flush + if (const auto result = vmaFlushAllocation(m_device.allocator(), m_staging_buffer_alloc, 0, VK_WHOLE_SIZE); + result != VK_SUCCESS) { + throw VulkanException("Error: vmaFlushAllocation failed for staging buffer " + m_name + " !", result); + }; + + cmd_buf.insert_debug_label("[Buffer::staging-update|" + m_name + "]", + wrapper::get_debug_label_color(wrapper::DebugLabelColor::ORANGE)); + + cmd_buf.pipeline_buffer_memory_barrier_before_copy_buffer(m_staging_buffer) + .copy_buffer(m_staging_buffer, m_buffer, m_src_data_size) + .pipeline_buffer_memory_barrier_after_copy_buffer(m_buffer); + } + + // Update the descriptor buffer info + m_descriptor_buffer_info = { + .buffer = m_buffer, + .offset = 0, + .range = m_alloc_info.size, + }; + + // The update is finished + m_update_requested = false; + m_src_data = nullptr; + m_src_data_size = 0; + + // NOTE: The staging buffer needs to stay valid until command buffer finished executing! + // It will be destroyed either in the destructor or the next time create is called. + + // NOTE: Another option would have been to wrap each call to create() into its own single time command buffer, which + // would increase the total number of command buffer submissions though. +} + +void Buffer::destroy_all() { + destroy_buffer(); + destroy_staging_buffer(); +} + +void Buffer::destroy_buffer() { + vmaDestroyBuffer(m_device.allocator(), m_buffer, m_alloc); + m_buffer = VK_NULL_HANDLE; + m_alloc = VK_NULL_HANDLE; +} + +void Buffer::destroy_staging_buffer() { + vmaDestroyBuffer(m_device.allocator(), m_staging_buffer, m_staging_buffer_alloc); + m_staging_buffer = VK_NULL_HANDLE; + m_staging_buffer_alloc = VK_NULL_HANDLE; +} + +void Buffer::request_update(void *src_data, const std::size_t src_data_size) { + if (src_data == nullptr || src_data_size == 0) { + return; + } + m_src_data = src_data; + m_src_data_size = src_data_size; + m_update_requested = true; +} + +} // namespace inexor::vulkan_renderer::render_graph diff --git a/src/vulkan-renderer/render-graph/graphics_pass.cpp b/src/vulkan-renderer/render-graph/graphics_pass.cpp new file mode 100644 index 000000000..50a5a02ec --- /dev/null +++ b/src/vulkan-renderer/render-graph/graphics_pass.cpp @@ -0,0 +1,105 @@ +#include "inexor/vulkan-renderer/render-graph/graphics_pass.hpp" + +#include "inexor/vulkan-renderer/wrapper/make_info.hpp" + +#include + +namespace inexor::vulkan_renderer::render_graph { + +GraphicsPass::GraphicsPass( + std::string name, + OnRecordCommandBufferForPass on_record_cmd_buffer, + std::vector> graphics_pass_reads, + std::vector, std::optional>> write_attachments, + std::vector, std::optional>> write_swapchains, + const wrapper::DebugLabelColor pass_debug_label_color) { + // If an extent has already been specified, all attachments must match this! 
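// [editorial note] The checks below enforce that every write attachment and every write swapchain of a
// pass shares a single extent, because dynamic rendering uses one render area per pass. A minimal sketch
// of the rule (hypothetical helper, not part of this commit):
//
//     void check_extent(const VkExtent2D expected, const std::uint32_t width, const std::uint32_t height,
//                       const std::string &target_name) {
//         if (width != expected.width || height != expected.height) {
//             throw std::invalid_argument("Write target " + target_name + " does not match the pass extent!");
//         }
//     }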
+    if (m_extent.width != 0 && m_extent.height != 0) {
+        for (const auto &write_attachment : write_attachments) {
+            const auto &attachment = write_attachment.first.lock();
+            if (attachment->m_width != m_extent.width) {
+                throw std::invalid_argument("[GraphicsPass::GraphicsPass] Error: Width of graphics pass " + m_name +
+                                            " is already specified (" + std::to_string(m_extent.width) +
+                                            "), but width of write attachment " + attachment->m_name + " (" +
+                                            std::to_string(attachment->m_width) + ") does not match!");
+            }
+            if (attachment->m_height != m_extent.height) {
+                throw std::invalid_argument("[GraphicsPass::GraphicsPass] Error: Height of graphics pass " + m_name +
+                                            " is already specified (" + std::to_string(m_extent.height) +
+                                            "), but height of write attachment " + attachment->m_name + " (" +
+                                            std::to_string(attachment->m_height) + ") does not match!");
+            }
+        }
+        for (const auto &write_swapchain : write_swapchains) {
+            const auto &swapchain = write_swapchain.first.lock();
+            if (swapchain->m_extent.width != m_extent.width) {
+                throw std::invalid_argument("[GraphicsPass::GraphicsPass] Error: Width of graphics pass " + m_name +
+                                            " is already specified (" + std::to_string(m_extent.width) +
+                                            "), but width of write swapchain " + swapchain->m_name + " (" +
+                                            std::to_string(swapchain->m_extent.width) + ") does not match!");
+            }
+            if (swapchain->m_extent.height != m_extent.height) {
+                throw std::invalid_argument("[GraphicsPass::GraphicsPass] Error: Height of graphics pass " + m_name +
+                                            " is already specified (" + std::to_string(m_extent.height) +
+                                            "), but height of write swapchain " + swapchain->m_name + " (" +
+                                            std::to_string(swapchain->m_extent.height) + ") does not match!");
+            }
+        }
+    }
+
+    // Pick any extent and store it; they must all be the same at this point
+    if (!write_attachments.empty()) {
+        const auto &attachment = write_attachments[0].first.lock();
+        m_extent = {
+            .width = attachment->m_width,
+            .height = attachment->m_height,
+        };
+    } else if (!write_swapchains.empty()) {
+        // No color attachments, so pick the extent from any of the swapchains specified
+        const auto &swapchain = write_swapchains[0].first.lock();
+        m_extent = swapchain->m_extent;
+    }
+    // Check if either width or height is 0
+    if (m_extent.width == 0) {
+        throw std::runtime_error("[GraphicsPass::GraphicsPass] Error: m_extent.width is 0!");
+    }
+    if (m_extent.height == 0) {
+        throw std::runtime_error("[GraphicsPass::GraphicsPass] Error: m_extent.height is 0!");
+    }
+
+    // Store the other data
+    m_name = std::move(name);
+    m_on_record_cmd_buffer = std::move(on_record_cmd_buffer);
+    m_debug_label_color = wrapper::get_debug_label_color(pass_debug_label_color);
+    m_graphics_pass_reads = std::move(graphics_pass_reads);
+    m_write_attachments = std::move(write_attachments);
+    m_write_swapchains = std::move(write_swapchains);
+}
+
+GraphicsPass::GraphicsPass(GraphicsPass &&other) noexcept {
+    // TODO: Check me!
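// [editorial note] The TODO above presumably means: verify that every member is carried over. m_extent,
// m_next_pass and the m_has_depth_attachment/m_has_stencil_attachment flags do not appear in the
// member-wise moves below (an observation based on this file alone, not on the header). Handle-like
// members use std::exchange so the moved-from pass is left in an empty but destructible state.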
+ m_name = std::move(other.m_name); + m_on_record_cmd_buffer = std::move(other.m_on_record_cmd_buffer); + m_descriptor_set_layout = std::exchange(other.m_descriptor_set_layout, nullptr); + m_descriptor_set = std::exchange(other.m_descriptor_set, VK_NULL_HANDLE); + m_rendering_info = std::move(other.m_rendering_info); + m_write_attachments = std::move(other.m_write_attachments); + m_write_swapchains = std::move(other.m_write_swapchains); + m_color_attachments = std::move(other.m_color_attachments); + m_depth_attachment = std::move(other.m_depth_attachment); + m_stencil_attachment = std::move(other.m_stencil_attachment); + m_graphics_pass_reads = std::move(other.m_graphics_pass_reads); + m_debug_label_color = other.m_debug_label_color; +} + +/// Reset the rendering info +void GraphicsPass::reset_rendering_info() { + m_rendering_info = wrapper::make_info(); + m_color_attachments.clear(); + m_has_depth_attachment = false; + m_depth_attachment = wrapper::make_info(); + m_has_stencil_attachment = false; + m_stencil_attachment = wrapper::make_info(); +} + +} // namespace inexor::vulkan_renderer::render_graph diff --git a/src/vulkan-renderer/render-graph/graphics_pass_builder.cpp b/src/vulkan-renderer/render-graph/graphics_pass_builder.cpp new file mode 100644 index 000000000..b3f813278 --- /dev/null +++ b/src/vulkan-renderer/render-graph/graphics_pass_builder.cpp @@ -0,0 +1,81 @@ +#include "inexor/vulkan-renderer/render-graph/graphics_pass_builder.hpp" + +#include + +namespace inexor::vulkan_renderer::render_graph { + +GraphicsPassBuilder::GraphicsPassBuilder() { + reset(); +} + +GraphicsPassBuilder::GraphicsPassBuilder(GraphicsPassBuilder &&other) noexcept { + m_on_record_cmd_buffer = std::move(other.m_on_record_cmd_buffer); + m_write_attachments = std::move(other.m_write_attachments); + m_write_swapchains = std::move(other.m_write_swapchains); + m_graphics_pass_reads = std::move(other.m_graphics_pass_reads); +} + +std::shared_ptr GraphicsPassBuilder::build(std::string name, const DebugLabelColor pass_debug_color) { + auto graphics_pass = std::make_shared( + std::move(name), std::move(m_on_record_cmd_buffer), std::move(m_graphics_pass_reads), + std::move(m_write_attachments), std::move(m_write_swapchains), pass_debug_color); + reset(); + return graphics_pass; +} + +GraphicsPassBuilder &GraphicsPassBuilder::conditionally_reads_from(std::weak_ptr graphics_pass, + const bool condition) { + if (!graphics_pass.expired() && condition) { + m_graphics_pass_reads.push_back(std::move(graphics_pass)); + } + // NOTE: No exception is thrown if this graphics pass is expired because it's an optional pass! 
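// [editorial note] Hypothetical usage sketch of the two read variants (pass handles assumed to be
// std::weak_ptr<GraphicsPass>, variable names invented for illustration):
//
//     GraphicsPassBuilder builder;
//     builder.reads_from(geometry_pass)                                         // hard dependency, throws if expired
//            .conditionally_reads_from(debug_overlay_pass, show_debug_overlay); // skipped if expired or false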
+    return *this;
+}
+
+GraphicsPassBuilder &GraphicsPassBuilder::reads_from(std::weak_ptr<GraphicsPass> graphics_pass) {
+    if (graphics_pass.expired()) {
+        throw std::invalid_argument("[GraphicsPassBuilder::reads_from] Error: 'graphics_pass' is an invalid pointer!");
+    }
+    m_graphics_pass_reads.push_back(std::move(graphics_pass));
+    return *this;
+}
+
+void GraphicsPassBuilder::reset() {
+    m_on_record_cmd_buffer = {};
+    m_graphics_pass_reads.clear();
+    m_write_attachments.clear();
+    m_write_swapchains.clear();
+}
+
+GraphicsPassBuilder &GraphicsPassBuilder::set_on_record(OnRecordCommandBufferForPass on_record_cmd_buffer) {
+    m_on_record_cmd_buffer = std::move(on_record_cmd_buffer);
+    return *this;
+}
+
+GraphicsPassBuilder &
+GraphicsPassBuilder::writes_to(std::variant<std::weak_ptr<Texture>, std::weak_ptr<Swapchain>> write_attachment,
+                               std::optional<VkClearValue> clear_value) {
+    // Check if this is a std::weak_ptr<Texture>
+    if (std::holds_alternative<std::weak_ptr<Texture>>(write_attachment)) {
+        // This is a std::weak_ptr<Texture>, but we need to check if it's a valid pointer
+        auto &texture = std::get<std::weak_ptr<Texture>>(write_attachment);
+        // Check if the std::weak_ptr is still a valid pointer
+        if (texture.expired()) {
+            throw std::invalid_argument(
+                "[GraphicsPassBuilder::writes_to] Error: Parameter 'write_attachment' is an invalid std::weak_ptr!");
+        }
+        // It's a std::weak_ptr<Texture> and the memory is valid
+        m_write_attachments.emplace_back(std::move(texture), std::move(clear_value));
+    } else {
+        // Otherwise, this must be a std::weak_ptr<Swapchain>! No need to check with std::holds_alternative explicitly.
+        auto &swapchain = std::get<std::weak_ptr<Swapchain>>(write_attachment);
+        // Check if the std::weak_ptr is still a valid pointer
+        if (swapchain.expired()) {
+            throw std::invalid_argument("[GraphicsPassBuilder::writes_to] Error: Parameter 'write_attachment' is an "
+                                        "invalid std::weak_ptr!");
+        }
+        m_write_swapchains.emplace_back(std::move(swapchain), std::move(clear_value));
+    }
+    return *this;
+}
+
+} // namespace inexor::vulkan_renderer::render_graph
diff --git a/src/vulkan-renderer/render-graph/image.cpp b/src/vulkan-renderer/render-graph/image.cpp
new file mode 100644
index 000000000..fda807b0f
--- /dev/null
+++ b/src/vulkan-renderer/render-graph/image.cpp
@@ -0,0 +1,68 @@
+#include "inexor/vulkan-renderer/render-graph/image.hpp"
+
+#include "inexor/vulkan-renderer/exception.hpp"
+#include "inexor/vulkan-renderer/wrapper/device.hpp"
+#include "inexor/vulkan-renderer/wrapper/sampler.hpp"
+
+namespace inexor::vulkan_renderer::render_graph {
+
+Image::Image(const Device &device, std::string name) : m_device(device), m_name(std::move(name)) {}
+
+Image::Image(Image &&other) noexcept : m_device(other.m_device), m_alloc_ci(other.m_alloc_ci) {
+    m_name = std::move(other.m_name);
+    m_img = std::exchange(other.m_img, VK_NULL_HANDLE);
+    m_img_view = std::exchange(other.m_img_view, VK_NULL_HANDLE);
+    m_alloc = std::exchange(other.m_alloc, VK_NULL_HANDLE);
+    m_alloc_info = other.m_alloc_info;
+    m_sampler = std::exchange(other.m_sampler, nullptr);
+}
+
+Image::~Image() {
+    destroy();
+}
+
+void Image::create(VkImageCreateInfo img_ci, VkImageViewCreateInfo img_view_ci) {
+    m_img_ci = std::move(img_ci);
+    m_img_view_ci = std::move(img_view_ci);
+
+    // Create the image
+    if (const auto result =
+            vmaCreateImage(m_device.allocator(), &m_img_ci, &m_alloc_ci, &m_img, &m_alloc, &m_alloc_info);
+        result != VK_SUCCESS) {
+        throw VulkanException("Error: vmaCreateImage failed for image " + m_name + "!", result);
+    }
+    // Set the image's internal debug name in Vulkan Memory Allocator (VMA)
+    vmaSetAllocationName(m_device.allocator(), m_alloc, m_name.c_str());
+    // Set the image's
internal debug name through Vulkan debug utils + m_device.set_debug_name(m_img, m_name); + + // Set the image in the VkImageViewCreateInfo + m_img_view_ci.image = m_img; + + // Create the image view + if (const auto result = vkCreateImageView(m_device.device(), &m_img_view_ci, nullptr, &m_img_view); + result != VK_SUCCESS) { + throw VulkanException("Error: vkCreateImageView failed for image view " + m_name + "!", result); + } + m_device.set_debug_name(m_img_view, m_name); + + // Create a default sampler + m_sampler = std::make_unique(m_device, "Default"); +} + +void Image::destroy() { + // Destroy the image view + vkDestroyImageView(m_device.device(), m_img_view, nullptr); + m_img_view = VK_NULL_HANDLE; + + // Destroy the image + vmaDestroyImage(m_device.allocator(), m_img, m_alloc); + m_img = VK_NULL_HANDLE; + m_alloc = VK_NULL_HANDLE; + + // Destroy the sampler + m_sampler.reset(); + m_sampler = nullptr; +} + +} // namespace inexor::vulkan_renderer::render_graph diff --git a/src/vulkan-renderer/render-graph/render_graph.cpp b/src/vulkan-renderer/render-graph/render_graph.cpp new file mode 100644 index 000000000..70ed55feb --- /dev/null +++ b/src/vulkan-renderer/render-graph/render_graph.cpp @@ -0,0 +1,435 @@ +#include "inexor/vulkan-renderer/render-graph/render_graph.hpp" + +#include "inexor/vulkan-renderer/exception.hpp" +#include "inexor/vulkan-renderer/wrapper/commands/command_buffer.hpp" +#include "inexor/vulkan-renderer/wrapper/device.hpp" +#include "inexor/vulkan-renderer/wrapper/pipelines/pipeline_builder.hpp" +#include "inexor/vulkan-renderer/wrapper/swapchain.hpp" + +#include + +namespace inexor::vulkan_renderer::render_graph { + +RenderGraph::RenderGraph(Device &device) + : m_device(device), m_graphics_pipeline_builder(device), m_descriptor_set_layout_builder(device), + m_descriptor_set_allocator(m_device), m_write_descriptor_set_builder(m_device) {} + +std::weak_ptr RenderGraph::add_graphics_pass(std::shared_ptr pass) { + m_graphics_passes.emplace_back(std::move(pass)); + return m_graphics_passes.back(); +} + +void RenderGraph::add_graphics_pipeline(OnCreateGraphicsPipeline on_pipeline_create) { + m_pipeline_create_functions.emplace_back(std::move(on_pipeline_create)); +} + +std::weak_ptr +RenderGraph::add_buffer(std::string name, const BufferType type, std::function on_update) { + m_buffers.emplace_back(std::make_shared(m_device, std::move(name), type, std::move(on_update))); + return m_buffers.back(); +} + +void RenderGraph::allocate_descriptor_sets() { + for (const auto &descriptor : m_resource_descriptors) { + // Call descriptor set allocation function of each resource descriptor + std::invoke(std::get<1>(descriptor), m_descriptor_set_allocator); + } +} + +void RenderGraph::add_resource_descriptor(OnBuildDescriptorSetLayout on_build_descriptor_set_layout, + OnAllocateDescriptorSet on_allocate_descriptor_set, + OnBuildWriteDescriptorSets on_update_descriptor_set) { + m_resource_descriptors.emplace_back(std::move(on_build_descriptor_set_layout), + std::move(on_allocate_descriptor_set), std::move(on_update_descriptor_set)); +} + +std::weak_ptr RenderGraph::add_texture(std::string name, + const TextureUsage usage, + const VkFormat format, + const std::uint32_t width, + const std::uint32_t height, + const std::uint32_t channels, + const VkSampleCountFlagBits sample_count, + std::function m_on_check_for_updates) { + m_textures.emplace_back(std::make_shared(m_device, std::move(name), usage, format, width, height, channels, + sample_count, std::move(m_on_check_for_updates))); + 
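// [editorial note] As with add_buffer and add_graphics_pass above, the graph keeps the owning
// std::shared_ptr in its internal container and returns only a std::weak_ptr, so callers cannot extend a
// resource's lifetime beyond the render graph that owns it.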
return m_textures.back(); +} + +void RenderGraph::check_for_cycles() { + std::unordered_set> visited; + std::unordered_set> recursion_stack; + + std::function &)> dfs_detect_cycle = + [&](const std::shared_ptr &pass) { + if (recursion_stack.find(pass) != recursion_stack.end()) { + return true; // Cycle detected + } + if (visited.find(pass) != visited.end()) { + return false; // Already visited, no cycle + } + visited.insert(pass); + recursion_stack.insert(pass); + for (const auto &parent : pass->m_graphics_pass_reads) { + if (dfs_detect_cycle(parent.lock())) { + return true; // Cycle detected in one of the parents + } + } + recursion_stack.erase(pass); + return false; // No cycle found for this pass + }; + + for (const auto &pass : m_graphics_passes) { + if (dfs_detect_cycle(pass)) { + throw std::runtime_error("[RenderGraph::check_for_cycles] Error: Rendergraph is not acyclic! " + "A cycle was detected for graphics pass " + + pass->m_name + "!"); + } + } +} + +void RenderGraph::collect_swapchain_image_available_semaphores() { + m_swapchains_imgs_available.clear(); + // Use an std::unordered_set to make sure every swapchain image available semaphore is in there only once! + std::unordered_set unique_semaphores; + for (const auto &pass : m_graphics_passes) { + for (const auto &swapchain : pass->m_write_swapchains) { + unique_semaphores.emplace(swapchain.first.lock()->m_img_available->m_semaphore); + } + } + // Convert the unordered_set into the std::vector so we can pass it in command buffer submission + m_swapchains_imgs_available = std::vector(unique_semaphores.begin(), unique_semaphores.end()); +} + +void RenderGraph::compile() { + // TODO: What needs to be re-done when swapchain is recreated? + check_for_cycles(); + determine_pass_order(); + update_buffers(); + update_textures(); + create_descriptor_set_layouts(); + allocate_descriptor_sets(); + create_graphics_pipelines(); + collect_swapchain_image_available_semaphores(); +} + +void RenderGraph::create_descriptor_set_layouts() { + for (const auto &descriptor : m_resource_descriptors) { + // Call descriptor set layout create function for each descriptor + std::invoke(std::get<0>(descriptor), m_descriptor_set_layout_builder); + } +} + +void RenderGraph::create_graphics_pipelines() { + for (const auto &create_func : m_pipeline_create_functions) { + // Call the graphics pipeline create function + create_func(m_graphics_pipeline_builder); + } +} + +void RenderGraph::determine_pass_order() { + // Pop elements from the stack to get the correct order + std::vector> ordered_passes; + std::unordered_map, bool> visited; + + // Reserve memory for the sorted graphics passes + ordered_passes.reserve(m_graphics_passes.size()); + + // Lambda function for DFS + std::function &)> dfs = [&](const std::shared_ptr &pass) { + // If the pass has already been visited, return + if (visited[pass]) { + return; + } + // Mark the pass as visited + visited[pass] = true; + + // Visit all passes that this pass reads from + for (const auto &weak_read_pass : pass->m_graphics_pass_reads) { + if (auto read_pass = weak_read_pass.lock()) { + dfs(read_pass); + } + } + // All dependencies of this pass have been visited, now push this pass onto the stack + ordered_passes.push_back(pass); + }; + + // Initialize visited map for all passes + for (const auto &pass : m_graphics_passes) { + visited[pass] = false; + } + // Perform DFS from each pass + for (const auto &pass : m_graphics_passes) { + if (!visited[pass]) { + dfs(pass); + } + } + // Update the member variable with the 
sorted passes + m_graphics_passes = std::move(ordered_passes); + + // Let each graphics pass know about its next pass, except the last one which has none + for (std::size_t pass_index = 0; pass_index < m_graphics_passes.size() - 1; pass_index++) { + m_graphics_passes[pass_index]->m_next_pass = m_graphics_passes[pass_index + 1]; + } +} + +void RenderGraph::fill_graphics_pass_rendering_info(GraphicsPass &pass) { + pass.reset_rendering_info(); + + /// Fill the VkRenderingattachmentInfo for a color, depth, or stencil attachment + /// @param write_attachment The attachment this graphics pass writes to + /// + auto fill_rendering_info_for_attachment = [&](const std::weak_ptr &write_attachment, + const std::optional &clear_value) { + const auto &attachment = write_attachment.lock(); + auto get_image_layout = [&]() { + switch (attachment->m_usage) { + case TextureUsage::COLOR_ATTACHMENT: + case TextureUsage::NORMAL: { + return VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; + } + case TextureUsage::DEPTH_ATTACHMENT: + case TextureUsage::STENCIL_ATTACHMENT: { + return VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; + } + default: + return VK_IMAGE_LAYOUT_UNDEFINED; + } + }; + + // TODO: Support MSAA again! + return wrapper::make_info({ + // TODO: Implement m_current_img_view when double/triple buffering and do this on init, not per-frame? + .imageView = attachment->m_image->m_img_view, + .imageLayout = get_image_layout(), + .resolveMode = VK_RESOLVE_MODE_NONE, + .resolveImageView = nullptr, + .loadOp = clear_value ? VK_ATTACHMENT_LOAD_OP_CLEAR : VK_ATTACHMENT_LOAD_OP_LOAD, + .storeOp = VK_ATTACHMENT_STORE_OP_STORE, + .clearValue = clear_value.value_or(VkClearValue{}), + }); + }; + + // Step 1: Process all write attachments (color, depth, stencil) of the graphics pass into VkRenderingInfo + for (const auto &write_attachment : pass.m_write_attachments) { + // What type of attachment is this? + const auto &attachment = write_attachment.first; + const auto &clear_value = write_attachment.second; + const auto rendering_info = fill_rendering_info_for_attachment(attachment, clear_value); + + switch (attachment.lock()->m_usage) { + case TextureUsage::COLOR_ATTACHMENT: + case TextureUsage::NORMAL: { + pass.m_color_attachments.push_back(rendering_info); + break; + } + case TextureUsage::DEPTH_ATTACHMENT: { + pass.m_depth_attachment = rendering_info; + pass.m_has_depth_attachment = true; + break; + } + case TextureUsage::STENCIL_ATTACHMENT: { + pass.m_stencil_attachment = rendering_info; + pass.m_has_stencil_attachment = true; + break; + } + default: + continue; + } + } + + /// Fill the VkRenderingAttachmentInfo for a swapchain + /// @param write_swapchain The swapchain to which this graphics pass writes to + /// @param clear_value The optional clear value for the swapchain image + auto fill_write_info_for_swapchain = [&](const std::weak_ptr &write_swapchain, + const std::optional &clear_value) { + // TODO: Support MSAA again! + return wrapper::make_info({ + // TODO: Does this mean we can do this on init now? Not on a per-frame basis? + .imageView = write_swapchain.lock()->m_current_swapchain_img_view, + .imageLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, + .resolveMode = VK_RESOLVE_MODE_NONE, + .resolveImageView = nullptr, + .loadOp = clear_value ? 
VK_ATTACHMENT_LOAD_OP_CLEAR : VK_ATTACHMENT_LOAD_OP_LOAD, + .storeOp = VK_ATTACHMENT_STORE_OP_STORE, + .clearValue = clear_value.value_or(VkClearValue{}), + }); + }; + + // TODO: Step 2: Process all swapchain writes of the graphics pass into VkRenderingInfo + for (const auto &write_swapchain : pass.m_write_swapchains) { + const auto &swapchain = write_swapchain.first.lock(); + const auto &clear_value = write_swapchain.second; + pass.m_color_attachments.push_back(fill_write_info_for_swapchain(swapchain, clear_value)); + } + + // TODO: If a pass has multiple color attachments those are multiple swapchains, does that mean we must group + // rendering by swapchains because there is no guarantee that they all have the same swapchain extent? + + // Step 3: Fill the rendering info + pass.m_rendering_info = wrapper::make_info({ + .renderArea = + { + .extent = pass.m_extent, + }, + .layerCount = 1, + .colorAttachmentCount = static_cast(pass.m_color_attachments.size()), + .pColorAttachments = (pass.m_color_attachments.size() > 0) ? pass.m_color_attachments.data() : nullptr, + .pDepthAttachment = pass.m_has_depth_attachment ? &pass.m_depth_attachment : nullptr, + .pStencilAttachment = pass.m_has_stencil_attachment ? &pass.m_stencil_attachment : nullptr, + }); +} + +void RenderGraph::record_command_buffer_for_pass(const CommandBuffer &cmd_buf, GraphicsPass &pass) { + cmd_buf.set_suboperation_debug_name("[Pass:" + pass.m_name + "]"); + // Start a new debug label for this graphics pass (visible in graphics debuggers like RenderDoc) + cmd_buf.begin_debug_label_region(pass.m_name, pass.m_debug_label_color); + + // Fill the VKRenderingInfo of the graphics pass + fill_graphics_pass_rendering_info(pass); + + // If there are writes to swapchains, the image layout of the swapchain must be changed because it comes back in + // undefined layout after presenting + for (const auto &swapchain : pass.m_write_swapchains) { + // NOTE: We don't need to check if the previous pass wrote to this swapchain because we already check in the + // code below if the next pass (if any) will write to this swapchain again, so if the last pass already wrote to + // this swapchain, calling change_image_layout_to_prepare_for_rendering will not do anything. + swapchain.first.lock()->change_image_layout_to_prepare_for_rendering(cmd_buf); + } + + // Start dynamic rendering with the compiled rendering info + cmd_buf.begin_rendering(pass.m_rendering_info); + + // NOTE: Pipeline barriers must not be placed inside of dynamic rendering instances! + + // Call the command buffer recording function of this graphics pass. In this function, the actual rendering takes + // place: the programmer binds pipelines, descriptor sets, buffers, and calls Vulkan commands. Note that rendergraph + // does not bind any pipelines, descriptor sets, or buffers automatically! + std::invoke(pass.m_on_record_cmd_buffer, cmd_buf); + + // End dynamic rendering + cmd_buf.end_rendering(); + + // TODO: Not only check for next pass, but check all following passes! For example if pass A writes to swapchain, + // pass B doesn't, but pass C does again, we would unnecessarily transition between A and B, then B and C, and after + // C again! + + // Change the swapchain image layouts to prepare the swapchains for presenting + for (const auto &swapchain : pass.m_write_swapchains) { + // TODO: Check if next pass (if any) writes to that swapchain as well! 
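// [editorial note] The block below already handles the immediately following pass; the TODO above about
// scanning all later passes (pass A and C write to the swapchain, but B in between does not) is what
// remains open.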
+ bool next_pass_writes_to_this_swapchain = false; + if (!pass.m_next_pass.expired()) { + const auto &next_pass = pass.m_next_pass.lock(); + for (const auto &next_pass_write_swapchain : next_pass->m_write_swapchains) { + if (next_pass_write_swapchain.first.lock() == swapchain.first.lock()) { + next_pass_writes_to_this_swapchain = true; + } + } + } + // NOTE: If the next pass writes to this swapchain as well, we can keep it in the current image layout. + // Only otherwise, we change the image layout to prepare the swapchain image for presenting. + if (!next_pass_writes_to_this_swapchain) { + swapchain.first.lock()->change_image_layout_to_prepare_for_presenting(cmd_buf); + } + } + + // End the debug label for this graphics pass + cmd_buf.end_debug_label_region(); +} + +void RenderGraph::render() { + update_buffers(); + update_textures(); + // TODO: Optimize this: Only call if any data changed and try to accumulate write descriptor sets + update_write_descriptor_sets(); + + m_device.execute( + "[RenderGraph::render]", VK_QUEUE_GRAPHICS_BIT, DebugLabelColor::CYAN, + [&](const CommandBuffer &cmd_buf) { + // Call the command buffer recording function of every graphics pass + for (auto &pass : m_graphics_passes) { + record_command_buffer_for_pass(cmd_buf, *pass); + } + }, + m_swapchains_imgs_available); +} + +void RenderGraph::reset() { + // TODO: Implement me! +} + +void RenderGraph::update_buffers() { + // Check if there is any update required + bool any_update_required = false; + for (const auto &buffer : m_buffers) { + std::invoke(buffer->m_on_check_for_update); + if (buffer->m_update_requested) { + any_update_required = true; + } + } + // Only start recording and submitting a command buffer on transfer queue if any update is required + if (any_update_required) { + m_device.execute("[RenderGraph::update_buffers]", VK_QUEUE_GRAPHICS_BIT, DebugLabelColor::MAGENTA, + [&](const CommandBuffer &cmd_buf) { + for (const auto &buffer : m_buffers) { + if (buffer->m_update_requested) { + cmd_buf.set_suboperation_debug_name("[Buffer|Destroy:" + buffer->m_name + "]"); + buffer->destroy_all(); + cmd_buf.set_suboperation_debug_name("[Buffer|Update:" + buffer->m_name + "]"); + buffer->create(cmd_buf); + } + } + }); + } + // NOTE: For the "else" case: We can't insert a debug label here telling us that there are no buffer updates + // required because that command itself would require a command buffer to be in recording state +} + +void RenderGraph::update_textures() { + // Check if there is any update required + bool any_update_required = false; + for (const auto &texture : m_textures) { + // Check if this texture needs an update + if (texture->m_usage == TextureUsage::NORMAL) { + texture->m_on_check_for_updates(); + } + if (texture->m_update_requested) { + any_update_required = true; + } + } + // Only start recording and submitting a command buffer on transfer queue if any update is required + if (any_update_required) { + m_device.execute("[RenderGraph::update_textures]", VK_QUEUE_GRAPHICS_BIT, DebugLabelColor::LIME, + [&](const CommandBuffer &cmd_buf) { + for (const auto &texture : m_textures) { + if (texture->m_update_requested) { + cmd_buf.set_suboperation_debug_name("[Texture|Destroy:" + texture->m_name + "]"); + texture->destroy(); + cmd_buf.set_suboperation_debug_name("[Texture|Create:" + texture->m_name + "]"); + texture->create(); + texture->update(cmd_buf); + } + } + }); + } + // NOTE: For the "else" case: We can't insert a debug label here telling us that there are no buffer updates + // required 
because that command itself would require a command buffer to be in recording state +} + +void RenderGraph::update_write_descriptor_sets() { + m_write_descriptor_sets.clear(); + // NOTE: We don't reserve memory for the vector because we don't know how many write descriptor sets will exist in + // total. Because we call update_descriptor_sets() only once during rendergraph compilation, this is not a problem. + for (const auto &descriptor : m_resource_descriptors) { + // Call descriptor set builder function for each descriptor + auto write_descriptor_sets = std::invoke(std::get<2>(descriptor), m_write_descriptor_set_builder); + // Store the results of the function that was called + std::move(write_descriptor_sets.begin(), write_descriptor_sets.end(), + std::back_inserter(m_write_descriptor_sets)); + } + // NOTE: We batch all descriptor set updates into one function call for optimal performance + vkUpdateDescriptorSets(m_device.device(), static_cast(m_write_descriptor_sets.size()), + m_write_descriptor_sets.data(), 0, nullptr); +} + +} // namespace inexor::vulkan_renderer::render_graph diff --git a/src/vulkan-renderer/render-graph/texture.cpp b/src/vulkan-renderer/render-graph/texture.cpp new file mode 100644 index 000000000..738b8489b --- /dev/null +++ b/src/vulkan-renderer/render-graph/texture.cpp @@ -0,0 +1,206 @@ +#include "inexor/vulkan-renderer/render-graph/texture.hpp" + +#include "inexor/vulkan-renderer/exception.hpp" +#include "inexor/vulkan-renderer/wrapper/commands/command_buffer.hpp" +#include "inexor/vulkan-renderer/wrapper/device.hpp" +#include "inexor/vulkan-renderer/wrapper/make_info.hpp" +#include "inexor/vulkan-renderer/wrapper/sampler.hpp" + +#include +#include + +namespace inexor::vulkan_renderer::render_graph { + +Texture::Texture(const Device &device, + std::string name, + const TextureUsage usage, + const VkFormat format, + const std::uint32_t width, + const std::uint32_t height, + const std::uint32_t channels, + const VkSampleCountFlagBits samples, + std::function on_check_for_updates) + : m_device(device), m_name(std::move(name)), m_usage(usage), m_format(format), m_width(width), m_height(height), + m_channels(channels), m_samples(samples), m_on_check_for_updates(std::move(on_check_for_updates)) { + if (m_name.empty()) { + throw std::invalid_argument("[Texture::Texture] Error: Parameter 'name' is empty!"); + } + m_image = std::make_shared(m_device, m_name); + + if (samples > VK_SAMPLE_COUNT_1_BIT) { + m_msaa_image = std::make_shared(m_device, m_name); + } +} + +Texture::Texture(Texture &&other) noexcept : m_device(other.m_device) { + // TODO: Implement me! 
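// [editorial note] A minimal implementation would likely mirror the member-wise moves in Buffer's move
// constructor (a sketch under that assumption, not checked against texture.hpp):
//
//     m_name = std::move(other.m_name);
//     m_image = std::move(other.m_image);
//     m_msaa_image = std::move(other.m_msaa_image);
//     m_on_check_for_updates = std::move(other.m_on_check_for_updates);
//     m_staging_buffer = std::exchange(other.m_staging_buffer, VK_NULL_HANDLE);
//     m_staging_buffer_alloc = std::exchange(other.m_staging_buffer_alloc, VK_NULL_HANDLE);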
+} + +Texture::~Texture() { + destroy(); +} + +void Texture::create() { + auto img_ci = wrapper::make_info({ + .imageType = VK_IMAGE_TYPE_2D, + .format = m_format, + .extent = + { + .width = m_width, + .height = m_height, + .depth = 1, + }, + .mipLevels = 1, + .arrayLayers = 1, + .samples = VK_SAMPLE_COUNT_1_BIT, + .tiling = VK_IMAGE_TILING_OPTIMAL, + .usage = [&]() -> VkImageUsageFlags { + switch (m_usage) { + case TextureUsage::NORMAL: { + return VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; + } + case TextureUsage::COLOR_ATTACHMENT: { + return VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; + } + default: { + // TextureUsage::DEPTH_STENCIL_BUFFER + return VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT; + } + } + }(), + .sharingMode = VK_SHARING_MODE_EXCLUSIVE, + .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED, + }); + + const auto img_view_ci = wrapper::make_info({ + // NOTE: .image will be filled by the Image wrapper + .viewType = VK_IMAGE_VIEW_TYPE_2D, + .format = m_format, + .subresourceRange = + { + .aspectMask = [&]() -> VkImageAspectFlags { + switch (m_usage) { + case TextureUsage::DEPTH_ATTACHMENT: { + return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; + } + default: { + // TextureUsage::NORMAL and TextureUsage::BACK_BUFFER + return VK_IMAGE_ASPECT_COLOR_BIT; + } + } + }(), + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = 1, + }, + }); + + // Create the texture + m_image->create(img_ci, img_view_ci); + + // If MSAA is enabled, create the MSAA texture as well + if (m_samples > VK_SAMPLE_COUNT_1_BIT) { + // Just overwrite the sample count and re-use the image create info + img_ci.samples = m_samples; + m_msaa_image->create(img_ci, img_view_ci); + } +} + +void Texture::destroy() { + m_image->destroy(); + if (m_msaa_image) { + m_msaa_image->destroy(); + } + destroy_staging_buffer(); +} + +void Texture::destroy_staging_buffer() { + vmaDestroyBuffer(m_device.allocator(), m_staging_buffer, m_staging_buffer_alloc); + m_staging_buffer = VK_NULL_HANDLE; + m_staging_buffer_alloc = VK_NULL_HANDLE; +} + +void Texture::update(const CommandBuffer &cmd_buf) { + if (m_src_texture_data_size == 0) { + // We can't create buffers of size 0! 
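// [editorial note] Once past this zero-size guard, the upload path below mirrors Buffer::create(): copy
// the CPU-side data into a mapped host-visible staging buffer, flush the allocation, then record a
// buffer-to-image copy surrounded by pipeline barriers on the same command buffer.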
+        return;
+    }
+    if (m_staging_buffer != VK_NULL_HANDLE) {
+        destroy_staging_buffer();
+    }
+    const auto staging_buffer_ci = wrapper::make_info<VkBufferCreateInfo>({
+        .size = m_width * m_height * m_channels,
+        .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
+        .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
+    });
+
+    const VmaAllocationCreateInfo staging_buffer_alloc_ci{
+        .flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT,
+        .usage = VMA_MEMORY_USAGE_AUTO,
+    };
+
+    // Create a staging buffer for uploading the texture data
+    if (const auto result = vmaCreateBuffer(m_device.allocator(), &staging_buffer_ci, &staging_buffer_alloc_ci,
+                                            &m_staging_buffer, &m_staging_buffer_alloc, &m_staging_buffer_alloc_info);
+        result != VK_SUCCESS) {
+        throw VulkanException("Error: vmaCreateBuffer failed for staging buffer " + m_name + "!", result);
+    }
+
+    // Copy the texture data into the staging buffer
+    std::memcpy(m_staging_buffer_alloc_info.pMappedData, m_src_texture_data, m_src_texture_data_size);
+
+    // After copying the data, we need to flush caches
+    // NOTE: vmaFlushAllocation checks internally if the memory is host coherent, in which case it doesn't flush
+    if (const auto result = vmaFlushAllocation(m_device.allocator(), m_staging_buffer_alloc, 0, VK_WHOLE_SIZE);
+        result != VK_SUCCESS) {
+        throw VulkanException("Error: vmaFlushAllocation failed for buffer " + m_name + " !", result);
+    }
+
+    const std::string staging_buf_name = "staging:" + m_name;
+    // Set the buffer's internal debug name in Vulkan Memory Allocator (VMA)
+    vmaSetAllocationName(m_device.allocator(), m_staging_buffer_alloc, staging_buf_name.c_str());
+    // Set the buffer's internal debug name through Vulkan debug utils
+    m_device.set_debug_name(m_staging_buffer, staging_buf_name);
+
+    cmd_buf.insert_debug_label("[Texture::staging-update|" + m_name + "]",
+                               wrapper::get_debug_label_color(wrapper::DebugLabelColor::ORANGE));
+
+    // TODO: Check on which queue the update is carried out and adjust the stages in the pipeline barrier accordingly
+    cmd_buf.pipeline_image_memory_barrier_before_copy_buffer_to_image(m_image->m_img)
+        .copy_buffer_to_image(m_staging_buffer, m_image)
+        .pipeline_image_memory_barrier_after_copy_buffer_to_image(m_image->m_img);
+
+    // Update the descriptor image info
+    // TODO: Does this mean we can do this in the create() function, not on a per-frame basis?
+    m_descriptor_img_info = {
+        .sampler = m_image->m_sampler->m_sampler,
+        .imageView = m_image->m_img_view,
+        .imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
+    };
+
+    // The update is finished
+    m_update_requested = false;
+    m_src_texture_data = nullptr;
+    m_src_texture_data_size = 0;
+
+    // NOTE: The staging buffer needs to stay valid until the command buffer has finished executing!
+    // It will be destroyed either in the destructor or the next time execute_update is called.
+
+    // NOTE: Another option would have been to wrap each call to execute_update() into its own single time
+    // command buffer, which would increase the total number of command buffer submissions though.
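// [editorial note] Hypothetical caller side, to show how an update reaches this point (everything except
// request_update is invented for illustration): the caller only hands over a pointer and a size, and the
// actual copy is recorded later inside RenderGraph::update_textures().
//
//     unsigned char *pixels = decode_rgba8_image(file_name, &width, &height); // 4 channels assumed
//     texture->request_update(pixels, static_cast<std::size_t>(width) * height * 4);
//
// The pointed-to data must stay valid until the render graph has recorded the upload.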
+} + +void Texture::request_update(void *src_texture_data, const std::size_t src_texture_data_size) { + if (src_texture_data == nullptr) { + throw std::invalid_argument("[Texture::request_update] Error: Parameter 'texture_src_data' is nullptr!"); + } + if (src_texture_data_size == 0) { + throw std::invalid_argument("[Texture::request_update] Error: Parameter 'src_texture_data_size' is 0!"); + } + m_update_requested = true; + m_src_texture_data = src_texture_data; + m_src_texture_data_size = src_texture_data_size; +} + +} // namespace inexor::vulkan_renderer::render_graph diff --git a/src/vulkan-renderer/render_graph.cpp b/src/vulkan-renderer/render_graph.cpp deleted file mode 100644 index 8138aeaf7..000000000 --- a/src/vulkan-renderer/render_graph.cpp +++ /dev/null @@ -1,555 +0,0 @@ -#include "inexor/vulkan-renderer/render_graph.hpp" - -#include "inexor/vulkan-renderer/exception.hpp" -#include "inexor/vulkan-renderer/wrapper/command_buffer.hpp" -#include "inexor/vulkan-renderer/wrapper/make_info.hpp" -#include "inexor/vulkan-renderer/wrapper/shader.hpp" - -#include -#include -#include - -#include -#include -#include -#include -#include - -namespace inexor::vulkan_renderer { - -void BufferResource::add_vertex_attribute(VkFormat format, std::uint32_t offset) { - m_vertex_attributes.push_back({ - .location = static_cast(m_vertex_attributes.size()), - .format = format, - .offset = offset, - }); -} - -void RenderStage::writes_to(const RenderResource *resource) { - m_writes.push_back(resource); -} - -void RenderStage::reads_from(const RenderResource *resource) { - m_reads.push_back(resource); -} - -void GraphicsStage::bind_buffer(const BufferResource *buffer, const std::uint32_t binding) { - m_buffer_bindings.emplace(buffer, binding); -} - -void GraphicsStage::uses_shader(const wrapper::Shader &shader) { - m_shaders.push_back(wrapper::make_info({ - .stage = shader.type(), - .module = shader.module(), - .pName = shader.entry_point().c_str(), - })); -} - -PhysicalBuffer::~PhysicalBuffer() { - vmaDestroyBuffer(m_device.allocator(), m_buffer, m_allocation); -} - -PhysicalImage::~PhysicalImage() { - vkDestroyImageView(m_device.device(), m_image_view, nullptr); - vmaDestroyImage(m_device.allocator(), m_image, m_allocation); -} - -PhysicalStage::~PhysicalStage() { - vkDestroyPipeline(m_device.device(), m_pipeline, nullptr); - vkDestroyPipelineLayout(m_device.device(), m_pipeline_layout, nullptr); -} - -PhysicalGraphicsStage::~PhysicalGraphicsStage() { - vkDestroyRenderPass(m_device.device(), m_render_pass, nullptr); -} - -void RenderGraph::build_buffer(const BufferResource &buffer_resource, PhysicalBuffer &physical) const { - // TODO: Don't always create mapped. 
- const VmaAllocationCreateInfo alloc_ci{ - .flags = VMA_ALLOCATION_CREATE_MAPPED_BIT, - .usage = VMA_MEMORY_USAGE_CPU_TO_GPU, - }; - - auto buffer_ci = wrapper::make_info({ - .size = buffer_resource.m_data_size, - .sharingMode = VK_SHARING_MODE_EXCLUSIVE, - }); - - switch (buffer_resource.m_usage) { - case BufferUsage::INDEX_BUFFER: - buffer_ci.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT; - break; - case BufferUsage::VERTEX_BUFFER: - buffer_ci.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT; - break; - default: - assert(false); - } - - if (const auto result = vmaCreateBuffer(m_device.allocator(), &buffer_ci, &alloc_ci, &physical.m_buffer, - &physical.m_allocation, &physical.m_alloc_info); - result != VK_SUCCESS) { - throw VulkanException("Failed to create buffer!", result); - } - - // TODO: Use a better naming system for memory resources inside of rendergraph - vmaSetAllocationName(m_device.allocator(), physical.m_allocation, "rendergraph buffer"); -} - -void RenderGraph::build_image(const TextureResource &texture_resource, PhysicalImage &physical, - VmaAllocationCreateInfo *alloc_ci) const { - const auto image_ci = wrapper::make_info({ - .imageType = VK_IMAGE_TYPE_2D, - .format = texture_resource.m_format, - .extent{ - // TODO: Support textures with dimensions not equal to back buffer size. - .width = m_swapchain.extent().width, - .height = m_swapchain.extent().height, - .depth = 1, - }, - .mipLevels = 1, - .arrayLayers = 1, - .samples = VK_SAMPLE_COUNT_1_BIT, - .tiling = VK_IMAGE_TILING_OPTIMAL, - .usage = texture_resource.m_usage == TextureUsage::DEPTH_STENCIL_BUFFER - ? static_cast(VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) - : static_cast(VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT), - .sharingMode = VK_SHARING_MODE_EXCLUSIVE, - .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED, - }); - - VmaAllocationInfo alloc_info; - // TODO: Assign proper name to this image inside of rendergraph - if (const auto result = vmaCreateImage(m_device.allocator(), &image_ci, alloc_ci, &physical.m_image, - &physical.m_allocation, &alloc_info); - result != VK_SUCCESS) { - throw VulkanException("Error: vkCreateImage failed for rendergraph image", result); - } - - // TODO: Use a better naming system for memory resources inside of rendergraph - vmaSetAllocationName(m_device.allocator(), physical.m_allocation, "rendergraph image"); -} - -void RenderGraph::build_image_view(const TextureResource &texture_resource, PhysicalImage &physical) const { - const auto image_view_ci = wrapper::make_info({ - .image = physical.m_image, - .viewType = VK_IMAGE_VIEW_TYPE_2D, - .format = texture_resource.m_format, - .subresourceRange{ - .aspectMask = static_cast(texture_resource.m_usage == TextureUsage::DEPTH_STENCIL_BUFFER - ? 
VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT - : VK_IMAGE_ASPECT_COLOR_BIT), - .levelCount = 1, - .layerCount = 1, - }, - }); - - if (const auto result = vkCreateImageView(m_device.device(), &image_view_ci, nullptr, &physical.m_image_view); - result != VK_SUCCESS) { - throw VulkanException("Error: vkCreateImageView failed for image view " + texture_resource.m_name + "!", - result); - } -} - -void RenderGraph::build_pipeline_layout(const RenderStage *stage, PhysicalStage &physical) const { - const auto pipeline_layout_ci = wrapper::make_info({ - .setLayoutCount = static_cast(stage->m_descriptor_layouts.size()), - .pSetLayouts = stage->m_descriptor_layouts.data(), - .pushConstantRangeCount = static_cast(stage->m_push_constant_ranges.size()), - .pPushConstantRanges = stage->m_push_constant_ranges.data(), - }); - - if (const auto result = - vkCreatePipelineLayout(m_device.device(), &pipeline_layout_ci, nullptr, &physical.m_pipeline_layout); - result != VK_SUCCESS) { - throw VulkanException("Error: vkCreatePipelineLayout failed for pipeline layout " + stage->name() + "!", - result); - } -} - -void RenderGraph::record_command_buffer(const RenderStage *stage, const wrapper::CommandBuffer &cmd_buf, - const std::uint32_t image_index) const { - const PhysicalStage &physical = *stage->m_physical; - - // Record render pass for graphics stages. - const auto *graphics_stage = stage->as(); - if (graphics_stage != nullptr) { - const auto *phys_graphics_stage = physical.as(); - assert(phys_graphics_stage != nullptr); - - std::array clear_values{}; - if (graphics_stage->m_clears_screen) { - clear_values[0].color = {0, 0, 0, 0}; - clear_values[1].depthStencil = {1.0f, 0}; - } - - cmd_buf.begin_render_pass(wrapper::make_info({ - .renderPass = phys_graphics_stage->m_render_pass, - .framebuffer = phys_graphics_stage->m_framebuffers.at(image_index).get(), - .renderArea{ - .extent = m_swapchain.extent(), - }, - .clearValueCount = static_cast(clear_values.size()), - .pClearValues = clear_values.data(), - })); - } - - std::vector vertex_buffers; - for (const auto *resource : stage->m_reads) { - const auto *buffer_resource = resource->as(); - if (buffer_resource == nullptr) { - continue; - } - - auto *physical_buffer = buffer_resource->m_physical->as(); - if (physical_buffer->m_buffer == nullptr) { - continue; - } - if (buffer_resource->m_usage == BufferUsage::INDEX_BUFFER) { - cmd_buf.bind_index_buffer(physical_buffer->m_buffer); - } else if (buffer_resource->m_usage == BufferUsage::VERTEX_BUFFER) { - vertex_buffers.push_back(physical_buffer->m_buffer); - } - } - - if (!vertex_buffers.empty()) { - cmd_buf.bind_vertex_buffers(vertex_buffers); - } - - cmd_buf.bind_pipeline(physical.m_pipeline); - stage->m_on_record(physical, cmd_buf); - - if (graphics_stage != nullptr) { - cmd_buf.end_render_pass(); - } - - // TODO: Find a more performant solution instead of placing a full memory barrier after each stage! - cmd_buf.full_barrier(); -} - -void RenderGraph::build_render_pass(const GraphicsStage *stage, PhysicalGraphicsStage &physical) const { - std::vector attachments; - std::vector colour_refs; - std::vector depth_refs; - - // Build vulkan attachments. For every texture resource that stage writes to, we create a corresponding - // VkAttachmentDescription and attach it to the render pass. - // TODO(GH-203): Support multisampled attachments. - // TODO: Use range-based for loop initialization statements when we switch to C++ 20. 
- for (std::size_t i = 0; i < stage->m_writes.size(); i++) { - const auto *resource = stage->m_writes[i]; - const auto *texture = resource->as(); - if (texture == nullptr) { - continue; - } - - VkAttachmentDescription attachment{ - .format = texture->m_format, - .samples = VK_SAMPLE_COUNT_1_BIT, - .loadOp = stage->m_clears_screen ? VK_ATTACHMENT_LOAD_OP_CLEAR : VK_ATTACHMENT_LOAD_OP_DONT_CARE, - .storeOp = VK_ATTACHMENT_STORE_OP_STORE, - .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE, - .stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE, - .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED, - }; - - switch (texture->m_usage) { - case TextureUsage::BACK_BUFFER: - if (!stage->m_clears_screen) { - attachment.initialLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; - attachment.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; - } - attachment.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; - colour_refs.push_back({static_cast(i), VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}); - break; - case TextureUsage::DEPTH_STENCIL_BUFFER: - attachment.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; - depth_refs.push_back({static_cast(i), attachment.finalLayout}); - break; - default: - attachment.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; - colour_refs.push_back({static_cast(i), attachment.finalLayout}); - break; - } - attachments.push_back(attachment); - } - - // Build a simple subpass that just waits for the output colour vector to be written by the fragment shader. In the - // future, we may want to make use of subpasses more. - const VkSubpassDependency subpass_dependency{ - .srcSubpass = VK_SUBPASS_EXTERNAL, - .srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, - .dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, - .dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, - }; - - const VkSubpassDescription subpass_description{ - .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS, - .colorAttachmentCount = static_cast(colour_refs.size()), - .pColorAttachments = colour_refs.data(), - .pDepthStencilAttachment = !depth_refs.empty() ? depth_refs.data() : nullptr, - }; - - const auto render_pass_ci = wrapper::make_info({ - .attachmentCount = static_cast(attachments.size()), - .pAttachments = attachments.data(), - .subpassCount = 1, - .pSubpasses = &subpass_description, - .dependencyCount = 1, - .pDependencies = &subpass_dependency, - }); - - if (const auto result = vkCreateRenderPass(m_device.device(), &render_pass_ci, nullptr, &physical.m_render_pass); - result != VK_SUCCESS) { - throw VulkanException("Error: vkCreateRenderPass failed for renderpass " + stage->name() + " !", result); - } -} - -void RenderGraph::build_graphics_pipeline(const GraphicsStage *stage, PhysicalGraphicsStage &physical) const { - // Build buffer and vertex layout bindings. For every buffer resource that stage reads from, we create a - // corresponding attribute binding and vertex binding description. - std::vector attribute_bindings; - std::vector vertex_bindings; - for (const auto *resource : stage->m_reads) { - const auto *buffer_resource = resource->as(); - if (buffer_resource == nullptr) { - continue; - } - - // Don't mess with index buffers here. - if (buffer_resource->m_usage == BufferUsage::INDEX_BUFFER) { - continue; - } - - // We use std::unordered_map::at() here to ensure that a binding value exists for buffer_resource. 
- const std::uint32_t binding = stage->m_buffer_bindings.at(buffer_resource); - for (auto attribute_binding : buffer_resource->m_vertex_attributes) { - attribute_binding.binding = binding; - attribute_bindings.push_back(attribute_binding); - } - - vertex_bindings.push_back({ - .binding = binding, - .stride = static_cast(buffer_resource->m_element_size), - .inputRate = VK_VERTEX_INPUT_RATE_VERTEX, - }); - } - - const auto vertex_input = wrapper::make_info({ - .vertexBindingDescriptionCount = static_cast(vertex_bindings.size()), - .pVertexBindingDescriptions = vertex_bindings.data(), - .vertexAttributeDescriptionCount = static_cast(attribute_bindings.size()), - .pVertexAttributeDescriptions = attribute_bindings.data(), - }); - - // TODO: Support primitives other than triangles. - const auto input_assembly = wrapper::make_info({ - .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, - .primitiveRestartEnable = VK_FALSE, - }); - - // TODO: Also allow depth compare func to be changed? - const auto depth_stencil = wrapper::make_info({ - .depthTestEnable = stage->m_depth_test ? VK_TRUE : VK_FALSE, - .depthWriteEnable = stage->m_depth_write ? VK_TRUE : VK_FALSE, - .depthCompareOp = VK_COMPARE_OP_LESS_OR_EQUAL, - }); - - // TODO: Allow culling to be disabled. - // TODO: Wireframe rendering. - const auto rasterization_state = wrapper::make_info({ - .polygonMode = VK_POLYGON_MODE_FILL, - .cullMode = VK_CULL_MODE_BACK_BIT, - .frontFace = VK_FRONT_FACE_CLOCKWISE, - .lineWidth = 1.0f, - }); - - // TODO(GH-203): Support multisampling again. - const auto multisample_state = wrapper::make_info({ - .rasterizationSamples = VK_SAMPLE_COUNT_1_BIT, - .minSampleShading = 1.0f, - }); - - auto blend_attachment = stage->m_blend_attachment; - blend_attachment.colorWriteMask = - VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT; - - const auto blend_state = wrapper::make_info({ - .attachmentCount = 1, - .pAttachments = &blend_attachment, - }); - - const VkRect2D scissor{ - .extent = m_swapchain.extent(), - }; - - const VkViewport viewport{ - .width = static_cast(m_swapchain.extent().width), - .height = static_cast(m_swapchain.extent().height), - .maxDepth = 1.0f, - }; - - // TODO: Custom scissors? - const auto viewport_state = wrapper::make_info({ - .viewportCount = 1, - .pViewports = &viewport, - .scissorCount = 1, - .pScissors = &scissor, - }); - - const auto pipeline_ci = wrapper::make_info({ - .stageCount = static_cast(stage->m_shaders.size()), - .pStages = stage->m_shaders.data(), - .pVertexInputState = &vertex_input, - .pInputAssemblyState = &input_assembly, - .pViewportState = &viewport_state, - .pRasterizationState = &rasterization_state, - .pMultisampleState = &multisample_state, - .pDepthStencilState = &depth_stencil, - .pColorBlendState = &blend_state, - .layout = physical.m_pipeline_layout, - .renderPass = physical.m_render_pass, - }); - - // TODO: Pipeline caching (basically load the render graph from a file) - if (const auto result = - vkCreateGraphicsPipelines(m_device.device(), nullptr, 1, &pipeline_ci, nullptr, &physical.m_pipeline); - result != VK_SUCCESS) { - throw VulkanException("Error: vkCreateGraphicsPipelines failed for pipeline " + stage->name() + " !", result); - } -} - -void RenderGraph::compile(const RenderResource *target) { - // TODO(GH-204): Better logging and input validation. - // TODO: Many opportunities for optimisation. - - // Build a simple helper map to lookup a resource's writers. 
- std::unordered_map> writers; - for (auto &stage : m_stages) { - for (const auto *resource : stage->m_writes) { - writers[resource].push_back(stage.get()); - } - } - - // Post order depth first search. Note that this doesn't do any colouring, so it only works on acyclic graphs. - // TODO(GH-204): Stage graph validation (ensuring no cycles, etc.). - // TODO: Move away from recursive dfs algo. - std::function dfs = [&](RenderStage *stage) { - for (const auto *resource : stage->m_reads) { - for (auto *writer : writers[resource]) { - dfs(writer); - } - } - m_stage_stack.push_back(stage); - }; - - // DFS starting from writers of target (initial stage executants). - for (auto *stage : writers[target]) { - dfs(stage); - } - - m_log->trace("Final stage order:"); - for (auto *stage : m_stage_stack) { - m_log->trace(" - {}", stage->m_name); - } - - // Create physical resources. For now, each buffer or texture resource maps directly to either a VkBuffer or VkImage - // respectively. Every physical resource also has a VmaAllocation. - // TODO: Resource aliasing (i.e. reusing the same physical resource for multiple resources). - m_log->trace("Allocating physical resource for buffers:"); - - for (auto &buffer_resource : m_buffer_resources) { - m_log->trace(" - {}", buffer_resource->m_name); - buffer_resource->m_physical = std::make_shared(m_device); - } - - m_log->trace("Allocating physical resource for texture:"); - - for (auto &texture_resource : m_texture_resources) { - m_log->trace(" - {}", texture_resource->m_name); - // Back buffer gets special handling. - if (texture_resource->m_usage == TextureUsage::BACK_BUFFER) { - // TODO: Move image views from wrapper::Swapchain to PhysicalBackBuffer. - texture_resource->m_physical = std::make_shared(m_device, m_swapchain); - continue; - } - - // TODO: Use a constexpr bool. - VmaAllocationCreateInfo alloc_ci{}; - alloc_ci.usage = VMA_MEMORY_USAGE_GPU_ONLY; - - auto physical = std::make_shared(m_device); - texture_resource->m_physical = physical; - build_image(*texture_resource, *physical, &alloc_ci); - build_image_view(*texture_resource, *physical); - } - - // Create physical stages. Each render stage maps to a vulkan pipeline (either compute or graphics) and a list of - // command buffers. Each graphics stage also maps to a vulkan render pass. - for (auto *stage : m_stage_stack) { - if (auto *graphics_stage = stage->as()) { - auto physical_ptr = std::make_unique(m_device); - auto &physical = *physical_ptr; - graphics_stage->m_physical = std::move(physical_ptr); - - build_render_pass(graphics_stage, physical); - build_pipeline_layout(graphics_stage, physical); - build_graphics_pipeline(graphics_stage, physical); - - // If we write to at least one texture, we need to make framebuffers. - if (!stage->m_writes.empty()) { - // For every texture that this stage writes to, we need to attach it to the framebuffer. 
- std::vector back_buffers; - std::vector images; - for (const auto *resource : stage->m_writes) { - if (const auto *texture = resource->as()) { - const auto &physical_texture = *texture->m_physical; - if (const auto *back_buffer = physical_texture.as()) { - back_buffers.push_back(back_buffer); - } else if (const auto *image = physical_texture.as()) { - images.push_back(image); - } - } - } - - std::vector image_views; - image_views.reserve(back_buffers.size() + images.size()); - for (auto *const img_view : m_swapchain.image_views()) { - std::fill_n(std::back_inserter(image_views), back_buffers.size(), img_view); - for (const auto *image : images) { - image_views.push_back(image->m_image_view); - } - physical.m_framebuffers.emplace_back(m_device, physical.m_render_pass, image_views, m_swapchain, - "Framebuffer"); - image_views.clear(); - } - } - } - } -} - -void RenderGraph::render(const std::uint32_t image_index, const wrapper::CommandBuffer &cmd_buf) { - // Update dynamic buffers. - for (auto &buffer_resource : m_buffer_resources) { - if (buffer_resource->m_data_upload_needed) { - auto &physical = *buffer_resource->m_physical->as(); - - if (physical.m_buffer != nullptr) { - vmaDestroyBuffer(m_device.allocator(), physical.m_buffer, physical.m_allocation); - } - - build_buffer(*buffer_resource, physical); - - // Upload new data. - assert(physical.m_alloc_info.pMappedData != nullptr); - std::memcpy(physical.m_alloc_info.pMappedData, buffer_resource->m_data, buffer_resource->m_data_size); - buffer_resource->m_data_upload_needed = false; - } - } - - for (const auto &stage : m_stage_stack) { - record_command_buffer(stage, cmd_buf, image_index); - } -} - -} // namespace inexor::vulkan_renderer diff --git a/src/vulkan-renderer/renderer.cpp b/src/vulkan-renderer/renderer.cpp index e3d5a0fb3..5f282702b 100644 --- a/src/vulkan-renderer/renderer.cpp +++ b/src/vulkan-renderer/renderer.cpp @@ -1,143 +1 @@ -#include "inexor/vulkan-renderer/renderer.hpp" - -#include "inexor/vulkan-renderer/exception.hpp" -#include "inexor/vulkan-renderer/standard_ubo.hpp" -#include "inexor/vulkan-renderer/wrapper/make_info.hpp" - -namespace inexor::vulkan_renderer { - -void VulkanRenderer::setup_render_graph() { - m_back_buffer = m_render_graph->add("back buffer", TextureUsage::BACK_BUFFER); - m_back_buffer->set_format(m_swapchain->image_format()); - - auto *depth_buffer = m_render_graph->add("depth buffer", TextureUsage::DEPTH_STENCIL_BUFFER); - depth_buffer->set_format(VK_FORMAT_D32_SFLOAT_S8_UINT); - - m_index_buffer = m_render_graph->add("index buffer", BufferUsage::INDEX_BUFFER); - m_index_buffer->upload_data(m_octree_indices); - - m_vertex_buffer = m_render_graph->add("vertex buffer", BufferUsage::VERTEX_BUFFER); - m_vertex_buffer->add_vertex_attribute(VK_FORMAT_R32G32B32_SFLOAT, offsetof(OctreeGpuVertex, position)); // NOLINT - m_vertex_buffer->add_vertex_attribute(VK_FORMAT_R32G32B32_SFLOAT, offsetof(OctreeGpuVertex, color)); // NOLINT - m_vertex_buffer->upload_data(m_octree_vertices); - - auto *main_stage = m_render_graph->add("main stage"); - main_stage->writes_to(m_back_buffer); - main_stage->writes_to(depth_buffer); - main_stage->reads_from(m_index_buffer); - main_stage->reads_from(m_vertex_buffer); - main_stage->bind_buffer(m_vertex_buffer, 0); - main_stage->set_clears_screen(true); - main_stage->set_depth_options(true, true); - main_stage->set_on_record([&](const PhysicalStage &physical, const wrapper::CommandBuffer &cmd_buf) { - cmd_buf.bind_descriptor_sets(m_descriptors[0].descriptor_sets(), 
physical.pipeline_layout()); - cmd_buf.draw_indexed(static_cast(m_octree_indices.size())); - }); - - for (const auto &shader : m_shaders) { - main_stage->uses_shader(shader); - } - - main_stage->add_descriptor_layout(m_descriptors[0].descriptor_set_layout()); -} - -void VulkanRenderer::generate_octree_indices() { - auto old_vertices = std::move(m_octree_vertices); - m_octree_indices.clear(); - m_octree_vertices.clear(); - std::unordered_map vertex_map; - for (auto &vertex : old_vertices) { - // TODO: Use std::unordered_map::contains() when we switch to C++ 20. - if (vertex_map.count(vertex) == 0) { - assert(vertex_map.size() < std::numeric_limits::max() && "Octree too big!"); - vertex_map.emplace(vertex, static_cast(vertex_map.size())); - m_octree_vertices.push_back(vertex); - } - m_octree_indices.push_back(vertex_map.at(vertex)); - } - spdlog::trace("Reduced octree by {} vertices (from {} to {})", old_vertices.size() - m_octree_vertices.size(), - old_vertices.size(), m_octree_vertices.size()); - spdlog::trace("Total indices {} ", m_octree_indices.size()); -} - -void VulkanRenderer::recreate_swapchain() { - m_window->wait_for_focus(); - m_device->wait_idle(); - - // Query the framebuffer size here again although the window width is set during framebuffer resize callback - // The reason for this is that the framebuffer size could already be different again because we missed a poll - // This seems to be an issue on Linux only though - int window_width = 0; - int window_height = 0; - glfwGetFramebufferSize(m_window->get(), &window_width, &window_height); - - // TODO: This is quite naive, we don't need to recompile the whole render graph on swapchain invalidation. - m_render_graph.reset(); - // Recreate the swapchain - m_swapchain->setup_swapchain(window_width, window_height, m_vsync_enabled); - m_render_graph = std::make_unique(*m_device, *m_swapchain); - setup_render_graph(); - - m_camera = std::make_unique(glm::vec3(6.0f, 10.0f, 2.0f), 180.0f, 0.0f, - static_cast(m_window->width()), static_cast(m_window->height())); - - m_camera->set_movement_speed(5.0f); - m_camera->set_rotation_speed(0.5f); - - m_imgui_overlay.reset(); - m_imgui_overlay = std::make_unique(*m_device, *m_swapchain, m_render_graph.get(), m_back_buffer); - m_render_graph->compile(m_back_buffer); -} - -void VulkanRenderer::render_frame() { - if (m_window_resized) { - m_window_resized = false; - recreate_swapchain(); - return; - } - - const auto image_index = m_swapchain->acquire_next_image_index(); - const auto &cmd_buf = m_device->request_command_buffer("rendergraph"); - - m_render_graph->render(image_index, cmd_buf); - - const std::array stage_mask{VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT}; - - cmd_buf.submit_and_wait(wrapper::make_info({ - .waitSemaphoreCount = 1, - .pWaitSemaphores = m_swapchain->image_available_semaphore(), - .pWaitDstStageMask = stage_mask.data(), - .commandBufferCount = 1, - .pCommandBuffers = cmd_buf.ptr(), - })); - - m_swapchain->present(image_index); - - if (auto fps_value = m_fps_counter.update()) { - m_window->set_title("Inexor Vulkan API renderer demo - " + std::to_string(*fps_value) + " FPS"); - spdlog::trace("FPS: {}, window size: {} x {}", *fps_value, m_window->width(), m_window->height()); - } -} - -VulkanRenderer::~VulkanRenderer() { - spdlog::trace("Shutting down vulkan renderer"); - - if (m_device == nullptr) { - return; - } - - m_device->wait_idle(); - - if (!m_debug_report_callback_initialised) { - return; - } - - // TODO(): Is there a better way to do this? 
Maybe add a helper function to wrapper::Instance? - auto vk_destroy_debug_report_callback = reinterpret_cast( // NOLINT - vkGetInstanceProcAddr(m_instance->instance(), "vkDestroyDebugReportCallbackEXT")); - if (vk_destroy_debug_report_callback != nullptr) { - vk_destroy_debug_report_callback(m_instance->instance(), m_debug_report_callback, nullptr); - } -} - -} // namespace inexor::vulkan_renderer + \ No newline at end of file diff --git a/src/vulkan-renderer/renderers/imgui.cpp b/src/vulkan-renderer/renderers/imgui.cpp new file mode 100644 index 000000000..7fe1cc97c --- /dev/null +++ b/src/vulkan-renderer/renderers/imgui.cpp @@ -0,0 +1,236 @@ +#include "inexor/vulkan-renderer/renderers/imgui.hpp" + +#include "inexor/vulkan-renderer/render-graph/graphics_pass_builder.hpp" +#include "inexor/vulkan-renderer/render-graph/render_graph.hpp" +#include "inexor/vulkan-renderer/wrapper/make_info.hpp" +#include "inexor/vulkan-renderer/wrapper/shader.hpp" +#include "inexor/vulkan-renderer/wrapper/swapchain.hpp" + +#include +#include +#include + +namespace inexor::vulkan_renderer::renderers { + +ImGuiRenderer::ImGuiRenderer(const Device &device, + std::weak_ptr render_graph, + std::weak_ptr previous_pass, + std::weak_ptr swapchain, + std::function on_update_user_data) + : m_on_update_user_data(std::move(on_update_user_data)), m_previous_pass(std::move(previous_pass)), + m_swapchain(std::move(swapchain)) { + + if (render_graph.expired()) { + throw std::invalid_argument( + "[ImGuiRenderer::ImGuiRenderer] Error: Parameter 'render_graph' is an invalid pointer!"); + } + if (m_swapchain.expired()) { + throw std::invalid_argument( + "[ImGuiRenderer::ImGuiRenderer] Error: Parameter 'm_swapchain' is an invalid pointer!"); + } + // NOTE: It's valid for previous_pass to be an invalid pointer (in that case there is no previous pass!) 
+ + spdlog::trace("Creating ImGui context"); + m_imgui_context = ImGui::CreateContext(); + ImGui::SetCurrentContext(m_imgui_context); + + spdlog::trace("Loading ImGui font texture"); + load_font_data_from_file(); + + spdlog::trace("Setting ImGui style"); + set_imgui_style(); + + auto graph = render_graph.lock(); + auto target_swapchain = m_swapchain.lock(); + + using render_graph::BufferType; + m_vertex_buffer = graph->add_buffer("ImGui|Vertex", BufferType::VERTEX_BUFFER, [&]() { + m_on_update_user_data(); + const ImDrawData *draw_data = ImGui::GetDrawData(); + + m_index_data.clear(); + m_vertex_data.clear(); + // We need to collect the vertices and indices generated by ImGui + // because it does not store them in one array, but rather in chunks (command lists) + for (std::size_t i = 0; i < draw_data->CmdListsCount; i++) { + const ImDrawList *cmd_list = draw_data->CmdLists[i]; // NOLINT + for (std::size_t j = 0; j < cmd_list->IdxBuffer.Size; j++) { + m_index_data.push_back(cmd_list->IdxBuffer.Data[j]); // NOLINT + } + for (std::size_t j = 0; j < cmd_list->VtxBuffer.Size; j++) { + m_vertex_data.push_back(cmd_list->VtxBuffer.Data[j]); // NOLINT + } + } + if (m_vertex_data.size() > 0) { + // Request rendergraph to do an update of the vertex buffer + m_vertex_buffer.lock()->request_update(m_vertex_data); + } + }); + + m_index_buffer = graph->add_buffer("ImGui|Index", BufferType::INDEX_BUFFER, [&]() { + if (m_index_data.size() > 0) { + // Request rendergraph to do an update of the index buffer + m_index_buffer.lock()->request_update(m_index_data); + } + }); + + m_vertex_shader = + std::make_shared(device, "ImGui", VK_SHADER_STAGE_VERTEX_BIT, "shaders/ui.vert.spv"); + m_fragment_shader = + std::make_shared(device, "ImGui", VK_SHADER_STAGE_FRAGMENT_BIT, "shaders/ui.frag.spv"); + + m_imgui_texture = graph->add_texture("ImGui-Font", render_graph::TextureUsage::NORMAL, VK_FORMAT_R8G8B8A8_UNORM, + m_font_texture_width, m_font_texture_height, 4, VK_SAMPLE_COUNT_1_BIT, [&]() { + // Initialize the ImGui font texture only once in the update function + if (!m_font_texture_initialized) { + m_imgui_texture.lock()->request_update(m_font_texture_data, + m_font_texture_data_size); + m_font_texture_initialized = true; + } + }); + + graph->add_resource_descriptor( + [&](wrapper::descriptors::DescriptorSetLayoutBuilder &builder) { + m_descriptor_set_layout = builder.add_combined_image_sampler(VK_SHADER_STAGE_FRAGMENT_BIT).build("ImGui"); + }, + [&](wrapper::descriptors::DescriptorSetAllocator &allocator) { + m_descriptor_set = allocator.allocate("ImGui", m_descriptor_set_layout); + }, + [&](wrapper::descriptors::WriteDescriptorSetBuilder &builder) -> std::vector { + return builder.add_combined_image_sampler_update(m_descriptor_set, m_imgui_texture).build(); + }); + + graph->add_graphics_pipeline([&](wrapper::pipelines::GraphicsPipelineBuilder &builder) { + m_imgui_pipeline = + builder + .set_vertex_input_bindings({ + { + .binding = 0, + .stride = sizeof(ImDrawVert), + .inputRate = VK_VERTEX_INPUT_RATE_VERTEX, + }, + }) + .set_vertex_input_attributes({ + { + .location = 0, + .format = VK_FORMAT_R32G32_SFLOAT, + .offset = offsetof(ImDrawVert, pos), + }, + { + .location = 1, + .format = VK_FORMAT_R32G32_SFLOAT, + .offset = offsetof(ImDrawVert, uv), + }, + { + .location = 2, + .format = VK_FORMAT_R8G8B8A8_UNORM, + .offset = offsetof(ImDrawVert, col), + }, + }) + .add_default_color_blend_attachment() + .add_color_attachment_format(m_swapchain.lock()->image_format()) + .set_dynamic_states({ + VK_DYNAMIC_STATE_VIEWPORT, 
+ VK_DYNAMIC_STATE_SCISSOR, + }) + // NOTE: Even though viewport and scissor are dynamic states, we still need to specify one viewport and one scissor here + .set_scissor(m_swapchain.lock()->extent()) + .set_viewport(m_swapchain.lock()->extent()) + .add_shader(m_vertex_shader) + .add_shader(m_fragment_shader) + .set_descriptor_set_layout(m_descriptor_set_layout) + .add_push_constant_range(VK_SHADER_STAGE_VERTEX_BIT, sizeof(m_push_const_block)) + .build("ImGui"); + return m_imgui_pipeline; + }); + + m_imgui_pass = graph->add_graphics_pass( + graph->get_graphics_pass_builder() + .writes_to(m_swapchain) + .conditionally_reads_from(m_previous_pass, !m_previous_pass.expired()) + .set_on_record([&](const wrapper::commands::CommandBuffer &cmd_buf) { + ImDrawData *draw_data = ImGui::GetDrawData(); + if (draw_data == nullptr || draw_data->TotalIdxCount == 0 || draw_data->TotalVtxCount == 0) { + m_on_update_user_data(); + return; + } + const ImGuiIO &io = ImGui::GetIO(); + m_push_const_block.scale = glm::vec2(2.0f / io.DisplaySize.x, 2.0f / io.DisplaySize.y); + + cmd_buf.bind_pipeline(m_imgui_pipeline) + .bind_descriptor_set(m_descriptor_set, m_imgui_pipeline) + .push_constant(m_imgui_pipeline, m_push_const_block, VK_SHADER_STAGE_VERTEX_BIT) + .bind_vertex_buffer(m_vertex_buffer) + .bind_index_buffer(m_index_buffer) + .set_viewport(VkViewport{.width = ImGui::GetIO().DisplaySize.x, + .height = ImGui::GetIO().DisplaySize.y, + .minDepth = 0.0f, + .maxDepth = 1.0f}); + + std::uint32_t index_offset = 0; + std::int32_t vertex_offset = 0; + for (std::size_t i = 0; i < draw_data->CmdListsCount; i++) { + const ImDrawList *cmd_list = draw_data->CmdLists[i]; + for (std::int32_t j = 0; j < cmd_list->CmdBuffer.Size; j++) { + const ImDrawCmd &draw_cmd = cmd_list->CmdBuffer[j]; + VkRect2D scissor{}; + scissor.offset.x = std::max((int32_t)(draw_cmd.ClipRect.x), 0); + scissor.offset.y = std::max((int32_t)(draw_cmd.ClipRect.y), 0); + scissor.extent.width = (uint32_t)(draw_cmd.ClipRect.z - draw_cmd.ClipRect.x); + scissor.extent.height = (uint32_t)(draw_cmd.ClipRect.w - draw_cmd.ClipRect.y); + cmd_buf.set_scissor(scissor); + cmd_buf.draw_indexed(draw_cmd.ElemCount, 1, index_offset, vertex_offset); + index_offset += draw_cmd.ElemCount; + } + vertex_offset += cmd_list->VtxBuffer.Size; + } + }) + .build("ImGui", render_graph::DebugLabelColor::BLUE)); +} + +ImGuiRenderer::ImGuiRenderer(ImGuiRenderer &&other) noexcept { + // TODO: Implement me!
+} + +ImGuiRenderer::~ImGuiRenderer() { + ImGui::DestroyContext(m_imgui_context); + m_imgui_context = nullptr; +} + +void ImGuiRenderer::load_font_data_from_file() { + ImGuiIO &io = ImGui::GetIO(); + io.FontGlobalScale = 1.0f; + + // This is here because it doesn't need to be member data + constexpr const char *FONT_FILE_PATH = "assets/fonts/NotoSans-Bold.ttf"; + constexpr float FONT_SIZE = 18.0f; + + spdlog::trace("Loading front {} with size {}", FONT_FILE_PATH, FONT_SIZE); + ImFont *font = io.Fonts->AddFontFromFileTTF(FONT_FILE_PATH, FONT_SIZE); + io.Fonts->GetTexDataAsRGBA32(&m_font_texture_data, &m_font_texture_width, &m_font_texture_height); + + constexpr int FONT_TEXTURE_CHANNELS = 4; + m_font_texture_data_size = m_font_texture_width * m_font_texture_height * FONT_TEXTURE_CHANNELS; +} + +void ImGuiRenderer::set_imgui_style() { + ImGuiStyle &style = ImGui::GetStyle(); + style.Colors[ImGuiCol_TitleBg] = ImVec4(1.0f, 0.0f, 0.0f, 1.0f); + style.Colors[ImGuiCol_TitleBgActive] = ImVec4(1.0f, 0.0f, 0.0f, 1.0f); + style.Colors[ImGuiCol_TitleBgCollapsed] = ImVec4(1.0f, 0.0f, 0.0f, 0.1f); + style.Colors[ImGuiCol_MenuBarBg] = ImVec4(1.0f, 0.0f, 0.0f, 0.4f); + style.Colors[ImGuiCol_Header] = ImVec4(0.8f, 0.0f, 0.0f, 0.4f); + style.Colors[ImGuiCol_HeaderActive] = ImVec4(1.0f, 0.0f, 0.0f, 0.4f); + style.Colors[ImGuiCol_HeaderHovered] = ImVec4(1.0f, 0.0f, 0.0f, 0.4f); + style.Colors[ImGuiCol_FrameBg] = ImVec4(0.0f, 0.0f, 0.0f, 0.8f); + style.Colors[ImGuiCol_CheckMark] = ImVec4(1.0f, 0.0f, 0.0f, 0.8f); + style.Colors[ImGuiCol_SliderGrab] = ImVec4(1.0f, 0.0f, 0.0f, 0.4f); + style.Colors[ImGuiCol_SliderGrabActive] = ImVec4(1.0f, 0.0f, 0.0f, 0.8f); + style.Colors[ImGuiCol_FrameBgHovered] = ImVec4(1.0f, 1.0f, 1.0f, 0.1f); + style.Colors[ImGuiCol_FrameBgActive] = ImVec4(1.0f, 1.0f, 1.0f, 0.2f); + style.Colors[ImGuiCol_Button] = ImVec4(1.0f, 0.0f, 0.0f, 0.4f); + style.Colors[ImGuiCol_ButtonHovered] = ImVec4(1.0f, 0.0f, 0.0f, 0.6f); + style.Colors[ImGuiCol_ButtonActive] = ImVec4(1.0f, 0.0f, 0.0f, 0.8f); +} + +} // namespace inexor::vulkan_renderer::renderers diff --git a/src/vulkan-renderer/renderers/imgui_renderer.cpp b/src/vulkan-renderer/renderers/imgui_renderer.cpp new file mode 100644 index 000000000..794f7323e --- /dev/null +++ b/src/vulkan-renderer/renderers/imgui_renderer.cpp @@ -0,0 +1,200 @@ +#include "inexor/vulkan-renderer/renderer-components/imgui_renderer.hpp" + +#include + +namespace inexor::vulkan_renderer::renderer_components { + +ImGuiRenderer::ImGuiRenderer(const wrapper::Device &device, render_graph::RenderGraph *render_graph) + // Load the vertex shader and fragment shader for ImGui rendering + : m_vertex_shader(m_device, VK_SHADER_STAGE_VERTEX_BIT, "ImGUI", "shaders/ui.vert.spv"), + m_fragment_shader(m_device, VK_SHADER_STAGE_FRAGMENT_BIT, "ImGUI", "shaders/ui.frag.spv") { + + initialize_imgui(); + + spdlog::trace("Setting graphics stage for ImGui"); + + // Create the vertex and index buffer resource for ImGui in the rendergraph + m_vertex_buffer = render_graph->add_buffer(render_graph::BufferUsage::VERTEX_BUFFER, "ImGui"); + m_index_buffer = render_graph->add_buffer(render_graph::BufferUsage::INDEX_BUFFER, "ImGui"); + + // Give me the graphics stage builder + const auto &builder = render_graph->graphics_stage_builder(); + + // TODO: Abstract descriptor builder into rendergraph! 
+ + // Create the graphics stage for ImGui + render_graph->add_stage( + builder.uses_shader(m_vertex_shader) + .uses_shader(m_fragment_shader) + .add_push_constant_block() + .reads_from(m_vertex_buffer) + .reads_from(m_index_buffer) + .writes_to(depth_buffer) + .writes_to(back_buffer) + .set_vertex_attribute_layout({ + {VK_FORMAT_R32G32_SFLOAT, offsetof(ImDrawVert, pos)}, + {VK_FORMAT_R32G32_SFLOAT, offsetof(ImDrawVert, uv)}, + {VK_FORMAT_R8G8B8A8_UNORM, offsetof(ImDrawVert, col)}, + }) + .set_on_record([&](const wrapper::CommandBuffer &cmd_buf) { + // Adjust the push constant block according to the window size + const ImGuiIO &io = ImGui::GetIO(); + m_push_const_block.scale = glm::vec2(2.0f / io.DisplaySize.x, 2.0f / io.DisplaySize.y); + m_push_const_block.translate = glm::vec2(-1.0f); + + // TODO: Move push constants into rendergraph? + cmd_buf.bind_descriptor_sets(m_descriptor->descriptor_sets(), physical.pipeline_layout()); + cmd_buf.push_constants(physical.pipeline_layout(), VK_SHADER_STAGE_VERTEX_BIT, sizeof(PushConstBlock), + &m_push_const_block); + + std::uint32_t index_offset = 0; + std::int32_t vertex_offset = 0; + for (std::size_t i = 0; i < m_imgui_draw_data->CmdListsCount; i++) { + const ImDrawList *cmd_list = m_imgui_draw_data->CmdLists[i]; // NOLINT + for (std::int32_t j = 0; j < cmd_list->CmdBuffer.Size; j++) { + const ImDrawCmd &draw_cmd = cmd_list->CmdBuffer[j]; + vkCmdDrawIndexed(cmd_buf.get(), draw_cmd.ElemCount, 1, index_offset, vertex_offset, 0); + index_offset += draw_cmd.ElemCount; + } + vertex_offset += cmd_list->VtxBuffer.Size; + } + }) + .set_on_update([&]() { + // + m_imgui_draw_data = ImGui::GetDrawData(); + if (m_imgui_draw_data == nullptr) { + return; + } + if (m_imgui_draw_data->TotalVtxCount != 0) { + m_update_vertices = true; + } + if (m_imgui_draw_data->TotalIdxCount != 0) { + m_update_indices = true; + } + + if (m_vertex_data.size() != imgui_draw_data->TotalVtxCount) { + m_vertex_data.clear(); + for (std::size_t i = 0; i < imgui_draw_data->CmdListsCount; i++) { + const ImDrawList *cmd_list = imgui_draw_data->CmdLists[i]; // NOLINT + for (std::size_t j = 0; j < cmd_list->VtxBuffer.Size; j++) { + m_vertex_data.push_back(cmd_list->VtxBuffer.Data[j]); // NOLINT + } + } + } + + if (m_index_data.size() != imgui_draw_data->TotalIdxCount) { + m_index_data.clear(); + for (std::size_t i = 0; i < imgui_draw_data->CmdListsCount; i++) { + const ImDrawList *cmd_list = imgui_draw_data->CmdLists[i]; // NOLINT + for (std::size_t j = 0; j < cmd_list->IdxBuffer.Size; j++) { + m_index_data.push_back(cmd_list->IdxBuffer.Data[j]); // NOLINT + } + } + } + }) + .build("ImGui")); +} + +void ImGuiRenderer::initialize_imgui() { + spdlog::trace("Creating ImGUI context"); + ImGui::CreateContext(); + + spdlog::trace("Setting ImGUI styles"); + ImGuiStyle &style = ImGui::GetStyle(); + style.Colors[ImGuiCol_TitleBg] = ImVec4(1.0f, 0.0f, 0.0f, 1.0f); + style.Colors[ImGuiCol_TitleBgActive] = ImVec4(1.0f, 0.0f, 0.0f, 1.0f); + style.Colors[ImGuiCol_TitleBgCollapsed] = ImVec4(1.0f, 0.0f, 0.0f, 0.1f); + style.Colors[ImGuiCol_MenuBarBg] = ImVec4(1.0f, 0.0f, 0.0f, 0.4f); + style.Colors[ImGuiCol_Header] = ImVec4(0.8f, 0.0f, 0.0f, 0.4f); + style.Colors[ImGuiCol_HeaderActive] = ImVec4(1.0f, 0.0f, 0.0f, 0.4f); + style.Colors[ImGuiCol_HeaderHovered] = ImVec4(1.0f, 0.0f, 0.0f, 0.4f); + style.Colors[ImGuiCol_FrameBg] = ImVec4(0.0f, 0.0f, 0.0f, 0.8f); + style.Colors[ImGuiCol_CheckMark] = ImVec4(1.0f, 0.0f, 0.0f, 0.8f); + style.Colors[ImGuiCol_SliderGrab] = ImVec4(1.0f, 0.0f, 0.0f, 0.4f); + 
style.Colors[ImGuiCol_SliderGrabActive] = ImVec4(1.0f, 0.0f, 0.0f, 0.8f); + style.Colors[ImGuiCol_FrameBgHovered] = ImVec4(1.0f, 1.0f, 1.0f, 0.1f); + style.Colors[ImGuiCol_FrameBgActive] = ImVec4(1.0f, 1.0f, 1.0f, 0.2f); + style.Colors[ImGuiCol_Button] = ImVec4(1.0f, 0.0f, 0.0f, 0.4f); + style.Colors[ImGuiCol_ButtonHovered] = ImVec4(1.0f, 0.0f, 0.0f, 0.6f); + style.Colors[ImGuiCol_ButtonActive] = ImVec4(1.0f, 0.0f, 0.0f, 0.8f); + + ImGuiIO &io = ImGui::GetIO(); + io.FontGlobalScale = 1.0f; + + constexpr const char *FONT_FILE_PATH = "assets/fonts/NotoSans-Bold.ttf"; + constexpr float FONT_SIZE = 18.0f; + spdlog::trace("Loading ImGui front {}", FONT_FILE_PATH); + + ImFont *font = io.Fonts->AddFontFromFileTTF(FONT_FILE_PATH, FONT_SIZE); + + unsigned char *font_texture_data{}; + int font_texture_width{0}; + int font_texture_height{0}; + io.Fonts->GetTexDataAsRGBA32(&font_texture_data, &font_texture_width, &font_texture_height); + + if (font == nullptr || font_texture_data == nullptr) { + spdlog::error("Unable to load font {}. Falling back to error texture", FONT_FILE_PATH); + m_imgui_texture = std::make_unique(m_device, wrapper::CpuTexture()); + } else { + spdlog::trace("Creating ImGUI font texture"); + + // Our font textures always have 4 channels and a single mip level by definition. + constexpr int FONT_TEXTURE_CHANNELS{4}; + constexpr int FONT_MIP_LEVELS{1}; + + VkDeviceSize upload_size = static_cast(font_texture_width) * + static_cast(font_texture_height) * + static_cast(FONT_TEXTURE_CHANNELS); + + m_imgui_texture = std::make_unique( + m_device, font_texture_data, upload_size, font_texture_width, font_texture_height, FONT_TEXTURE_CHANNELS, + FONT_MIP_LEVELS, "ImGUI font texture"); + } +} + +void ImGuiRenderer::setup_stage(render_graph::RenderGraph *render_graph, GraphicsStageBuilder &stage_builder, + DescriptorBuilder &descriptor_builder) { + + render_graph->add_graphics_stage( + stage_builder.uses_shader(m_vertex_shader) + .uses_shader(m_fragment_shader) + .reads_from(vertex_buffer) + .reads_from(index_buffer) + .bind_buffer(vertex_buffer, 0) + .writes_to(back_buffer) + .writes_to(depth_buffer) + .set_blend_attachment({ + .blendEnable = VK_TRUE, + .srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA, + .dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA, + .colorBlendOp = VK_BLEND_OP_ADD, + .srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE, + .dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO, + .alphaBlendOp = VK_BLEND_OP_ADD, + }) + + .set_on_update([&] { + update_imgui(); // TODO: Can we use a class method as std::function and remove the lambda? + }) + .add_descriptor_set_layout( + // TODO: Further abstract descriptors? 
+ descriptor_builder + .add_combined_image_sampler(m_imgui_texture->sampler(), m_imgui_texture->image_view(), 0) + .build("ImGui")) + .build("ImGui")); +} + +void ImGuiRenderer::update_imgui() { + m_imgui_draw_data = ImGui::GetDrawData(); + if (m_imgui_draw_data == nullptr) { + return; + } + if (m_imgui_draw_data->TotalVtxCount != 0) { + m_update_vertices = true; + } + if (m_imgui_draw_data->TotalIdxCount != 0) { + m_update_indices = true; + } +} + +} // namespace inexor::vulkan_renderer::renderer_components diff --git a/src/vulkan-renderer/renderers/octree_renderer.cpp b/src/vulkan-renderer/renderers/octree_renderer.cpp new file mode 100644 index 000000000..f0dd8da2a --- /dev/null +++ b/src/vulkan-renderer/renderers/octree_renderer.cpp @@ -0,0 +1,142 @@ +#include "inexor/vulkan-renderer/renderers/octree_renderer.hpp" + +#include + +#include + +namespace inexor::vulkan_renderer::render_components { + +OctreeRenderer::OctreeRenderer(const wrapper::Device &device, render_graph::RenderGraph *render_graph) + // Load the vertex shader and fragment shader for octree rendering + : m_vertex_shader(m_device, VK_SHADER_STAGE_VERTEX_BIT, "Octree", "shaders/main.vert.spv"), + m_fragment_shader(m_device, VK_SHADER_STAGE_FRAGMENT_BIT, "Octree", "shaders/main.frag.spv") { + + // Reserve memory for the vertex and index buffers + m_vertex_buffers.reserve(m_worlds.size()); + m_index_buffers.reserve(m_worlds.size()); + + spdlog::trace("Creating vertex and index buffer resources"); + + // Create one vertex buffer and one index buffer per octree + for (std::size_t octree_index = 0; octree_index < m_octrees; octree_index++) { + m_vertex_buffer.emplace_back(render_graph::BufferUsage::VERTEX_BUFFER, + std::string("Octree ") + std::to_string(octree_index), std::nullopt); + m_index_buffer.emplace_back(render_graph::BufferUsage::INDEX_BUFFER, + std::string("Octree ") + std::to_string(octree_index), std::nullopt) + } + + spdlog::trace("Setting graphics stage for octree"); + + const auto &stage_builder = render_graph->stage_builder(); + // Create the graphics stage for octree rendering + render_graph->add_stage( + // TODO: Implement graphics stage builder! + stage_builder + // TODO: .set_all_other_stuff + // TODO: Descriptor management! + // TODO: How to bind vertex and index buffers? + .uses_shader(m_vertex_shader) + .uses_shader(m_fragment_shader) + .set_on_record([&](const render_graph::GraphicsStage &stage, const wrapper::CommandBuffer &cmd_buf) { + // TODO: We will support multiple pipelines, right? + // TODO: Do we even need the pipeline wrapper here? + // TODO: How to pass world and perspective matrix into here? + // TODO: How to pass back buffer in here? + // TODO: How to pass depth buffer in here? + // I think the OctreeRenderer should accept those two resources instead of abstracting/hiding it into + // rendergraph! + // TODO: render each octree! (bind vertex/index buffers, pipeline... 
draw) + }) + // TODO: It is important not to use any objects whose lifetime would end with the constructor in here + .set_on_update([&]() { + // This update is called before every frame + // Loop through all octrees and check if we need to update vertices and indices + for (std::size_t octree_index = 0; octree_index < m_octrees.size(); octree_index++) { + if (update_needed[octree_index]) { + // Update the vertices and indices of this octree + generate_octree_vertices(octree_index); + generate_octree_indices(octree_index); + + // Copy the new data into the rendergraph + render_graph->update_buffer(m_vertex_buffers[octree_index], m_octree_vertices[octree_index].data(), + sizeof(world::OctreeVertex) * m_octree_vertices[octree_index].size()); + render_graph->update_buffer(m_index_buffers[octree_index], m_octree_indices[octree_index].data(), + sizeof(std::uint32_t) * m_octree_indices[octree_index].size()); + + // Update is finished + update_needed[octree_index] = false; + } + } + }) + .build("Octree")); +} + +void OctreeRenderer::regenerate_random_octree_geometry() { + m_octrees.clear(); + // We currently create 2 random octrees + // TODO: Abstract this into an octree manager! + m_octrees.push_back( + world::create_random_world(2, {0.0f, 0.0f, 0.0f}, initialize ? std::optional(42) : std::nullopt)); + m_octrees.push_back( + world::create_random_world(2, {10.0f, 0.0f, 0.0f}, initialize ? std::optional(60) : std::nullopt)); +} + +void OctreeRenderer::generate_octree_vertices(const std::size_t octree_index) { + m_octree_vertices[octree_index].clear(); + for (const auto &polygons : m_octrees[octree_index]->polygons(true)) { + for (const auto &triangle : *polygons) { + for (const auto &vertex : triangle) { + // TODO: Improve random color generation (use C++11 random tools) + // TODO: Implement generate_random_color()? + glm::vec3 color = { + static_cast(rand()) / static_cast(RAND_MAX), + static_cast(rand()) / static_cast(RAND_MAX), + static_cast(rand()) / static_cast(RAND_MAX), + }; + m_octree_vertices[octree_index].emplace_back(vertex, color); + } + } + } +} + +void OctreeRenderer::generate_octree_indices(const std::size_t octree_index) { + auto old_vertices = std::move(m_octree_vertices[octree_index]); + + // Note: In case you are wondering if we are still allowed to use m_octree_vertices after the std::move: + // The C++ standard does not define the specific state of an object after it has been moved from. The moved-from + // object's state is unspecified, and it may be in a valid but unpredictable state depending on the implementation. + // We can bring it back into a specified state by calling clear() on it + m_octree_vertices[octree_index].clear(); + m_octree_indices[octree_index].clear(); + + std::unordered_map vertex_map; + for (auto &vertex : old_vertices) { + if (!vertex_map.contains(vertex)) { + if (vertex_map.size() >= std::numeric_limits::max()) { + throw std::runtime_error("Error: The octree is too big!"); + } + vertex_map.emplace(vertex, static_cast(vertex_map.size())); + m_octree_vertices[octree_index].push_back(vertex); + } + m_octree_indices[octree_index].push_back(vertex_map.at(vertex)); + } + spdlog::trace("Reduced octree by {} vertices (from {} to {})", + old_vertices.size() - m_octree_vertices[octree_index].size(), old_vertices.size(), + m_octree_vertices[octree_index].size()); + spdlog::trace("Total indices {} ", m_octree_indices[octree_index].size()); +} + +void OctreeRenderer::regenerate_all_octree_vertices() { + for (std::size_t octree_index = 0; octree_index < m_octrees.size(); octree_index++) { + generate_octree_vertices(octree_index); + } +} + +void OctreeRenderer::regenerate_all_octree_indices() { + for (std::size_t octree_index = 0; octree_index < m_octrees.size(); octree_index++) { + generate_octree_indices(octree_index); + } +} + +} // namespace inexor::vulkan_renderer::render_components diff --git a/src/vulkan-renderer/tools/file.cpp b/src/vulkan-renderer/tools/file.cpp deleted file mode 100644 index eb94f80f5..000000000 --- a/src/vulkan-renderer/tools/file.cpp +++ /dev/null @@ -1,49 +0,0 @@ -#include "inexor/vulkan-renderer/tools/file.hpp" - -#include -#include -#include -#include - -namespace inexor::vulkan_renderer::tools { - -std::string get_file_extension_lowercase(const std::string &file_name) { - assert(!file_name.empty()); - - // Extract the file extension - std::string file_extension = std::filesystem::path(file_name).extension().string(); - - if (file_extension.empty()) { - return ""; - } - - // Convert file extension string to lowercase - std::transform(file_extension.begin(), file_extension.end(), file_extension.begin(), - [](unsigned char c) { return std::tolower(c); }); - - return file_extension; -} - -std::vector read_file_binary_data(const std::string &file_name) { - - // Open stream at the end of the file to read it's size.
- std::ifstream file(file_name.c_str(), std::ios::ate | std::ios::binary | std::ios::in); - - if (!file) { - throw std::runtime_error("Error: Could not open file " + file_name + "!"); - } - - // Read the size of the file - const auto file_size = file.tellg(); - - std::vector buffer(file_size); - - // Set the file read position to the beginning of the file - file.seekg(0); - - file.read(buffer.data(), file_size); - - return buffer; -} - -} // namespace inexor::vulkan_renderer::tools diff --git a/src/vulkan-renderer/vk_tools/representation.cpp b/src/vulkan-renderer/vk_tools/representation.cpp index dda58d3ce..a618d552f 100644 --- a/src/vulkan-renderer/vk_tools/representation.cpp +++ b/src/vulkan-renderer/vk_tools/representation.cpp @@ -770,7 +770,7 @@ std::string_view as_string(const VkSurfaceTransformFlagBitsKHR flag) { } std::string_view get_device_feature_description(const std::size_t index) { - constexpr std::array FEATURE_DESCRIPTIONS{ + std::array FEATURE_DESCRIPTIONS{ // robustBufferAccess "accesses to buffers which are bounds-checked against the range of the buffer descriptor", // fullDrawIndexUint32 @@ -888,6 +888,121 @@ std::string_view get_device_feature_description(const std::size_t index) { return FEATURE_DESCRIPTIONS[index]; } +template <> +VkObjectType get_vulkan_object_type(VkBuffer buf) { + return VK_OBJECT_TYPE_BUFFER; +} + +template <> +VkObjectType get_vulkan_object_type(VkCommandBuffer cmd_buf) { + return VK_OBJECT_TYPE_COMMAND_BUFFER; +} + +template <> +VkObjectType get_vulkan_object_type(VkCommandPool cmd_pool) { + return VK_OBJECT_TYPE_COMMAND_POOL; +} + +template <> +VkObjectType get_vulkan_object_type(VkInstance inst) { + return VK_OBJECT_TYPE_INSTANCE; +} + +template <> +VkObjectType get_vulkan_object_type(VkPhysicalDevice phys_device) { + return VK_OBJECT_TYPE_PHYSICAL_DEVICE; +} + +template <> +VkObjectType get_vulkan_object_type(VkDescriptorPool desc_pool) { + return VK_OBJECT_TYPE_DESCRIPTOR_POOL; +} + +template <> +VkObjectType get_vulkan_object_type(VkDescriptorSet descriptor_set) { + return VK_OBJECT_TYPE_DESCRIPTOR_SET; +} + +template <> +VkObjectType get_vulkan_object_type(VkDescriptorSetLayout desc_set_layout) { + return VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT; +} + +template <> +VkObjectType get_vulkan_object_type(VkDevice device) { + return VK_OBJECT_TYPE_DEVICE; +} + +template <> +VkObjectType get_vulkan_object_type(VkEvent event) { + return VK_OBJECT_TYPE_EVENT; +} + +template <> +VkObjectType get_vulkan_object_type(VkFence fence) { + return VK_OBJECT_TYPE_FENCE; +} + +template <> +VkObjectType get_vulkan_object_type(VkImage img) { + return VK_OBJECT_TYPE_IMAGE; +} + +template <> +VkObjectType get_vulkan_object_type(VkImageView img_view) { + return VK_OBJECT_TYPE_IMAGE_VIEW; +} + +template <> +VkObjectType get_vulkan_object_type(VkPipeline pipeline) { + return VK_OBJECT_TYPE_PIPELINE; +} + +template <> +VkObjectType get_vulkan_object_type(VkPipelineCache pipeline_cache) { + return VK_OBJECT_TYPE_PIPELINE_CACHE; +} + +template <> +VkObjectType get_vulkan_object_type(VkPipelineLayout pipeline_layout) { + return VK_OBJECT_TYPE_PIPELINE_LAYOUT; +} + +template <> +VkObjectType get_vulkan_object_type(VkQueryPool query_pool) { + return VK_OBJECT_TYPE_QUERY_POOL; +} + +template <> +VkObjectType get_vulkan_object_type(VkQueue queue) { + return VK_OBJECT_TYPE_QUEUE; +} + +template <> +VkObjectType get_vulkan_object_type(VkSampler sampler) { + return VK_OBJECT_TYPE_SAMPLER; +} + +template <> +VkObjectType get_vulkan_object_type(VkSemaphore semaphore) { + return 
VK_OBJECT_TYPE_SEMAPHORE; +} + +template <> +VkObjectType get_vulkan_object_type(VkShaderModule shader_module) { + return VK_OBJECT_TYPE_SHADER_MODULE; +} + +template <> +VkObjectType get_vulkan_object_type(VkSurfaceKHR surface) { + return VK_OBJECT_TYPE_SURFACE_KHR; +} + +template <> +VkObjectType get_vulkan_object_type(VkSwapchainKHR swapchain) { + return VK_OBJECT_TYPE_SWAPCHAIN_KHR; +} + std::string_view result_to_description(const VkResult result) { switch (result) { case VK_SUCCESS: diff --git a/src/vulkan-renderer/wrapper/command_buffer.cpp b/src/vulkan-renderer/wrapper/command_buffer.cpp deleted file mode 100644 index 050384091..000000000 --- a/src/vulkan-renderer/wrapper/command_buffer.cpp +++ /dev/null @@ -1,328 +0,0 @@ -#include "inexor/vulkan-renderer/wrapper/command_buffer.hpp" - -#include "inexor/vulkan-renderer/exception.hpp" -#include "inexor/vulkan-renderer/wrapper/device.hpp" -#include "inexor/vulkan-renderer/wrapper/make_info.hpp" - -#include -#include -#include - -namespace inexor::vulkan_renderer::wrapper { - -CommandBuffer::CommandBuffer(const Device &device, const VkCommandPool cmd_pool, std::string name) - : m_device(device), m_name(std::move(name)) { - const auto cmd_buf_ai = make_info({ - .commandPool = cmd_pool, - .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY, - .commandBufferCount = 1, - }); - - if (const auto result = vkAllocateCommandBuffers(m_device.device(), &cmd_buf_ai, &m_command_buffer); - result != VK_SUCCESS) { - throw VulkanException("Error: vkAllocateCommandBuffers failed!", result); - } - - m_wait_fence = std::make_unique(m_device, m_name, false); -} - -CommandBuffer::CommandBuffer(CommandBuffer &&other) noexcept : m_device(other.m_device) { - m_command_buffer = std::exchange(other.m_command_buffer, VK_NULL_HANDLE); - m_name = std::move(other.m_name); - m_wait_fence = std::exchange(other.m_wait_fence, nullptr); - m_staging_bufs = std::move(other.m_staging_bufs); -} - -const CommandBuffer &CommandBuffer::begin_command_buffer(const VkCommandBufferUsageFlags flags) const { - const auto begin_info = make_info({ - .flags = flags, - }); - vkBeginCommandBuffer(m_command_buffer, &begin_info); - - // We must clear the staging buffers which could be left over from previous use of this command buffer - m_staging_bufs.clear(); - return *this; -} - -const CommandBuffer &CommandBuffer::begin_render_pass(const VkRenderPassBeginInfo &render_pass_bi, - const VkSubpassContents subpass_contents) const { - vkCmdBeginRenderPass(m_command_buffer, &render_pass_bi, subpass_contents); - return *this; -} - -const CommandBuffer &CommandBuffer::bind_descriptor_sets(const std::span desc_sets, - const VkPipelineLayout layout, - const VkPipelineBindPoint bind_point, - const std::uint32_t first_set, - const std::span dyn_offsets) const { - assert(layout); - assert(!desc_sets.empty()); - vkCmdBindDescriptorSets(m_command_buffer, bind_point, layout, first_set, - static_cast(desc_sets.size()), desc_sets.data(), - static_cast(dyn_offsets.size()), dyn_offsets.data()); - return *this; -} - -const CommandBuffer &CommandBuffer::bind_index_buffer(const VkBuffer buf, const VkIndexType index_type, - const VkDeviceSize offset) const { - assert(buf); - vkCmdBindIndexBuffer(m_command_buffer, buf, offset, index_type); - return *this; -} - -const CommandBuffer &CommandBuffer::bind_pipeline(const VkPipeline pipeline, - const VkPipelineBindPoint bind_point) const { - assert(pipeline); - vkCmdBindPipeline(m_command_buffer, bind_point, pipeline); - return *this; -} - -const CommandBuffer 
&CommandBuffer::bind_vertex_buffers(const std::span bufs, - const std::uint32_t first_binding, - const std::span offsets) const { - assert(!bufs.empty()); - vkCmdBindVertexBuffers(m_command_buffer, first_binding, static_cast(bufs.size()), bufs.data(), - offsets.empty() ? std::vector(bufs.size(), 0).data() : offsets.data()); - return *this; -} - -const CommandBuffer &CommandBuffer::change_image_layout(const VkImage image, const VkImageLayout old_layout, - const VkImageLayout new_layout, - const VkImageSubresourceRange subres_range, - const VkPipelineStageFlags src_mask, - const VkPipelineStageFlags dst_mask) const { - assert(new_layout != old_layout); - - auto barrier = make_info({ - .oldLayout = old_layout, - .newLayout = new_layout, - .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, - .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, - .image = image, - .subresourceRange = subres_range, - }); - - switch (old_layout) { - case VK_IMAGE_LAYOUT_UNDEFINED: - barrier.srcAccessMask = 0; - break; - case VK_IMAGE_LAYOUT_PREINITIALIZED: - barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; - break; - case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: - barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; - break; - case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: - barrier.srcAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; - break; - case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: - barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT; - break; - case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: - barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; - break; - case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: - barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT; - break; - default: - break; - } - - switch (new_layout) { - case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: - barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; - break; - case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: - barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT; - break; - case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: - barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; - break; - case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: - barrier.dstAccessMask = barrier.dstAccessMask | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; - break; - case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: - if (barrier.srcAccessMask == 0) { - barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT; - } - barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; - break; - default: - break; - } - - return pipeline_image_memory_barrier(src_mask, dst_mask, barrier); -} - -const CommandBuffer & -CommandBuffer::change_image_layout(const VkImage image, const VkImageLayout old_layout, const VkImageLayout new_layout, - const std::uint32_t mip_level_count, const std::uint32_t array_layer_count, - const std::uint32_t base_mip_level, const std::uint32_t base_array_layer, - const VkPipelineStageFlags src_mask, const VkPipelineStageFlags dst_mask) const { - return change_image_layout(image, old_layout, new_layout, - {.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, - .baseMipLevel = base_mip_level, - .levelCount = mip_level_count, - .baseArrayLayer = base_array_layer, - .layerCount = array_layer_count}, - src_mask, dst_mask); -} - -const CommandBuffer &CommandBuffer::copy_buffer(const VkBuffer src_buf, const VkBuffer dst_buf, - const std::span copy_regions) const { - assert(src_buf); - assert(dst_buf); - assert(!copy_regions.empty()); - vkCmdCopyBuffer(m_command_buffer, src_buf, dst_buf, static_cast(copy_regions.size()), - copy_regions.data()); 
- return *this; -} - -const CommandBuffer &CommandBuffer::copy_buffer(const VkBuffer src_buf, const VkBuffer dst_buf, - const VkBufferCopy ©_region) const { - return copy_buffer(src_buf, dst_buf, {©_region, 1}); -} - -const CommandBuffer &CommandBuffer::copy_buffer(const VkBuffer src_buf, const VkBuffer dst_buf, - const VkDeviceSize src_buf_size) const { - return copy_buffer(src_buf, dst_buf, {.size = src_buf_size}); -} - -const CommandBuffer &CommandBuffer::copy_buffer_to_image(const VkBuffer src_buf, const VkImage dst_img, - const std::span copy_regions) const { - assert(src_buf); - assert(dst_img); - vkCmdCopyBufferToImage(m_command_buffer, src_buf, dst_img, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, - static_cast(copy_regions.size()), copy_regions.data()); - return *this; -} - -const CommandBuffer &CommandBuffer::copy_buffer_to_image(const VkBuffer src_buf, const VkImage dst_img, - const VkBufferImageCopy ©_region) const { - assert(src_buf); - assert(dst_img); - vkCmdCopyBufferToImage(m_command_buffer, src_buf, dst_img, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ©_region); - return *this; -} - -const CommandBuffer &CommandBuffer::copy_buffer_to_image(const void *data, - const VkDeviceSize data_size, // NOLINT - const VkImage dst_img, const VkBufferImageCopy ©_region, - const std::string &name) const { - return copy_buffer_to_image(create_staging_buffer(data, data_size, name), dst_img, copy_region); -} - -const CommandBuffer &CommandBuffer::draw(const std::uint32_t vert_count, const std::uint32_t inst_count, - const std::uint32_t first_vert, const std::uint32_t first_inst) const { - vkCmdDraw(m_command_buffer, vert_count, inst_count, first_vert, first_inst); - return *this; -} - -const CommandBuffer &CommandBuffer::draw_indexed(const std::uint32_t index_count, const std::uint32_t inst_count, - const std::uint32_t first_index, const std::int32_t vert_offset, - const std::uint32_t first_inst) const { - vkCmdDrawIndexed(m_command_buffer, index_count, inst_count, first_index, vert_offset, first_inst); - return *this; -} - -const CommandBuffer &CommandBuffer::end_command_buffer() const { - vkEndCommandBuffer(m_command_buffer); - return *this; -} - -const CommandBuffer &CommandBuffer::end_render_pass() const { - vkCmdEndRenderPass(m_command_buffer); - return *this; -} - -const CommandBuffer &CommandBuffer::pipeline_barrier(const VkPipelineStageFlags src_stage_flags, - const VkPipelineStageFlags dst_stage_flags, - const std::span img_mem_barriers, - const std::span mem_barriers, - const std::span buf_mem_barriers, - const VkDependencyFlags dep_flags) const { - // One barrier must be set at least - assert(!(img_mem_barriers.empty() && mem_barriers.empty()) && buf_mem_barriers.empty()); - - vkCmdPipelineBarrier(m_command_buffer, src_stage_flags, dst_stage_flags, dep_flags, - static_cast(mem_barriers.size()), mem_barriers.data(), - static_cast(buf_mem_barriers.size()), buf_mem_barriers.data(), - static_cast(img_mem_barriers.size()), img_mem_barriers.data()); - return *this; -} - -const CommandBuffer &CommandBuffer::pipeline_image_memory_barrier(const VkPipelineStageFlags src_stage_flags, - const VkPipelineStageFlags dst_stage_flags, - const VkImageMemoryBarrier &img_barrier) const { - return pipeline_barrier(src_stage_flags, dst_stage_flags, {&img_barrier, 1}); -} - -const CommandBuffer &CommandBuffer::pipeline_memory_barrier(VkPipelineStageFlags src_stage_flags, - VkPipelineStageFlags dst_stage_flags, - const VkMemoryBarrier &mem_barrier) const { - return pipeline_barrier(src_stage_flags, 
dst_stage_flags, {}, {&mem_barrier, 1}); -} - -const CommandBuffer &CommandBuffer::full_barrier() const { - return pipeline_memory_barrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, - make_info({ - .srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT, - .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT, - })); -} - -const CommandBuffer &CommandBuffer::push_constants(const VkPipelineLayout layout, const VkShaderStageFlags stage, - const std::uint32_t size, const void *data, - const VkDeviceSize offset) const { - assert(layout); - assert(size > 0); - assert(data); - vkCmdPushConstants(m_command_buffer, layout, stage, static_cast(offset), size, data); - return *this; -} - -const CommandBuffer &CommandBuffer::reset_fence() const { - m_wait_fence->reset(); - return *this; -} - -const CommandBuffer &CommandBuffer::submit(const std::span submit_infos) const { - assert(!submit_infos.empty()); - end_command_buffer(); - - if (const auto result = vkQueueSubmit(m_device.graphics_queue(), static_cast(submit_infos.size()), - submit_infos.data(), m_wait_fence->get())) { - throw VulkanException("Error: vkQueueSubmit failed!", result); - } - return *this; -} - -const CommandBuffer &CommandBuffer::submit(const VkSubmitInfo submit_info) const { - return submit({&submit_info, 1}); -} - -const CommandBuffer &CommandBuffer::submit() const { - return submit(make_info({ - .commandBufferCount = 1, - .pCommandBuffers = &m_command_buffer, - })); -} - -const CommandBuffer &CommandBuffer::submit_and_wait(const std::span submit_infos) const { - submit(submit_infos); - m_wait_fence->block(); - return *this; -} - -const CommandBuffer &CommandBuffer::submit_and_wait(const VkSubmitInfo submit_info) const { - return submit_and_wait({&submit_info, 1}); -} - -const CommandBuffer &CommandBuffer::submit_and_wait() const { - return submit_and_wait(make_info({ - .commandBufferCount = 1, - .pCommandBuffers = &m_command_buffer, - })); -} - -} // namespace inexor::vulkan_renderer::wrapper diff --git a/src/vulkan-renderer/wrapper/command_pool.cpp b/src/vulkan-renderer/wrapper/command_pool.cpp deleted file mode 100644 index 799420834..000000000 --- a/src/vulkan-renderer/wrapper/command_pool.cpp +++ /dev/null @@ -1,59 +0,0 @@ -#include "inexor/vulkan-renderer/wrapper/command_pool.hpp" - -#include "inexor/vulkan-renderer/exception.hpp" -#include "inexor/vulkan-renderer/wrapper/device.hpp" -#include "inexor/vulkan-renderer/wrapper/make_info.hpp" - -#include - -#include - -namespace inexor::vulkan_renderer::wrapper { - -CommandPool::CommandPool(const Device &device, std::string name) : m_device(device), m_name(std::move(name)) { - const auto cmd_pool_ci = make_info({ - .flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT | VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, - .queueFamilyIndex = device.graphics_queue_family_index(), - }); - - // Get the thread id as string for naming the command pool and the command buffers - const std::size_t thread_id = std::hash{}(std::this_thread::get_id()); - spdlog::trace("Creating command pool for thread {}", thread_id); - - if (const auto result = vkCreateCommandPool(m_device.device(), &cmd_pool_ci, nullptr, &m_cmd_pool); - result != VK_SUCCESS) { - throw VulkanException("Error: vkCreateCommandPool failed for command pool " + m_name + "!", result); - } -} - -CommandPool::CommandPool(CommandPool &&other) noexcept : m_device(other.m_device) { - m_cmd_pool = std::exchange(other.m_cmd_pool, nullptr); -} - -CommandPool::~CommandPool() { - vkDestroyCommandPool(m_device.device(), m_cmd_pool, nullptr); 
-} - -const CommandBuffer &CommandPool::request_command_buffer(const std::string &name) { - // Try to find a command buffer which is currently not used - for (const auto &cmd_buf : m_cmd_bufs) { - if (cmd_buf->fence_status() == VK_SUCCESS) { - // Reset the command buffer's fence to make it usable again - cmd_buf->reset_fence(); - m_device.set_debug_marker_name(*cmd_buf->ptr(), VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, name); - cmd_buf->begin_command_buffer(); - return *cmd_buf; - } - } - - // We need to create a new command buffer because no free one was found - // Note that there is currently no method for shrinking m_cmd_bufs, but this should not be a problem - m_cmd_bufs.emplace_back(std::make_unique(m_device, m_cmd_pool, "command buffer")); - - spdlog::trace("Creating new command buffer #{}", m_cmd_bufs.size()); - - m_cmd_bufs.back()->begin_command_buffer(); - return *m_cmd_bufs.back(); -} - -} // namespace inexor::vulkan_renderer::wrapper diff --git a/src/vulkan-renderer/wrapper/commands/command_buffer.cpp b/src/vulkan-renderer/wrapper/commands/command_buffer.cpp new file mode 100644 index 000000000..5ad45d536 --- /dev/null +++ b/src/vulkan-renderer/wrapper/commands/command_buffer.cpp @@ -0,0 +1,555 @@ +#include "inexor/vulkan-renderer/wrapper/commands/command_buffer.hpp" + +#include "inexor/vulkan-renderer/exception.hpp" +#include "inexor/vulkan-renderer/render-graph/buffer.hpp" +#include "inexor/vulkan-renderer/wrapper/device.hpp" +#include "inexor/vulkan-renderer/wrapper/make_info.hpp" +#include "inexor/vulkan-renderer/wrapper/pipelines/pipeline.hpp" + +#include +#include +#include + +namespace inexor::vulkan_renderer::wrapper::commands { + +CommandBuffer::CommandBuffer(const wrapper::Device &device, const VkCommandPool cmd_pool, std::string name) + : m_device(device), m_name(std::move(name)) { + if (m_name.empty()) { + throw std::invalid_argument("[CommandBuffer::CommandBuffer] Error: Parameter 'name' is empty!"); + } + if (!cmd_pool) { + throw std::invalid_argument( + "[CommandBuffer::CommandBuffer] Error: Parameter 'cmd_pool' is an invalid pointer!"); + } + + const auto cmd_buf_ai = make_info({ + .commandPool = cmd_pool, + .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY, + .commandBufferCount = 1, + }); + + // Note that command buffers are allocated from a command pool, meaning the memory required for this will be + // freed if the corresponding command pool is destroyed. Command buffers are not freed in the destructor. + if (const auto result = vkAllocateCommandBuffers(m_device.device(), &cmd_buf_ai, &m_cmd_buf); + result != VK_SUCCESS) { + throw VulkanException("Error: vkAllocateCommandBuffers failed!", result); + } + m_device.set_debug_name(m_cmd_buf, m_name); + m_cmd_buf_execution_completed = std::make_unique(m_device, m_name, false); +} + +CommandBuffer::CommandBuffer(CommandBuffer &&other) noexcept : m_device(other.m_device) { + // TODO: Check me! 
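    // (Editorial note, not part of this commit:) the moves below transfer ownership of the raw
    // VkCommandBuffer handle, the debug name and the execution-completed fence into *this;
    // std::exchange leaves the moved-from object holding VK_NULL_HANDLE / nullptr, so its
    // destructor has nothing left to release.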
+ m_cmd_buf = std::exchange(other.m_cmd_buf, VK_NULL_HANDLE); + m_name = std::move(other.m_name); + m_cmd_buf_execution_completed = std::exchange(other.m_cmd_buf_execution_completed, nullptr); +} + +const CommandBuffer &CommandBuffer::begin_command_buffer(const VkCommandBufferUsageFlags flags) const { + const auto begin_info = make_info({ + .flags = flags, + }); + vkBeginCommandBuffer(m_cmd_buf, &begin_info); + return *this; +} + +const CommandBuffer &CommandBuffer::begin_debug_label_region(std::string name, std::array color) const { + if (name.empty()) { + // NOTE: Despite Vulkan spec allowing name to be empty, we strictly enforce this rule in our code base! + throw std::invalid_argument("[CommandBuffer::begin_debug_label_region] Error: Parameter 'name' is empty!"); + } + auto label = make_info({ + .pLabelName = name.c_str(), + .color = {color[0], color[1], color[2], color[3]}, + }); + vkCmdBeginDebugUtilsLabelEXT(m_cmd_buf, &label); + return *this; +} + +const CommandBuffer &CommandBuffer::begin_rendering(const VkRenderingInfo &rendering_info) const { + vkCmdBeginRendering(m_cmd_buf, &rendering_info); + return *this; +}; + +const CommandBuffer &CommandBuffer::bind_descriptor_set(const VkDescriptorSet descriptor_set, + const std::weak_ptr pipeline) const { + if (!descriptor_set) { + throw std::invalid_argument( + "[CommandBuffer::bind_descriptor_set] Error: Parameter 'descriptor_set' is invalid!"); + } + if (pipeline.expired()) { + throw std::invalid_argument("[CommandBuffer::bind_descriptor_set] Error: Parameter 'pipeline' is invalid!"); + } + vkCmdBindDescriptorSets(m_cmd_buf, VK_PIPELINE_BIND_POINT_GRAPHICS, + pipeline.lock()->m_pipeline_layout->m_pipeline_layout, 0, 1, &descriptor_set, 0, nullptr); + return *this; +} + +const CommandBuffer &CommandBuffer::bind_index_buffer(const std::weak_ptr buffer, + const VkIndexType index_type, + const VkDeviceSize offset) const { + if (buffer.expired()) { + throw std::invalid_argument("[CommandBuffer::bind_index_buffer] Error: Parameter 'buffer' is invalid!"); + } + if (buffer.lock()->m_buffer_type != BufferType::INDEX_BUFFER) { + throw std::invalid_argument("Error: Rendergraph buffer resource " + buffer.lock()->m_name + + " is not an index buffer!"); + } + vkCmdBindIndexBuffer(m_cmd_buf, buffer.lock()->m_buffer, offset, index_type); + return *this; +} + +const CommandBuffer &CommandBuffer::bind_pipeline(const std::weak_ptr pipeline, + const VkPipelineBindPoint bind_point) const { + if (pipeline.expired()) { + throw std::invalid_argument("[CommandBuffer::bind_pipeline] Error: Parameter 'pipeline' is invalid!"); + } + vkCmdBindPipeline(m_cmd_buf, bind_point, pipeline.lock()->m_pipeline); + return *this; +} + +const CommandBuffer &CommandBuffer::bind_vertex_buffer(const std::weak_ptr buffer) const { + if (buffer.expired()) { + throw std::invalid_argument("[CommandBuffer::bind_vertex_buffer] Error: Parameter 'buffer' is invaldi!"); + } + if (buffer.lock()->m_buffer_type != BufferType::VERTEX_BUFFER) { + throw std::invalid_argument("Error: Rendergraph buffer resource " + buffer.lock()->m_name + + " is not a vertex buffer!"); + } + vkCmdBindVertexBuffers(m_cmd_buf, 0, 1, &buffer.lock()->m_buffer, std::vector(1, 0).data()); + return *this; +} + +const CommandBuffer &CommandBuffer::change_image_layout(const VkImage img, + const VkImageLayout old_layout, + const VkImageLayout new_layout, + const VkImageSubresourceRange subres_range, + const VkPipelineStageFlags src_mask, + const VkPipelineStageFlags dst_mask) const { + if (!img) { + throw 
std::invalid_argument("[CommandBuffer::change_image_layout] Error: Parameter 'img' is invalid!"); + } + + auto barrier = make_info({ + .oldLayout = old_layout, + .newLayout = new_layout, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .image = img, + .subresourceRange = subres_range, + }); + + switch (old_layout) { + case VK_IMAGE_LAYOUT_UNDEFINED: + barrier.srcAccessMask = 0; + break; + case VK_IMAGE_LAYOUT_PREINITIALIZED: + barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; + break; + case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: + barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; + break; + case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: + barrier.srcAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; + break; + case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: + barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT; + break; + case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: + barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; + break; + case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: + barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT; + break; + default: + break; + } + + switch (new_layout) { + case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: + barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; + break; + case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: + barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT; + break; + case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: + barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; + break; + case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: + barrier.dstAccessMask |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; + break; + case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: + if (barrier.srcAccessMask == 0) { + barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT; + } + barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; + break; + default: + break; + } + + return pipeline_image_memory_barrier(src_mask, dst_mask, barrier); +} + +const CommandBuffer &CommandBuffer::change_image_layout(const VkImage img, + const VkImageLayout old_layout, + const VkImageLayout new_layout, + const std::uint32_t mip_level_count, + const std::uint32_t array_layer_count, + const std::uint32_t base_mip_level, + const std::uint32_t base_array_layer, + const VkPipelineStageFlags src_mask, + const VkPipelineStageFlags dst_mask) const { + // NOTE: We delegate error checks to the other overload + return change_image_layout(img, old_layout, new_layout, + { + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .baseMipLevel = base_mip_level, + .levelCount = mip_level_count, + .baseArrayLayer = base_array_layer, + .layerCount = array_layer_count, + }, + src_mask, dst_mask); +} + +const CommandBuffer &CommandBuffer::copy_buffer(const VkBuffer src_buf, + const VkBuffer dst_buf, + const std::span copy_regions) const { + if (!src_buf) { + throw std::invalid_argument("[CommandBuffer::copy_buffer] Error: Parameter 'src_buf' is invalid!"); + } + if (!dst_buf) { + throw std::invalid_argument("[CommandBuffer::copy_buffer] Error: Parameter 'dst_buf' is invalid!"); + } + vkCmdCopyBuffer(m_cmd_buf, src_buf, dst_buf, static_cast(copy_regions.size()), copy_regions.data()); + return *this; +} + +const CommandBuffer & +CommandBuffer::copy_buffer(const VkBuffer src_buf, const VkBuffer dst_buf, const VkBufferCopy ©_region) const { + // NOTE: We delegate error checks to the other function overload + return copy_buffer(src_buf, dst_buf, {©_region, 1}); +} + +const CommandBuffer & +CommandBuffer::copy_buffer(const 
VkBuffer src_buf, const VkBuffer dst_buf, const VkDeviceSize src_buf_size) const { + // NOTE: We delegate error checks to the other function overload + return copy_buffer(src_buf, dst_buf, {.size = src_buf_size}); +} + +const CommandBuffer &CommandBuffer::copy_buffer_to_image(const VkBuffer src_buf, + const VkImage dst_img, + const VkBufferImageCopy ©_region) const { + if (!src_buf) { + throw std::invalid_argument("[CommandBuffer::copy_buffer_to_image] Error: Parameter 'src_buf' is invalid!"); + } + if (!dst_img) { + throw std::invalid_argument("[CommandBuffer::copy_buffer_to_image] Error: Parameter 'dst_img' is invalid!"); + } + vkCmdCopyBufferToImage(m_cmd_buf, src_buf, dst_img, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ©_region); + return *this; +} + +const CommandBuffer & +CommandBuffer::copy_buffer_to_image(const VkBuffer buffer, const VkImage img, const VkExtent3D extent) const { + // NOTE: We delegate error checks to the other function overload + return copy_buffer_to_image(buffer, img, + { + .imageSubresource = + { + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .layerCount = 1, + }, + .imageExtent = + { + .width = extent.width, + .height = extent.height, + .depth = 1, + }, + }); +} + +const CommandBuffer &CommandBuffer::copy_buffer_to_image(const VkBuffer src_buf, const std::weak_ptr img) const { + // NOTE: We delegate error checks to the other function overload + const auto &image = img.lock(); + return copy_buffer_to_image(src_buf, image->m_img, + { + .width = image->m_img_ci.extent.width, + .height = image->m_img_ci.extent.height, + .depth = 1, + }); +} + +const CommandBuffer &CommandBuffer::draw(const std::uint32_t vert_count, + const std::uint32_t inst_count, + const std::uint32_t first_vert, + const std::uint32_t first_inst) const { + vkCmdDraw(m_cmd_buf, vert_count, inst_count, first_vert, first_inst); + return *this; +} + +const CommandBuffer &CommandBuffer::draw_indexed(const std::uint32_t index_count, + const std::uint32_t inst_count, + const std::uint32_t first_index, + const std::int32_t vert_offset, + const std::uint32_t first_inst) const { + vkCmdDrawIndexed(m_cmd_buf, index_count, inst_count, first_index, vert_offset, first_inst); + return *this; +} + +const CommandBuffer &CommandBuffer::end_command_buffer() const { + vkEndCommandBuffer(m_cmd_buf); + return *this; +} + +const CommandBuffer &CommandBuffer::end_debug_label_region() const { + vkCmdEndDebugUtilsLabelEXT(m_cmd_buf); + return *this; +} + +const CommandBuffer &CommandBuffer::end_rendering() const { + vkCmdEndRendering(m_cmd_buf); + return *this; +} + +// TODO: Reduce number of methods which deal with barriers to only those which are really needed! 
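Editor's note: the following sketch is illustrative only and not part of this diff. It shows how the barrier helpers and copy_buffer_to_image() defined in this file are typically combined for a staging-buffer texture upload; the helper function name and the staging buffer, image and extent arguments are assumptions.

// Hypothetical helper (assumption, not from this commit): stage-upload a texture through the
// fluent CommandBuffer API; all handles are assumed to be created elsewhere in the engine.
void upload_texture_example(const commands::CommandBuffer &cmd_buf,
                            const VkBuffer staging_buf,
                            const VkImage texture_img,
                            const VkExtent3D extent) {
    // Transition UNDEFINED -> TRANSFER_DST_OPTIMAL before the copy
    cmd_buf.pipeline_image_memory_barrier_before_copy_buffer_to_image(texture_img)
        // Copy the pixel data from the staging buffer into the image
        .copy_buffer_to_image(staging_buf, texture_img, extent)
        // Transition TRANSFER_DST_OPTIMAL -> SHADER_READ_ONLY_OPTIMAL so fragment shaders can sample it
        .pipeline_image_memory_barrier_after_copy_buffer_to_image(texture_img);
}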
+ +const CommandBuffer &CommandBuffer::pipeline_barrier(const VkPipelineStageFlags src_stage_flags, + const VkPipelineStageFlags dst_stage_flags, + const std::span img_mem_barriers, + const std::span mem_barriers, + const std::span buf_mem_barriers, + const VkDependencyFlags dep_flags) const { + vkCmdPipelineBarrier(m_cmd_buf, src_stage_flags, dst_stage_flags, dep_flags, + static_cast(mem_barriers.size()), mem_barriers.data(), + static_cast(buf_mem_barriers.size()), buf_mem_barriers.data(), + static_cast(img_mem_barriers.size()), img_mem_barriers.data()); + return *this; +} + +const CommandBuffer & +CommandBuffer::pipeline_buffer_memory_barrier(const VkPipelineStageFlags src_stage_flags, + const VkPipelineStageFlags dst_stage_flags, + const VkBufferMemoryBarrier &buffer_mem_barrier) const { + return pipeline_barrier(src_stage_flags, dst_stage_flags, {}, {}, {&buffer_mem_barrier, 1}); +} + +const CommandBuffer &CommandBuffer::pipeline_buffer_memory_barrier(VkPipelineStageFlags src_stage_flags, + VkPipelineStageFlags dst_stage_flags, + VkAccessFlags src_access_flags, + VkAccessFlags dst_access_flags, + VkBuffer buffer, + VkDeviceSize size, + VkDeviceSize offset) const { + return pipeline_buffer_memory_barrier(src_stage_flags, dst_stage_flags, + wrapper::make_info({ + .srcAccessMask = src_access_flags, + .dstAccessMask = dst_access_flags, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .buffer = buffer, + .offset = offset, + .size = size, + })); +} + +const CommandBuffer & +CommandBuffer::pipeline_buffer_memory_barriers(const VkPipelineStageFlags src_stage_flags, + const VkPipelineStageFlags dst_stage_flags, + const std::span buffer_mem_barriers) const { + return pipeline_barrier(src_stage_flags, dst_stage_flags, {}, {}, buffer_mem_barriers); +} + +const CommandBuffer &CommandBuffer::pipeline_buffer_memory_barrier_before_copy_buffer(const VkBuffer buffer) const { + return pipeline_buffer_memory_barrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, + VK_ACCESS_MEMORY_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, buffer); +} + +const CommandBuffer &CommandBuffer::pipeline_buffer_memory_barrier_after_copy_buffer(const VkBuffer buffer) const { + return pipeline_buffer_memory_barrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, + VK_ACCESS_TRANSFER_WRITE_BIT, + VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT, buffer); +} + +const CommandBuffer &CommandBuffer::pipeline_image_memory_barrier(const VkPipelineStageFlags src_stage_flags, + const VkPipelineStageFlags dst_stage_flags, + const VkImageMemoryBarrier &img_barrier) const { + return pipeline_barrier(src_stage_flags, dst_stage_flags, {&img_barrier, 1}); +} + +const CommandBuffer &CommandBuffer::pipeline_image_memory_barrier(const VkPipelineStageFlags src_stage_flags, + const VkPipelineStageFlags dst_stage_flags, + const VkAccessFlags src_access_flags, + const VkAccessFlags dst_access_flags, + const VkImageLayout old_img_layout, + const VkImageLayout new_img_layout, + const VkImage img) const { + if (!img) { + throw std::invalid_argument( + "[CommandBuffer::pipeline_image_memory_barrier] Error: Parameter 'img' is invalid!"); + } + return pipeline_image_memory_barrier(src_stage_flags, dst_stage_flags, + wrapper::make_info({ + .srcAccessMask = src_access_flags, + .dstAccessMask = dst_access_flags, + .oldLayout = old_img_layout, + .newLayout = new_img_layout, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = 
VK_QUEUE_FAMILY_IGNORED, + .image = img, + .subresourceRange = + { + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = 1, + }, + })); +} + +const CommandBuffer &CommandBuffer::pipeline_image_memory_barrier_after_copy_buffer_to_image(const VkImage img) const { + if (!img) { + throw std::invalid_argument("[CommandBuffer::pipeline_image_memory_barrier_after_copy_buffer_to_image] Error: " + "Parameter 'img' is invalid!"); + } + return pipeline_image_memory_barrier(VK_PIPELINE_STAGE_TRANSFER_BIT, // src_stage_flags + VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, // dst_stage_flags + VK_ACCESS_TRANSFER_WRITE_BIT, // src_access_flags + VK_ACCESS_SHADER_READ_BIT, // dst_access_flags + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // old_img_layout + VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, // new_img_layout + img); +} + +const CommandBuffer &CommandBuffer::pipeline_image_memory_barrier_before_copy_buffer_to_image(const VkImage img) const { + if (!img) { + throw std::invalid_argument("[CommandBuffer::pipeline_image_memory_barrier_before_copy_buffer_to_image] Error: " + "Parameter 'img' is invalid!"); + } + return pipeline_image_memory_barrier(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, // src_stage_flags + VK_PIPELINE_STAGE_TRANSFER_BIT, // dst_stage_flags + 0, // src_access_flags + VK_ACCESS_TRANSFER_WRITE_BIT, // dst_access_flags + VK_IMAGE_LAYOUT_UNDEFINED, // old_img_layout + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // new_img_layout + img); +} + +const CommandBuffer & +CommandBuffer::pipeline_image_memory_barriers(const VkPipelineStageFlags src_stage_flags, + const VkPipelineStageFlags dst_stage_flags, + const std::span img_barrier) const { + return pipeline_barrier(src_stage_flags, dst_stage_flags, img_barrier); +} + +const CommandBuffer &CommandBuffer::pipeline_memory_barrier(const VkPipelineStageFlags src_stage_flags, + const VkPipelineStageFlags dst_stage_flags, + const VkMemoryBarrier &mem_barrier) const { + return pipeline_barrier(src_stage_flags, dst_stage_flags, {}, {&mem_barrier, 1}); +} + +const CommandBuffer & +CommandBuffer::pipeline_memory_barriers(const VkPipelineStageFlags src_stage_flags, + const VkPipelineStageFlags dst_stage_flags, + const std::span mem_barriers) const { + return pipeline_barrier(src_stage_flags, dst_stage_flags, {}, mem_barriers); +} + +const CommandBuffer &CommandBuffer::full_barrier() const { + return pipeline_memory_barrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, + make_info({ + .srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT, + .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT, + })); +} + +const CommandBuffer &CommandBuffer::insert_debug_label(std::string name, std::array color) const { + if (name.empty()) { + throw std::invalid_argument("[] Error: Parameter 'name' is empty!"); + } + auto label = make_info({ + .pLabelName = name.c_str(), + .color = {color[0], color[1], color[2], color[3]}, + }); + vkCmdInsertDebugUtilsLabelEXT(m_cmd_buf, &label); + return *this; +} + +const CommandBuffer &CommandBuffer::push_constants(const VkPipelineLayout layout, + const VkShaderStageFlags stage, + const std::uint32_t size, + const void *data, + const VkDeviceSize offset) const { + if (!layout) { + throw std::invalid_argument("[CommandBuffer::push_constants] Error: Parameter 'layout' is invalid!"); + } + if (size == 0) { + throw std::invalid_argument("[CommandBuffer::push_constants] Error: Parameter 'size' is 0!"); + } + if (!data) { + throw std::invalid_argument("[CommandBuffer::push_constants] Error: 
Parameter 'data' is 0!"); + } + vkCmdPushConstants(m_cmd_buf, layout, stage, static_cast(offset), size, data); + return *this; +} + +const CommandBuffer &CommandBuffer::set_scissor(const VkRect2D scissor) const { + vkCmdSetScissor(m_cmd_buf, 0, 1, &scissor); + return *this; +} + +const CommandBuffer &CommandBuffer::set_suboperation_debug_name(std::string name) const { + m_device.set_debug_name(m_cmd_buf, m_name + name); + return *this; +} + +const CommandBuffer &CommandBuffer::set_viewport(const VkViewport viewport) const { + vkCmdSetViewport(m_cmd_buf, 0, 1, &viewport); + return *this; +} + +void CommandBuffer::submit_and_wait(const VkQueueFlagBits queue_type, + const std::span wait_semaphores, + const std::span signal_semaphores) const { + end_command_buffer(); + + // TODO: What to do here with graphics queue? + + // NOTE: We must specify as many pipeline stage flags as there are wait semaphores! + std::vector wait_stages(wait_semaphores.size(), + VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT); + + const auto submit_info = make_info({ + .waitSemaphoreCount = static_cast(wait_semaphores.size()), + .pWaitSemaphores = wait_semaphores.data(), + .pWaitDstStageMask = wait_stages.data(), + .commandBufferCount = 1, + .pCommandBuffers = &m_cmd_buf, + .signalSemaphoreCount = static_cast(signal_semaphores.size()), + .pSignalSemaphores = signal_semaphores.data(), + }); + + // TODO: Support VK_QUEUE_SPARSE_BINDING_BIT if required + auto get_queue = [&]() { + switch (queue_type) { + case VK_QUEUE_TRANSFER_BIT: { + return m_device.m_transfer_queue; + } + case VK_QUEUE_COMPUTE_BIT: { + return m_device.m_compute_queue; + } + default: { + // VK_QUEUE_GRAPHICS_BIT and rest + return m_device.m_graphics_queue; + } + } + }; + + if (const auto result = vkQueueSubmit(get_queue(), 1, &submit_info, m_cmd_buf_execution_completed->m_fence)) { + throw VulkanException("[CommandBuffer::submit] Error: vkQueueSubmit failed!", result); + } + m_cmd_buf_execution_completed->wait(); +} + +} // namespace inexor::vulkan_renderer::wrapper::commands diff --git a/src/vulkan-renderer/wrapper/commands/command_pool.cpp b/src/vulkan-renderer/wrapper/commands/command_pool.cpp new file mode 100644 index 000000000..7ad81bc2c --- /dev/null +++ b/src/vulkan-renderer/wrapper/commands/command_pool.cpp @@ -0,0 +1,83 @@ +#include "inexor/vulkan-renderer/wrapper/commands/command_pool.hpp" + +#include "inexor/vulkan-renderer/exception.hpp" +#include "inexor/vulkan-renderer/wrapper/device.hpp" +#include "inexor/vulkan-renderer/wrapper/make_info.hpp" + +#include + +#include + +namespace inexor::vulkan_renderer::wrapper::commands { + +CommandPool::CommandPool(const Device &device, const VkQueueFlagBits queue_type, std::string name) + : m_device(device), m_queue_type(queue_type), m_name(std::move(name)) { + + auto get_queue_family_index = [&]() { + switch (queue_type) { + case VK_QUEUE_TRANSFER_BIT: { + return m_device.m_transfer_queue_family_index; + } + case VK_QUEUE_COMPUTE_BIT: { + return m_device.m_compute_queue_family_index; + } + default: { + // VK_QUEUE_GRAPHICS_BIT and rest + return m_device.m_graphics_queue_family_index; + } + } + }; + + const auto cmd_pool_ci = make_info({ + .flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT | VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, + .queueFamilyIndex = get_queue_family_index(), + }); + + // Get the thread id as string for naming the command pool and the command buffers + std::ostringstream thread_id; + thread_id << std::this_thread::get_id(); + spdlog::trace("Creating {} command pool for thread ID 
{}", vk_tools::as_string(queue_type), thread_id.str()); + + if (const auto result = vkCreateCommandPool(m_device.device(), &cmd_pool_ci, nullptr, &m_cmd_pool); + result != VK_SUCCESS) { + throw VulkanException("Error: vkCreateCommandPool failed for command pool " + m_name + "!", result); + } + m_device.set_debug_name(m_cmd_pool, m_name); +} + +CommandPool::CommandPool(CommandPool &&other) noexcept : m_device(other.m_device) { + m_cmd_pool = std::exchange(other.m_cmd_pool, nullptr); + m_name = std::move(other.m_name); + m_cmd_bufs = std::move(other.m_cmd_bufs); + m_queue_type = other.m_queue_type; +} + +CommandPool::~CommandPool() { + vkDestroyCommandPool(m_device.device(), m_cmd_pool, nullptr); +} + +CommandBuffer &CommandPool::request_command_buffer(const std::string &name) const { + // Try to find a command buffer which is currently not used + for (const auto &cmd_buf : m_cmd_bufs) { + if (cmd_buf->m_cmd_buf_execution_completed->status() == VK_SUCCESS) { + // Reset the command buffer's fence to make it usable again + cmd_buf->m_cmd_buf_execution_completed->reset_fence(); + cmd_buf->begin_command_buffer(); + m_device.set_debug_name(cmd_buf->m_cmd_buf, name); + return *cmd_buf; + } + } + + spdlog::trace("Creating {} new command buffer #{}", vk_tools::as_string(m_queue_type), 1 + m_cmd_bufs.size()); + + // No free command buffer was found so we need to create a new one + // Note that there is currently no method for shrinking m_cmd_bufs, but this should not be a problem + m_cmd_bufs.emplace_back(std::make_unique(m_device, m_cmd_pool, name)); + + auto &new_cmd_buf = *m_cmd_bufs.back(); + new_cmd_buf.begin_command_buffer(); + m_device.set_debug_name(new_cmd_buf.m_cmd_buf, name); + return new_cmd_buf; +} + +} // namespace inexor::vulkan_renderer::wrapper::commands diff --git a/src/vulkan-renderer/wrapper/cpu_texture.cpp b/src/vulkan-renderer/wrapper/cpu_texture.cpp deleted file mode 100644 index 5385de23e..000000000 --- a/src/vulkan-renderer/wrapper/cpu_texture.cpp +++ /dev/null @@ -1,89 +0,0 @@ -#include "inexor/vulkan-renderer/wrapper/cpu_texture.hpp" - -#define STB_IMAGE_IMPLEMENTATION -#include -#include - -#include -#include -#include - -namespace inexor::vulkan_renderer::wrapper { - -CpuTexture::CpuTexture() : m_name("default texture") { - generate_error_texture_data(); -} - -CpuTexture::CpuTexture(const std::string &file_name, std::string name) : m_name(std::move(name)) { - assert(!file_name.empty()); - assert(!m_name.empty()); - - // Load the texture file using stb_image library. - // Force stb_image to load an alpha channel as well. - m_texture_data = stbi_load(file_name.c_str(), &m_texture_width, &m_texture_height, nullptr, STBI_rgb_alpha); - - if (m_texture_data == nullptr) { - spdlog::error("Could not load texture file {} using stbi_load! Falling back to error texture", file_name); - generate_error_texture_data(); - } else { - - // TODO: We are currently hard coding the number of channels with STBI_rgb_alpha. - // Eventually, we probably need to pass this information into this class from - // a higher level class - like a material loader class. - // So, as an example, if the material loader is loading a normal map, we know - // we need to tell this class to load a 3 channel texture. If it can, great. - // If it can not, then this class probably needs to load a 3 channel error texture. - m_texture_channels = 4; - - // TODO: We are currently only supporting 1 mip level. 
- m_mip_levels = 1; - - spdlog::trace("Texture dimensions: width: {}, height: {}, channels: {} mip levels: {}", m_texture_width, - m_texture_height, m_texture_channels, m_mip_levels); - } -} - -CpuTexture::CpuTexture(CpuTexture &&other) noexcept { - m_name = std::move(other.m_name); - m_texture_width = other.m_texture_width; - m_texture_height = other.m_texture_height; - m_texture_channels = other.m_texture_channels; - m_mip_levels = other.m_mip_levels; - m_texture_data = other.m_texture_data; -} - -CpuTexture::~CpuTexture() { - stbi_image_free(m_texture_data); -} - -void CpuTexture::generate_error_texture_data() { - assert(m_texture_data == nullptr); - - m_texture_width = 512; - m_texture_height = 512; - m_texture_channels = 4; - m_mip_levels = 1; - - // Create an 8x8 checkerboard pattern of squares. - constexpr int SQUARE_DIMENSION{64}; - // pink, purple - constexpr std::array, 2> COLORS{{{0xFF, 0x69, 0xB4, 0xFF}, {0x94, 0x00, 0xD3, 0xFF}}}; - - const auto get_color = [](int x, int y, int square_dimension, std::size_t colors) -> int { - return static_cast( - (static_cast(x / square_dimension) + static_cast(y / square_dimension)) % colors); - }; - - // Note: Using the stb library function since we are freeing with stbi_image_free. - m_texture_data = static_cast(STBI_MALLOC(data_size())); // NOLINT - - // Performance could be improved by copying complete rows after one or two rows are created with the loops. - for (int y = 0; y < m_texture_height; y++) { - for (int x = 0; x < m_texture_width; x++) { - const int color_id = get_color(x, y, SQUARE_DIMENSION, COLORS.size()); - std::memcpy(m_texture_data, &COLORS[color_id][0], 4 * sizeof(COLORS[color_id][0])); - } - } -} - -} // namespace inexor::vulkan_renderer::wrapper diff --git a/src/vulkan-renderer/wrapper/descriptor.cpp b/src/vulkan-renderer/wrapper/descriptor.cpp deleted file mode 100644 index 4bbcd0349..000000000 --- a/src/vulkan-renderer/wrapper/descriptor.cpp +++ /dev/null @@ -1,94 +0,0 @@ -#include "inexor/vulkan-renderer/wrapper/descriptor.hpp" - -#include "inexor/vulkan-renderer/exception.hpp" -#include "inexor/vulkan-renderer/wrapper/device.hpp" -#include "inexor/vulkan-renderer/wrapper/make_info.hpp" - -#include -#include - -namespace inexor::vulkan_renderer::wrapper { - -ResourceDescriptor::ResourceDescriptor(ResourceDescriptor &&other) noexcept : m_device(other.m_device) { - m_name = std::move(other.m_name); - m_descriptor_pool = std::exchange(other.m_descriptor_pool, nullptr); - m_descriptor_set_layout = std::exchange(other.m_descriptor_set_layout, nullptr); - m_descriptor_set_layout_bindings = std::move(other.m_descriptor_set_layout_bindings); - m_write_descriptor_sets = std::move(other.m_write_descriptor_sets); - m_descriptor_sets = std::move(other.m_descriptor_sets); -} - -ResourceDescriptor::ResourceDescriptor(const Device &device, - std::vector &&layout_bindings, - std::vector &&descriptor_writes, std::string &&name) - : m_device(device), m_name(name), m_write_descriptor_sets(descriptor_writes), - m_descriptor_set_layout_bindings(layout_bindings) { - assert(device.device()); - assert(!layout_bindings.empty()); - assert(!m_write_descriptor_sets.empty()); - assert(layout_bindings.size() == m_write_descriptor_sets.size()); - - for (std::size_t i = 0; i < layout_bindings.size(); i++) { - if (layout_bindings[i].descriptorType != descriptor_writes[i].descriptorType) { - throw std::runtime_error( - "VkDescriptorType mismatch in descriptor set layout binding and write descriptor set!"); - } - } - - std::vector pool_sizes; - - 
pool_sizes.reserve(layout_bindings.size()); - - for (const auto &descriptor_pool_type : layout_bindings) { - pool_sizes.emplace_back(VkDescriptorPoolSize{descriptor_pool_type.descriptorType, 1}); - } - - m_device.create_descriptor_pool(make_info({ - .maxSets = 1, - .poolSizeCount = static_cast(pool_sizes.size()), - .pPoolSizes = pool_sizes.data(), - }), - &m_descriptor_pool, m_name); - - m_device.create_descriptor_set_layout( - make_info({ - .bindingCount = static_cast(m_descriptor_set_layout_bindings.size()), - .pBindings = m_descriptor_set_layout_bindings.data(), - }), - &m_descriptor_set_layout, m_name); - - const std::vector descriptor_set_layouts(1, m_descriptor_set_layout); - - const auto descriptor_set_ai = make_info({ - .descriptorPool = m_descriptor_pool, - .descriptorSetCount = 1, - .pSetLayouts = descriptor_set_layouts.data(), - }); - - m_descriptor_sets.resize(1); - - if (const auto result = vkAllocateDescriptorSets(device.device(), &descriptor_set_ai, m_descriptor_sets.data()); - result != VK_SUCCESS) { - throw VulkanException("Error: vkAllocateDescriptorSets failed for descriptor " + m_name + " !", result); - } - - for (const auto &descriptor_set : m_descriptor_sets) { - // Assign an internal name using Vulkan debug markers. - m_device.set_debug_marker_name(descriptor_set, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, m_name); - } - - for (std::size_t j = 0; j < m_write_descriptor_sets.size(); j++) { - m_write_descriptor_sets[j].dstBinding = static_cast(j); - m_write_descriptor_sets[j].dstSet = m_descriptor_sets[0]; - } - - vkUpdateDescriptorSets(device.device(), static_cast(m_write_descriptor_sets.size()), - m_write_descriptor_sets.data(), 0, nullptr); -} - -ResourceDescriptor::~ResourceDescriptor() { - vkDestroyDescriptorSetLayout(m_device.device(), m_descriptor_set_layout, nullptr); - vkDestroyDescriptorPool(m_device.device(), m_descriptor_pool, nullptr); -} - -} // namespace inexor::vulkan_renderer::wrapper diff --git a/src/vulkan-renderer/wrapper/descriptor_builder.cpp b/src/vulkan-renderer/wrapper/descriptor_builder.cpp deleted file mode 100644 index 819306047..000000000 --- a/src/vulkan-renderer/wrapper/descriptor_builder.cpp +++ /dev/null @@ -1,61 +0,0 @@ -#include "inexor/vulkan-renderer/wrapper/descriptor_builder.hpp" - -#include "inexor/vulkan-renderer/wrapper/descriptor.hpp" -#include "inexor/vulkan-renderer/wrapper/device.hpp" - -#include - -namespace inexor::vulkan_renderer::wrapper { - -DescriptorBuilder::DescriptorBuilder(const Device &device) : m_device(device) { - assert(m_device.device()); -} - -ResourceDescriptor DescriptorBuilder::build(std::string name) { - assert(!m_layout_bindings.empty()); - assert(!m_write_sets.empty()); - assert(m_write_sets.size() == m_layout_bindings.size()); - - // Generate a new resource descriptor. 
- ResourceDescriptor generated_descriptor(m_device, std::move(m_layout_bindings), std::move(m_write_sets), - std::move(name)); - - m_descriptor_buffer_infos.clear(); - m_descriptor_image_infos.clear(); - - return std::move(generated_descriptor); -} - -DescriptorBuilder &DescriptorBuilder::add_combined_image_sampler(const VkSampler image_sampler, - const VkImageView image_view, - const std::uint32_t binding, - const VkShaderStageFlagBits shader_stage) { - assert(image_sampler); - assert(image_view); - - m_layout_bindings.push_back({ - .binding = 0, - .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, - .descriptorCount = 1, - .stageFlags = static_cast(shader_stage), - }); - - m_descriptor_image_infos.push_back({ - .sampler = image_sampler, - .imageView = image_view, - .imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, - }); - - m_write_sets.push_back(make_info({ - .dstSet = nullptr, - .dstBinding = binding, - .dstArrayElement = 0, - .descriptorCount = 1, - .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, - .pImageInfo = &m_descriptor_image_infos.back(), - })); - - return *this; -} - -} // namespace inexor::vulkan_renderer::wrapper diff --git a/src/vulkan-renderer/wrapper/descriptors/descriptor_pool.cpp b/src/vulkan-renderer/wrapper/descriptors/descriptor_pool.cpp new file mode 100644 index 000000000..b60615752 --- /dev/null +++ b/src/vulkan-renderer/wrapper/descriptors/descriptor_pool.cpp @@ -0,0 +1,46 @@ +#include "inexor/vulkan-renderer/wrapper/descriptors/descriptor_pool.hpp" + +#include "inexor/vulkan-renderer/exception.hpp" +#include "inexor/vulkan-renderer/wrapper/device.hpp" +#include "inexor/vulkan-renderer/wrapper/make_info.hpp" + +#include + +namespace inexor::vulkan_renderer::wrapper::descriptors { + +DescriptorPool::DescriptorPool(const Device &device, + std::vector pool_sizes, + const std::uint32_t max_sets, + std::string name) + : m_device(device), m_pool_sizes(pool_sizes), m_name(std::move(name)) { + if (m_name.empty()) { + throw std::invalid_argument("Error: Internal debug name for descriptor pool must not be empty!"); + } + if (m_pool_sizes.empty()) { + throw std::invalid_argument("Error: Descriptor pool sizes must not be empty!"); + } + + const auto descriptor_pool_ci = make_info({ + .maxSets = max_sets, + .poolSizeCount = static_cast(m_pool_sizes.size()), + .pPoolSizes = m_pool_sizes.data(), + }); + + if (const auto result = vkCreateDescriptorPool(m_device.device(), &descriptor_pool_ci, nullptr, &m_descriptor_pool); + result != VK_SUCCESS) { + throw VulkanException("Error: vkCreateDescriptorPool failed for descriptor pool " + m_name + " !", result); + } + m_device.set_debug_name(m_descriptor_pool, m_name); +} + +DescriptorPool::DescriptorPool(DescriptorPool &&other) noexcept : m_device(other.m_device) { + m_name = std::move(other.m_name); + m_descriptor_pool = std::exchange(other.m_descriptor_pool, VK_NULL_HANDLE); + m_pool_sizes = std::move(other.m_pool_sizes); +} + +DescriptorPool::~DescriptorPool() { + vkDestroyDescriptorPool(m_device.device(), m_descriptor_pool, nullptr); +} + +} // namespace inexor::vulkan_renderer::wrapper::descriptors diff --git a/src/vulkan-renderer/wrapper/descriptors/descriptor_pool_allocator.cpp b/src/vulkan-renderer/wrapper/descriptors/descriptor_pool_allocator.cpp new file mode 100644 index 000000000..1bf72dbe6 --- /dev/null +++ b/src/vulkan-renderer/wrapper/descriptors/descriptor_pool_allocator.cpp @@ -0,0 +1,42 @@ +#include "inexor/vulkan-renderer/wrapper/descriptors/descriptor_pool_allocator.hpp" + +#include + 
+#include + +namespace inexor::vulkan_renderer::wrapper::descriptors { + +DescriptorPoolAllocator::DescriptorPoolAllocator(const Device &device) : m_device(device) {} + +DescriptorPoolAllocator::DescriptorPoolAllocator(DescriptorPoolAllocator &&other) noexcept : m_device(other.m_device) { + m_pools = std::move(other.m_pools); +} + +VkDescriptorPool DescriptorPoolAllocator::request_new_descriptor_pool() { + // When creating a new descriptor pool we use these pool sizes as default values + // Adapt to other pool types as needed in the future + const std::vector DEFAULT_POOL_SIZES{ + { + .type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, + .descriptorCount = 1024, + }, + { + .type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, + .descriptorCount = 1024, + }, + }; + + // TODO: Maybe rendergraph can reason about descriptor pool sizes ahead of descriptor pool allocation? + + // When creating a new descriptor pool, we specify a maximum of 1024 descriptor sets to be used + const std::uint32_t DEFAULT_MAX_DESCRIPTOR_COUNT{1024}; + + // This might fail because there's not enough memory left for creating the new descriptor pool + // In this case, DescriptorPool wrapper will throw a VulkanException + m_pools.emplace_back(m_device, DEFAULT_POOL_SIZES, DEFAULT_MAX_DESCRIPTOR_COUNT, "descriptor pool"); + + // Return the descriptor pool that was just created + return m_pools.back().descriptor_pool(); +} + +} // namespace inexor::vulkan_renderer::wrapper::descriptors diff --git a/src/vulkan-renderer/wrapper/descriptors/descriptor_set_allocator.cpp b/src/vulkan-renderer/wrapper/descriptors/descriptor_set_allocator.cpp new file mode 100644 index 000000000..f76e92511 --- /dev/null +++ b/src/vulkan-renderer/wrapper/descriptors/descriptor_set_allocator.cpp @@ -0,0 +1,66 @@ +#include "inexor/vulkan-renderer/wrapper/descriptors/descriptor_set_allocator.hpp" + +#include "inexor/vulkan-renderer/exception.hpp" +#include "inexor/vulkan-renderer/wrapper/device.hpp" +#include "inexor/vulkan-renderer/wrapper/make_info.hpp" + +#include + +#include + +namespace inexor::vulkan_renderer::wrapper::descriptors { + +DescriptorSetAllocator::DescriptorSetAllocator(const Device &device) + : m_device(device), m_descriptor_pool_allocator(device) { + m_current_pool = m_descriptor_pool_allocator.request_new_descriptor_pool(); + if (m_current_pool == VK_NULL_HANDLE) { + throw std::runtime_error( + "[DescriptorSetAllocator::DescriptorSetAllocator] Error: Failed to create descriptor pool!"); + } +} + +DescriptorSetAllocator::DescriptorSetAllocator(DescriptorSetAllocator &&other) noexcept + : m_device(other.m_device), m_descriptor_pool_allocator(std::move(other.m_descriptor_pool_allocator)) { + m_current_pool = std::exchange(other.m_current_pool, VK_NULL_HANDLE); +} + +VkDescriptorSet DescriptorSetAllocator::allocate(const std::string &name, + const VkDescriptorSetLayout descriptor_set_layout) { + assert(descriptor_set_layout); + auto descriptor_set_ai = make_info({ + .descriptorPool = m_current_pool, + .descriptorSetCount = 1, + .pSetLayouts = &descriptor_set_layout, + }); + // Attempt to allocate a new descriptor set from the current descriptor pool + VkDescriptorSet new_descriptor_set = VK_NULL_HANDLE; + auto result = vkAllocateDescriptorSets(m_device.device(), &descriptor_set_ai, &new_descriptor_set); + + // Do not throw an exception rightaway if this attempt to allocate failed + // It might be the case that we simply ran out of pool memory for the allocation + if (result == VK_ERROR_OUT_OF_POOL_MEMORY || result == VK_ERROR_FRAGMENTED_POOL) { 
+ spdlog::trace("[DescriptorSetAllocator::allocate] Requesting new descriptor pool"); + // The allocation failed in the first attempt because we did run out of descriptor pool memory! + // We still have a chance to recover from this: Create a new descriptor pool and then try again! + m_current_pool = m_descriptor_pool_allocator.request_new_descriptor_pool(); + // Don't forget we are using the new descriptor pool here that we just created + descriptor_set_ai.descriptorPool = m_current_pool; + // Try again with the new descriptor pool that was just created + result = vkAllocateDescriptorSets(m_device.device(), &descriptor_set_ai, &new_descriptor_set); + } + // This is true if either the first or the second attempt to call vkAllocateDescriptorSets failed + if (result != VK_SUCCESS) { + // All attempts failed, but it's not because we did run out of descriptor pool memory + // If this happens, we have a huge problem and here's nothing we can do anymore + // This is a hint that there is something fundamentally wrong with our descriptor management in the engine! + throw VulkanException( + "[DescriptorSetAllocator::allocate] Error: All attempts to call vkAllocateDescriptorSets failed!", result); + } + // Assign an internal debug name to the descriptor set that was just created + m_device.set_debug_name(new_descriptor_set, name); + // At this point, the allocation did work successfully either because we had enough memory in the first attempt to + // call vkAllocateDescriptorSets or it worked on the second attempt because we created a new descriptor pool + return new_descriptor_set; +} + +} // namespace inexor::vulkan_renderer::wrapper::descriptors diff --git a/src/vulkan-renderer/wrapper/descriptors/descriptor_set_layout.cpp b/src/vulkan-renderer/wrapper/descriptors/descriptor_set_layout.cpp new file mode 100644 index 000000000..73df13db1 --- /dev/null +++ b/src/vulkan-renderer/wrapper/descriptors/descriptor_set_layout.cpp @@ -0,0 +1,35 @@ +#include "inexor/vulkan-renderer/wrapper/descriptors/descriptor_set_layout.hpp" + +#include "inexor/vulkan-renderer/exception.hpp" +#include "inexor/vulkan-renderer/wrapper/device.hpp" + +#include + +namespace inexor::vulkan_renderer::wrapper::descriptors { + +DescriptorSetLayout::DescriptorSetLayout(const Device &device, + const VkDescriptorSetLayoutCreateInfo descriptor_set_layout_ci, + std::string name) + : m_device(device), m_name(std::move(name)) { + if (m_name.empty()) { + throw std::runtime_error("Error: Internal debug name for descriptor set layout must not be empty!"); + } + if (const auto result = vkCreateDescriptorSetLayout(m_device.device(), &descriptor_set_layout_ci, nullptr, + &m_descriptor_set_layout); + result != VK_SUCCESS) { + throw VulkanException("Error: vkCreateDescriptorSetLayout failed for descriptor set layout " + m_name + " !", + result); + } + m_device.set_debug_name(m_descriptor_set_layout, m_name); +} + +DescriptorSetLayout::DescriptorSetLayout(DescriptorSetLayout &&other) noexcept : m_device(other.m_device) { + m_name = std::move(other.m_name); + m_descriptor_set_layout = std::exchange(other.m_descriptor_set_layout, VK_NULL_HANDLE); +} + +DescriptorSetLayout::~DescriptorSetLayout() { + vkDestroyDescriptorSetLayout(m_device.device(), m_descriptor_set_layout, nullptr); +} + +} // namespace inexor::vulkan_renderer::wrapper::descriptors diff --git a/src/vulkan-renderer/wrapper/descriptors/descriptor_set_layout_builder.cpp b/src/vulkan-renderer/wrapper/descriptors/descriptor_set_layout_builder.cpp new file mode 100644 index 
000000000..84cdf639f --- /dev/null +++ b/src/vulkan-renderer/wrapper/descriptors/descriptor_set_layout_builder.cpp @@ -0,0 +1,62 @@ +#include "inexor/vulkan-renderer/wrapper/descriptors/descriptor_set_layout_builder.hpp" + +#include "inexor/vulkan-renderer/wrapper/device.hpp" +#include "inexor/vulkan-renderer/wrapper/make_info.hpp" + +namespace inexor::vulkan_renderer::wrapper::descriptors { + +DescriptorSetLayoutBuilder::DescriptorSetLayoutBuilder(const Device &device) + : m_device(device), m_descriptor_set_layout_cache(device) {} + +DescriptorSetLayoutBuilder::DescriptorSetLayoutBuilder(DescriptorSetLayoutBuilder &&other) noexcept + : m_device(other.m_device), m_descriptor_set_layout_cache(std::move(other.m_descriptor_set_layout_cache)) { + m_bindings = std::move(other.m_bindings); + m_binding = other.m_binding; +} + +DescriptorSetLayoutBuilder &DescriptorSetLayoutBuilder::add_uniform_buffer(const VkShaderStageFlags shader_stage, + const std::uint32_t count) { + m_bindings.emplace_back(VkDescriptorSetLayoutBinding{ + .binding = m_binding, + .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, + .descriptorCount = count, + .stageFlags = shader_stage, + }); + // NOTE: Even if 'count' is larger than 1, the binding is incremented by only 1 + m_binding++; + return *this; +} + +DescriptorSetLayoutBuilder & +DescriptorSetLayoutBuilder::add_combined_image_sampler(const VkShaderStageFlags shader_stage, + const std::uint32_t count) { + m_bindings.emplace_back(VkDescriptorSetLayoutBinding{ + .binding = m_binding, + .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, + .descriptorCount = count, + .stageFlags = shader_stage, + }); + // NOTE: Even if 'count' is larger than 1, the binding is incremented by only 1 + m_binding++; + return *this; +} + +VkDescriptorSetLayout DescriptorSetLayoutBuilder::build(std::string name) { + const auto descriptor_set_layout_ci = make_info({ + .bindingCount = static_cast(m_bindings.size()), + .pBindings = m_bindings.data(), + }); + + // Create the descriptor set layout using the descriptor set layout cache + const auto descriptor_set_layout = + m_descriptor_set_layout_cache.create_descriptor_set_layout(descriptor_set_layout_ci, std::move(name)); + + // Reset all the data of the builder so the builder can be re-used + m_bindings.clear(); + m_binding = 0; + + // Return the descriptor set layout that was created + return descriptor_set_layout; +} + +} // namespace inexor::vulkan_renderer::wrapper::descriptors diff --git a/src/vulkan-renderer/wrapper/descriptors/descriptor_set_layout_cache.cpp b/src/vulkan-renderer/wrapper/descriptors/descriptor_set_layout_cache.cpp new file mode 100644 index 000000000..9116604af --- /dev/null +++ b/src/vulkan-renderer/wrapper/descriptors/descriptor_set_layout_cache.cpp @@ -0,0 +1,92 @@ +#include "inexor/vulkan-renderer/wrapper/descriptors/descriptor_set_layout_cache.hpp" + +#include "inexor/vulkan-renderer/exception.hpp" +#include "inexor/vulkan-renderer/wrapper/device.hpp" + +#include +#include +#include + +namespace inexor::vulkan_renderer::wrapper::descriptors { + +DescriptorSetLayoutCache::DescriptorSetLayoutCache(const Device &device) : m_device(device) {} + +DescriptorSetLayoutCache::DescriptorSetLayoutCache(DescriptorSetLayoutCache &&other) noexcept + : m_device(other.m_device) { + m_cache = std::move(other.m_cache); +} + +VkDescriptorSetLayout +DescriptorSetLayoutCache::create_descriptor_set_layout(const VkDescriptorSetLayoutCreateInfo descriptor_set_layout_ci, + std::string name) { + DescriptorSetLayoutInfo layout_info; 
+ layout_info.bindings.reserve(descriptor_set_layout_ci.bindingCount); + bool is_sorted = true; + int last_binding = -1; + + // Loop through all bindings and ensure that the bindings are in increasing order + for (std::size_t i = 0; i < descriptor_set_layout_ci.bindingCount; i++) { + // Copy the bindings into layout_info + layout_info.bindings.push_back(descriptor_set_layout_ci.pBindings[i]); + + // Check if the descriptor set layout bindings are sorted by binding + if (static_cast<int>(descriptor_set_layout_ci.pBindings[i].binding) > last_binding) { + last_binding = static_cast<int>(descriptor_set_layout_ci.pBindings[i].binding); + } else { + // The bindings are not in increasing order. Note that we must not break out of the loop here, + // because every binding still needs to be copied into layout_info before we can sort them below + is_sorted = false; + } + } + // We need to make sure the bindings are sorted because this is important for the hash! + if (!is_sorted) { + std::sort(layout_info.bindings.begin(), layout_info.bindings.end(), [](auto &a, auto &b) { + return a.binding < b.binding; // Sort by binding + }); + } + + // Check if this descriptor set layout already exists in the cache + if (!m_cache.contains(layout_info)) { + m_cache.emplace(layout_info, DescriptorSetLayout(m_device, descriptor_set_layout_ci, std::move(name))); + } + // TODO: Name descriptor set layout internally! + return m_cache.at(layout_info).m_descriptor_set_layout; +} + +bool DescriptorSetLayoutInfo::operator==(const DescriptorSetLayoutInfo &other) const { + if (other.bindings.size() != bindings.size()) { + return false; + } + // Check if each of the bindings is the same + // Note that we assume the bindings are sorted! + for (std::size_t i = 0; i < bindings.size(); i++) { + if (other.bindings[i].binding != bindings[i].binding) { + return false; + } + if (other.bindings[i].descriptorType != bindings[i].descriptorType) { + return false; + } + if (other.bindings[i].descriptorCount != bindings[i].descriptorCount) { + return false; + } + if (other.bindings[i].stageFlags != bindings[i].stageFlags) { + return false; + } + } + return true; +} + +std::size_t DescriptorSetLayoutInfo::hash() const { + assert(!bindings.empty()); + std::size_t result = std::hash<std::size_t>()(bindings.size()); + for (const auto &binding : bindings) { + // Pack binding data into 64 bits + std::size_t binding_hash = + binding.binding | binding.descriptorType << 8 | binding.descriptorCount << 16 | binding.stageFlags << 24; + // Shuffle the packed binding data and XOR it with the main hash + result ^= std::hash<std::size_t>()(binding_hash); + } + return result; +} + +} // namespace inexor::vulkan_renderer::wrapper::descriptors diff --git a/src/vulkan-renderer/wrapper/descriptors/write_descriptor_set_builder.cpp b/src/vulkan-renderer/wrapper/descriptors/write_descriptor_set_builder.cpp new file mode 100644 index 000000000..d695962ca --- /dev/null +++ b/src/vulkan-renderer/wrapper/descriptors/write_descriptor_set_builder.cpp @@ -0,0 +1,97 @@ +#include "inexor/vulkan-renderer/wrapper/descriptors/write_descriptor_set_builder.hpp" + +#include "inexor/vulkan-renderer/render-graph/buffer.hpp" +#include "inexor/vulkan-renderer/render-graph/texture.hpp" +#include "inexor/vulkan-renderer/wrapper/device.hpp" +#include "inexor/vulkan-renderer/wrapper/make_info.hpp" + +#include <stdexcept> + +namespace inexor::vulkan_renderer::wrapper::descriptors { + +WriteDescriptorSetBuilder::WriteDescriptorSetBuilder(const Device &device) : m_device(device) {} + +WriteDescriptorSetBuilder::WriteDescriptorSetBuilder(WriteDescriptorSetBuilder &&other) noexcept + : m_device(other.m_device) { + m_write_descriptor_sets = std::move(other.m_write_descriptor_sets); + m_binding = other.m_binding;
+} + +WriteDescriptorSetBuilder & +WriteDescriptorSetBuilder::add_uniform_buffer_update(const VkDescriptorSet descriptor_set, + const std::weak_ptr<render_graph::Buffer> uniform_buffer) { + if (!descriptor_set) { + throw std::invalid_argument("[WriteDescriptorSetBuilder::add_uniform_buffer_update] Error: Parameter " + "'descriptor_set' is invalid!"); + } + if (uniform_buffer.expired()) { + throw std::invalid_argument("[WriteDescriptorSetBuilder::add_uniform_buffer_update] Error: Parameter " + "'uniform_buffer' is invalid!"); + } + const auto &buffer = uniform_buffer.lock(); + if (buffer->m_buffer_type != BufferType::UNIFORM_BUFFER) { + throw std::invalid_argument("[WriteDescriptorSetBuilder::add_uniform_buffer_update] Error: Buffer " + + buffer->m_name + " is not a uniform buffer!"); + } + if (!buffer->m_descriptor_buffer_info.buffer) { + throw std::invalid_argument("[WriteDescriptorSetBuilder::add_uniform_buffer_update] Error: " + "'Buffer::m_descriptor_buffer_info.buffer' of uniform buffer '" + + buffer->m_name + "' is invalid!"); + } + m_write_descriptor_sets.emplace_back(wrapper::make_info<VkWriteDescriptorSet>({ + .dstSet = descriptor_set, + .dstBinding = m_binding, + .dstArrayElement = 0, + .descriptorCount = 1, + .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, + .pBufferInfo = &buffer->m_descriptor_buffer_info, + })); + + m_binding++; + return *this; +} + +WriteDescriptorSetBuilder & +WriteDescriptorSetBuilder::add_combined_image_sampler_update(const VkDescriptorSet descriptor_set, + const std::weak_ptr<render_graph::Texture> image_texture) { + if (!descriptor_set) { + throw std::invalid_argument("[WriteDescriptorSetBuilder::add_combined_image_sampler_update] Error: Parameter " + "'descriptor_set' is invalid!"); + } + if (image_texture.expired()) { + throw std::invalid_argument("[WriteDescriptorSetBuilder::add_combined_image_sampler_update] Error: Parameter " + "'image_texture' is invalid!"); + } + + const auto &texture = image_texture.lock(); + if (!texture->m_descriptor_img_info.imageView) { + throw std::invalid_argument("[WriteDescriptorSetBuilder::add_combined_image_sampler_update] Error: " + "'Texture::m_descriptor_img_info.imageView' of texture '" + + texture->m_name + "' is invalid!"); + } + if (!texture->m_descriptor_img_info.sampler) { + throw std::invalid_argument("[WriteDescriptorSetBuilder::add_combined_image_sampler_update] Error: " + "'Texture::m_descriptor_img_info.sampler' of texture '" + + texture->m_name + "' is invalid!"); + } + m_write_descriptor_sets.emplace_back(wrapper::make_info<VkWriteDescriptorSet>({ + .dstSet = descriptor_set, + .dstBinding = m_binding, + .dstArrayElement = 0, + .descriptorCount = 1, + .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, + .pImageInfo = &texture->m_descriptor_img_info, + })); + + m_binding++; + return *this; +} + +std::vector<VkWriteDescriptorSet> WriteDescriptorSetBuilder::build() { + auto write_descriptor_sets = std::move(m_write_descriptor_sets); + reset(); + return write_descriptor_sets; +} + +void WriteDescriptorSetBuilder::reset() { + m_write_descriptor_sets.clear(); + m_binding = 0; +} + +} // namespace inexor::vulkan_renderer::wrapper::descriptors diff --git a/src/vulkan-renderer/wrapper/device.cpp b/src/vulkan-renderer/wrapper/device.cpp index 342b05561..bba260054 100644 --- a/src/vulkan-renderer/wrapper/device.cpp +++ b/src/vulkan-renderer/wrapper/device.cpp @@ -3,7 +3,6 @@ #include "inexor/vulkan-renderer/exception.hpp" #include "inexor/vulkan-renderer/vk_tools/device_info.hpp" #include "inexor/vulkan-renderer/vk_tools/enumerate.hpp" -#include "inexor/vulkan-renderer/vk_tools/representation.hpp" #include "inexor/vulkan-renderer/wrapper/instance.hpp" #include "inexor/vulkan-renderer/wrapper/make_info.hpp" @@ -24,12 +23,56 @@ namespace { -// TODO: Make proper
use of queue priorities in the future. +// TODO: Make proper use of queue priorities in the future constexpr float DEFAULT_QUEUE_PRIORITY = 1.0f; } // namespace namespace inexor::vulkan_renderer::wrapper { + +// TODO: Refactor to use VkClearColor to have a general color palette +// TODO: Use std::unordered_map instead! +std::array get_debug_label_color(const DebugLabelColor color) { + switch (color) { + case DebugLabelColor::RED: + return {0.98f, 0.60f, 0.60f, 1.0f}; + case DebugLabelColor::BLUE: + return {0.68f, 0.85f, 0.90f, 1.0f}; + case DebugLabelColor::GREEN: + return {0.73f, 0.88f, 0.73f, 1.0f}; + case DebugLabelColor::YELLOW: + return {0.98f, 0.98f, 0.70f, 1.0f}; + case DebugLabelColor::PURPLE: + return {0.80f, 0.70f, 0.90f, 1.0f}; + case DebugLabelColor::ORANGE: + return {0.98f, 0.75f, 0.53f, 1.0f}; + case DebugLabelColor::MAGENTA: + return {0.96f, 0.60f, 0.76f, 1.0f}; + case DebugLabelColor::CYAN: + return {0.70f, 0.98f, 0.98f, 1.0f}; + case DebugLabelColor::BROWN: + return {0.82f, 0.70f, 0.55f, 1.0f}; + case DebugLabelColor::PINK: + return {0.98f, 0.75f, 0.85f, 1.0f}; + case DebugLabelColor::LIME: + return {0.80f, 0.98f, 0.60f, 1.0f}; + case DebugLabelColor::TURQUOISE: + return {0.70f, 0.93f, 0.93f, 1.0f}; + case DebugLabelColor::BEIGE: + return {0.96f, 0.96f, 0.86f, 1.0f}; + case DebugLabelColor::MAROON: + return {0.76f, 0.50f, 0.50f, 1.0f}; + case DebugLabelColor::OLIVE: + return {0.74f, 0.75f, 0.50f, 1.0f}; + case DebugLabelColor::NAVY: + return {0.53f, 0.70f, 0.82f, 1.0f}; + case DebugLabelColor::TEAL: + return {0.53f, 0.80f, 0.75f, 1.0f}; + default: + return {0.0f, 0.0f, 0.0f, 1.0f}; // Default to opaque black if the color is not recognized + } +} + namespace { /// A function for rating physical devices by type @@ -42,6 +85,7 @@ std::uint32_t device_type_rating(const DeviceInfo &info) { case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU: return 1; default: + // This means we ignore any other gpu types by default return 0; } } @@ -65,8 +109,10 @@ bool is_extension_supported(const std::vector &extensions /// @param print_message If ``true``, an info message will be printed to the console if a device feature or device /// extension is not supported (``true`` by default) /// @return ``true`` if the physical device supports all device features and device extensions -bool is_device_suitable(const DeviceInfo &info, const VkPhysicalDeviceFeatures &required_features, - const std::span required_extensions, const bool print_info = false) { +bool is_device_suitable(const DeviceInfo &info, + const VkPhysicalDeviceFeatures &required_features, + const std::span required_extensions, + const bool print_info = false) { const auto comparable_required_features = vk_tools::get_device_features_as_vector(required_features); const auto comparable_available_features = vk_tools::get_device_features_as_vector(info.features); constexpr auto FEATURE_COUNT = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32); @@ -75,17 +121,18 @@ bool is_device_suitable(const DeviceInfo &info, const VkPhysicalDeviceFeatures & for (std::size_t i = 0; i < FEATURE_COUNT; i++) { if (comparable_required_features[i] == VK_TRUE && comparable_available_features[i] == VK_FALSE) { if (print_info) { - spdlog::info("Physical device {} does not support {}!", info.name, - vk_tools::get_device_feature_description(i)); + spdlog::error("Physical device {} does not support {}!", info.name, + vk_tools::get_device_feature_description(i)); } return false; } } // Loop through all device extensions and check if an extension is required but not 
supported + // We are not checking for duplicated entries but this is not a problem for (const auto &extension : required_extensions) { if (!is_extension_supported(info.extensions, extension)) { if (print_info) { - spdlog::info("Physical device {} does not support extension {}!", info.name, extension); + spdlog::error("Physical device {} does not support extension {}!", info.name, extension); } return false; } @@ -100,7 +147,8 @@ bool is_device_suitable(const DeviceInfo &info, const VkPhysicalDeviceFeatures & /// @param rhs The other physical device /// @return ``true`` if `lhs` is more preferable over `rhs` bool compare_physical_devices(const VkPhysicalDeviceFeatures &required_features, - const std::span required_extensions, const DeviceInfo &lhs, + const std::span required_extensions, + const DeviceInfo &lhs, const DeviceInfo &rhs) { if (!is_device_suitable(rhs, required_features, required_extensions)) { return true; @@ -184,7 +232,8 @@ VkPhysicalDevice Device::pick_best_physical_device(std::vector &&phy return physical_device_infos.front().physical_device; } -VkPhysicalDevice Device::pick_best_physical_device(const Instance &inst, const VkSurfaceKHR surface, +VkPhysicalDevice Device::pick_best_physical_device(const Instance &inst, + const VkSurfaceKHR surface, const VkPhysicalDeviceFeatures &required_features, const std::span required_extensions) { // Put together all data that is required to compare the physical devices @@ -195,12 +244,19 @@ VkPhysicalDevice Device::pick_best_physical_device(const Instance &inst, const V return pick_best_physical_device(std::move(infos), required_features, required_extensions); } -Device::Device(const Instance &inst, const VkSurfaceKHR surface, const bool prefer_distinct_transfer_queue, - const VkPhysicalDevice physical_device, const std::span required_extensions, - const VkPhysicalDeviceFeatures &required_features, const VkPhysicalDeviceFeatures &optional_features) +Device::Device( + const Instance &inst, + const VkSurfaceKHR surface, + const VkPhysicalDevice physical_device, + const std::span required_extensions, + const VkPhysicalDeviceFeatures &required_features, + const std::span optional_extensions, + const std::optional optional_features, + const std::optional> on_optional_extension_unavailable, + const std::optional> on_optional_feature_unavailable) : m_physical_device(physical_device) { - - if (!is_device_suitable(build_device_info(physical_device, surface), required_features, required_extensions)) { + if (!is_device_suitable(build_device_info(physical_device, surface), required_features, required_extensions, + true)) { throw std::runtime_error("Error: The chosen physical device {} is not suitable!"); } @@ -210,12 +266,6 @@ Device::Device(const Instance &inst, const VkSurfaceKHR surface, const bool pref spdlog::trace("Creating Vulkan device queues"); std::vector queues_to_create; - if (prefer_distinct_transfer_queue) { - spdlog::trace("The application will try to use a distinct data transfer queue if it is available"); - } else { - spdlog::warn("The application is forced not to use a distinct data transfer queue!"); - } - // Check if there is one queue family which can be used for both graphics and presentation auto queue_candidate = find_queue_family_index_if([&](const std::uint32_t index, const VkQueueFamilyProperties &queue_family) { @@ -249,25 +299,6 @@ Device::Device(const Instance &inst, const VkSurfaceKHR surface, const bool pref bool use_distinct_data_transfer_queue = false; - if (queue_candidate && prefer_distinct_transfer_queue) { - 
m_transfer_queue_family_index = *queue_candidate; - - spdlog::trace("A separate queue will be used for data transfer."); - - // We have the opportunity to use a separated queue for data transfer! - use_distinct_data_transfer_queue = true; - - queues_to_create.push_back(make_info({ - .queueFamilyIndex = m_transfer_queue_family_index, - .queueCount = 1, - .pQueuePriorities = &::DEFAULT_QUEUE_PRIORITY, - })); - } else { - // We don't have the opportunity to use a separated queue for data transfer! - // Do not create a new queue, use the graphics queue instead. - use_distinct_data_transfer_queue = false; - } - if (!use_distinct_data_transfer_queue) { spdlog::warn("The application is forced to avoid distinct data transfer queues"); spdlog::warn("Because of this, the graphics queue will be used for data transfer"); @@ -279,7 +310,9 @@ Device::Device(const Instance &inst, const VkSurfaceKHR surface, const bool pref vkGetPhysicalDeviceFeatures(physical_device, &available_features); const auto comparable_required_features = vk_tools::get_device_features_as_vector(required_features); - const auto comparable_optional_features = vk_tools::get_device_features_as_vector(optional_features); + const auto comparable_optional_features = optional_features + ? vk_tools::get_device_features_as_vector(optional_features.value()) + : std::vector{}; const auto comparable_available_features = vk_tools::get_device_features_as_vector(available_features); constexpr auto FEATURE_COUNT = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32); @@ -300,9 +333,24 @@ Device::Device(const Instance &inst, const VkSurfaceKHR surface, const bool pref } } + spdlog::trace("Enabled device extensions:"); + + // Note that the device extensions have already been checked by is_device_suitable at the beginning of the method + for (const auto &extension : required_extensions) { + spdlog::trace(" - {}", extension); + } + std::memcpy(&m_enabled_features, features_to_enable.data(), features_to_enable.size()); + // We want to use dynamic rendering (VK_KHR_dynamic_rendering) + const auto dyn_rendering_feature = make_info({ + .pNext = nullptr, + .dynamicRendering = VK_TRUE, + }); + const auto device_ci = make_info({ + // This is one of those rare cases where pNext is actually not nullptr! + .pNext = &dyn_rendering_feature, // We use dynamic rendering .queueCreateInfoCount = static_cast(queues_to_create.size()), .pQueueCreateInfos = queues_to_create.data(), .enabledExtensionCount = static_cast(required_extensions.size()), @@ -316,64 +364,54 @@ Device::Device(const Instance &inst, const VkSurfaceKHR surface, const bool pref throw VulkanException("Error: vkCreateDevice failed!", result); } - spdlog::trace("Loading Vulkan entrypoints directly from driver (bypass Vulkan loader dispatch code)"); - volkLoadDevice(m_device); - - const bool enable_debug_markers = - std::find_if(required_extensions.begin(), required_extensions.end(), [&](const char *extension) { - return std::string(extension) == std::string(VK_EXT_DEBUG_MARKER_EXTENSION_NAME); - }) != required_extensions.end(); + // Set an internal debug name to this device using Vulkan debug utils (VK_EXT_debug_utils) + set_debug_name(m_device, "Device"); -#ifndef NDEBUG - if (enable_debug_markers) { - spdlog::trace("Initializing Vulkan debug markers"); - - // The debug marker extension is not part of the core, so function pointers need to be loaded manually. 
- m_vk_debug_marker_set_object_tag = reinterpret_cast( // NOLINT - vkGetDeviceProcAddr(m_device, "vkDebugMarkerSetObjectTagEXT")); - assert(m_vk_debug_marker_set_object_tag); - - m_vk_debug_marker_set_object_name = reinterpret_cast( // NOLINT - vkGetDeviceProcAddr(m_device, "vkDebugMarkerSetObjectNameEXT")); - assert(m_vk_debug_marker_set_object_name); - - m_vk_cmd_debug_marker_begin = reinterpret_cast( // NOLINT - vkGetDeviceProcAddr(m_device, "vkCmdDebugMarkerBeginEXT")); - assert(m_vk_cmd_debug_marker_begin); - - m_vk_cmd_debug_marker_end = reinterpret_cast( // NOLINT - vkGetDeviceProcAddr(m_device, "vkCmdDebugMarkerEndEXT")); - assert(m_vk_cmd_debug_marker_end); + spdlog::trace( + "Loading Vulkan entrypoints directly from driver with volk metaloader (bypass Vulkan loader dispatch code)"); + volkLoadDevice(m_device); - m_vk_cmd_debug_marker_insert = reinterpret_cast( // NOLINT - vkGetDeviceProcAddr(m_device, "vkCmdDebugMarkerInsertEXT")); - assert(m_vk_cmd_debug_marker_insert); + // TODO: Refactor: Compute queue but no graphics queue? (Refactor selection) + auto compute_queue_candidate = + find_queue_family_index_if([&](const std::uint32_t index, const VkQueueFamilyProperties &queue_family) { + return (queue_family.queueFlags & VK_QUEUE_COMPUTE_BIT) != 0u; + }); - m_vk_set_debug_utils_object_name = reinterpret_cast( // NOLINT - vkGetDeviceProcAddr(m_device, "vkSetDebugUtilsObjectNameEXT")); - assert(m_vk_set_debug_utils_object_name); + if (!compute_queue_candidate) { + throw std::runtime_error("Error: Could not find a compute queue!"); } -#endif + + m_compute_queue_family_index = compute_queue_candidate.value(); spdlog::trace("Queue family indices:"); spdlog::trace(" - Graphics: {}", m_graphics_queue_family_index); spdlog::trace(" - Present: {}", m_present_queue_family_index); spdlog::trace(" - Transfer: {}", m_transfer_queue_family_index); + spdlog::trace(" - Compute: {}", m_compute_queue_family_index); - // Setup the queues for presentation and graphics. - // Since we only have one queue per queue family, we acquire index 0. + // Setup the queues for presentation and graphics + // Since we only have one queue per queue family, we acquire queue index 0 vkGetDeviceQueue(m_device, m_present_queue_family_index, 0, &m_present_queue); vkGetDeviceQueue(m_device, m_graphics_queue_family_index, 0, &m_graphics_queue); + vkGetDeviceQueue(m_device, m_compute_queue_family_index, 0, &m_compute_queue); + + // TODO: Combine names: "Graphics and Compute Queue" if they are the same! + + // Set an internal debug name to the queues using Vulkan debug utils (VK_EXT_debug_utils) + set_debug_name(m_graphics_queue, "Graphics Queue"); + set_debug_name(m_present_queue, "Present Queue"); + set_debug_name(m_compute_queue, "Compute Queue"); // The use of data transfer queues can be forbidden by using -no_separate_data_queue. if (use_distinct_data_transfer_queue) { // Use a separate queue for data transfer to GPU. 
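+ // NOTE: Since the distinct transfer queue selection was removed above, 'use_distinct_data_transfer_queue' + // currently always stays false, so this branch is effectively unreachable until queue selection is reworked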
vkGetDeviceQueue(m_device, m_transfer_queue_family_index, 0, &m_transfer_queue); + set_debug_name(m_transfer_queue, "Transfer Queue"); } spdlog::trace("Creating VMA allocator"); - VmaVulkanFunctions vma_vulkan_functions{ + const VmaVulkanFunctions vma_vulkan_functions{ .vkGetInstanceProcAddr = vkGetInstanceProcAddr, .vkGetDeviceProcAddr = vkGetDeviceProcAddr, }; @@ -382,25 +420,43 @@ Device::Device(const Instance &inst, const VkSurfaceKHR surface, const bool pref .device = m_device, .pVulkanFunctions = &vma_vulkan_functions, .instance = inst.instance(), - // Just tell Vulkan Memory Allocator to use Vulkan 1.1, even if a newer version is specified in instance wrapper - // This might need to be changed in the future - .vulkanApiVersion = VK_API_VERSION_1_1, + .vulkanApiVersion = VK_API_VERSION_1_3, }; - spdlog::trace("Creating Vulkan memory allocator instance"); + spdlog::trace("Creating Vulkan Memory Allocator (VMA) instance"); if (const auto result = vmaCreateAllocator(&vma_allocator_ci, &m_allocator); result != VK_SUCCESS) { throw VulkanException("Error: vmaCreateAllocator failed!", result); } -} -Device::Device(Device &&other) noexcept { - m_device = std::exchange(other.m_device, nullptr); - m_physical_device = std::exchange(other.m_physical_device, nullptr); + // TODO: Separte constructor setup into smaller methods again! + + // Store the properties of this physical device + vkGetPhysicalDeviceProperties(m_physical_device, &m_properties); + + auto determine_max_usable_sample_count = [&]() { + const auto sample_count = + m_properties.limits.framebufferColorSampleCounts & m_properties.limits.framebufferDepthSampleCounts; + + const VkSampleCountFlagBits sample_count_flag_bits[] = { + VK_SAMPLE_COUNT_64_BIT, VK_SAMPLE_COUNT_32_BIT, VK_SAMPLE_COUNT_16_BIT, + VK_SAMPLE_COUNT_8_BIT, VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_2_BIT, + }; + + for (const auto &sample_count_flag_bit : sample_count_flag_bits) { + if (sample_count & sample_count_flag_bit) { + return sample_count_flag_bit; + } + } + return VK_SAMPLE_COUNT_1_BIT; + }; + + m_max_available_sample_count = determine_max_usable_sample_count(); } Device::~Device() { std::scoped_lock locker(m_mutex); + wait_idle(); // Because the device handle must be valid for the destruction of the command pools in the CommandPool destructor, // we must destroy the command pools manually here in order to ensure the right order of destruction @@ -422,37 +478,29 @@ bool Device::is_presentation_supported(const VkSurfaceKHR surface, const std::ui return supported == VK_TRUE; } -VkSurfaceCapabilitiesKHR Device::get_surface_capabilities(const VkSurfaceKHR surface) const { - VkSurfaceCapabilitiesKHR caps{}; - if (const auto result = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(m_physical_device, surface, &caps); - result != VK_SUCCESS) { - throw VulkanException("Error: vkGetPhysicalDeviceSurfaceCapabilitiesKHR failed!", result); - } - return caps; -} - -bool Device::format_supports_feature(const VkFormat format, const VkFormatFeatureFlagBits feature) const { - VkFormatProperties properties{}; - vkGetPhysicalDeviceFormatProperties(m_physical_device, format, &properties); - return (properties.optimalTilingFeatures & feature) != 0u; -} - -bool Device::surface_supports_usage(const VkSurfaceKHR surface, const VkImageUsageFlagBits usage) const { - const auto capabilities = get_surface_capabilities(surface); - return (capabilities.supportedUsageFlags & usage) != 0u; -} - void Device::execute(const std::string &name, - const std::function &cmd_lambda) const { - // TODO: Support 
other queues (not just graphics) - const auto &cmd_buf = thread_graphics_pool().request_command_buffer(name); - cmd_lambda(cmd_buf); - cmd_buf.submit_and_wait(); + const VkQueueFlagBits queue_type, + const DebugLabelColor dbg_label_color, + const std::function &cmd_buf_recording_func, + const std::span wait_semaphores, + const std::span signal_semaphores) const { + // Request the thread_local command pool for this queue type + const auto &cmd_pool = thread_local_command_pool(queue_type); + // Start recording the command buffer + const auto &cmd_buf = cmd_pool.request_command_buffer(name); + // Begin a debug label region (visible in graphics debuggers like RenderDoc) + cmd_buf.begin_debug_label_region(name, get_debug_label_color(dbg_label_color)); + // Call the external code + std::invoke(cmd_buf_recording_func, cmd_buf); + // End the debug label region + cmd_buf.end_debug_label_region(); + // Submit the command buffer and do necessary synchronization + cmd_buf.submit_and_wait(queue_type, wait_semaphores, signal_semaphores); } std::optional Device::find_queue_family_index_if( const std::function &criteria_lambda) { - for (std::uint32_t index = 0; const auto queue_family : vk_tools::get_queue_family_properties(m_physical_device)) { + for (std::uint32_t index = 0; const auto &queue_family : vk_tools::get_queue_family_properties(m_physical_device)) { if (criteria_lambda(index, queue_family)) { return index; } @@ -461,249 +509,68 @@ std::optional Device::find_queue_family_index_if( return std::nullopt; } -void Device::set_debug_marker_name(void *object, VkDebugReportObjectTypeEXT object_type, - const std::string &name) const { -#ifndef NDEBUG - if (m_vk_debug_marker_set_object_name == nullptr) { - return; - } - - assert(object); - assert(!name.empty()); - assert(m_vk_debug_marker_set_object_name); +const CommandPool &Device::thread_local_command_pool(const VkQueueFlagBits queue_type) const { + // NOTE: thread_local means implicitly static! 
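+ // Each thread gets its own command pool per queue type, so command buffers can be requested from worker + // threads without sharing a pool; the raw pointers below refer to pools that are owned by m_cmd_pools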
+ thread_local CommandPool *cmd_pool_graphics = nullptr; // NOLINT + thread_local CommandPool *cmd_pool_transfer = nullptr; // NOLINT + thread_local CommandPool *cmd_pool_compute = nullptr; // NOLINT - const auto name_info = make_info({ - .objectType = object_type, - .object = reinterpret_cast(object), // NOLINT - .pObjectName = name.c_str(), - }); - - if (const auto result = m_vk_debug_marker_set_object_name(m_device, &name_info); result != VK_SUCCESS) { - throw VulkanException("Failed to assign Vulkan debug marker name " + name + "!", result); - } -#endif -} - -void Device::set_memory_block_attachment(void *object, VkDebugReportObjectTypeEXT object_type, const std::uint64_t name, - const std::size_t memory_size, const void *memory_block) const { -#ifndef NDEBUG - if (m_vk_debug_marker_set_object_tag == nullptr) { - return; - } - - assert(name); - assert(memory_size > 0); - assert(memory_block); - assert(m_vk_debug_marker_set_object_tag); - - const auto tag_info = make_info({ - .objectType = object_type, - .object = reinterpret_cast(object), // NOLINT - .tagName = name, - .tagSize = memory_size, - .pTag = memory_block, - }); - - if (const auto result = m_vk_debug_marker_set_object_tag(m_device, &tag_info); result != VK_SUCCESS) { - throw VulkanException("Failed to assign Vulkan debug marker memory block!", result); - } -#endif -} - -void Device::bind_debug_region(const VkCommandBuffer command_buffer, const std::string &name, - const std::array color) const { -#ifndef NDEBUG - if (m_vk_cmd_debug_marker_begin == nullptr) { - return; - } - - assert(command_buffer); - assert(!name.empty()); - assert(m_vk_cmd_debug_marker_begin); - - auto debug_marker = make_info(); - - std::copy(color.begin(), color.end(), debug_marker.color); - - debug_marker.pMarkerName = name.c_str(); - - m_vk_cmd_debug_marker_begin(command_buffer, &debug_marker); -#endif -} - -void Device::insert_debug_marker(const VkCommandBuffer command_buffer, const std::string &name, - const std::array color) const { -#ifndef NDEBUG - if (m_vk_cmd_debug_marker_insert == nullptr) { - return; - } - - assert(command_buffer); - assert(!name.empty()); - assert(m_vk_cmd_debug_marker_insert); - - auto debug_marker = make_info(); - - std::copy(color.begin(), color.end(), debug_marker.color); - - debug_marker.pMarkerName = name.c_str(); - - m_vk_cmd_debug_marker_insert(command_buffer, &debug_marker); -#endif -} - -void Device::end_debug_region(const VkCommandBuffer command_buffer) const { -#ifndef NDEBUG - if (m_vk_cmd_debug_marker_end == nullptr) { - return; - } - - assert(m_vk_cmd_debug_marker_end); - m_vk_cmd_debug_marker_end(command_buffer); -#endif -} - -void Device::create_command_pool(const VkCommandPoolCreateInfo &command_pool_ci, VkCommandPool *command_pool, - const std::string &name) const { - if (const auto result = vkCreateCommandPool(m_device, &command_pool_ci, nullptr, command_pool); - result != VK_SUCCESS) { - throw VulkanException("Error: vkCreateCommandPool failed for command pool " + name + "!", result); - } - - set_debug_marker_name(&command_pool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, name); -} - -void Device::create_descriptor_pool(const VkDescriptorPoolCreateInfo &descriptor_pool_ci, - VkDescriptorPool *descriptor_pool, const std::string &name) const { - if (const auto result = vkCreateDescriptorPool(m_device, &descriptor_pool_ci, nullptr, descriptor_pool); - result != VK_SUCCESS) { - throw VulkanException("Error: vkCreateDescriptorPool failed for descriptor pool " + name + " !", result); - } - - 
set_debug_marker_name(&descriptor_pool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, name); -} - -void Device::create_descriptor_set_layout(const VkDescriptorSetLayoutCreateInfo &descriptor_set_layout_ci, - VkDescriptorSetLayout *descriptor_set_layout, const std::string &name) const { - if (const auto result = - vkCreateDescriptorSetLayout(m_device, &descriptor_set_layout_ci, nullptr, descriptor_set_layout); - result != VK_SUCCESS) { - throw VulkanException("Error: vkCreateDescriptorSetLayout failed for descriptor " + name + " !", result); - } - - set_debug_marker_name(&descriptor_set_layout, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, name); -} - -void Device::create_fence(const VkFenceCreateInfo &fence_ci, VkFence *fence, const std::string &name) const { - if (const auto result = vkCreateFence(m_device, &fence_ci, nullptr, fence); result != VK_SUCCESS) { - throw VulkanException("Error: vkCreateFence failed for fence " + name + "!", result); - } - - set_debug_marker_name(&fence, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, name); -} - -void Device::create_framebuffer(const VkFramebufferCreateInfo &framebuffer_ci, VkFramebuffer *framebuffer, - const std::string &name) const { - if (const auto result = vkCreateFramebuffer(m_device, &framebuffer_ci, nullptr, framebuffer); - result != VK_SUCCESS) { - throw VulkanException("Error: vkCreateFramebuffer failed for framebuffer " + name + "!", result); - } - - set_debug_marker_name(&framebuffer, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT, name); -} - -void Device::create_graphics_pipeline(const VkGraphicsPipelineCreateInfo &pipeline_ci, VkPipeline *pipeline, - const std::string &name) const { - if (const auto result = vkCreateGraphicsPipelines(m_device, nullptr, 1, &pipeline_ci, nullptr, pipeline); - result != VK_SUCCESS) { - throw VulkanException("Error: vkCreateGraphicsPipelines failed for pipeline " + name + " !", result); - } - - set_debug_marker_name(&pipeline, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, name); -} - -void Device::create_image_view(const VkImageViewCreateInfo &image_view_ci, VkImageView *image_view, - const std::string &name) const { - if (const auto result = vkCreateImageView(m_device, &image_view_ci, nullptr, image_view); result != VK_SUCCESS) { - throw VulkanException("Error: vkCreateImageView failed for image view " + name + "!", result); - } - - set_debug_marker_name(&image_view, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, name); -} - -void Device::create_pipeline_layout(const VkPipelineLayoutCreateInfo &pipeline_layout_ci, - VkPipelineLayout *pipeline_layout, const std::string &name) const { - if (const auto result = vkCreatePipelineLayout(m_device, &pipeline_layout_ci, nullptr, pipeline_layout); - result != VK_SUCCESS) { - throw VulkanException("Error: vkCreatePipelineLayout failed for pipeline layout " + name + "!", result); - } - - set_debug_marker_name(&pipeline_layout, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, name); -} - -void Device::create_render_pass(const VkRenderPassCreateInfo &render_pass_ci, VkRenderPass *render_pass, - const std::string &name) const { - if (const auto result = vkCreateRenderPass(m_device, &render_pass_ci, nullptr, render_pass); result != VK_SUCCESS) { - throw VulkanException("Error: vkCreateRenderPass failed for renderpass " + name + " !", result); - } - - set_debug_marker_name(&render_pass, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, name); -} - -void Device::create_sampler(const VkSamplerCreateInfo &sampler_ci, VkSampler *sampler, const std::string &name) const { - if 
(const auto result = vkCreateSampler(m_device, &sampler_ci, nullptr, sampler); result != VK_SUCCESS) { - throw VulkanException("Error: vkCreateSampler failed for sampler " + name + " !", result); + switch (queue_type) { + case VK_QUEUE_COMPUTE_BIT: { + if (cmd_pool_compute == nullptr) { + auto new_cmd_pool = std::make_unique(*this, VK_QUEUE_COMPUTE_BIT, "Compute"); + std::scoped_lock locker(m_mutex); + cmd_pool_compute = m_cmd_pools.emplace_back(std::move(new_cmd_pool)).get(); + } + return *cmd_pool_compute; } - - set_debug_marker_name(&sampler, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT, name); -} - -void Device::create_semaphore(const VkSemaphoreCreateInfo &semaphore_ci, VkSemaphore *semaphore, - const std::string &name) const { - if (const auto result = vkCreateSemaphore(m_device, &semaphore_ci, nullptr, semaphore); result != VK_SUCCESS) { - throw VulkanException("Error: vkCreateSemaphore failed for " + name + " !", result); + case VK_QUEUE_TRANSFER_BIT: { + if (cmd_pool_transfer == nullptr) { + auto new_cmd_pool = std::make_unique(*this, VK_QUEUE_TRANSFER_BIT, "Transfer"); + std::scoped_lock locker(m_mutex); + cmd_pool_transfer = m_cmd_pools.emplace_back(std::move(new_cmd_pool)).get(); + } + return *cmd_pool_transfer; + } + default: { + // VK_QUEUE_GRAPHICS_BIT and others + if (cmd_pool_graphics == nullptr) { + auto new_cmd_pool = std::make_unique(*this, VK_QUEUE_GRAPHICS_BIT, "Graphics"); + std::scoped_lock locker(m_mutex); + cmd_pool_graphics = m_cmd_pools.emplace_back(std::move(new_cmd_pool)).get(); + } + return *cmd_pool_graphics; } - - set_debug_marker_name(&semaphore, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, name); -} - -void Device::create_shader_module(const VkShaderModuleCreateInfo &shader_module_ci, VkShaderModule *shader_module, - const std::string &name) const { - if (const auto result = vkCreateShaderModule(m_device, &shader_module_ci, nullptr, shader_module); - result != VK_SUCCESS) { - throw VulkanException("Error: vkCreateShaderModule failed for shader module " + name + "!", result); } - - set_debug_marker_name(&shader_module, VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, name); } -void Device::create_swapchain(const VkSwapchainCreateInfoKHR &swapchain_ci, VkSwapchainKHR *swapchain, - const std::string &name) const { - if (const auto result = vkCreateSwapchainKHR(m_device, &swapchain_ci, nullptr, swapchain); result != VK_SUCCESS) { - throw VulkanException("Error: vkCreateSwapchainKHR failed for swapchain " + name + "!", result); +void Device::set_debug_utils_object_name(const VkObjectType obj_type, + const std::uint64_t obj_handle, + const std::string &name) const { + if (!obj_handle) { + throw std::runtime_error( + "[Device::set_debug_utils_object_name] Error: Parameter 'obj_handle' is an invalid pointer!"); } + const auto dbg_obj_name = wrapper::make_info({ + .objectType = obj_type, + .objectHandle = obj_handle, + .pObjectName = name.c_str(), + }); - set_debug_marker_name(&swapchain, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, name); -} - -CommandPool &Device::thread_graphics_pool() const { - // Note that thread_graphics_pool is implicitely static! 
- thread_local CommandPool *thread_graphics_pool = nullptr; // NOLINT - if (thread_graphics_pool == nullptr) { - auto cmd_pool = std::make_unique(*this, "graphics pool"); - std::scoped_lock locker(m_mutex); - thread_graphics_pool = m_cmd_pools.emplace_back(std::move(cmd_pool)).get(); + if (const auto result = vkSetDebugUtilsObjectNameEXT(m_device, &dbg_obj_name); result != VK_SUCCESS) { + throw VulkanException("Error: Failed to assign debug name using debug utils", result); } - return *thread_graphics_pool; } -const CommandBuffer &Device::request_command_buffer(const std::string &name) { - return thread_graphics_pool().request_command_buffer(name); -} - -void Device::wait_idle() const { - if (const auto result = vkDeviceWaitIdle(m_device); result != VK_SUCCESS) { - throw VulkanException("Error: vkDeviceWaitIdle failed!", result); +void Device::wait_idle(const VkQueue queue) const { + if (queue == VK_NULL_HANDLE) { + if (const auto result = vkDeviceWaitIdle(m_device); result != VK_SUCCESS) { + throw VulkanException("Error: vkDeviceWaitIdle failed!", result); + } + } else { + if (const auto result = vkQueueWaitIdle(queue); result != VK_SUCCESS) { + throw VulkanException("Error: vkQueueWaitIdle failed!", result); + } } } diff --git a/src/vulkan-renderer/wrapper/framebuffer.cpp b/src/vulkan-renderer/wrapper/framebuffer.cpp deleted file mode 100644 index 0182fef80..000000000 --- a/src/vulkan-renderer/wrapper/framebuffer.cpp +++ /dev/null @@ -1,35 +0,0 @@ -#include "inexor/vulkan-renderer/wrapper/framebuffer.hpp" - -#include "inexor/vulkan-renderer/wrapper/device.hpp" -#include "inexor/vulkan-renderer/wrapper/make_info.hpp" -#include "inexor/vulkan-renderer/wrapper/swapchain.hpp" - -#include -#include - -namespace inexor::vulkan_renderer::wrapper { - -Framebuffer::Framebuffer(const Device &device, VkRenderPass render_pass, const std::vector &attachments, - const Swapchain &swapchain, std::string name) - : m_device(device), m_name(std::move(name)) { - m_device.create_framebuffer(make_info({ - .renderPass = render_pass, - .attachmentCount = static_cast(attachments.size()), - .pAttachments = attachments.data(), - .width = swapchain.extent().width, - .height = swapchain.extent().height, - .layers = 1, - }), - &m_framebuffer, m_name); -} - -Framebuffer::Framebuffer(Framebuffer &&other) noexcept : m_device(other.m_device) { - m_framebuffer = std::exchange(other.m_framebuffer, nullptr); - m_name = std::move(other.m_name); -} - -Framebuffer::~Framebuffer() { - vkDestroyFramebuffer(m_device.device(), m_framebuffer, nullptr); -} - -} // namespace inexor::vulkan_renderer::wrapper diff --git a/src/vulkan-renderer/wrapper/gpu_memory_buffer.cpp b/src/vulkan-renderer/wrapper/gpu_memory_buffer.cpp deleted file mode 100644 index dcd060d87..000000000 --- a/src/vulkan-renderer/wrapper/gpu_memory_buffer.cpp +++ /dev/null @@ -1,76 +0,0 @@ -#include "inexor/vulkan-renderer/wrapper/gpu_memory_buffer.hpp" - -#include "inexor/vulkan-renderer/exception.hpp" -#include "inexor/vulkan-renderer/wrapper/device.hpp" -#include "inexor/vulkan-renderer/wrapper/make_info.hpp" - -#include - -#include -#include - -namespace inexor::vulkan_renderer::wrapper { - -GPUMemoryBuffer::GPUMemoryBuffer(const Device &device, const std::string &name, const VkDeviceSize &size, - const VkBufferUsageFlags &buffer_usage, const VmaMemoryUsage &memory_usage) - : m_device(device), m_name(name), m_buffer_size(size) { - assert(device.device()); - assert(device.allocator()); - assert(!name.empty()); - - spdlog::trace("Creating GPU memory buffer of size 
{} for {}", size, name); - - const auto buffer_ci = make_info({ - .size = size, - .usage = buffer_usage, - .sharingMode = VK_SHARING_MODE_EXCLUSIVE, - }); - - const VmaAllocationCreateInfo m_allocation_ci{ - .flags = VMA_ALLOCATION_CREATE_MAPPED_BIT, - .usage = memory_usage, - }; - - // TODO: Should we create this buffer as mapped? - // TODO: Is it good to have memory mapped all the time? - // TODO: When should memory be mapped / unmapped? - - if (const auto result = vmaCreateBuffer(m_device.allocator(), &buffer_ci, &m_allocation_ci, &m_buffer, - &m_allocation, &m_allocation_info); - result != VK_SUCCESS) { - throw VulkanException("Error: GPU memory buffer allocation for " + name + " failed!", result); - } - - // Assign an internal debug marker name to this buffer. - m_device.set_debug_marker_name(m_buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, name); - - vmaSetAllocationName(m_device.allocator(), m_allocation, m_name.c_str()); -} - -GPUMemoryBuffer::GPUMemoryBuffer(const Device &device, const std::string &name, const VkDeviceSize &buffer_size, - const void *data, const std::size_t data_size, const VkBufferUsageFlags &buffer_usage, - const VmaMemoryUsage &memory_usage) - : GPUMemoryBuffer(device, name, buffer_size, buffer_usage, memory_usage) { - assert(device.device()); - assert(device.allocator()); - assert(!name.empty()); - assert(buffer_size > 0); - assert(data_size > 0); - assert(data); - - // Copy the memory into the buffer! - std::memcpy(m_allocation_info.pMappedData, data, data_size); -} - -GPUMemoryBuffer::GPUMemoryBuffer(GPUMemoryBuffer &&other) noexcept : m_device(other.m_device) { - m_name = std::move(other.m_name); - m_buffer = std::exchange(other.m_buffer, nullptr); - m_allocation = std::exchange(other.m_allocation, nullptr); - m_allocation_info = other.m_allocation_info; -} - -GPUMemoryBuffer::~GPUMemoryBuffer() { - vmaDestroyBuffer(m_device.allocator(), m_buffer, m_allocation); -} - -} // namespace inexor::vulkan_renderer::wrapper diff --git a/src/vulkan-renderer/wrapper/gpu_texture.cpp b/src/vulkan-renderer/wrapper/gpu_texture.cpp deleted file mode 100644 index 7dcd80483..000000000 --- a/src/vulkan-renderer/wrapper/gpu_texture.cpp +++ /dev/null @@ -1,102 +0,0 @@ -#include "inexor/vulkan-renderer/wrapper/gpu_texture.hpp" - -#include "inexor/vulkan-renderer/wrapper/cpu_texture.hpp" -#include "inexor/vulkan-renderer/wrapper/make_info.hpp" - -#include - -#include - -namespace inexor::vulkan_renderer::wrapper { - -GpuTexture::GpuTexture(const Device &device, const CpuTexture &cpu_texture) - : m_device(device), m_texture_width(cpu_texture.width()), m_texture_height(cpu_texture.height()), - m_texture_channels(cpu_texture.channels()), m_mip_levels(cpu_texture.mip_levels()), m_name(cpu_texture.name()) { - create_texture(cpu_texture.data(), cpu_texture.data_size()); -} - -GpuTexture::GpuTexture(const Device &device, void *data, const std::size_t data_size, const int texture_width, - const int texture_height, const int texture_channels, const int mip_levels, std::string name) - : m_device(device), m_texture_width(texture_width), m_texture_height(texture_height), - m_texture_channels(texture_channels), m_mip_levels(mip_levels), m_name(std::move(name)) { - create_texture(data, data_size); -} - -GpuTexture::GpuTexture(GpuTexture &&other) noexcept - : m_device(other.m_device), m_texture_image_format(other.m_texture_image_format) { - m_texture_image = std::exchange(other.m_texture_image, nullptr); - m_name = std::move(other.m_name); - m_texture_width = other.m_texture_width; - 
m_texture_height = other.m_texture_height; - m_texture_channels = other.m_texture_channels; - m_mip_levels = other.m_mip_levels; - m_sampler = std::exchange(other.m_sampler, nullptr); -} - -GpuTexture::~GpuTexture() { - vkDestroySampler(m_device.device(), m_sampler, nullptr); -} - -void GpuTexture::create_texture(void *texture_data, const std::size_t texture_size) { - const VkExtent2D extent{ - // Because stb_image stored the texture's width and height as a normal int, we need a cast here - .width = static_cast(m_texture_width), - .height = static_cast(m_texture_height), - }; - - m_texture_image = std::make_unique(m_device, m_texture_image_format, - VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, - VK_IMAGE_ASPECT_COLOR_BIT, VK_SAMPLE_COUNT_1_BIT, m_name, extent); - - const VkBufferImageCopy copy_region{ - .bufferOffset = 0, - .bufferRowLength = 0, - .bufferImageHeight = 0, - .imageSubresource{.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .mipLevel = 0, .baseArrayLayer = 0, .layerCount = 1}, - .imageOffset = {0, 0, 0}, - // Because stb_image stored the texture's width and height as a normal int, we need a cast here - .imageExtent = {static_cast(m_texture_width), static_cast(m_texture_height), 1}, - }; - - m_device.execute(m_name, [&](const CommandBuffer &cmd_buf) { - cmd_buf - .change_image_layout(m_texture_image->get(), VK_IMAGE_LAYOUT_UNDEFINED, - VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) - .copy_buffer_to_image(texture_data, static_cast(texture_size), m_texture_image->get(), - copy_region, m_name) - .change_image_layout(m_texture_image->get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, - VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); - }); - - create_texture_sampler(); -} - -void GpuTexture::create_texture_sampler() { - VkPhysicalDeviceFeatures device_features; - vkGetPhysicalDeviceFeatures(m_device.physical_device(), &device_features); - - VkPhysicalDeviceProperties graphics_card_properties; - vkGetPhysicalDeviceProperties(m_device.physical_device(), &graphics_card_properties); - - const auto sampler_ci = make_info({ - .magFilter = VK_FILTER_LINEAR, - .minFilter = VK_FILTER_LINEAR, - .mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR, - .addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT, - .addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT, - .addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT, - .mipLodBias = 0.0f, - .anisotropyEnable = VK_FALSE, - .maxAnisotropy = 1.0f, - .compareEnable = VK_FALSE, - .compareOp = VK_COMPARE_OP_ALWAYS, - .minLod = 0.0f, - .maxLod = 0.0f, - .borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK, - .unnormalizedCoordinates = VK_FALSE, - }); - - m_device.create_sampler(sampler_ci, &m_sampler, m_name); -} - -} // namespace inexor::vulkan_renderer::wrapper diff --git a/src/vulkan-renderer/wrapper/image.cpp b/src/vulkan-renderer/wrapper/image.cpp deleted file mode 100644 index 77b3b1972..000000000 --- a/src/vulkan-renderer/wrapper/image.cpp +++ /dev/null @@ -1,85 +0,0 @@ -#include "inexor/vulkan-renderer/wrapper/image.hpp" - -#include "inexor/vulkan-renderer/exception.hpp" -#include "inexor/vulkan-renderer/wrapper/device.hpp" -#include "inexor/vulkan-renderer/wrapper/make_info.hpp" - -#include -#include - -namespace inexor::vulkan_renderer::wrapper { - -Image::Image(const Device &device, const VkFormat format, const VkImageUsageFlags image_usage, - const VkImageAspectFlags aspect_flags, const VkSampleCountFlagBits sample_count, const std::string &name, - const VkExtent2D image_extent) - : m_device(device), m_format(format), m_name(name) { - assert(device.device()); - 
assert(device.physical_device()); - assert(device.allocator()); - assert(image_extent.width > 0); - assert(image_extent.height > 0); - assert(!name.empty()); - - const auto image_ci = make_info({ - .imageType = VK_IMAGE_TYPE_2D, - .format = format, - .extent{ - .width = image_extent.width, - .height = image_extent.height, - .depth = 1, - }, - .mipLevels = 1, - .arrayLayers = 1, - .samples = sample_count, - .tiling = VK_IMAGE_TILING_OPTIMAL, - .usage = image_usage, - .sharingMode = VK_SHARING_MODE_EXCLUSIVE, - .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED, - }); - - const VmaAllocationCreateInfo vma_allocation_ci{ - .flags = VMA_ALLOCATION_CREATE_MAPPED_BIT, - .usage = VMA_MEMORY_USAGE_GPU_ONLY, - }; - - if (const auto result = vmaCreateImage(m_device.allocator(), &image_ci, &vma_allocation_ci, &m_image, &m_allocation, - &m_allocation_info); - result != VK_SUCCESS) { - throw VulkanException("Error: vmaCreateImage failed for image " + m_name + "!", result); - } - - vmaSetAllocationName(m_device.allocator(), m_allocation, m_name.c_str()); - - // Assign an internal name using Vulkan debug markers. - m_device.set_debug_marker_name(m_image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, m_name); - - m_device.create_image_view(make_info({ - .image = m_image, - .viewType = VK_IMAGE_VIEW_TYPE_2D, - .format = format, - .subresourceRange{ - .aspectMask = aspect_flags, - .baseMipLevel = 0, - .levelCount = 1, - .baseArrayLayer = 0, - .layerCount = 1, - }, - }), - &m_image_view, m_name); -} - -Image::Image(Image &&other) noexcept : m_device(other.m_device) { - m_allocation = other.m_allocation; - m_allocation_info = other.m_allocation_info; - m_image = other.m_image; - m_format = other.m_format; - m_image_view = other.m_image_view; - m_name = std::move(other.m_name); -} - -Image::~Image() { - vkDestroyImageView(m_device.device(), m_image_view, nullptr); - vmaDestroyImage(m_device.allocator(), m_image, m_allocation); -} - -} // namespace inexor::vulkan_renderer::wrapper diff --git a/src/vulkan-renderer/wrapper/instance.cpp b/src/vulkan-renderer/wrapper/instance.cpp index 3b48e4bef..87bc0df50 100644 --- a/src/vulkan-renderer/wrapper/instance.cpp +++ b/src/vulkan-renderer/wrapper/instance.cpp @@ -69,13 +69,22 @@ bool Instance::is_extension_supported(const std::string &extension_name) { }) != instance_extensions.end(); } -Instance::Instance(const std::string &application_name, const std::string &engine_name, - const std::uint32_t application_version, const std::uint32_t engine_version, - bool enable_validation_layers, bool enable_renderdoc_layer, +Instance::Instance(const std::string &application_name, + const std::string &engine_name, + const std::uint32_t application_version, + const std::uint32_t engine_version, + const PFN_vkDebugUtilsMessengerCallbackEXT debug_callback, const std::vector &requested_instance_extensions, const std::vector &requested_instance_layers) { - assert(!application_name.empty()); - assert(!engine_name.empty()); + if (application_name.empty()) { + throw std::invalid_argument("Error: Application name is empty!"); + } + if (engine_name.empty()) { + throw std::invalid_argument("Error: Engine name is empty!"); + } + if (debug_callback == nullptr) { + throw std::invalid_argument("Error: Invalid debug utils messenger callback!"); + } spdlog::trace("Initializing Vulkan metaloader"); if (const auto result = volkInitialize(); result != VK_SUCCESS) { @@ -125,11 +134,8 @@ Instance::Instance(const std::string &application_name, const std::string &engin std::vector instance_extension_wishlist = { #ifndef 
NDEBUG - // In debug mode, we use the following instance extensions: - // This one is for assigning internal names to Vulkan resources. + // VK_EXT_debug_utils VK_EXT_DEBUG_UTILS_EXTENSION_NAME, - // This one is for setting up a Vulkan debug report callback function. - VK_EXT_DEBUG_REPORT_EXTENSION_NAME, #endif }; @@ -159,9 +165,9 @@ Instance::Instance(const std::string &application_name, const std::string &engin std::vector enabled_instance_extensions{}; - spdlog::trace("List of enabled instance extensions:"); + spdlog::trace("Enabled instance extensions:"); - // We are not checking for duplicated entries but this is no problem. + // We are not checking for duplicated entries but this is not a problem for (const auto &instance_extension : instance_extension_wishlist) { if (is_extension_supported(instance_extension)) { spdlog::trace(" - {} ", instance_extension); @@ -171,31 +177,9 @@ Instance::Instance(const std::string &application_name, const std::string &engin } } - std::vector instance_layers_wishlist{}; - - spdlog::trace("Instance layer wishlist:"); - -#ifndef NDEBUG - // RenderDoc is a very useful open source graphics debugger for Vulkan and other APIs. - // Not using it all the time during development is fine, but as soon as something crashes - // you should enable it, take a snapshot and look up what's wrong. - if (enable_renderdoc_layer) { - spdlog::trace(" - VK_LAYER_RENDERDOC_Capture"); - instance_layers_wishlist.push_back("VK_LAYER_RENDERDOC_Capture"); - } - - // We can't stress enough how important it is to use validation layers during development! - // Validation layers in Vulkan are in-depth error checks for the application's use of the API. - // They check for a multitude of possible errors. They can be disabled easily for releases. - // Understand that in contrary to other APIs, in Vulkan API the driver provides no error checks - // for you! If you use Vulkan API incorrectly, your application will likely just crash. - // To avoid this, you must use validation layers during development! - if (enable_validation_layers) { - spdlog::trace(" - VK_LAYER_KHRONOS_validation"); - instance_layers_wishlist.push_back("VK_LAYER_KHRONOS_validation"); - } - -#endif + std::vector instance_layers_wishlist{ + "VK_LAYER_KHRONOS_validation", + }; // Add requested instance layers to wishlist. for (const auto &instance_layer : requested_instance_layers) { @@ -204,7 +188,7 @@ Instance::Instance(const std::string &application_name, const std::string &engin std::vector enabled_instance_layers{}; - spdlog::trace("List of enabled instance layers:"); + spdlog::trace("Enabled instance layers:"); // We have to check which instance layers of our wishlist are available on the current system! // We are not checking for duplicated entries but this is no problem. 
@@ -213,13 +197,7 @@ Instance::Instance(const std::string &application_name, const std::string &engin spdlog::trace(" - {}", current_layer); enabled_instance_layers.push_back(current_layer); } else { -#ifdef NDEBUG - if (std::string(current_layer) == VK_EXT_DEBUG_MARKER_EXTENSION_NAME) { - spdlog::error("You can't use command line argument -renderdoc in release mode"); - } -#else - spdlog::trace("Requested instance layer {} is not available on this system!", current_layer); -#endif + spdlog::error("Requested instance layer {} is not available on this system!", current_layer); } } @@ -231,24 +209,47 @@ Instance::Instance(const std::string &application_name, const std::string &engin .ppEnabledExtensionNames = enabled_instance_extensions.data(), }); + // Note that an internal debug name will be assigned to the instance inside of the device wrapper if (const auto result = vkCreateInstance(&instance_ci, nullptr, &m_instance); result != VK_SUCCESS) { throw VulkanException("Error: vkCreateInstance failed!", result); } volkLoadInstanceOnly(m_instance); -} -Instance::Instance(const std::string &application_name, const std::string &engine_name, - const std::uint32_t application_version, const std::uint32_t engine_version, - bool enable_validation_layers, bool enable_renderdoc_layer) - : Instance(application_name, engine_name, application_version, engine_version, enable_validation_layers, - enable_renderdoc_layer, {}, {}) {} + // Note that we can only call is_extension_supported afer volkLoadInstanceOnly! + if (!is_extension_supported(VK_EXT_DEBUG_UTILS_EXTENSION_NAME)) { + // Don't forget to destroy the instance before throwing the exception! + vkDestroyInstance(m_instance, nullptr); + throw std::runtime_error("Error: VK_EXT_DEBUG_UTILS_EXTENSION_NAME is not supported!"); + } + + const auto dbg_messenger_ci = make_info({ + .messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT, + .messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT, + .pfnUserCallback = debug_callback, + .pUserData = nullptr, + }); + + if (const auto result = vkCreateDebugUtilsMessengerEXT(m_instance, &dbg_messenger_ci, nullptr, &m_debug_callback); + result != VK_SUCCESS) { + // Don't forget to destroy the instance before throwing the exception! + vkDestroyInstance(m_instance, nullptr); + throw VulkanException( + "Error: Could not create Vulkan validation layer debug callback! 
(vkCreateDebugUtilsMessengerEXT failed!)", + result); + } +} Instance::Instance(Instance &&other) noexcept { m_instance = std::exchange(other.m_instance, nullptr); + m_debug_callback = std::exchange(other.m_debug_callback, nullptr); } Instance::~Instance() { + vkDestroyDebugUtilsMessengerEXT(m_instance, m_debug_callback, nullptr); vkDestroyInstance(m_instance, nullptr); } diff --git a/src/vulkan-renderer/wrapper/make_info.cpp b/src/vulkan-renderer/wrapper/make_info.cpp index 4e840def8..8d3119f0b 100644 --- a/src/vulkan-renderer/wrapper/make_info.cpp +++ b/src/vulkan-renderer/wrapper/make_info.cpp @@ -1,6 +1,6 @@ #include "inexor/vulkan-renderer/wrapper/make_info.hpp" -#include +#include namespace inexor::vulkan_renderer::wrapper { @@ -16,6 +16,12 @@ VkBufferCreateInfo make_info(VkBufferCreateInfo info) { return info; } +template <> +VkBufferMemoryBarrier make_info(VkBufferMemoryBarrier info) { + info.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER; + return info; +} + template <> VkCommandBufferAllocateInfo make_info(VkCommandBufferAllocateInfo info) { info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; @@ -53,8 +59,20 @@ VkDebugMarkerObjectTagInfoEXT make_info(VkDebugMarkerObjectTagInfoEXT info) { } template <> -VkDebugReportCallbackCreateInfoEXT make_info(VkDebugReportCallbackCreateInfoEXT info) { - info.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT; +VkDebugUtilsLabelEXT make_info(VkDebugUtilsLabelEXT info) { + info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT; + return info; +} + +template <> +VkDebugUtilsMessengerCreateInfoEXT make_info(VkDebugUtilsMessengerCreateInfoEXT info) { + info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT; + return info; +} + +template <> +VkDebugUtilsObjectNameInfoEXT make_info(VkDebugUtilsObjectNameInfoEXT info) { + info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT; return info; } @@ -94,12 +112,6 @@ VkFenceCreateInfo make_info(VkFenceCreateInfo info) { return info; } -template <> -VkFramebufferCreateInfo make_info(VkFramebufferCreateInfo info) { - info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO; - return info; -} - template <> VkGraphicsPipelineCreateInfo make_info(VkGraphicsPipelineCreateInfo info) { info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; @@ -136,6 +148,12 @@ VkMemoryBarrier make_info(VkMemoryBarrier info) { return info; } +template <> +VkPhysicalDeviceDynamicRenderingFeatures make_info(VkPhysicalDeviceDynamicRenderingFeatures info) { + info.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DYNAMIC_RENDERING_FEATURES; + return info; +} + template <> VkPipelineColorBlendStateCreateInfo make_info(VkPipelineColorBlendStateCreateInfo info) { info.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; @@ -148,6 +166,12 @@ VkPipelineDepthStencilStateCreateInfo make_info(VkPipelineDepthStencilStateCreat return info; } +template <> +VkPipelineDynamicStateCreateInfo make_info(VkPipelineDynamicStateCreateInfo info) { + info.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; + return info; +} + template <> VkPipelineInputAssemblyStateCreateInfo make_info(VkPipelineInputAssemblyStateCreateInfo info) { info.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; @@ -172,12 +196,24 @@ VkPipelineRasterizationStateCreateInfo make_info(VkPipelineRasterizationStateCre return info; } +template <> +VkPipelineRenderingCreateInfo make_info(VkPipelineRenderingCreateInfo info) { + info.sType = VK_STRUCTURE_TYPE_PIPELINE_RENDERING_CREATE_INFO; + return info; +} + 
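+// NOTE: Each make_info<T> specialization only pre-fills the sType member that matches T, so call sites can use +// designated initializers without repeating the sType. A typical call looks roughly like this (sketch, actual +// call sites may differ): make_info<VkRenderingInfo>({ .renderArea = render_area, .layerCount = 1 });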
template <> VkPipelineShaderStageCreateInfo make_info(VkPipelineShaderStageCreateInfo info) { info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; return info; } +template <> +VkPipelineTessellationStateCreateInfo make_info(VkPipelineTessellationStateCreateInfo info) { + info.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO; + return info; +} + template <> VkPipelineVertexInputStateCreateInfo make_info(VkPipelineVertexInputStateCreateInfo info) { info.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; @@ -197,14 +233,14 @@ VkPresentInfoKHR make_info(VkPresentInfoKHR info) { } template <> -VkRenderPassBeginInfo make_info(VkRenderPassBeginInfo info) { - info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; +VkRenderingAttachmentInfo make_info(VkRenderingAttachmentInfo info) { + info.sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO; return info; } template <> -VkRenderPassCreateInfo make_info(VkRenderPassCreateInfo info) { - info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; +VkRenderingInfo make_info(VkRenderingInfo info) { + info.sType = VK_STRUCTURE_TYPE_RENDERING_INFO; return info; } diff --git a/src/vulkan-renderer/wrapper/pipelines/pipeline.cpp b/src/vulkan-renderer/wrapper/pipelines/pipeline.cpp new file mode 100644 index 000000000..99391a5c7 --- /dev/null +++ b/src/vulkan-renderer/wrapper/pipelines/pipeline.cpp @@ -0,0 +1,44 @@ +#include "inexor/vulkan-renderer/wrapper/pipelines/pipeline.hpp" + +#include "inexor/vulkan-renderer/exception.hpp" +#include "inexor/vulkan-renderer/wrapper/device.hpp" +#include "inexor/vulkan-renderer/wrapper/make_info.hpp" + +#include + +namespace inexor::vulkan_renderer::wrapper::pipelines { + +GraphicsPipeline::GraphicsPipeline(const Device &device, + std::vector descriptor_set_layouts, + std::vector push_constant_ranges, + VkGraphicsPipelineCreateInfo pipeline_ci, + std::string name) + : m_device(device), m_name(std::move(name)) { + + // Create the graphics pipeline layout + m_pipeline_layout = std::make_unique(m_device, m_name, std::move(descriptor_set_layouts), + std::move(push_constant_ranges)); + + // Set the pipeline layout + pipeline_ci.layout = m_pipeline_layout->m_pipeline_layout; + + // Then create the graphics pipeline + if (const auto result = + vkCreateGraphicsPipelines(m_device.device(), nullptr, 1, &pipeline_ci, nullptr, &m_pipeline); + result != VK_SUCCESS) { + throw VulkanException("Error: vkCreateGraphicsPipelines failed for pipeline " + m_name + " !", result); + } + m_device.set_debug_name(m_pipeline, m_name); +} + +GraphicsPipeline::GraphicsPipeline(GraphicsPipeline &&other) noexcept : m_device(other.m_device) { + m_pipeline = std::exchange(other.m_pipeline, VK_NULL_HANDLE); + m_pipeline_layout = std::exchange(other.m_pipeline_layout, nullptr); + m_name = std::move(other.m_name); +} + +GraphicsPipeline::~GraphicsPipeline() { + vkDestroyPipeline(m_device.device(), m_pipeline, nullptr); +} + +} // namespace inexor::vulkan_renderer::wrapper::pipelines diff --git a/src/vulkan-renderer/wrapper/pipelines/pipeline_builder.cpp b/src/vulkan-renderer/wrapper/pipelines/pipeline_builder.cpp new file mode 100644 index 000000000..4e9232e79 --- /dev/null +++ b/src/vulkan-renderer/wrapper/pipelines/pipeline_builder.cpp @@ -0,0 +1,343 @@ +#include "inexor/vulkan-renderer/wrapper/pipelines/pipeline_builder.hpp" + +#include "inexor/vulkan-renderer/wrapper/device.hpp" + +#include + +namespace inexor::vulkan_renderer::wrapper::pipelines { + +GraphicsPipelineBuilder::GraphicsPipelineBuilder(const 
Device &device) : m_device(device) {
+    reset();
+}
+
+GraphicsPipelineBuilder::GraphicsPipelineBuilder(GraphicsPipelineBuilder &&other) noexcept : m_device(other.m_device) {
+    m_pipeline_rendering_ci = std::move(other.m_pipeline_rendering_ci);
+    m_color_attachments = std::move(other.m_color_attachments);
+    m_depth_attachment_format = other.m_depth_attachment_format;
+    m_stencil_attachment_format = other.m_stencil_attachment_format;
+    m_shader_stages = std::move(other.m_shader_stages);
+    m_vertex_input_binding_descriptions = std::move(other.m_vertex_input_binding_descriptions);
+    m_vertex_input_attribute_descriptions = std::move(other.m_vertex_input_attribute_descriptions);
+    m_vertex_input_sci = std::move(other.m_vertex_input_sci);
+    m_input_assembly_sci = std::move(other.m_input_assembly_sci);
+    m_tesselation_sci = std::move(other.m_tesselation_sci);
+    m_viewport_sci = std::move(other.m_viewport_sci);
+    m_viewports = std::move(other.m_viewports);
+    m_scissors = std::move(other.m_scissors);
+    m_rasterization_sci = std::move(other.m_rasterization_sci);
+    m_multisample_sci = std::move(other.m_multisample_sci);
+    m_depth_stencil_sci = std::move(other.m_depth_stencil_sci);
+    m_color_blend_sci = std::move(other.m_color_blend_sci);
+    m_dynamic_states = std::move(other.m_dynamic_states);
+    m_dynamic_states_sci = std::move(other.m_dynamic_states_sci);
+    m_pipeline_layout = std::exchange(other.m_pipeline_layout, VK_NULL_HANDLE);
+    m_color_blend_attachment_states = std::move(other.m_color_blend_attachment_states);
+}
+
+std::shared_ptr<GraphicsPipeline> GraphicsPipelineBuilder::build(std::string name) {
+    if (name.empty()) {
+        throw std::invalid_argument("[GraphicsPipelineBuilder::build] Error: 'name' is empty!");
+    }
+    // NOTE: Inside of GraphicsPipelineBuilder, we carry out no error checks when it comes to the data which is used to
+    // build the graphics pipeline. This is because validation of this data is the job of the validation layers, and not
+    // the job of GraphicsPipelineBuilder. We should not mimic the behaviour of validation layers here.
+
+    m_pipeline_rendering_ci = make_info<VkPipelineRenderingCreateInfo>({
+        // TODO: Support multiview rendering and expose viewMask parameter
+        .colorAttachmentCount = static_cast<std::uint32_t>(m_color_attachments.size()),
+        .pColorAttachmentFormats = m_color_attachments.data(),
+        .depthAttachmentFormat = m_depth_attachment_format,
+        .stencilAttachmentFormat = m_stencil_attachment_format,
+    });
+
+    m_vertex_input_sci = make_info<VkPipelineVertexInputStateCreateInfo>({
+        .vertexBindingDescriptionCount = static_cast<std::uint32_t>(m_vertex_input_binding_descriptions.size()),
+        .pVertexBindingDescriptions = m_vertex_input_binding_descriptions.data(),
+        .vertexAttributeDescriptionCount = static_cast<std::uint32_t>(m_vertex_input_attribute_descriptions.size()),
+        .pVertexAttributeDescriptions = m_vertex_input_attribute_descriptions.data(),
+    });
+
+    m_viewport_sci = make_info<VkPipelineViewportStateCreateInfo>({
+        .viewportCount = static_cast<std::uint32_t>(m_viewports.size()),
+        .pViewports = m_viewports.data(),
+        .scissorCount = static_cast<std::uint32_t>(m_scissors.size()),
+        .pScissors = m_scissors.data(),
+    });
+
+    m_color_blend_sci = wrapper::make_info<VkPipelineColorBlendStateCreateInfo>({
+        .attachmentCount = static_cast<std::uint32_t>(m_color_blend_attachment_states.size()),
+        .pAttachments = m_color_blend_attachment_states.data(),
+    });
+
+    m_dynamic_states_sci = make_info<VkPipelineDynamicStateCreateInfo>({
+        .dynamicStateCount = static_cast<std::uint32_t>(m_dynamic_states.size()),
+        .pDynamicStates = m_dynamic_states.data(),
+    });
+
+    const auto pipeline_ci = make_info<VkGraphicsPipelineCreateInfo>({
+        // NOTE: This is one of those rare cases where pNext is actually not nullptr!
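+        // Chaining VkPipelineRenderingCreateInfo through pNext is required for dynamic rendering: since
+        // .renderPass is VK_NULL_HANDLE below, the attachment formats set above are the only way the driver
+        // learns about the render targets of this pipeline.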
+ .pNext = &m_pipeline_rendering_ci, + .stageCount = static_cast(m_shader_stages.size()), + .pStages = m_shader_stages.data(), + .pVertexInputState = &m_vertex_input_sci, + .pInputAssemblyState = &m_input_assembly_sci, + .pTessellationState = &m_tesselation_sci, + .pViewportState = &m_viewport_sci, + .pRasterizationState = &m_rasterization_sci, + .pMultisampleState = &m_multisample_sci, + .pDepthStencilState = &m_depth_stencil_sci, + .pColorBlendState = &m_color_blend_sci, + .pDynamicState = &m_dynamic_states_sci, + .layout = m_pipeline_layout, + // NOTE: This is VK_NULL_HANDLE because we use dynamic rendering + .renderPass = VK_NULL_HANDLE, + }); + + auto graphics_pipeline = std::make_shared(m_device, std::vector{m_descriptor_set_layout}, + m_push_constant_ranges, pipeline_ci, std::move(name)); + + // NOTE: We reset the data of the builder here so it can be re-used + reset(); + + // Return the graphics pipeline we created + return graphics_pipeline; +} + +void GraphicsPipelineBuilder::reset() { + m_pipeline_rendering_ci = make_info(); + m_color_attachments = {}; + m_depth_attachment_format = VK_FORMAT_UNDEFINED; + m_stencil_attachment_format = VK_FORMAT_UNDEFINED; + + m_shader_stages = {}; + + m_vertex_input_binding_descriptions = {}; + m_vertex_input_attribute_descriptions = {}; + m_vertex_input_sci = make_info(); + + m_input_assembly_sci = make_info({ + .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, + .primitiveRestartEnable = VK_FALSE, + }); + + m_tesselation_sci = make_info(); + + m_viewports = {}; + m_scissors = {}; + m_viewport_sci = make_info(); + + m_rasterization_sci = make_info({ + .polygonMode = VK_POLYGON_MODE_FILL, + .cullMode = VK_CULL_MODE_BACK_BIT, + .frontFace = VK_FRONT_FACE_CLOCKWISE, + .lineWidth = 1.0f, + }); + + m_multisample_sci = make_info({ + .rasterizationSamples = VK_SAMPLE_COUNT_1_BIT, + .sampleShadingEnable = VK_FALSE, + .minSampleShading = 1.0f, + }); + + m_depth_stencil_sci = make_info(); + m_color_blend_sci = make_info(); + + m_dynamic_states = {}; + m_dynamic_states_sci = make_info(); + + m_pipeline_layout = VK_NULL_HANDLE; + m_color_blend_attachment_states = {}; +} + +GraphicsPipelineBuilder &GraphicsPipelineBuilder::add_color_attachment_format(const VkFormat format) { + m_color_attachments.push_back(format); + return *this; +} + +GraphicsPipelineBuilder & +GraphicsPipelineBuilder::add_color_blend_attachment(const VkPipelineColorBlendAttachmentState &attachment) { + m_color_blend_attachment_states.push_back(attachment); + return *this; +} + +/// Add the default color blend attachment +/// @return A reference to the dereferenced this pointer (allows method calls to be chained) +[[nodiscard]] GraphicsPipelineBuilder &GraphicsPipelineBuilder::add_default_color_blend_attachment() { + return add_color_blend_attachment({ + .blendEnable = VK_TRUE, + .srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA, + .dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA, + .colorBlendOp = VK_BLEND_OP_ADD, + .srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE, + .dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO, + .alphaBlendOp = VK_BLEND_OP_ADD, + .colorWriteMask = + VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT, + }); +} + +GraphicsPipelineBuilder &GraphicsPipelineBuilder::add_push_constant_range(const VkShaderStageFlags shader_stage, + const std::uint32_t size, + const std::uint32_t offset) { + m_push_constant_ranges.emplace_back(VkPushConstantRange{ + .stageFlags = shader_stage, + .offset = offset, + .size = size, + }); + 
return *this; +} + +GraphicsPipelineBuilder &GraphicsPipelineBuilder::add_shader(const std::weak_ptr shader) { + m_shader_stages.push_back(make_info({ + .stage = shader.lock()->m_shader_stage, + .module = shader.lock()->m_shader_module, + .pName = "main", + + })); + return *this; +} + +GraphicsPipelineBuilder & +GraphicsPipelineBuilder::set_color_blend(const VkPipelineColorBlendStateCreateInfo &color_blend) { + m_color_blend_sci = color_blend; + return *this; +} + +GraphicsPipelineBuilder &GraphicsPipelineBuilder::set_color_blend_attachments( + const std::vector &attachments) { + m_color_blend_attachment_states = attachments; + return *this; +} + +GraphicsPipelineBuilder &GraphicsPipelineBuilder::set_culling_mode(const VkBool32 culling_enabled) { + if (culling_enabled == VK_FALSE) { + spdlog::warn("Culling is disabled, which could have negative effects on the performance!"); + } + m_rasterization_sci.cullMode = culling_enabled == VK_TRUE ? VK_CULL_MODE_BACK_BIT : VK_CULL_MODE_NONE; + return *this; +} + +GraphicsPipelineBuilder &GraphicsPipelineBuilder::set_depth_attachment_format(const VkFormat format) { + m_depth_attachment_format = format; + return *this; +} + +GraphicsPipelineBuilder & +GraphicsPipelineBuilder::set_depth_stencil(const VkPipelineDepthStencilStateCreateInfo &depth_stencil) { + m_depth_stencil_sci = depth_stencil; + return *this; +} + +GraphicsPipelineBuilder &GraphicsPipelineBuilder::set_stencil_attachment_format(const VkFormat format) { + m_stencil_attachment_format = format; + return *this; +} + +GraphicsPipelineBuilder & +GraphicsPipelineBuilder::set_descriptor_set_layout(const VkDescriptorSetLayout descriptor_set_layout) { + assert(descriptor_set_layout); + m_descriptor_set_layout = descriptor_set_layout; + return *this; +} + +GraphicsPipelineBuilder & +GraphicsPipelineBuilder::set_dynamic_states(const std::vector &dynamic_states) { + assert(!dynamic_states.empty()); + m_dynamic_states = dynamic_states; + return *this; +} + +GraphicsPipelineBuilder & +GraphicsPipelineBuilder::set_input_assembly(const VkPipelineInputAssemblyStateCreateInfo &input_assembly) { + m_input_assembly_sci = input_assembly; + return *this; +} + +GraphicsPipelineBuilder &GraphicsPipelineBuilder::set_line_width(const float width) { + m_rasterization_sci.lineWidth = width; + return *this; +} + +GraphicsPipelineBuilder &GraphicsPipelineBuilder::set_multisampling(const VkSampleCountFlagBits sample_count, + const std::optional min_sample_shading) { + m_multisample_sci.rasterizationSamples = sample_count; + if (min_sample_shading) { + m_multisample_sci.minSampleShading = min_sample_shading.value(); + } + return *this; +} + +GraphicsPipelineBuilder &GraphicsPipelineBuilder::set_pipeline_layout(const VkPipelineLayout layout) { + assert(layout); + m_pipeline_layout = layout; + return *this; +} + +GraphicsPipelineBuilder &GraphicsPipelineBuilder::set_primitive_topology(const VkPrimitiveTopology topology) { + m_input_assembly_sci.topology = topology; + return *this; +} + +GraphicsPipelineBuilder & +GraphicsPipelineBuilder::set_rasterization(const VkPipelineRasterizationStateCreateInfo &rasterization) { + m_rasterization_sci = rasterization; + return *this; +} + +GraphicsPipelineBuilder &GraphicsPipelineBuilder::set_scissor(const VkRect2D &scissor) { + m_scissors = {scissor}; + m_viewport_sci.scissorCount = 1; + m_viewport_sci.pScissors = m_scissors.data(); + return *this; +} + +GraphicsPipelineBuilder &GraphicsPipelineBuilder::set_scissor(const VkExtent2D &extent) { + return set_scissor({ + // Convert 
VkExtent2D to VkRect2D
+        .extent = extent,
+    });
+}
+
+GraphicsPipelineBuilder &
+GraphicsPipelineBuilder::set_tesselation_control_point_count(const std::uint32_t control_point_count) {
+    m_tesselation_sci.patchControlPoints = control_point_count;
+    return *this;
+}
+
+GraphicsPipelineBuilder &GraphicsPipelineBuilder::set_vertex_input_attributes(
+    const std::vector<VkVertexInputAttributeDescription> &descriptions) {
+    assert(!descriptions.empty());
+    m_vertex_input_attribute_descriptions = descriptions;
+    return *this;
+}
+
+GraphicsPipelineBuilder &
+GraphicsPipelineBuilder::set_vertex_input_bindings(const std::vector<VkVertexInputBindingDescription> &descriptions) {
+    assert(!descriptions.empty());
+    m_vertex_input_binding_descriptions = descriptions;
+    return *this;
+}
+
+GraphicsPipelineBuilder &GraphicsPipelineBuilder::set_viewport(const VkViewport &viewport) {
+    m_viewports = {viewport};
+    m_viewport_sci.viewportCount = 1;
+    m_viewport_sci.pViewports = m_viewports.data();
+    return *this;
+}
+
+GraphicsPipelineBuilder &GraphicsPipelineBuilder::set_viewport(const VkExtent2D &extent) {
+    return set_viewport({
+        // Convert VkExtent2D to VkViewport
+        .width = static_cast<float>(extent.width),
+        .height = static_cast<float>(extent.height),
+        .maxDepth = 1.0f,
+    });
+}
+
+GraphicsPipelineBuilder &GraphicsPipelineBuilder::set_wireframe(const VkBool32 wireframe) {
+    m_rasterization_sci.polygonMode = (wireframe == VK_TRUE) ? VK_POLYGON_MODE_LINE : VK_POLYGON_MODE_FILL;
+    return *this;
+}
+
+} // namespace inexor::vulkan_renderer::wrapper::pipelines
diff --git a/src/vulkan-renderer/wrapper/pipelines/pipeline_layout.cpp b/src/vulkan-renderer/wrapper/pipelines/pipeline_layout.cpp
new file mode 100644
index 000000000..1b1f76670
--- /dev/null
+++ b/src/vulkan-renderer/wrapper/pipelines/pipeline_layout.cpp
@@ -0,0 +1,44 @@
+#include "inexor/vulkan-renderer/wrapper/pipelines/pipeline_layout.hpp"
+
+#include "inexor/vulkan-renderer/exception.hpp"
+#include "inexor/vulkan-renderer/wrapper/device.hpp"
+#include "inexor/vulkan-renderer/wrapper/make_info.hpp"
+
+#include
+
+namespace inexor::vulkan_renderer::wrapper::pipelines {
+
+PipelineLayout::PipelineLayout(const Device &device,
+                               std::string name,
+                               const std::span<const VkDescriptorSetLayout> descriptor_set_layouts,
+                               const std::span<const VkPushConstantRange> push_constant_ranges)
+    : m_device(device), m_name(std::move(name)) {
+    if (m_name.empty()) {
+        throw std::invalid_argument("[PipelineLayout::PipelineLayout] Error: Parameter 'name' is empty!");
+    }
+
+    const auto pipeline_layout_ci = wrapper::make_info<VkPipelineLayoutCreateInfo>({
+        .setLayoutCount = static_cast<std::uint32_t>(descriptor_set_layouts.size()),
+        .pSetLayouts = descriptor_set_layouts.data(),
+        .pushConstantRangeCount = static_cast<std::uint32_t>(push_constant_ranges.size()),
+        .pPushConstantRanges = push_constant_ranges.data(),
+    });
+
+    if (const auto result = vkCreatePipelineLayout(m_device.device(), &pipeline_layout_ci, nullptr, &m_pipeline_layout);
+        result != VK_SUCCESS) {
+        throw VulkanException("Error: vkCreatePipelineLayout failed for pipeline layout " + m_name + "!", result);
+    }
+
+    m_device.set_debug_name(m_pipeline_layout, m_name);
+}
+
+PipelineLayout::PipelineLayout(PipelineLayout &&other) noexcept : m_device(other.m_device) {
+    m_pipeline_layout = std::exchange(other.m_pipeline_layout, VK_NULL_HANDLE);
+    m_name = std::move(other.m_name);
+}
+
+PipelineLayout::~PipelineLayout() {
+    vkDestroyPipelineLayout(m_device.device(), m_pipeline_layout, nullptr);
+}
+
+} // namespace inexor::vulkan_renderer::wrapper::pipelines
diff --git a/src/vulkan-renderer/wrapper/sampler.cpp b/src/vulkan-renderer/wrapper/sampler.cpp new file mode 100644 index
000000000..b675a2829 --- /dev/null +++ b/src/vulkan-renderer/wrapper/sampler.cpp @@ -0,0 +1,24 @@ +#include "inexor/vulkan-renderer/wrapper/sampler.hpp" + +#include "inexor/vulkan-renderer/exception.hpp" +#include "inexor/vulkan-renderer/wrapper/device.hpp" +#include "inexor/vulkan-renderer/wrapper/make_info.hpp" + +#include + +namespace inexor::vulkan_renderer::wrapper { + +Sampler::Sampler(const Device &device, std::string name, const VkSamplerCreateInfo &sampler_ci) + : m_device(device), m_name(std::move(name)) { + if (const auto result = vkCreateSampler(m_device.device(), &sampler_ci, nullptr, &m_sampler); + result != VK_SUCCESS) { + throw VulkanException("[Sampler::Sampler] Error: vkCreateSampler failed for sampler " + m_name + " !", result); + } + m_device.set_debug_name(m_sampler, m_name); +} + +Sampler::~Sampler() { + vkDestroySampler(m_device.device(), m_sampler, nullptr); +} + +} // namespace inexor::vulkan_renderer::wrapper diff --git a/src/vulkan-renderer/wrapper/semaphore.cpp b/src/vulkan-renderer/wrapper/semaphore.cpp deleted file mode 100644 index 8bc7f42b2..000000000 --- a/src/vulkan-renderer/wrapper/semaphore.cpp +++ /dev/null @@ -1,25 +0,0 @@ -#include "inexor/vulkan-renderer/wrapper/semaphore.hpp" - -#include "inexor/vulkan-renderer/wrapper/device.hpp" -#include "inexor/vulkan-renderer/wrapper/make_info.hpp" - -#include -#include - -namespace inexor::vulkan_renderer::wrapper { - -Semaphore::Semaphore(const Device &device, const std::string &name) : m_device(device), m_name(name) { - assert(!name.empty()); - device.create_semaphore(make_info(), &m_semaphore, m_name); -} - -Semaphore::Semaphore(Semaphore &&other) noexcept : m_device(other.m_device) { - m_semaphore = std::exchange(other.m_semaphore, VK_NULL_HANDLE); - m_name = std::move(other.m_name); -} - -Semaphore::~Semaphore() { - vkDestroySemaphore(m_device.device(), m_semaphore, nullptr); -} - -} // namespace inexor::vulkan_renderer::wrapper diff --git a/src/vulkan-renderer/wrapper/shader.cpp b/src/vulkan-renderer/wrapper/shader.cpp index 61b6c3898..8d718ad7b 100644 --- a/src/vulkan-renderer/wrapper/shader.cpp +++ b/src/vulkan-renderer/wrapper/shader.cpp @@ -1,43 +1,58 @@ #include "inexor/vulkan-renderer/wrapper/shader.hpp" -#include "inexor/vulkan-renderer/tools/file.hpp" +#include "inexor/vulkan-renderer/exception.hpp" #include "inexor/vulkan-renderer/wrapper/device.hpp" #include "inexor/vulkan-renderer/wrapper/make_info.hpp" #include +#include #include #include namespace inexor::vulkan_renderer::wrapper { -Shader::Shader(const Device &device, const VkShaderStageFlagBits type, const std::string &name, - const std::string &file_name, const std::string &entry_point) - : Shader(device, type, name, tools::read_file_binary_data(file_name), entry_point) {} - -Shader::Shader(const Device &device, const VkShaderStageFlagBits type, const std::string &name, - const std::vector &code, const std::string &entry_point) - : m_device(device), m_type(type), m_name(name), m_entry_point(entry_point) { - assert(device.device()); - assert(!name.empty()); - assert(!code.empty()); - assert(!entry_point.empty()); - - m_device.create_shader_module( - make_info({ - .codeSize = code.size(), - // When you perform a cast like this, you also need to ensure that the data satisfies the alignment - // requirements of std::uint32_t. Lucky for us, the data is stored in an std::vector where the default - // allocator already ensures that the data satisfies the worst case alignment requirements. 
- .pCode = reinterpret_cast(code.data()), // NOLINT - }), - &m_shader_module, m_name); +Shader::Shader(const Device &device, std::string name, const VkShaderStageFlagBits type, std::string file_name) + : m_device(device), m_name(std::move(name)), m_shader_stage(type), m_file_name(file_name) { + if (m_name.empty()) { + throw std::runtime_error("[Shader::Shader] Error: Parameter 'name' is empty!"); + } + // Open the file stream at the end of the file to read file size + std::ifstream shader_file(file_name.c_str(), std::ios::ate | std::ios::binary | std::ios::in); + if (!shader_file) { + throw std::runtime_error("[Shader::Shader] Error: Could not open shader file " + file_name + "!"); + } + + // Read the size of the file + const auto file_size = shader_file.tellg(); + // Create a vector of char (bytes) with the size of the shader file + // After the creation of the shader module, this is no longer needed, so it is not a problem that its object + // lifetime ends with the constructor's stack + std::vector shader_code(file_size); + // Set the file read position to the beginning of the file + shader_file.seekg(0); + // Read the entire shader file into memory + shader_file.read(shader_code.data(), file_size); + + const auto shader_module_ci = wrapper::make_info({ + .codeSize = shader_code.size(), + // When you perform a cast like this, you also need to ensure that the data satisfies the alignment + // requirements of std::uint32_t. Lucky for us, the data is stored in an std::vector where the + // default allocator already ensures that the data satisfies the worst case alignment requirements. + .pCode = reinterpret_cast(shader_code.data()), // NOLINT + }); + + if (const auto result = vkCreateShaderModule(m_device.device(), &shader_module_ci, nullptr, &m_shader_module); + result != VK_SUCCESS) { + throw VulkanException("Error: vkCreateShaderModule failed for shader " + file_name + "!", result); + } + m_device.set_debug_name(m_shader_module, file_name); } Shader::Shader(Shader &&other) noexcept : m_device(other.m_device) { - m_type = other.m_type; + m_shader_stage = other.m_shader_stage; m_name = std::move(other.m_name); - m_entry_point = std::move(other.m_entry_point); - m_shader_module = std::exchange(other.m_shader_module, nullptr); + m_file_name = std::move(other.m_file_name); + m_shader_module = std::exchange(other.m_shader_module, VK_NULL_HANDLE); } Shader::~Shader() { diff --git a/src/vulkan-renderer/wrapper/surface.cpp b/src/vulkan-renderer/wrapper/surface.cpp new file mode 100644 index 000000000..968da9ed9 --- /dev/null +++ b/src/vulkan-renderer/wrapper/surface.cpp @@ -0,0 +1,18 @@ +#include "inexor/vulkan-renderer/wrapper/surface.hpp" + +#include "inexor/vulkan-renderer/exception.hpp" + +namespace inexor::vulkan_renderer::wrapper { + +Surface::Surface(const VkInstance instance, GLFWwindow *window) : m_instance(instance), m_window(window) { + // NOTE: glfwCreateWindowSurface does indeed return a VkResult + if (const auto result = glfwCreateWindowSurface(m_instance, m_window, nullptr, &m_surface); result != VK_SUCCESS) { + throw VulkanException("[Surface::Surface] Error: glfwCreateWindowSurface failed!", result); + } +} + +Surface::~Surface() { + vkDestroySurfaceKHR(m_instance, m_surface, nullptr); +} + +} // namespace inexor::vulkan_renderer::wrapper diff --git a/src/vulkan-renderer/wrapper/swapchain.cpp b/src/vulkan-renderer/wrapper/swapchain.cpp index b7b12224f..6c634614e 100644 --- a/src/vulkan-renderer/wrapper/swapchain.cpp +++ b/src/vulkan-renderer/wrapper/swapchain.cpp @@ -5,7 +5,7 @@ 
#include "inexor/vulkan-renderer/vk_tools/representation.hpp" #include "inexor/vulkan-renderer/wrapper/device.hpp" #include "inexor/vulkan-renderer/wrapper/make_info.hpp" -#include "inexor/vulkan-renderer/wrapper/semaphore.hpp" +#include "inexor/vulkan-renderer/wrapper/window.hpp" #include @@ -17,14 +17,19 @@ namespace inexor::vulkan_renderer::wrapper { -Swapchain::Swapchain(Device &device, const VkSurfaceKHR surface, const std::uint32_t width, const std::uint32_t height, +Swapchain::Swapchain(Device &device, + std::string name, + const VkSurfaceKHR surface, + const Window &wnd, const bool vsync_enabled) : m_device(device), m_surface(surface), m_vsync_enabled(vsync_enabled) { - m_img_available = std::make_unique(m_device, "Swapchain image available"); - setup_swapchain(width, height, vsync_enabled); + m_name = std::move(name); + m_img_available = std::make_unique(m_device, "m_img_available"); + setup(wnd.width(), wnd.height(), vsync_enabled); } Swapchain::Swapchain(Swapchain &&other) noexcept : m_device(other.m_device) { + // TODO: Check me! m_swapchain = std::exchange(other.m_swapchain, VK_NULL_HANDLE); m_surface = std::exchange(other.m_surface, VK_NULL_HANDLE); m_surface_format = other.m_surface_format; @@ -33,21 +38,25 @@ Swapchain::Swapchain(Swapchain &&other) noexcept : m_device(other.m_device) { m_extent = other.m_extent; m_img_available = std::exchange(other.m_img_available, nullptr); m_vsync_enabled = other.m_vsync_enabled; + m_img_index = other.m_img_index; + m_name = std::move(other.m_name); } -std::uint32_t Swapchain::acquire_next_image_index(const std::uint64_t timeout) { - std::uint32_t img_index = 0; - if (const auto result = vkAcquireNextImageKHR(m_device.device(), m_swapchain, timeout, - *m_img_available->semaphore(), VK_NULL_HANDLE, &img_index); +void Swapchain::acquire_next_image_index(const std::uint64_t timeout) { + if (const auto result = vkAcquireNextImageKHR(m_device.device(), m_swapchain, timeout, m_img_available->m_semaphore, + VK_NULL_HANDLE, &m_img_index); result != VK_SUCCESS) { if (result == VK_SUBOPTIMAL_KHR) { // We need to recreate the swapchain - setup_swapchain(m_extent.width, m_extent.height, m_vsync_enabled); + setup(m_extent.width, m_extent.height, m_vsync_enabled); } else { throw VulkanException("Error: vkAcquireNextImageKHR failed!", result); } } - return img_index; + m_current_swapchain_img_view = m_img_views[m_img_index]; + m_device.set_debug_name(m_current_swapchain_img_view, "m_current_swapchain_img_view"); + m_current_swapchain_img = m_imgs[m_img_index]; + m_device.set_debug_name(m_current_swapchain_img, "m_current_swapchain_img"); } std::optional @@ -71,8 +80,10 @@ Swapchain::choose_composite_alpha(const VkCompositeAlphaFlagBitsKHR request_comp return std::nullopt; } -VkExtent2D Swapchain::choose_image_extent(const VkExtent2D &requested_extent, const VkExtent2D &min_extent, - const VkExtent2D &max_extent, const VkExtent2D ¤t_extent) { +VkExtent2D Swapchain::choose_image_extent(const VkExtent2D &requested_extent, + const VkExtent2D &min_extent, + const VkExtent2D &max_extent, + const VkExtent2D ¤t_extent) { if (current_extent.width == std::numeric_limits::max()) { return requested_extent; } @@ -143,6 +154,7 @@ Swapchain::choose_surface_format(const std::vector &availabl if (format != default_surface_format_priority_list.end()) { spdlog::trace("Selecting swapchain image format {}", vk_tools::as_string(*format)); chosen_format = *format; + break; } } // This can be std::nullopt @@ -163,16 +175,45 @@ std::vector Swapchain::get_swapchain_images() { 
return imgs; } -void Swapchain::present(const std::uint32_t img_index) { +void Swapchain::change_image_layout_to_prepare_for_rendering(const CommandBuffer &cmd_buf) { + if (m_prepared_for_rendering) { + // The image layout has already been changed + return; + } + cmd_buf.insert_debug_label("Swapchain: VK_IMAGE_LAYOUT_UNDEFINED -> VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL", + get_debug_label_color(DebugLabelColor::GREEN)); + // Prepare the swapchain image for rendering by changing the image layout from undefined layout (which is the layout + // it has after presenting) to color attachment optimal. + cmd_buf.change_image_layout(m_current_swapchain_img, VK_IMAGE_LAYOUT_UNDEFINED, + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); + m_prepared_for_rendering = true; +} + +void Swapchain::change_image_layout_to_prepare_for_presenting(const CommandBuffer &cmd_buf) { + if (!m_prepared_for_rendering) { + spdlog::warn("[Swapchain::change_image_layout_to_prepare_for_present] Warning: Swapchain image was not " + "prepared for rendering. Did you call this function without calling " + "change_image_layout_to_prepare_for_rendering?"); + return; + } + cmd_buf.insert_debug_label("Swapchain: VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL -> VK_IMAGE_LAYOUT_PRESENT_SRC_KHR", + get_debug_label_color(DebugLabelColor::GREEN)); + // Prepare the swapchain image for presenting + cmd_buf.change_image_layout(m_current_swapchain_img, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, + VK_IMAGE_LAYOUT_PRESENT_SRC_KHR); + m_prepared_for_rendering = false; +} + +void Swapchain::present() { const auto present_info = make_info({ .swapchainCount = 1, .pSwapchains = &m_swapchain, - .pImageIndices = &img_index, + .pImageIndices = &m_img_index, }); - if (const auto result = vkQueuePresentKHR(m_device.present_queue(), &present_info); result != VK_SUCCESS) { + if (const auto result = vkQueuePresentKHR(m_device.m_present_queue, &present_info); result != VK_SUCCESS) { if (result == VK_SUBOPTIMAL_KHR || result == VK_ERROR_OUT_OF_DATE_KHR) { // We need to recreate the swapchain - setup_swapchain(m_extent.width, m_extent.height, m_vsync_enabled); + setup(m_extent.width, m_extent.height, m_vsync_enabled); } else { // Exception is thrown if result is not VK_SUCCESS but also not VK_SUBOPTIMAL_KHR throw VulkanException("Error: vkQueuePresentKHR failed!", result); @@ -180,13 +221,22 @@ void Swapchain::present(const std::uint32_t img_index) { } } -void Swapchain::setup_swapchain(const std::uint32_t width, const std::uint32_t height, const bool vsync_enabled) { - const auto caps = m_device.get_surface_capabilities(m_surface); - m_surface_format = choose_surface_format(vk_tools::get_surface_formats(m_device.physical_device(), m_surface)); +void Swapchain::setup(const std::uint32_t width, const std::uint32_t height, const bool vsync_enabled) { + VkSurfaceCapabilitiesKHR caps{}; + if (const auto result = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(m_device.m_physical_device, m_surface, &caps); + result != VK_SUCCESS) { + throw VulkanException("Error: vkGetPhysicalDeviceSurfaceCapabilitiesKHR failed!", result); + } + + m_surface_format = choose_surface_format(vk_tools::get_surface_formats(m_device.m_physical_device, m_surface)); const VkExtent2D requested_extent{.width = width, .height = height}; static const std::vector default_present_mode_priorities{ - VK_PRESENT_MODE_MAILBOX_KHR, VK_PRESENT_MODE_FIFO_RELAXED_KHR, VK_PRESENT_MODE_FIFO_KHR}; + VK_PRESENT_MODE_IMMEDIATE_KHR, + VK_PRESENT_MODE_MAILBOX_KHR, + VK_PRESENT_MODE_FIFO_RELAXED_KHR, + 
VK_PRESENT_MODE_FIFO_KHR, + }; const auto composite_alpha = choose_composite_alpha(VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR, caps.supportedCompositeAlpha); @@ -216,7 +266,7 @@ void Swapchain::setup_swapchain(const std::uint32_t width, const std::uint32_t h ? VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR : caps.currentTransform, .compositeAlpha = composite_alpha.value(), - .presentMode = choose_present_mode(vk_tools::get_surface_present_modes(m_device.physical_device(), m_surface), + .presentMode = choose_present_mode(vk_tools::get_surface_present_modes(m_device.m_physical_device, m_surface), default_present_mode_priorities, vsync_enabled), .clipped = VK_TRUE, .oldSwapchain = old_swapchain, @@ -231,6 +281,8 @@ void Swapchain::setup_swapchain(const std::uint32_t width, const std::uint32_t h throw VulkanException("Error: vkCreateSwapchainKHR failed!", result); } + m_device.set_debug_name(m_swapchain, m_name); + // We need to destroy the old swapchain if specified if (old_swapchain != VK_NULL_HANDLE) { for (auto *const img_view : m_img_views) { @@ -274,7 +326,10 @@ void Swapchain::setup_swapchain(const std::uint32_t width, const std::uint32_t h }, }); - m_device.create_image_view(img_view_ci, &m_img_views[img_index], "swapchain image view"); + if (const auto result = vkCreateImageView(m_device.device(), &img_view_ci, nullptr, &m_img_views[img_index]); + result != VK_SUCCESS) { + throw VulkanException("Error: vkCreateImageView failed for swapchain image view!", result); + } } } diff --git a/src/vulkan-renderer/wrapper/fence.cpp b/src/vulkan-renderer/wrapper/synchronization/fence.cpp similarity index 50% rename from src/vulkan-renderer/wrapper/fence.cpp rename to src/vulkan-renderer/wrapper/synchronization/fence.cpp index bca524b62..8b8743b94 100644 --- a/src/vulkan-renderer/wrapper/fence.cpp +++ b/src/vulkan-renderer/wrapper/synchronization/fence.cpp @@ -1,5 +1,6 @@ -#include "inexor/vulkan-renderer/wrapper/fence.hpp" +#include "inexor/vulkan-renderer/wrapper/synchronization/fence.hpp" +#include "inexor/vulkan-renderer/exception.hpp" #include "inexor/vulkan-renderer/wrapper/device.hpp" #include "inexor/vulkan-renderer/wrapper/make_info.hpp" @@ -7,18 +8,20 @@ #include #include -namespace inexor::vulkan_renderer::wrapper { +namespace inexor::vulkan_renderer::wrapper::synchronization { Fence::Fence(const Device &device, const std::string &name, const bool in_signaled_state) : m_device(device), m_name(name) { assert(!name.empty()); - assert(device.device()); - m_device.create_fence( - make_info({ - .flags = static_cast(in_signaled_state ? VK_FENCE_CREATE_SIGNALED_BIT : 0), - }), - &m_fence, m_name); + const auto fence_ci = make_info({ + .flags = static_cast(in_signaled_state ? 
VK_FENCE_CREATE_SIGNALED_BIT : 0), + }); + + if (const auto result = vkCreateFence(m_device.device(), &fence_ci, nullptr, &m_fence); result != VK_SUCCESS) { + throw VulkanException("Error: vkCreateFence failed for fence " + name + "!", result); + } + m_device.set_debug_name(m_fence, m_name); } Fence::Fence(Fence &&other) noexcept : m_device(other.m_device) { @@ -30,11 +33,11 @@ Fence::~Fence() { vkDestroyFence(m_device.device(), m_fence, nullptr); } -void Fence::block(std::uint64_t timeout_limit) const { +void Fence::wait(const std::uint64_t timeout_limit) const { vkWaitForFences(m_device.device(), 1, &m_fence, VK_TRUE, timeout_limit); } -void Fence::reset() const { +void Fence::reset_fence() const { vkResetFences(m_device.device(), 1, &m_fence); } @@ -42,4 +45,4 @@ VkResult Fence::status() const { return vkGetFenceStatus(m_device.device(), m_fence); } -} // namespace inexor::vulkan_renderer::wrapper +} // namespace inexor::vulkan_renderer::wrapper::synchronization diff --git a/src/vulkan-renderer/wrapper/synchronization/semaphore.cpp b/src/vulkan-renderer/wrapper/synchronization/semaphore.cpp new file mode 100644 index 000000000..41cee8ed7 --- /dev/null +++ b/src/vulkan-renderer/wrapper/synchronization/semaphore.cpp @@ -0,0 +1,33 @@ +#include "inexor/vulkan-renderer/wrapper/synchronization/semaphore.hpp" + +#include "inexor/vulkan-renderer/exception.hpp" +#include "inexor/vulkan-renderer/wrapper/device.hpp" +#include "inexor/vulkan-renderer/wrapper/make_info.hpp" + +#include +#include + +namespace inexor::vulkan_renderer::wrapper::synchronization { + +Semaphore::Semaphore(const Device &device, const std::string &name) : m_device(device), m_name(name) { + assert(!name.empty()); + + const auto semaphore_ci = make_info(); + + if (const auto result = vkCreateSemaphore(m_device.device(), &semaphore_ci, nullptr, &m_semaphore); + result != VK_SUCCESS) { + throw VulkanException("Error: vkCreateSemaphore failed for " + m_name + " !", result); + } + m_device.set_debug_name(m_semaphore, m_name); +} + +Semaphore::Semaphore(Semaphore &&other) noexcept : m_device(other.m_device) { + m_semaphore = std::exchange(other.m_semaphore, VK_NULL_HANDLE); + m_name = std::move(other.m_name); +} + +Semaphore::~Semaphore() { + vkDestroySemaphore(m_device.device(), m_semaphore, nullptr); +} + +} // namespace inexor::vulkan_renderer::wrapper::synchronization diff --git a/src/vulkan-renderer/wrapper/uniform_buffer.cpp b/src/vulkan-renderer/wrapper/uniform_buffer.cpp deleted file mode 100644 index 240afd58c..000000000 --- a/src/vulkan-renderer/wrapper/uniform_buffer.cpp +++ /dev/null @@ -1,17 +0,0 @@ -#include "inexor/vulkan-renderer/wrapper/uniform_buffer.hpp" - -#include -#include - -namespace inexor::vulkan_renderer::wrapper { - -UniformBuffer::UniformBuffer(const Device &device, const std::string &name, const VkDeviceSize &buffer_size) - : GPUMemoryBuffer(device, name, buffer_size, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VMA_MEMORY_USAGE_CPU_TO_GPU) {} - -UniformBuffer::UniformBuffer(UniformBuffer &&other) noexcept : GPUMemoryBuffer(std::move(other)) {} - -void UniformBuffer::update(void *data, const std::size_t size) { - std::memcpy(m_allocation_info.pMappedData, data, size); -} - -} // namespace inexor::vulkan_renderer::wrapper diff --git a/src/vulkan-renderer/wrapper/window.cpp b/src/vulkan-renderer/wrapper/window.cpp index 25ef7115c..3ebe18bbf 100644 --- a/src/vulkan-renderer/wrapper/window.cpp +++ b/src/vulkan-renderer/wrapper/window.cpp @@ -7,13 +7,18 @@ namespace inexor::vulkan_renderer::wrapper { 
-Window::Window(const std::string &title, const std::uint32_t width, const std::uint32_t height, const bool visible, - const bool resizable, const Mode mode) +Window::Window(const std::string &title, + const std::uint32_t width, + const std::uint32_t height, + const bool visible, + const bool resizable, + const Mode mode) : m_width(width), m_height(height), m_mode(mode) { - assert(!title.empty()); - + if (title.empty()) { + throw std::invalid_argument("[Window::Window] Error: Parameter 'title' is empty!"); + } if (glfwInit() != GLFW_TRUE) { - throw std::runtime_error("Failed to initialise GLFW!"); + throw std::runtime_error("[Window::Window] Failed to initialise GLFW!"); } glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API); @@ -35,7 +40,7 @@ Window::Window(const std::string &title, const std::uint32_t width, const std::u m_window = glfwCreateWindow(static_cast(width), static_cast(height), title.c_str(), monitor, nullptr); if (m_window == nullptr) { - throw std::runtime_error("Error: glfwCreateWindow failed for window " + title + " !"); + throw std::runtime_error("[Window::Window] Error: glfwCreateWindow failed for window " + title + " !"); } } @@ -44,22 +49,20 @@ Window::~Window() { glfwTerminate(); } -void Window::wait_for_focus() { - int current_width = 0; - int current_height = 0; +void Window::get_framebuffer_size(int *width, int *height) { + glfwGetFramebufferSize(m_window, width, height); +} - do { - glfwWaitEvents(); - glfwGetFramebufferSize(m_window, ¤t_width, ¤t_height); - } while (current_width == 0 || current_height == 0); +void Window::poll() { + glfwPollEvents(); +} - m_width = current_width; - m_height = current_height; +bool Window::should_close() { + return glfwWindowShouldClose(m_window) == GLFW_TRUE; } -void Window::set_title(const std::string &title) { - assert(!title.empty()); - glfwSetWindowTitle(m_window, title.c_str()); +void Window::show() { + glfwShowWindow(m_window); } void Window::set_user_ptr(void *user_ptr) { @@ -86,16 +89,21 @@ void Window::set_mouse_scroll_callback(GLFWscrollfun mouse_scroll_callback) { glfwSetScrollCallback(m_window, mouse_scroll_callback); } -void Window::show() { - glfwShowWindow(m_window); +void Window::set_title(const std::string &title) { + glfwSetWindowTitle(m_window, title.c_str()); } -void Window::poll() { - glfwPollEvents(); -} +void Window::wait_for_focus() { + int current_width = 0; + int current_height = 0; -bool Window::should_close() { - return glfwWindowShouldClose(m_window) == GLFW_TRUE; + do { + glfwWaitEvents(); + glfwGetFramebufferSize(m_window, ¤t_width, ¤t_height); + } while (current_width == 0 || current_height == 0); + + m_width = current_width; + m_height = current_height; } } // namespace inexor::vulkan_renderer::wrapper diff --git a/src/vulkan-renderer/wrapper/window_surface.cpp b/src/vulkan-renderer/wrapper/window_surface.cpp deleted file mode 100644 index e5d2b0936..000000000 --- a/src/vulkan-renderer/wrapper/window_surface.cpp +++ /dev/null @@ -1,32 +0,0 @@ -#include "inexor/vulkan-renderer/wrapper/window_surface.hpp" - -#include "inexor/vulkan-renderer/exception.hpp" - -#include - -#include -#include - -namespace inexor::vulkan_renderer::wrapper { - -WindowSurface::WindowSurface(const VkInstance instance, GLFWwindow *window) : m_instance(instance) { - assert(instance); - assert(window); - - spdlog::trace("Creating window surface"); - - if (const auto result = glfwCreateWindowSurface(instance, window, nullptr, &m_surface); result != VK_SUCCESS) { - throw VulkanException("Error: glfwCreateWindowSurface failed!", 
result); - } -} - -WindowSurface::WindowSurface(WindowSurface &&other) noexcept { - m_instance = other.m_instance; - m_surface = std::exchange(other.m_surface, nullptr); -} - -WindowSurface::~WindowSurface() { - vkDestroySurfaceKHR(m_instance, m_surface, nullptr); -} - -} // namespace inexor::vulkan_renderer::wrapper
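
For reference, a minimal usage sketch of the GraphicsPipelineBuilder API added in this changeset. This is not part of the diff itself; it assumes a wrapper::Device instance `device`, two std::shared_ptr<Shader> objects `vertex_shader` and `fragment_shader`, a VkFormat `swapchain_format`, a VkExtent2D `window_extent`, and vertex input binding/attribute description vectors created elsewhere.

// Sketch only, under the assumptions named above (none of these names are introduced by this changeset).
using inexor::vulkan_renderer::wrapper::pipelines::GraphicsPipelineBuilder;

GraphicsPipelineBuilder builder(device);
auto pipeline = builder.add_shader(vertex_shader)
                    .add_shader(fragment_shader)
                    .add_color_attachment_format(swapchain_format)
                    .add_default_color_blend_attachment()
                    .set_vertex_input_bindings(vertex_bindings)
                    .set_vertex_input_attributes(vertex_attributes)
                    .set_viewport(window_extent)
                    .set_scissor(window_extent)
                    .build("example pipeline");
// build() resets the builder, so the same builder instance can be reused for the next pipeline.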