diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 02a27a35483a5..fa3be8cd44290 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -165,6 +165,8 @@ libdevice/nativecpu* @intel/dpcpp-nativecpu-reviewers
 sycl/include/sycl/ext/oneapi/experimental/graph.hpp @intel/sycl-graphs-reviewers
 sycl/source/detail/graph_impl.cpp @intel/sycl-graphs-reviewers
 sycl/source/detail/graph_impl.hpp @intel/sycl-graphs-reviewers
+sycl/source/detail/graph_memory_pool.hpp @intel/sycl-graphs-reviewers
+sycl/source/detail/graph_memory_pool.cpp @intel/sycl-graphs-reviewers
 sycl/unittests/Extensions/CommandGraph/ @intel/sycl-graphs-reviewers
 sycl/test-e2e/Graph @intel/sycl-graphs-reviewers
 sycl/doc/design/CommandGraph.md @intel/sycl-graphs-reviewers
diff --git a/sycl/source/detail/graph_memory_pool.cpp b/sycl/source/detail/graph_memory_pool.cpp
index a036efd3a287c..da1d32d4f3cf7 100644
--- a/sycl/source/detail/graph_memory_pool.cpp
+++ b/sycl/source/detail/graph_memory_pool.cpp
@@ -90,9 +90,9 @@ graph_mem_pool::tryReuseExistingAllocation(
     const std::vector<std::shared_ptr<node_impl>> &DepNodes) {
   // If we have no dependencies this is a no-op because allocations must connect
   // to a free node for reuse to be possible.
-  // if (DepNodes.empty()) {
-  //   return std::nullopt;
-  // }
+  if (DepNodes.empty()) {
+    return std::nullopt;
+  }
 
   std::vector<alloc_info> CompatibleAllocs;
   // Compatible allocs can only be as big as MFreeAllocations
@@ -127,26 +127,22 @@ graph_mem_pool::tryReuseExistingAllocation(
     NodesToCheck.push(Dep);
   }
 
-  std::optional<alloc_info> AllocInfo = {};
-
   // Called when traversing over nodes to check if the current node is a free
   // node for one of the available allocations. If it is we populate AllocInfo
   // with the allocation to be reused.
   auto CheckNodeEqual =
-      [&CompatibleAllocs,
-       &AllocInfo](const std::shared_ptr<node_impl> &CurrentNode) -> bool {
+      [&CompatibleAllocs](const std::shared_ptr<node_impl> &CurrentNode)
+      -> std::optional<alloc_info> {
     for (auto &Alloc : CompatibleAllocs) {
       const auto &AllocFreeNode = Alloc.LastFreeNode;
       // Compare control blocks without having to lock AllocFreeNode to check
       // for node equality
       if (!CurrentNode.owner_before(AllocFreeNode) &&
          !AllocFreeNode.owner_before(CurrentNode)) {
-        Alloc.LastFreeNode.reset();
-        AllocInfo = Alloc;
-        return true;
+        return Alloc;
       }
     }
-    return false;
+    return std::nullopt;
   };
 
   while (!NodesToCheck.empty()) {
@@ -161,11 +157,19 @@ graph_mem_pool::tryReuseExistingAllocation(
     // for any of the allocations which are free for reuse. We should not bother
     // checking nodes that are not free nodes, so we continue and check their
     // predecessors.
-    if (CurrentNode->MNodeType == node_type::async_free &&
-        CheckNodeEqual(CurrentNode)) {
-      // If we found an allocation AllocInfo has already been populated in
-      // CheckNodeEqual(), so we simply break out of the loop
-      break;
+    if (CurrentNode->MNodeType == node_type::async_free) {
+      std::optional<alloc_info> AllocFound = CheckNodeEqual(CurrentNode);
+      if (AllocFound) {
+        // Reset visited nodes tracking
+        MGraph.resetNodeVisitedEdges();
+        // Reset last free node for allocation
+        MAllocations.at(AllocFound.value().Ptr).LastFreeNode.reset();
+        // Remove found allocation from the free list
+        MFreeAllocations.erase(std::find(MFreeAllocations.begin(),
+                                         MFreeAllocations.end(),
+                                         AllocFound.value().Ptr));
+        return AllocFound;
+      }
     }
 
     // Add CurrentNode predecessors to queue
@@ -176,16 +180,8 @@ graph_mem_pool::tryReuseExistingAllocation(
     // Mark node as visited
     CurrentNode->MTotalVisitedEdges = 1;
   }
 
-  // Reset visited nodes tracking
-  MGraph.resetNodeVisitedEdges();
-  // If we found an allocation, remove it from the free list.
-  if (AllocInfo) {
-    MFreeAllocations.erase(std::find(MFreeAllocations.begin(),
-                                     MFreeAllocations.end(),
-                                     AllocInfo.value().Ptr));
-  }
-  return AllocInfo;
+  return std::nullopt;
 }
 
 void graph_mem_pool::markAllocationAsAvailable(