From a6cceeddff3ad82eb14c371d24f519dc12840157 Mon Sep 17 00:00:00 2001
From: Nick Bofferding
Date: Tue, 19 May 2020 11:19:09 -0500
Subject: [PATCH] Fix page coalescing for regions past initial cache allocation

Creates a new tracking structure in the kernel to register heap memory
ranges against. The kernel uses it to compute page coalesce boundaries
automatically, eliminating a bunch of cumbersome math.

Change-Id: Ia08446c50c49e6b87a71b1809da6d403e5b7e547
Reviewed-on: http://rchgit01.rchland.ibm.com/gerrit1/97650
Reviewed-by: Ilya Smirnov
Reviewed-by: Zachary Clark
Tested-by: Jenkins Server
Tested-by: FSP CI Jenkins
Tested-by: Jenkins OP Build CI
Tested-by: Jenkins Combined Simics CI
Reviewed-by: Daniel M Crowell
Reviewed-on: http://rchgit01.rchland.ibm.com/gerrit1/102730
Tested-by: Jenkins OP HW
Reviewed-by: Christian R Geddes
---
 src/include/kernel/pagemgr.H | 19 ++++++++++-
 src/kernel/pagemgr.C         | 64 ++++++++++++++++++++++--------------
 2 files changed, 58 insertions(+), 25 deletions(-)

diff --git a/src/include/kernel/pagemgr.H b/src/include/kernel/pagemgr.H
index d2500601f56..5a67e9e9755 100644
--- a/src/include/kernel/pagemgr.H
+++ b/src/include/kernel/pagemgr.H
@@ -5,7 +5,7 @@
 /*                                                                        */
 /* OpenPOWER HostBoot Project                                             */
 /*                                                                        */
-/* Contributors Listed Below - COPYRIGHT 2010,2019                        */
+/* Contributors Listed Below - COPYRIGHT 2010,2020                        */
 /* [+] International Business Machines Corp.                              */
 /*                                                                        */
 /*                                                                        */
@@ -33,6 +33,8 @@
 #include
 #include
 #include
+#include <array>
+#include <utility>
 
 /** @class PageManagerCore
  * @brief Manages the allocation of memory pages
@@ -115,6 +117,21 @@ class PageManagerCore
        size_t iv_available; //!< free pages
        Util::Lockfree::Stack<page_t> iv_heap[BUCKETS]; //!< The heap
 
+       //!< Starting address of a heap memory range
+       using rangeAddr_t = uint64_t;
+
+       //!< Size of a heap memory range
+       using rangeSizeBytes_t = uint64_t;
+
+       //!< Heap memory range tracking element
+       using range_t = std::pair<rangeAddr_t, rangeSizeBytes_t>;
+
+       //!< Max number of heap memory ranges tracked by the kernel
+       static constexpr size_t MAX_HEAP_RANGES = 4;
+
+       //!< Heap memory range tracker
+       std::array<range_t, MAX_HEAP_RANGES> iv_ranges;
+
        /**
         * Find a page of proper size
         * @param[in] the Size
diff --git a/src/kernel/pagemgr.C b/src/kernel/pagemgr.C
index bb2cf758126..9d2b744b8ba 100644
--- a/src/kernel/pagemgr.C
+++ b/src/kernel/pagemgr.C
@@ -5,7 +5,7 @@
 /*                                                                        */
 /* OpenPOWER HostBoot Project                                             */
 /*                                                                        */
-/* Contributors Listed Below - COPYRIGHT 2010,2019                        */
+/* Contributors Listed Below - COPYRIGHT 2010,2020                        */
 /* [+] International Business Machines Corp.                              */
 /*                                                                        */
 /*                                                                        */
@@ -40,7 +40,8 @@
 #include
 #include
 #include
-
+#include <array>
+#include <utility>
 
 size_t PageManager::cv_coalesce_count = 0;
 size_t PageManager::cv_low_page_count = -1;
@@ -64,6 +65,25 @@ void PageManagerCore::addMemory( size_t i_addr, size_t i_pageCount )
         page = (page_t*)((uint64_t)page + (1 << page_length)*PAGESIZE);
         length -= (1 << page_length);
     }
+
+    // Update set of registered heap memory ranges to support heap coalescing.
+    // It is a critical error for the last range to already be registered when
+    // this API is invoked.
+    kassert(!iv_ranges.back().first);
+    for(auto& range : iv_ranges)
+    {
+        // Range value of 0 indicates a free range to use, since Hostboot cannot
+        // ever start the heap at an address of 0.
+        if(!range.first)
+        {
+            range.first=i_addr;
+            range.second=i_pageCount*PAGESIZE;
+            break;
+        }
+
+        // Can't ever start a range at/below that of an existing range.
+        kassert(i_addr > range.first);
+    }
 
     __sync_add_and_fetch(&iv_available, i_pageCount);
 }
@@ -430,35 +450,31 @@ void PageManagerCore::coalesce( void )
 
         while(NULL != (p = pq.remove()))
         {
-            // p needs to be the even buddy to prevent merging of wrong block.
+            // p needs to be the even buddy to prevent merging of wrong blocks.
             // To determine this, get the index of the block as if the whole
             // page memory space were blocks of this size. Note: have to
             // take into account the page manager "hole" in the middle of the
             // initial memory allocation. Also have to ignore the OCC
-            // bootloader page at the start of the third memory range which
-            // accounts for the rest of the cache.
+            // bootloader page at the start of the third memory range (which
+            // accounts for the rest of the initial cache), and the SPTE entries
+            // at the start of the 4th memory range (which accounts for the rest
+            // of the Hostboot memory footprint).
             uint64_t p_idx = 0;
-            if(reinterpret_cast<uint64_t>(p) < VmmManager::pageTableOffset())
+            const auto addr = reinterpret_cast<uint64_t>(p);
+            bool found=false;
+            for(const auto& range : iv_ranges)
             {
-                p_idx = ( reinterpret_cast<uint64_t>(p)
-                          - VmmManager::endPreservedOffset())/
-                    ((1 << bucket)*PAGESIZE);
-            }
-            else if( reinterpret_cast<uint64_t>(p)
-                     < VmmManager::INITIAL_MEM_SIZE)
-            {
-                p_idx = ( reinterpret_cast<uint64_t>(p)
-                          - ( VmmManager::pageTableOffset()
-                              + VmmManager::PTSIZE) )/
-                    ((1 << bucket)*PAGESIZE);
-            }
-            else
-            {
-                p_idx = ( reinterpret_cast<uint64_t>(p)
-                          - ( VmmManager::INITIAL_MEM_SIZE
-                              + PAGESIZE) )/
-                    ((1 << bucket)*PAGESIZE);
+                if(   (addr >= range.first)
+                   && (addr < (range.first + range.second)) )
+                {
+                    p_idx = (addr - range.first) / ((1 << bucket)*PAGESIZE);
+                    found=true;
+                    break;
+                }
             }
 
+            // Critical error if we didn't map into a known/registered address
+            // range.
+            kassert(found);
             if(0 != (p_idx % 2)) // odd index
             {
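
The two loops the patch adds are easier to see in isolation. The sketch
below is not part of the patch; it is a minimal standalone C++ model of
the same logic, where register_range() mirrors the registration loop in
PageManagerCore::addMemory() and buddy_index() mirrors the lookup in
coalesce(): a block's index is computed relative to the start of
whichever registered range contains it, and only even indices may merge
with the buddy that follows. The names register_range, buddy_index, and
g_ranges are illustrative stand-ins, and the 4 KiB PAGESIZE here is an
assumption, not the kernel's definition.

#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <utility>

constexpr uint64_t PAGESIZE = 4096;   // assumed page size for illustration
constexpr size_t MAX_HEAP_RANGES = 4; // mirrors the new header constant

// Mirrors iv_ranges: (start address, size in bytes) pairs; 0 = free slot.
using range_t = std::pair<uint64_t, uint64_t>;
std::array<range_t, MAX_HEAP_RANGES> g_ranges{};

// Mirrors the registration loop added to PageManagerCore::addMemory().
void register_range(uint64_t addr, uint64_t bytes)
{
    // Fatal if every slot is already taken (the kassert on the last slot).
    assert(g_ranges.back().first == 0);
    for (auto& range : g_ranges)
    {
        if (range.first == 0)       // start address of 0 marks a free slot
        {
            range = {addr, bytes};
            return;
        }
        // New ranges must start above all previously registered ones.
        assert(addr > range.first);
    }
}

// Mirrors the coalesce() lookup: index of a (1 << bucket)-page block,
// measured from the start of its own range. Returns false if the address
// maps into no registered range (the kernel version kassert()s instead).
bool buddy_index(uint64_t addr, size_t bucket, uint64_t& o_idx)
{
    for (const auto& range : g_ranges)
    {
        if (addr >= range.first && addr < range.first + range.second)
        {
            o_idx = (addr - range.first) / ((1ull << bucket) * PAGESIZE);
            return true;
        }
    }
    return false;
}

int main()
{
    register_range(0x200000, 64 * PAGESIZE);  // hypothetical heap region

    // Third 8-page block (bucket 3) of the range: index 2, an even buddy,
    // so it is allowed to merge with the block at index 3.
    uint64_t idx = 0;
    const bool found = buddy_index(0x200000 + 2 * 8 * PAGESIZE, 3, idx);
    printf("found=%d idx=%llu (%s buddy)\n", found, (unsigned long long)idx,
           (idx % 2) ? "odd" : "even");
    return 0;
}

Registering (start, size) pairs makes the buddy arithmetic uniform
across however many discontiguous regions are added later, which is
exactly what the old three-way if/else over hardcoded VmmManager offsets
could not do.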