Fix page coalescing for regions past initial cache allocation
Creates a new tracking structure in the kernel for registering heap
memory ranges; the page coalesce boundaries are then computed
automatically from the registered ranges, eliminating a bunch of
cumbersome math.

Change-Id: Ia08446c50c49e6b87a71b1809da6d403e5b7e547
Reviewed-on: http://rchgit01.rchland.ibm.com/gerrit1/97650
Reviewed-by: Ilya Smirnov <ismirno@us.ibm.com>
Reviewed-by: Zachary Clark <zach@ibm.com>
Tested-by: Jenkins Server <pfd-jenkins+hostboot@us.ibm.com>
Tested-by: FSP CI Jenkins <fsp-CI-jenkins+hostboot@us.ibm.com>
Tested-by: Jenkins OP Build CI <op-jenkins+hostboot@us.ibm.com>
Tested-by: Jenkins Combined Simics CI <combined-simics-ci+hostboot@us.ibm.com>
Reviewed-by: Daniel M Crowell <dcrowell@us.ibm.com>
Reviewed-on: http://rchgit01.rchland.ibm.com/gerrit1/102730
Tested-by: Jenkins OP HW <op-hw-jenkins+hostboot@us.ibm.com>
Reviewed-by: Christian R Geddes <crgeddes@us.ibm.com>
Nick Bofferding authored and crgeddes committed Aug 19, 2020
1 parent 5d53ef8 commit a6cceed
Showing 2 changed files with 58 additions and 25 deletions.
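
In outline, the commit replaces hard-coded boundary arithmetic with a table of registered heap ranges: addMemory() records each (start address, size) pair, and coalesce() computes a block's buddy index relative to the start of whichever range contains it. The sketch below distills that idea into standalone C++; the names, the assert() stand-in for kassert(), and the constants are illustrative simplifications, not the Hostboot sources themselves.

```cpp
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <utility>

constexpr uint64_t PAGESIZE        = 4096; // illustrative page size
constexpr size_t   MAX_HEAP_RANGES = 4;    // matches the header's cap

// (start address, size in bytes); a zero start address marks a free slot.
using range_t = std::pair<uint64_t, uint64_t>;
std::array<range_t, MAX_HEAP_RANGES> ranges{}; // value-initialized to zeros

// Mirror of the bookkeeping added to addMemory(): claim the first free slot.
void registerRange(uint64_t i_addr, uint64_t i_sizeBytes)
{
    assert(ranges.back().first == 0); // table must not already be full
    for (auto& range : ranges)
    {
        if (range.first == 0)
        {
            range = { i_addr, i_sizeBytes };
            return;
        }
        assert(i_addr > range.first); // ranges must arrive in ascending order
    }
}

// Mirror of the lookup added to coalesce(): index of the (1 << bucket)-page
// block within its containing range.  An even index means the block is the
// low ("even") buddy and may absorb its odd neighbor.
uint64_t blockIndex(uint64_t addr, size_t bucket)
{
    for (const auto& range : ranges)
    {
        if (addr >= range.first && addr < range.first + range.second)
        {
            return (addr - range.first) / ((1ull << bucket) * PAGESIZE);
        }
    }
    assert(false && "address not inside any registered range");
    return 0;
}
```

Because every index is computed from its own range's start, supporting a new heap region is just one more registerRange() call; the old code instead needed a bespoke if/else branch, with its own offset math, for every region.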
19 changes: 18 additions & 1 deletion src/include/kernel/pagemgr.H
@@ -5,7 +5,7 @@
 /* */
 /* OpenPOWER HostBoot Project */
 /* */
-/* Contributors Listed Below - COPYRIGHT 2010,2019 */
+/* Contributors Listed Below - COPYRIGHT 2010,2020 */
 /* [+] International Business Machines Corp. */
 /* */
 /* */
@@ -33,6 +33,8 @@
 #include <kernel/console.H>
 #include <util/align.H>
 #include <sys/vfs.h>
+#include <utility>
+#include <array>
 
 /** @class PageManagerCore
  *  @brief Manages the allocation of memory pages
@@ -115,6 +117,21 @@ class PageManagerCore
         size_t iv_available; //!< free pages
         Util::Lockfree::Stack<page_t> iv_heap[BUCKETS]; //!< The heap
 
+        //!< Starting address of a heap memory range
+        using rangeAddr_t = uint64_t;
+
+        //!< Size of a heap memory range
+        using rangeSizeBytes_t = uint64_t;
+
+        //!< Heap memory range tracking element
+        using range_t = std::pair<rangeAddr_t,rangeSizeBytes_t>;
+
+        //!< Max number of heap memory ranges tracked by the kernel
+        static constexpr size_t MAX_HEAP_RANGES = 4;
+
+        //!< Heap memory range tracker
+        std::array<range_t,MAX_HEAP_RANGES> iv_ranges;
+
         /**
          * Find a page of proper size
          * @param[in] the Size
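
Two conventions in these additions carry the design: a range whose start address is 0 marks an unused slot (Hostboot can never start the heap at address 0), and the fixed-size std::array caps the kernel's bookkeeping at four ranges with no dynamic allocation. The sentinel test presumes iv_ranges starts out zeroed, which holds for an object in zero-initialized static storage; a standalone version should value-initialize explicitly, as in this hypothetical equivalent:

```cpp
#include <array>
#include <cstdint>
#include <utility>

using range_t = std::pair<uint64_t, uint64_t>; // as in the header above

// Hypothetical standalone declaration: the trailing {} value-initializes
// every element to {0, 0}, so testing "first == 0" reliably finds free slots.
std::array<range_t, 4> iv_ranges{};
```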
64 changes: 40 additions & 24 deletions src/kernel/pagemgr.C
@@ -5,7 +5,7 @@
 /* */
 /* OpenPOWER HostBoot Project */
 /* */
-/* Contributors Listed Below - COPYRIGHT 2010,2019 */
+/* Contributors Listed Below - COPYRIGHT 2010,2020 */
 /* [+] International Business Machines Corp. */
 /* */
 /* */
@@ -40,7 +40,8 @@
 #include <kernel/misc.H>
 #include <usr/debugpointers.H>
 #include <kernel/cpumgr.H>
-
+#include <usr/vmmconst.h>
+#include <kernel/spte.H>
 
 size_t PageManager::cv_coalesce_count = 0;
 size_t PageManager::cv_low_page_count = -1;
@@ -64,6 +65,25 @@ void PageManagerCore::addMemory( size_t i_addr, size_t i_pageCount )
         page = (page_t*)((uint64_t)page + (1 << page_length)*PAGESIZE);
         length -= (1 << page_length);
     }
+
+    // Update set of registered heap memory ranges to support heap coalescing.
+    // It is a critical error for the last range to already be registered when
+    // this API is invoked.
+    kassert(!iv_ranges.back().first);
+    for(auto& range : iv_ranges)
+    {
+        // Range value of 0 indicates a free range to use, since Hostboot cannot
+        // ever start the heap at an address of 0.
+        if(!range.first)
+        {
+            range.first=i_addr;
+            range.second=i_pageCount*PAGE_SIZE;
+            break;
+        }
+
+        // Can't ever start a range at/below that of an existing range.
+        kassert(i_addr > range.first);
+    }
     __sync_add_and_fetch(&iv_available, i_pageCount);
 }

@@ -430,35 +450,31 @@ void PageManagerCore::coalesce( void )
 
         while(NULL != (p = pq.remove()))
         {
-            // p needs to be the even buddy to prevent merging of wrong block.
+            // p needs to be the even buddy to prevent merging of wrong blocks.
             // To determine this, get the index of the block as if the whole
             // page memory space were blocks of this size. Note: have to
             // take into account the page manager "hole" in the middle of the
             // initial memory allocation. Also have to ignore the OCC
-            // bootloader page at the start of the third memory range which
-            // accounts for the rest of the cache.
+            // bootloader page at the start of the third memory range (which
+            // accounts for the rest of the initial cache), and the SPTE entries
+            // at the start of the 4th memory range (which accounts for the rest
+            // of the Hostboot memory footprint).
             uint64_t p_idx = 0;
-            if(reinterpret_cast<uint64_t>(p) < VmmManager::pageTableOffset())
+            const auto addr = reinterpret_cast<uint64_t>(p);
+            bool found=false;
+            for(const auto& range : iv_ranges)
             {
-                p_idx = ( reinterpret_cast<uint64_t>(p)
-                          - VmmManager::endPreservedOffset())/
-                    ((1 << bucket)*PAGESIZE);
-            }
-            else if( reinterpret_cast<uint64_t>(p)
-                     < VmmManager::INITIAL_MEM_SIZE)
-            {
-                p_idx = ( reinterpret_cast<uint64_t>(p)
-                          - ( VmmManager::pageTableOffset()
-                              + VmmManager::PTSIZE) )/
-                    ((1 << bucket)*PAGESIZE);
-            }
-            else
-            {
-                p_idx = ( reinterpret_cast<uint64_t>(p)
-                          - ( VmmManager::INITIAL_MEM_SIZE
-                              + PAGESIZE) )/
-                    ((1 << bucket)*PAGESIZE);
+                if(    (addr >= range.first)
+                    && (addr < (range.first + range.second)) )
+                {
+                    p_idx = (addr - range.first) / ((1 << bucket)*PAGESIZE);
+                    found=true;
+                    break;
+                }
             }
+            // Critical error if we didn't map into a known/registered address
+            // range.
+            kassert(found);
 
             if(0 != (p_idx % 2)) // odd index
             {
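
To see what the per-range index buys, take a hypothetical range registered at 0x40000000 with 4 KiB pages and bucket = 2, i.e. blocks of (1 << 2) = 4 pages, 16 KiB each. Computing the index from the range's own start makes the even/odd parity test meaningful within that range; an index taken from a single global base could flip the parity of every block in a range that happens to start at an odd multiple of the block size, which is exactly the per-region offset math the deleted if/else chain existed to patch up. A small standalone illustration (the addresses are made up):

```cpp
#include <cstdint>
#include <cstdio>

int main()
{
    constexpr uint64_t PAGESIZE   = 4096;       // illustrative page size
    constexpr uint64_t rangeStart = 0x40000000; // hypothetical registered range
    constexpr uint64_t bucket     = 2;          // blocks of (1 << 2) = 4 pages
    constexpr uint64_t blockBytes = (1ull << bucket) * PAGESIZE; // 16 KiB

    // Two adjacent blocks inside the range:
    const uint64_t addrs[] = { rangeStart, rangeStart + blockBytes };

    for (uint64_t addr : addrs)
    {
        const uint64_t idx = (addr - rangeStart) / blockBytes;
        // Only the even-indexed block is the low half of the next-larger
        // block, so only it may absorb its odd buddy during coalescing.
        printf("addr=0x%llx idx=%llu -> %s buddy\n",
               (unsigned long long)addr, (unsigned long long)idx,
               (idx % 2) ? "odd" : "even");
    }
    return 0;
}
```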
