Remove half cache init and do it in the page manager
Change-Id: I3e870c9b50d13704c4c88adfc96e5943cff9dae2
RTC: 175114
Reviewed-on: http://ralgit01.raleigh.ibm.com/gerrit1/42153
Reviewed-by: Michael Baiocchi <mbaiocch@us.ibm.com>
Tested-by: Jenkins Server <pfd-jenkins+hostboot@us.ibm.com>
Reviewed-by: Martin Gloff <mgloff@us.ibm.com>
Tested-by: Jenkins OP Build CI <op-jenkins+hostboot@us.ibm.com>
Tested-by: Jenkins OP HW <op-hw-jenkins+hostboot@us.ibm.com>
Tested-by: FSP CI Jenkins <fsp-CI-jenkins+hostboot@us.ibm.com>
Reviewed-by: Daniel M. Crowell <dcrowell@us.ibm.com>
Stephen Cprek authored and dcrowell77 committed Jul 20, 2017
1 parent e43ee27 commit 0b68011
Showing 11 changed files with 66 additions and 174 deletions.
20 changes: 4 additions & 16 deletions src/build/debug/Hostboot/Dump.pm
@@ -37,7 +37,6 @@ use constant MEMSTATE_HALF_CACHE => 0x4;
use constant MEMSTATE_REDUCED_CACHE => 0x8;
use constant MEMSTATE_FULL_CACHE => 0xa;
use constant MEMSTATE_MS_32MEG => 0x20;
use constant MEMSTATE_PRE_SECURE_BOOT => 0xff;

use constant _KB => 1024;
use constant _MB => 1024 * 1024;
@@ -50,16 +49,6 @@ our %memory_maps = (
# Secureboot Header.
[ 0, (512 - 4) * _KB
],
MEMSTATE_PRE_SECURE_BOOT() =>
# Until the early secureboot operations have been done, we can
# only access the top 512k of each 1MB column. Need to avoid
# the hole for the MBOX DMA buffers (64K @ 3MB + 256K).
[ (512 - 4) * _KB, 4 * _KB,
1 * _MB, 512 * _KB,
2 * _MB, 512 * _KB,
3 * _MB, 256 * _KB,
3 * _MB + (256 + 64) * _KB, (256 - 64) * _KB
],
MEMSTATE_HALF_CACHE() =>
# All of the first 4MB can now be read (except reserved MBOX).
[ 512 * _KB, 512 * _KB,
@@ -88,16 +77,15 @@ our %memory_maps = (
# Map the current state to the combined states available.
our %memory_states = (
MEMSTATE_NO_MEM() => [ MEMSTATE_NO_MEM ],
MEMSTATE_PRE_SECURE_BOOT() => [ MEMSTATE_NO_MEM, MEMSTATE_PRE_SECURE_BOOT ],
MEMSTATE_HALF_CACHE() => [ MEMSTATE_NO_MEM, MEMSTATE_PRE_SECURE_BOOT,
MEMSTATE_HALF_CACHE() => [ MEMSTATE_NO_MEM,
MEMSTATE_HALF_CACHE ],
MEMSTATE_REDUCED_CACHE() =>
[ MEMSTATE_NO_MEM, MEMSTATE_PRE_SECURE_BOOT,
[ MEMSTATE_NO_MEM,
MEMSTATE_HALF_CACHE, MEMSTATE_REDUCED_CACHE ],
MEMSTATE_FULL_CACHE() => [ MEMSTATE_NO_MEM, MEMSTATE_PRE_SECURE_BOOT,
MEMSTATE_FULL_CACHE() => [ MEMSTATE_NO_MEM,
MEMSTATE_HALF_CACHE, MEMSTATE_REDUCED_CACHE,
MEMSTATE_FULL_CACHE ],
MEMSTATE_MS_32MEG() => [ MEMSTATE_NO_MEM, MEMSTATE_PRE_SECURE_BOOT,
MEMSTATE_MS_32MEG() => [ MEMSTATE_NO_MEM,
MEMSTATE_HALF_CACHE, MEMSTATE_REDUCED_CACHE,
MEMSTATE_FULL_CACHE, MEMSTATE_MS_32MEG ]
);
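The %memory_states table above lets the dump tool take the union of every map that applies at or below the current state. Below is a minimal sketch of that lookup, written in C++ for consistency with the kernel sources in this change (the tool itself is Perl); MemState, memoryMaps, memoryStates and accessibleRanges are illustrative names, and only two states are filled in.

#include <cstdint>
#include <map>
#include <vector>

enum MemState : uint32_t
{
    NO_MEM        = 0x0,
    HALF_CACHE    = 0x4,
    REDUCED_CACHE = 0x8,
    FULL_CACHE    = 0xA,
    MS_32MEG      = 0x20,
};

struct Range { uint64_t offset; uint64_t size; };

// Per-state ranges, mirroring %memory_maps (offset/size pairs in bytes).
static const std::map<MemState, std::vector<Range>> memoryMaps =
{
    { NO_MEM,     { { 0, (512 - 4) * 1024ull } } },
    { HALF_CACHE, { { 512 * 1024ull, 512 * 1024ull } } },  // further entries elided
};

// Mirror of %memory_states: each state pulls in every map below it.
static const std::map<MemState, std::vector<MemState>> memoryStates =
{
    { NO_MEM,     { NO_MEM } },
    { HALF_CACHE, { NO_MEM, HALF_CACHE } },
};

// Union of all ranges readable in the given state.
std::vector<Range> accessibleRanges(MemState state)
{
    std::vector<Range> result;
    for (MemState s : memoryStates.at(state))
    {
        const auto& ranges = memoryMaps.at(s);
        result.insert(result.end(), ranges.begin(), ranges.end());
    }
    return result;
}

With PRE_SECURE_BOOT gone, HALF_CACHE now combines directly with NO_MEM, which is exactly the simplification made to %memory_states above.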
6 changes: 4 additions & 2 deletions src/include/bootloader/bootloaderif.H
@@ -40,11 +40,13 @@ namespace Bootloader{
// Size of exception vector reserved space at start of the HBBL section
#define HBBL_EXCEPTION_VECTOR_SIZE (12 * KILOBYTE)

#define MAX_HBB_SIZE (512 * KILOBYTE)

// The Bootloader to Hostboot communication area exists after the working HBB
#ifdef BOOTLOADER
#define BLTOHB_COMM_DATA_ADDR (getHRMOR() - ( 2*MEGABYTE) + 512*KILOBYTE)
#define BLTOHB_COMM_DATA_ADDR (getHRMOR() - ( 2*MEGABYTE) + MAX_HBB_SIZE)
#else
#define BLTOHB_COMM_DATA_ADDR (getHRMOR() + 512*KILOBYTE)
#define BLTOHB_COMM_DATA_ADDR (getHRMOR() + MAX_HBB_SIZE)
#endif

// Expected BlToHbData eye catch
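The new MAX_HBB_SIZE constant names the 512KB that the comm-area math previously hard-coded. A minimal sketch of the two address computations follows, treating HRMOR as a plain 64-bit value; blCommAddr, hbCommAddr and the sample HRMOR in the static_assert are illustrative only, not Hostboot symbols.

#include <cstdint>

constexpr uint64_t KILOBYTE     = 1024;
constexpr uint64_t MEGABYTE     = 1024 * KILOBYTE;
constexpr uint64_t MAX_HBB_SIZE = 512 * KILOBYTE;

// Bootloader build: back up 2MB from the bootloader's HRMOR, then step
// over the working HBB image.
constexpr uint64_t blCommAddr(uint64_t hrmor)
{
    return hrmor - 2 * MEGABYTE + MAX_HBB_SIZE;
}

// Hostboot build: the comm area starts immediately after the HBB image.
constexpr uint64_t hbCommAddr(uint64_t hrmor)
{
    return hrmor + MAX_HBB_SIZE;
}

static_assert(hbCommAddr(0x08000000ull) == 0x08000000ull + 512 * KILOBYTE,
              "comm area sits one HBB image above HRMOR");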
3 changes: 1 addition & 2 deletions src/include/kernel/memstate.H
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HostBoot Project */
/* */
/* Contributors Listed Below - COPYRIGHT 2013,2016 */
/* Contributors Listed Below - COPYRIGHT 2013,2017 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -63,7 +63,6 @@ namespace KernelMemState
REDUCED_CACHE = 0x00000008,
FULL_CACHE = 0x0000000A,
MS_32MEG = 0x00000020,
PRE_SECURE_BOOT = 0x000000FF,
};

struct mem_location
9 changes: 0 additions & 9 deletions src/include/kernel/misc.H
@@ -152,15 +152,6 @@ namespace KernelMisc

};

/** @fn expand_half_cache
*
* @brief Expands the image footprint from a quarter-cache (top 512k of
* each cache column) to a half-cache (full 1mb of each column).
*
* @return 0 or -errno
*/
int expand_half_cache();

/** @fn expand_full_cache
*
* @brief Expands the image footprint from a half-cache to full-cache
14 changes: 11 additions & 3 deletions src/include/kernel/vmmmgr.H
@@ -47,18 +47,16 @@ class VmmManager
// Place the page table at the top side of the cache, 256k in size.
INITIAL_PT_OFFSET = INITIAL_MEM_SIZE - 1*MEGABYTE,
PTSIZE = 256*KILOBYTE,
HTABORG_OFFSET = INITIAL_PT_OFFSET,

// Put the DMA Pages just after the Page Table
MBOX_DMA_PAGES = 64, // must be <= 64
MBOX_DMA_PAGESIZE = (1 * KILOBYTE),
MBOX_DMA_ADDR = INITIAL_PT_OFFSET + PTSIZE,
MBOX_DMA_SIZE = MBOX_DMA_PAGES * MBOX_DMA_PAGESIZE,

/** We need to reserve a hole in heap memory for the page table,
* etc. Use these constants to define the hole. */
FIRST_RESERVED_PAGE = INITIAL_PT_OFFSET,
END_RESERVED_PAGE = INITIAL_PT_OFFSET + PTSIZE + MBOX_DMA_SIZE,
END_RESERVED_PAGE = INITIAL_PT_OFFSET + PTSIZE,

BLTOHB_DATA_START = END_RESERVED_PAGE,

@@ -215,6 +213,13 @@
*/
static int mmLinearMap(void *i_paddr, uint64_t i_size);

/** @fn pageTableOffset()
* @brief Gets starting address of Page Table
*
* @return uint64_t - starting address of Page Table
*/
static uint64_t pageTableOffset();

protected:
VmmManager();
~VmmManager() {};
@@ -274,6 +279,9 @@ class VmmManager
/** See mmLinearMap */
int _mmLinearMap(void*, uint64_t);

/** See pageTableOffset */
uint64_t _pageTableOffset() const;

public:
friend class Block;
friend class StackSegment;
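With the MBOX DMA pages pulled out of the reserved hole, the reserved region shrinks to just the page table, and BLTOHB_DATA_START follows immediately after it. The sketch below works through the resulting layout arithmetic, assuming INITIAL_MEM_SIZE is 4MB (consistent with the half-cache map in Dump.pm above); the real values come from the VmmManager constants.

#include <cstdint>
#include <cstdio>

constexpr uint64_t KILOBYTE = 1024;
constexpr uint64_t MEGABYTE = 1024 * KILOBYTE;

constexpr uint64_t INITIAL_MEM_SIZE    = 4 * MEGABYTE;                // assumed
constexpr uint64_t INITIAL_PT_OFFSET   = INITIAL_MEM_SIZE - 1 * MEGABYTE;
constexpr uint64_t PTSIZE              = 256 * KILOBYTE;
constexpr uint64_t FIRST_RESERVED_PAGE = INITIAL_PT_OFFSET;
constexpr uint64_t END_RESERVED_PAGE   = INITIAL_PT_OFFSET + PTSIZE;  // DMA pages no longer included
constexpr uint64_t BLTOHB_DATA_START   = END_RESERVED_PAGE;

int main()
{
    std::printf("page table hole: [0x%08llx, 0x%08llx)\n",
                static_cast<unsigned long long>(FIRST_RESERVED_PAGE),
                static_cast<unsigned long long>(END_RESERVED_PAGE));
    std::printf("BlToHb data starts at 0x%08llx\n",
                static_cast<unsigned long long>(BLTOHB_DATA_START));
    return 0;
}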
3 changes: 1 addition & 2 deletions src/include/sys/mm.h
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HostBoot Project */
/* */
/* Contributors Listed Below - COPYRIGHT 2011,2016 */
/* Contributors Listed Below - COPYRIGHT 2011,2017 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -102,7 +102,6 @@ int mm_set_permission(void* va, uint64_t size, uint64_t access_type);

enum MM_EXTEND_SIZE
{
MM_EXTEND_PARTIAL_CACHE, //< Extend memory to include 512KB to 4MB
MM_EXTEND_REDUCED_CACHE, //< Extend memory to include reduced cache (8MB).
MM_EXTEND_FULL_CACHE, //< Extend memory to include full cache (10MB).
MM_EXTEND_REAL_MEMORY, //< Extend memory into real mainstore.
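Callers that previously requested MM_EXTEND_PARTIAL_CACHE now start at MM_EXTEND_REDUCED_CACHE, since the half-cache footprint is established by the page manager itself. A hedged sketch of such a call, assuming the mm_extend() entry point in this header takes an MM_EXTEND_SIZE value; growToReducedCache is an illustrative name, not Hostboot code.

#include <sys/mm.h>

void growToReducedCache()
{
    // The page manager already seeds the heap with the half-cache
    // footprint, so the first explicit expansion goes straight to
    // the reduced cache.
    int rc = mm_extend(MM_EXTEND_REDUCED_CACHE);
    if (rc != 0)
    {
        // handle failure; the kernel expand routines return 0 or -errno
    }
}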
51 changes: 3 additions & 48 deletions src/kernel/misc.C
@@ -490,54 +490,6 @@ namespace KernelMisc
kassert(false);
}

int expand_half_cache()
{
static bool executed = false;

if (executed) // Why are we being called a second time?
{
return -EFAULT;
}

uint64_t startAddr = 512*KILOBYTE;
uint64_t endAddr = 1*MEGABYTE;

size_t cache_columns = 0;

switch(CpuID::getCpuType())
{
case CORE_POWER8_MURANO:
case CORE_POWER8_VENICE:
case CORE_POWER8_NAPLES:
case CORE_POWER9_NIMBUS:
case CORE_POWER9_CUMULUS:
cache_columns = 4;
break;

default:
kassert(false);
break;
}

for (size_t i = 0; i < cache_columns; i++)
{
size_t offset = i * MEGABYTE;
populate_cache_lines(
reinterpret_cast<uint64_t*>(startAddr + offset),
reinterpret_cast<uint64_t*>(endAddr + offset));

PageManager::addMemory(startAddr + offset,
(512*KILOBYTE)/PAGESIZE);
}

executed = true;

KernelMemState::setMemScratchReg(KernelMemState::MEM_CONTAINED_L3,
KernelMemState::HALF_CACHE);

return 0;
}

int expand_full_cache(uint64_t i_expandSize)
{
static bool executed = false;
@@ -596,6 +548,9 @@
{
size_t cache_line_size = getCacheLineWords();

// Assert start/end address is divisible by Cache Line Words
kassert(reinterpret_cast<uint64_t>(i_start)%cache_line_size == 0);
kassert(reinterpret_cast<uint64_t>(i_end)%cache_line_size == 0);
while(i_start != i_end)
{
dcbz(i_start);
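The new kasserts make populate_cache_lines() fail fast if a caller hands it bounds that are not cache-line aligned before the dcbz loop touches memory. A host-side sketch of the same contract, using memset in place of the dcbz instruction and an assumed 128-byte line size; populateCacheLinesSketch is not the kernel routine.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

constexpr std::size_t CACHE_LINE_BYTES = 128;   // assumed line size

void populateCacheLinesSketch(uint64_t* i_start, uint64_t* i_end)
{
    // Same fail-fast contract as the new kasserts: both bounds must be
    // cache-line aligned before the zeroing loop runs.
    assert(reinterpret_cast<uint64_t>(i_start) % CACHE_LINE_BYTES == 0);
    assert(reinterpret_cast<uint64_t>(i_end)   % CACHE_LINE_BYTES == 0);

    while (i_start != i_end)
    {
        std::memset(i_start, 0, CACHE_LINE_BYTES);        // dcbz stand-in
        i_start += CACHE_LINE_BYTES / sizeof(uint64_t);   // advance one line
    }
}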
111 changes: 30 additions & 81 deletions src/kernel/pagemgr.C
@@ -206,95 +206,44 @@ void PageManager::_initialize()
uint64_t totalPages = 0;

page_t* startAddr = reinterpret_cast<page_t*>(firstPageAddr());
page_t* endAddr = reinterpret_cast<page_t*>(VmmManager::INITIAL_MEM_SIZE);
printk("PageManager starts at %p...", startAddr);
printk("PageManager starts at %p\n", startAddr);

// Populate cache lines from end of HBB to PT offset and add to heap
uint64_t startBlock = reinterpret_cast<uint64_t>(startAddr);
uint64_t endBlock = VmmManager::INITIAL_PT_OFFSET;
KernelMisc::populate_cache_lines(
reinterpret_cast<uint64_t*>(startBlock),
reinterpret_cast<uint64_t*>(endBlock));

uint64_t pages = (endBlock - startBlock) / PAGESIZE;
iv_heap.addMemory(startBlock, pages);
totalPages += pages;

// Populate cache lines of PT
startBlock = VmmManager::INITIAL_PT_OFFSET;
endBlock = VmmManager::INITIAL_PT_OFFSET + VmmManager::PTSIZE;
KernelMisc::populate_cache_lines(reinterpret_cast<uint64_t*>(startBlock),
reinterpret_cast<uint64_t*>(endBlock));

// Populate cachelines from end of Preserved read (PT + securebood data) to
// 4MB and add to heap
// Add on secureboot data size to end of reserved space
size_t securebootDataSize = 0;
if (g_BlToHbDataManager.isValid())
{
securebootDataSize = g_BlToHbDataManager.getPreservedSize();
}
size_t l_endReservedPage = VmmManager::END_RESERVED_PAGE
size_t l_endReservedPage = VmmManager::BLTOHB_DATA_START
+ securebootDataSize;

// Calculate chunks along the top half of the L3 and erase them.
uint64_t currentBlock = reinterpret_cast<uint64_t>(startAddr);
do
{
if (currentBlock % (1*MEGABYTE) >= (512*KILOBYTE))
{
currentBlock = ALIGN_MEGABYTE(currentBlock);
continue;
}

uint64_t endBlock = ALIGN_MEGABYTE_DOWN(currentBlock) + 512*KILOBYTE;

// Adjust address to compensate for reserved hole and add to
// heap...

// Check if this block starts in the hole.
if ((currentBlock >= VmmManager::FIRST_RESERVED_PAGE) &&
(currentBlock < l_endReservedPage))
{
// End of the block is in the hole, skip.
if (endBlock < l_endReservedPage)
{
currentBlock = ALIGN_MEGABYTE(endBlock);
continue;
}

// Advance the current block past the hole.
currentBlock = l_endReservedPage;
}

// Check if the block is has the hole in it.
if ((endBlock >= VmmManager::FIRST_RESERVED_PAGE) &&
(currentBlock < VmmManager::FIRST_RESERVED_PAGE))
{
// Hole is at the end of the block, shrink it down.
if (endBlock < l_endReservedPage)
{
endBlock = VmmManager::FIRST_RESERVED_PAGE;
}
// Hole is in the middle... yuck.
else
{
uint64_t hole_end =
(VmmManager::FIRST_RESERVED_PAGE - currentBlock);

// Populate L3 for the first part of the chunk.
KernelMisc::populate_cache_lines(
reinterpret_cast<uint64_t*>(currentBlock),
reinterpret_cast<uint64_t*>(hole_end));

// Add it to the heap.
iv_heap.addMemory(currentBlock, hole_end / PAGESIZE);
totalPages += (hole_end / PAGESIZE);

currentBlock = l_endReservedPage;
}
}

// Populate L3 cache lines for this chunk.
KernelMisc::populate_cache_lines(
reinterpret_cast<uint64_t*>(currentBlock),
reinterpret_cast<uint64_t*>(endBlock));

uint64_t pages = (endBlock - currentBlock) / PAGESIZE;

iv_heap.addMemory(currentBlock, pages);
totalPages += pages;

currentBlock = ALIGN_MEGABYTE(endBlock);

} while (reinterpret_cast<page_t*>(currentBlock) != endAddr);

// Ensure HW page table area is erased / populated.
startBlock = l_endReservedPage;
endBlock = VmmManager::INITIAL_MEM_SIZE;
KernelMisc::populate_cache_lines(
reinterpret_cast<uint64_t*>(VmmManager::INITIAL_PT_OFFSET),
reinterpret_cast<uint64_t*>(VmmManager::INITIAL_PT_OFFSET +
VmmManager::PTSIZE));
reinterpret_cast<uint64_t*>(startBlock),
reinterpret_cast<uint64_t*>(endBlock));

pages = (endBlock - startBlock) / PAGESIZE;
iv_heap.addMemory(startBlock, pages);
totalPages += pages;

printk("%ld pages.\n", totalPages);

@@ -309,7 +258,7 @@ void PageManager::_initialize()
cv_low_page_count = totalPages;

KernelMemState::setMemScratchReg(KernelMemState::MEM_CONTAINED_L3,
KernelMemState::PRE_SECURE_BOOT);
KernelMemState::HALF_CACHE);
}

void* PageManager::_allocatePage(size_t n, bool userspace)
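The rewritten _initialize() no longer walks 512KB chunks around a mid-cache hole; it zeroes and donates two contiguous regions (end of the HBB image up to the page table, and end of the preserved BlToHb data up to INITIAL_MEM_SIZE), plus the page table itself. The sketch below shows the resulting page accounting; firstPage, securebootDataSize and the 4MB INITIAL_MEM_SIZE are assumptions for illustration.

#include <cstdint>
#include <cstdio>

constexpr uint64_t KILOBYTE = 1024;
constexpr uint64_t MEGABYTE = 1024 * KILOBYTE;
constexpr uint64_t PAGESIZE = 4 * KILOBYTE;

constexpr uint64_t INITIAL_MEM_SIZE  = 4 * MEGABYTE;               // assumed
constexpr uint64_t INITIAL_PT_OFFSET = INITIAL_MEM_SIZE - 1 * MEGABYTE;
constexpr uint64_t PTSIZE            = 256 * KILOBYTE;
constexpr uint64_t BLTOHB_DATA_START = INITIAL_PT_OFFSET + PTSIZE;

int main()
{
    const uint64_t firstPage          = 768 * KILOBYTE;  // hypothetical end of HBB image
    const uint64_t securebootDataSize = 64 * KILOBYTE;   // hypothetical preserved size

    uint64_t totalPages = 0;

    // Region 1: end of the HBB image up to the page table.
    totalPages += (INITIAL_PT_OFFSET - firstPage) / PAGESIZE;

    // The page table itself is zeroed but never handed to the heap.

    // Region 2: end of the preserved BlToHb data up to INITIAL_MEM_SIZE.
    const uint64_t endReserved = BLTOHB_DATA_START + securebootDataSize;
    totalPages += (INITIAL_MEM_SIZE - endReserved) / PAGESIZE;

    std::printf("%llu pages seeded into the heap\n",
                static_cast<unsigned long long>(totalPages));
    return 0;
}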
4 changes: 0 additions & 4 deletions src/kernel/syscall.C
@@ -908,10 +908,6 @@ namespace Systemcalls

switch (size)
{
case MM_EXTEND_PARTIAL_CACHE:
TASK_SETRTN(t, KernelMisc::expand_half_cache());
break;

case MM_EXTEND_REDUCED_CACHE:
TASK_SETRTN(t, KernelMisc::expand_full_cache(8*MEGABYTE));
break;
14 changes: 12 additions & 2 deletions src/kernel/vmmmgr.C
@@ -5,7 +5,7 @@
/* */
/* OpenPOWER HostBoot Project */
/* */
/* Contributors Listed Below - COPYRIGHT 2010,2016 */
/* Contributors Listed Below - COPYRIGHT 2010,2017 */
/* [+] International Business Machines Corp. */
/* */
/* */
@@ -277,7 +277,7 @@ int VmmManager::_devUnmap(void* ea)

uint64_t VmmManager::HTABORG()
{
return ((uint32_t)HTABORG_OFFSET + getHRMOR());
return static_cast<uint32_t>(pageTableOffset()) + getHRMOR();
}

uint64_t VmmManager::findKernelAddress(uint64_t i_vaddr)
@@ -304,3 +304,13 @@ int VmmManager::_mmLinearMap(void *i_paddr, uint64_t i_size)
lock.unlock();
return rc;
}

uint64_t VmmManager::pageTableOffset()
{
return Singleton<VmmManager>::instance()._pageTableOffset();
}

uint64_t VmmManager::_pageTableOffset() const
{
return INITIAL_PT_OFFSET;
}
