Permalink
Browse files

[x86] Allow machines with >1GB of RAM to boot. Initially first 1GB of…

… RAM is mapped into kernel memory and used to bring up the kernel and map all physical memory into kernel space. During this initialization, the kernel will pull physical pages off its page stack and if they are outside the low 1GB area, it will fault because only the first 1GB is accessible. This fix allows the early kernel to use the first 1GB as a priority over other pages by splitting physical memory into two stacks - one for 0-1GB and the other for >1GB. The rest of the kernel should treat the stacks as one stack (previously SYSTEMSTACK). Now there are LOWSTACK and HIGHSTACK, but pgstack.rs provides veneer functions to access either one in an abstract manner. Thanks to Nick Rathke for reporting this bug.
  • Loading branch information...
1 parent 66e7349 commit 9674633eea0f66c6c4bbb04581566db6e6e0e4ca @diodesign committed Mar 21, 2016
Showing with 115 additions and 22 deletions.
  1. +2 −2 platform/x86/src/paging.rs
  2. +107 −13 platform/x86/src/pgstack.rs
  3. +6 −7 platform/x86/src/physmem.rs
@@ -111,7 +111,7 @@ impl PageTable
{
/* no table allocated, so we need to grab a physical page to hold
* a new PDP table for the PML4 to point to */
- let pdp: usize = try!(pgstack::SYSTEMSTACK.lock().pop());
+ let pdp: usize = try!(pgstack::pop());
/* zero the new PDP table so its entries are all marked not present */
unsafe{ memset(self.phys_to_kernel_virt(pdp) as *mut u8, 0, physmem::SMALL_PAGE_SIZE) };
@@ -153,7 +153,7 @@ impl PageTable
{
/* no table allocated, so we need to grab a physical page to hold
* a new PDP table for the PML4 to point to */
- let pd: usize = try!(pgstack::SYSTEMSTACK.lock().pop());
+ let pd: usize = try!(pgstack::pop());
/* zero the new PDP table so its entries are all marked not present */
unsafe{ memset(self.phys_to_kernel_virt(pd) as *mut u8, 0, physmem::SMALL_PAGE_SIZE) };
@@ -14,19 +14,42 @@ use errors::KernelInternalError;
use ::hardware::physmem;
-const PAGE_STACK_PHYS_START: usize = 4 * 1024 * 1024; /* start page stack at 4MB mark in physical memory */
-
-/* create a system-wide physical stack with a locking mechanism. */
-pub static SYSTEMSTACK: Mutex<PageStack> = Mutex::new(PageStack
- {
- base: PAGE_STACK_PHYS_START,
- ptr: 0,
- max_ptr: 0,
- size: 0,
- virtual_translation_offset: 0,
- });
-
-/* page stack design notes
+/* two stacks are used to manage physical page allocations.
+ * the low stack covers 0-1GB and the high stack covers 1GB+.
+ * there are two stacks because during early kernel init, just the first 1GB of phys mem is mapped
+ * in. this memory is needed to initialize the rest of the system. any allocations must therefore
+ * come off the low stack - the pages referenced by the high stack are not yet accessible.
+ * STACK_SPLIT_PHYS_ADDR defines that split. in future, page stacks could be split by NUMA regions */
+const STACK_SPLIT_PHYS_ADDR: usize = 1 * 1024 * 1024 * 1024;
+
+/* the low page stack's storage starts at the 4MB mark in physical memory. the high stack's
+ * storage follows immediately after the low stack's, whose size is fixed by the split:
+ * one PTR_SIZE-byte entry per 4K page in the low 1GB */
+const PTR_SIZE: usize = 8; /* Rust won't let me use size_of::<usize>() here because it's not defined as a const fn - hardcode 8 bytes for x86-64 */
+const LOW_PAGE_STACK_PHYS_START: usize = 4 * 1024 * 1024;
+const HIGH_PAGE_STACK_PHYS_START: usize = LOW_PAGE_STACK_PHYS_START +
+ ((STACK_SPLIT_PHYS_ADDR / physmem::SMALL_PAGE_SIZE) * PTR_SIZE);
+
+/* create a system-wide stack for the lowest 1GB of phys RAM with a locking mechanism.
+ * kept private: external code goes through the veneer functions below so the two
+ * stacks can be treated as one. ptr/max_ptr/size are filled in later by set_limit() */
+static LOWSTACK: Mutex<PageStack> = Mutex::new(PageStack
+ {
+ base: LOW_PAGE_STACK_PHYS_START,
+ ptr: 0,
+ max_ptr: 0,
+ size: 0,
+ virtual_translation_offset: 0,
+ });
+
+/* create a system-wide stack for >1GB of phys RAM with a locking mechanism.
+ * its storage sits directly above LOWSTACK's fixed-size storage area */
+static HIGHSTACK: Mutex<PageStack> = Mutex::new(PageStack
+ {
+ base: HIGH_PAGE_STACK_PHYS_START,
+ ptr: 0,
+ max_ptr: 0,
+ size: 0,
+ virtual_translation_offset: 0,
+ });
+
+/* individual page stack design notes
*
* Each 1GB of physical RAM takes up 2MB of RAM: 262,144 x 8-byte pointers.
* Each stacked pointer is the base address of a 4K physical page frame.
@@ -194,3 +217,74 @@ impl PageStack
}
}
+/* ----------------------------------------------------------------------- */
+
+/* veneer of functions to provide access to LOWSTACK and HIGHSTACK.
+ * check the function definitions in PageStack for full details of their use. */
+
+/* record the kernel phys->virt translation offset in both page stacks */
+pub fn set_kernel_translation_offset(offset: usize)
+{
+ /* cannot fail - each stack merely stores the offset for later address math */
+ {
+ let mut low = LOWSTACK.lock();
+ low.set_kernel_translation_offset(offset);
+ }
+ {
+ let mut high = HIGHSTACK.lock();
+ high.set_kernel_translation_offset(offset);
+ }
+}
+
+/* set limits for the page stacks: limit = max number of stack entries in total.
+ * entries are assigned to the low stack first, up to the number of 4K pages that
+ * fit below STACK_SPLIT_PHYS_ADDR; any remainder goes to the high stack.
+ * returns an error if either underlying set_limit() call fails */
+pub fn set_limit(limit: usize) -> Result<(), KernelInternalError>
+{
+ /* number of 4K pages that fit below the 1GB split */
+ let page_split = STACK_SPLIT_PHYS_ADDR / physmem::SMALL_PAGE_SIZE;
+
+ /* split the entry budget between the two stacks in one expression -
+ * avoids the mutate-after-zero-init pattern */
+ let (lower, upper) = if limit > page_split
+ {
+ (page_split, limit - page_split)
+ }
+ else
+ {
+ (limit, 0)
+ };
+
+ try!(LOWSTACK.lock().set_limit(lower));
+ try!(HIGHSTACK.lock().set_limit(upper));
+ Ok(())
+}
+
+/* returns true if the given physical address is occupied by either page
+ * stack's storage area, false otherwise */
+pub fn check_collision(addr: usize) -> bool
+{
+ /* || short-circuits, so HIGHSTACK is only consulted when LOWSTACK is clear -
+ * same behavior as before without the `== true` comparisons */
+ LOWSTACK.lock().check_collision(addr) || HIGHSTACK.lock().check_collision(addr)
+}
+
+/* push a 4K physical page base address onto the appropriate stack.
+ * pages below the 1GB split belong to LOWSTACK, everything else to HIGHSTACK.
+ * returns an error if the chosen stack rejects the push */
+pub fn push(phys_addr: usize) -> Result<(), KernelInternalError>
+{
+ if phys_addr < STACK_SPLIT_PHYS_ADDR
+ {
+ LOWSTACK.lock().push(phys_addr)
+ }
+ else
+ {
+ HIGHSTACK.lock().push(phys_addr)
+ }
+}
+
+/* pop a 4K page's base physical address off the stacks.
+ * the low stack is drained first: during early boot only the first 1GB of
+ * physical memory is mapped in, so low pages must be handed out in preference.
+ * returns an error only when both stacks are empty */
+pub fn pop() -> Result<usize, KernelInternalError>
+{
+ /* bind the result so the LOWSTACK guard is released before HIGHSTACK locks */
+ let low_result = LOWSTACK.lock().pop();
+ if low_result.is_ok()
+ {
+ return low_result;
+ }
+ HIGHSTACK.lock().pop()
+}
+
@@ -139,13 +139,13 @@ pub fn init() -> Result<(), KernelInternalError>
{
let pages: usize = mem_total / SMALL_PAGE_SIZE; /* convert bytes into 4k pages */
kprintln!("... found {} physical pages", pages);
- try!(pgstack::SYSTEMSTACK.lock().set_limit(pages));
+ try!(pgstack::set_limit(pages));
}
}
kprintln!("... done, {} MB RAM available ({} bytes reserved for kernel use)", mem_total >> 20, mem_total - mem_stacked);
/* get the physical page stack and paging code using the upper kernel area */
- pgstack::SYSTEMSTACK.lock().set_kernel_translation_offset(KERNEL_VIRTUAL_UPPER_BASE);
+ pgstack::set_kernel_translation_offset(KERNEL_VIRTUAL_UPPER_BASE);
paging::BOOTPGTABL.lock().set_kernel_translation_offset(KERNEL_VIRTUAL_UPPER_BASE);
/* throw out all the redundant mappings, leaving just the kernel code, read-only data
@@ -184,13 +184,12 @@ fn add_phys_region(base: usize, size: usize) -> Result<usize, KernelInternalErro
continue;
}
- let mut stack = pgstack::SYSTEMSTACK.lock();
- if stack.check_collision(page_base) == true
+ if pgstack::check_collision(page_base) == true
{
continue;
}
- if stack.push(page_base).is_ok() == true
+ if pgstack::push(page_base).is_ok() == true
{
stacked = stacked + SMALL_PAGE_SIZE;
}
@@ -242,7 +241,7 @@ fn map_phys_region(base: usize, size: usize) -> Result<(), KernelInternalError>
*/
pub fn get_page() -> Result<usize, KernelInternalError>
{
- let page_base = try!(pgstack::SYSTEMSTACK.lock().pop());
+ let page_base = try!(pgstack::pop());
Ok(page_base + KERNEL_VIRTUAL_UPPER_BASE)
}
@@ -262,7 +261,7 @@ pub fn return_page(virt: usize) -> Result<(), KernelInternalError>
}
let virt = virt - KERNEL_VIRTUAL_UPPER_BASE;
- try!(pgstack::SYSTEMSTACK.lock().push(virt));
+ try!(pgstack::push(virt));
Ok(())
}

0 comments on commit 9674633

Please sign in to comment.