Refactor kernel mapping so that symbol table is mapped
jackpot51 committed Jun 14, 2017
1 parent 8b05863 commit d6354ae
Showing 6 changed files with 104 additions and 100 deletions.
11 changes: 4 additions & 7 deletions linkers/x86_64.ld
@@ -29,6 +29,10 @@ SECTIONS {
        *(.data*)
        . = ALIGN(4096);
        __data_end = .;
        __bss_start = .;
        *(.bss*)
        . = ALIGN(4096);
        __bss_end = .;
    }

    .tdata : AT(ADDR(.tdata) - KERNEL_OFFSET) {
@@ -43,13 +47,6 @@ SECTIONS {
        __tbss_end = .;
    }

    .bss : AT(ADDR(.bss) - KERNEL_OFFSET) {
        __bss_start = .;
        *(.bss*)
        . = ALIGN(4096);
        __bss_end = .;
    }

    __end = .;

    /DISCARD/ : {
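The .bss output section is folded into .data here so the zeroed data lands inside the kernel image region that the new kernel_start..kernel_end mapping in src/paging/mod.rs covers (and, per src/start.rs below, arrives already zeroed). A minimal sketch, not part of this commit, of how the kernel can inspect these linker-provided symbols from Rust; the declarations mirror the ones already used in src/paging/mod.rs:

extern {
    /// Provided by linkers/x86_64.ld
    static __bss_start: u8;
    static __bss_end: u8;
}

/// Size of the .bss region in bytes, derived from the linker symbols.
unsafe fn bss_size() -> usize {
    & __bss_end as *const u8 as usize - & __bss_start as *const u8 as usize
}
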
37 changes: 3 additions & 34 deletions src/interrupt/mod.rs
@@ -1,13 +1,12 @@
//! Interrupt instructions

use core::mem;

use paging::{ActivePageTable, VirtualAddress};

pub mod exception;
pub mod ipi;
pub mod irq;
pub mod syscall;
pub mod trace;

pub use self::trace::stack_trace;

/// Clear interrupts
#[inline(always)]
@@ -53,33 +52,3 @@ pub unsafe fn halt() {
pub fn pause() {
    unsafe { asm!("pause" : : : : "intel", "volatile"); }
}

/// Get a stack trace
//TODO: Check for stack being mapped before dereferencing
#[inline(never)]
pub unsafe fn stack_trace() {
    let mut rbp: usize;
    asm!("" : "={rbp}"(rbp) : : : "intel", "volatile");

    println!("TRACE: {:>016X}", rbp);
    //Maximum 64 frames
    let active_table = ActivePageTable::new();
    for _frame in 0..64 {
        if let Some(rip_rbp) = rbp.checked_add(mem::size_of::<usize>()) {
            if active_table.translate(VirtualAddress::new(rbp)).is_some() && active_table.translate(VirtualAddress::new(rip_rbp)).is_some() {
                let rip = *(rip_rbp as *const usize);
                if rip == 0 {
                    println!(" {:>016X}: EMPTY RETURN", rbp);
                    break;
                }
                println!(" {:>016X}: {:>016X}", rbp, rip);
                rbp = *(rbp as *const usize);
            } else {
                println!(" {:>016X}: GUARD PAGE", rbp);
                break;
            }
        } else {
            println!(" {:>016X}: RBP OVERFLOW", rbp);
        }
    }
}
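
Because of the `pub use self::trace::stack_trace;` re-export above, existing callers are unchanged by the move into its own module. A hypothetical call site, not from this commit:

use interrupt::stack_trace;

unsafe fn dump_frames() {
    // Still resolves, now through interrupt::trace::stack_trace
    stack_trace();
}
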
33 changes: 33 additions & 0 deletions src/interrupt/trace.rs
@@ -0,0 +1,33 @@
use core::mem;

use paging::{ActivePageTable, VirtualAddress};

/// Get a stack trace
//TODO: Check for stack being mapped before dereferencing
#[inline(never)]
pub unsafe fn stack_trace() {
    let mut rbp: usize;
    asm!("" : "={rbp}"(rbp) : : : "intel", "volatile");

    println!("TRACE: {:>016X}", rbp);
    //Maximum 64 frames
    let active_table = ActivePageTable::new();
    for _frame in 0..64 {
        if let Some(rip_rbp) = rbp.checked_add(mem::size_of::<usize>()) {
            if active_table.translate(VirtualAddress::new(rbp)).is_some() && active_table.translate(VirtualAddress::new(rip_rbp)).is_some() {
                let rip = *(rip_rbp as *const usize);
                if rip == 0 {
                    println!(" {:>016X}: EMPTY RETURN", rbp);
                    break;
                }
                println!(" {:>016X}: {:>016X}", rbp, rip);
                rbp = *(rbp as *const usize);
            } else {
                println!(" {:>016X}: GUARD PAGE", rbp);
                break;
            }
        } else {
            println!(" {:>016X}: RBP OVERFLOW", rbp);
        }
    }
}
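
The walker above relies on the conventional x86_64 frame-pointer chain (frame pointers must not be omitted): [rbp] holds the caller's saved rbp and [rbp + 8] holds the return address. A minimal sketch of a single step of that walk, assuming both words are mapped (which the translate calls above verify):

use core::mem;

/// One step of the frame-pointer walk: returns (return rip, caller's rbp).
unsafe fn next_frame(rbp: usize) -> (usize, usize) {
    let rip = *((rbp + mem::size_of::<usize>()) as *const usize);
    let prev_rbp = *(rbp as *const usize);
    (rip, prev_rbp)
}
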
1 change: 1 addition & 0 deletions src/lib.rs
@@ -7,6 +7,7 @@
#![feature(alloc)]
#![feature(asm)]
#![feature(collections)]
#![feature(concat_idents)]
#![feature(const_fn)]
#![feature(core_intrinsics)]
#![feature(drop_types_in_const)]
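`concat_idents` is pulled in for the `in_section!` macro introduced in src/paging/mod.rs below. A minimal sketch of what this (nightly-only, at the time) feature allows; the static here is a stand-in for the real linker symbol:

#![feature(concat_idents)]

#[allow(non_upper_case_globals)]
static __text_start: u8 = 0;

fn text_start_addr() -> usize {
    // concat_idents!(__, text, _start) expands to the identifier __text_start
    & concat_idents!(__, text, _start) as *const u8 as usize
}
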
87 changes: 55 additions & 32 deletions src/paging/mod.rs
@@ -78,7 +78,7 @@ unsafe fn init_tcb(cpu_id: usize) -> usize {
/// Initialize paging
///
/// Returns page table and thread control block offset
pub unsafe fn init(cpu_id: usize, stack_start: usize, stack_end: usize) -> (ActivePageTable, usize) {
pub unsafe fn init(cpu_id: usize, kernel_start: usize, kernel_end: usize, stack_start: usize, stack_end: usize) -> (ActivePageTable, usize) {
    extern {
        /// The starting byte of the text (code) data segment.
        static mut __text_start: u8;
@@ -118,6 +118,60 @@ pub unsafe fn init(cpu_id: usize, stack_start: usize, stack_end: usize) -> (Acti
    };

    active_table.with(&mut new_table, &mut temporary_page, |mapper| {
        // Remap stack writable, no execute
        {
            let start_frame = Frame::containing_address(PhysicalAddress::new(stack_start - ::KERNEL_OFFSET));
            let end_frame = Frame::containing_address(PhysicalAddress::new(stack_end - ::KERNEL_OFFSET - 1));
            for frame in Frame::range_inclusive(start_frame, end_frame) {
                let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + ::KERNEL_OFFSET));
                let result = mapper.map_to(page, frame, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
                // The flush can be ignored as this is not the active table. See later active_table.switch
                unsafe { result.ignore(); }
            }
        }

        // Map all frames in kernel
        {
            let start_frame = Frame::containing_address(PhysicalAddress::new(kernel_start));
            let end_frame = Frame::containing_address(PhysicalAddress::new(kernel_end - 1));
            for frame in Frame::range_inclusive(start_frame, end_frame) {
                let phys_addr = frame.start_address().get();
                let virt_addr = phys_addr + ::KERNEL_OFFSET;

                macro_rules! in_section {
                    ($n: ident) => (
                        virt_addr >= & concat_idents!(__, $n, _start) as *const u8 as usize &&
                        virt_addr < & concat_idents!(__, $n, _end) as *const u8 as usize
                    );
                }

                let flags = if in_section!(text) {
                    // Remap text read-only
                    PRESENT | GLOBAL
                } else if in_section!(rodata) {
                    // Remap rodata read-only, no execute
                    PRESENT | GLOBAL | NO_EXECUTE
                } else if in_section!(data) {
                    // Remap data writable, no execute
                    PRESENT | GLOBAL | NO_EXECUTE | WRITABLE
                } else if in_section!(tdata) {
                    // Remap tdata master read-only, no execute
                    PRESENT | GLOBAL | NO_EXECUTE
                } else if in_section!(bss) {
                    // Remap bss writable, no execute
                    PRESENT | GLOBAL | NO_EXECUTE | WRITABLE
                } else {
                    // Remap anything else read-only, no execute
                    PRESENT | GLOBAL | NO_EXECUTE
                };

                let page = Page::containing_address(VirtualAddress::new(virt_addr));
                let result = mapper.map_to(page, frame, flags);
                // The flush can be ignored as this is not the active table. See later active_table.switch
                unsafe { result.ignore(); }
            }
        }

        // Map tdata and tbss
        {
            let size = & __tbss_end as *const _ as usize - & __tdata_start as *const _ as usize;
@@ -133,37 +187,6 @@ pub unsafe fn init(cpu_id: usize, stack_start: usize, stack_end: usize) -> (Acti
                unsafe { result.ignore(); }
            }
        }

        let mut remap = |start: usize, end: usize, flags: EntryFlags| {
            if end > start {
                let start_frame = Frame::containing_address(PhysicalAddress::new(start));
                let end_frame = Frame::containing_address(PhysicalAddress::new(end - 1));
                for frame in Frame::range_inclusive(start_frame, end_frame) {
                    let page = Page::containing_address(VirtualAddress::new(frame.start_address().get() + ::KERNEL_OFFSET));
                    let result = mapper.map_to(page, frame, flags);
                    // The flush can be ignored as this is not the active table. See later active_table.switch
                    unsafe { result.ignore(); }
                }
            }
        };

        // Remap stack writable, no execute
        remap(stack_start - ::KERNEL_OFFSET, stack_end - ::KERNEL_OFFSET, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);

        // Remap a section with `flags`
        let mut remap_section = |start: &u8, end: &u8, flags: EntryFlags| {
            remap(start as *const _ as usize - ::KERNEL_OFFSET, end as *const _ as usize - ::KERNEL_OFFSET, flags);
        };
        // Remap text read-only
        remap_section(& __text_start, & __text_end, PRESENT | GLOBAL);
        // Remap rodata read-only, no execute
        remap_section(& __rodata_start, & __rodata_end, PRESENT | GLOBAL | NO_EXECUTE);
        // Remap data writable, no execute
        remap_section(& __data_start, & __data_end, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
        // Remap tdata master writable, no execute
        remap_section(& __tdata_start, & __tdata_end, PRESENT | GLOBAL | NO_EXECUTE);
        // Remap bss writable, no execute
        remap_section(& __bss_start, & __bss_end, PRESENT | GLOBAL | NO_EXECUTE | WRITABLE);
    });

    // This switches the active table, which is setup by the bootloader, to a correct table
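The `in_section!` macro above stitches the section name into the matching `__<name>_start`/`__<name>_end` symbol pair, which is why `concat_idents` is enabled in src/lib.rs. A standalone sketch of what `in_section!(text)` boils down to, assuming the linker symbols from linkers/x86_64.ld:

extern {
    /// Provided by linkers/x86_64.ld; mirrors the declarations in init
    static __text_start: u8;
    static __text_end: u8;
}

/// The bounds check `in_section!(text)` performs for a kernel virtual address.
unsafe fn in_text_section(virt_addr: usize) -> bool {
    virt_addr >= & __text_start as *const u8 as usize &&
    virt_addr < & __text_end as *const u8 as usize
}
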
35 changes: 8 additions & 27 deletions src/start.rs
@@ -3,7 +3,6 @@
/// It must create the IDT with the correct entries, those entries are
/// defined in other files inside of the `arch` module

use core::ptr;
use core::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, AtomicUsize, ATOMIC_USIZE_INIT, Ordering};

use acpi;
@@ -40,43 +39,25 @@ extern {

/// The entry to Rust, all things must be initialized
#[no_mangle]
pub unsafe extern fn kstart() -> ! {
pub unsafe extern fn kstart(kernel_base: usize, kernel_size: usize, stack_base: usize, stack_size: usize) -> ! {
    {
        extern {
            /// The starting byte of the _.bss_ (uninitialized data) segment.
            static mut __bss_start: u8;
            /// The ending byte of the _.bss_ (uninitialized data) segment.
            static mut __bss_end: u8;
            /// The end of the kernel
            static mut __end: u8;
        }

        // Zero BSS, this initializes statics that are set to 0
        // BSS should already be zero
        {
            let start_ptr = &mut __bss_start as *mut u8;
            let end_ptr = & __bss_end as *const u8 as usize;

            if start_ptr as usize <= end_ptr {
                let size = end_ptr - start_ptr as usize;
                ptr::write_bytes(start_ptr, 0, size);
            }

            assert_eq!(BSS_TEST_ZERO, 0);
            assert_eq!(DATA_TEST_NONZERO, 0xFFFFFFFFFFFFFFFF);
        }

        // Initialize memory management
        memory::init(0, &__end as *const u8 as usize - ::KERNEL_OFFSET);
        println!("Kernel: {:X}:{:X}", kernel_base, kernel_base + kernel_size);
        println!("Stack: {:X}:{:X}", stack_base, stack_base + stack_size);

        // TODO: allocate a stack
        let stack_start = 0x00080000 + ::KERNEL_OFFSET;
        let stack_end = 0x0009F000 + ::KERNEL_OFFSET;
        // Initialize memory management
        memory::init(0, kernel_base + ((kernel_size + 4095)/4096) * 4096);

        // Initialize paging
        let (mut active_table, tcb_offset) = paging::init(0, stack_start, stack_end);
        let (mut active_table, tcb_offset) = paging::init(0, kernel_base, kernel_base + kernel_size, stack_base, stack_base + stack_size);

        // Set up GDT
        gdt::init(tcb_offset, stack_end);
        gdt::init(tcb_offset, stack_base + stack_size);

        // Set up IDT
        idt::init();
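The second argument to `memory::init` above is the kernel's end rounded up to a page boundary; `((kernel_size + 4095)/4096) * 4096` is the usual round-up-to-4-KiB idiom. An equivalent sketch with helper names of our choosing, not the repo's:

const PAGE_SIZE: usize = 4096;

/// Round `size` up to the next multiple of PAGE_SIZE.
fn page_align_up(size: usize) -> usize {
    (size + PAGE_SIZE - 1) / PAGE_SIZE * PAGE_SIZE
}

/// End of the kernel image, page aligned, as passed to memory::init.
fn kernel_end(kernel_base: usize, kernel_size: usize) -> usize {
    kernel_base + page_align_up(kernel_size)
}
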
