Skip to content

Commit

Permalink
Initialize documentation for arch mod
Browse files Browse the repository at this point in the history
  • Loading branch information
sorpaas committed Jan 6, 2017
1 parent bdfd0df commit fa09d3b
Show file tree
Hide file tree
Showing 22 changed files with 280 additions and 46 deletions.
3 changes: 2 additions & 1 deletion kernel/src/arch/x86_64/addr.rs
Expand Up @@ -77,12 +77,13 @@ macro_rules! addr_common {
}
}

/// Represent a physical memory address. Newtype over a raw `u64`.
#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub struct PAddr(u64);

// Generate the common address impls via the `addr_common!` macro
// defined earlier in this file.
addr_common!(PAddr, PAddr);

/// Represent a virtual (linear) memory address
/// Represent a virtual (linear) memory address.
///
/// Like [`PAddr`], this is a newtype over a raw `u64`.
#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub struct VAddr(u64);

Expand Down
15 changes: 15 additions & 0 deletions kernel/src/arch/x86_64/cap/mod.rs
@@ -1,3 +1,4 @@
/// Paging-related arch-specific capabilities.
mod paging;

pub use self::paging::{PML4Descriptor, PML4Cap,
Expand All @@ -7,12 +8,23 @@ pub use self::paging::{PML4Descriptor, PML4Cap,
PageDescriptor, PageCap,
PAGE_LENGTH};

/// The top-level page table capability. In `x86_64`, this is PML4.
/// The alias lets arch-independent code refer to the top-level page
/// table without naming the x86_64-specific type directly.
pub type TopPageTableCap = PML4Cap;

use common::*;
use core::any::{TypeId};
use util::managed_arc::{ManagedArc, ManagedWeakPool256Arc, ManagedArcAny};

/// Create a managed Arc (capability) from an address of an
/// architecture-specific kernel object. The `type_id` should be a
/// [TypeId](https://doc.rust-lang.org/std/any/struct.TypeId.html) of
/// an architecture-specific capability. If the `type_id` is not
/// recognized, `None` is returned.
///
/// # Safety
///
/// `ptr` must be a physical address pointing to a valid kernel object
/// of type `type_id`.
pub unsafe fn upgrade_any(ptr: PAddr, type_id: TypeId) -> Option<ManagedArcAny> {
if type_id == TypeId::of::<PML4Cap>() {
Some(unsafe { ManagedArc::from_ptr(ptr): PML4Cap }.into())
Expand All @@ -27,6 +39,9 @@ pub unsafe fn upgrade_any(ptr: PAddr, type_id: TypeId) -> Option<ManagedArcAny>
}
}

/// Drop an architecture-specific `any` capability. `ManagedArcAny` is
/// not itself droppable. It must be converted to its real type before
/// dropping. This function is used by `kernel::cap::drop_any`.
pub fn drop_any(any: ManagedArcAny) {
if any.is::<PML4Cap>() {
any.into(): PML4Cap;
Expand Down
21 changes: 20 additions & 1 deletion kernel/src/arch/x86_64/cap/paging/mod.rs
Expand Up @@ -12,41 +12,60 @@ use core::marker::{PhantomData};
use core::any::{Any};
use cap::{UntypedDescriptor, SetDefault};

pub use self::page::{PAGE_LENGTH};
/// Page length used in current kernel. This is `BASE_PAGE_LENGTH` in
/// x86_64 (presumably 4 KiB — confirm in `arch::paging`).
pub const PAGE_LENGTH: usize = BASE_PAGE_LENGTH;

/// PML4 page table descriptor.
pub struct PML4Descriptor {
/// Physical address at which the PML4 table starts.
start_paddr: PAddr,
/// NOTE(review): appears to chain to the next capability derived from
/// the same untyped region — confirm against `UntypedDescriptor` usage.
next: Option<ManagedArcAny>,
}

/// PML4 page table capability: a reference-counted, lock-protected
/// [`PML4Descriptor`].
pub type PML4Cap = ManagedArc<RwLock<PML4Descriptor>>;


/// PDPT page table descriptor.
pub struct PDPTDescriptor {
/// NOTE(review): weak pool presumably recording where this table is
/// mapped — confirm.
mapped_weak_pool: ManagedWeakPool1Arc,
/// Physical address at which the PDPT table starts.
start_paddr: PAddr,
/// NOTE(review): appears to chain to the next capability derived from
/// the same untyped region — confirm.
next: Option<ManagedArcAny>,
}

/// PDPT page table capability.
pub type PDPTCap = ManagedArc<RwLock<PDPTDescriptor>>;


/// PD page table descriptor. Same layout as [`PDPTDescriptor`].
pub struct PDDescriptor {
mapped_weak_pool: ManagedWeakPool1Arc,
start_paddr: PAddr,
next: Option<ManagedArcAny>,
}

/// PD page table capability.
pub type PDCap = ManagedArc<RwLock<PDDescriptor>>;


/// PT page table descriptor. Same layout as [`PDPTDescriptor`].
pub struct PTDescriptor {
mapped_weak_pool: ManagedWeakPool1Arc,
start_paddr: PAddr,
next: Option<ManagedArcAny>,
}

/// PT page table capability.
pub type PTCap = ManagedArc<RwLock<PTDescriptor>>;

/// Page descriptor, generic over the content type `T` stored in the page.
pub struct PageDescriptor<T: SetDefault + Any> {
mapped_weak_pool: ManagedWeakPool1Arc,
/// Physical address at which the page starts.
start_paddr: PAddr,
next: Option<ManagedArcAny>,
/// Ties the descriptor to `T` without storing a value of that type.
_marker: PhantomData<T>
}

/// Page capability.
/// NOTE(review): trait bounds on a type alias are not enforced by
/// rustc; the bound here is documentation only — the effective bound
/// comes from `PageDescriptor<T>` itself.
pub type PageCap<T: SetDefault + Any> = ManagedArc<RwLock<PageDescriptor<T>>>;

macro_rules! paging_cap {
Expand Down
4 changes: 1 addition & 3 deletions kernel/src/arch/x86_64/cap/paging/page.rs
Expand Up @@ -5,11 +5,9 @@ use util::managed_arc::{ManagedWeakPool1Arc};
use core::marker::{PhantomData};
use core::any::{Any};
use core::mem;
use super::{PageDescriptor, PageCap};
use super::{PageDescriptor, PageCap, PAGE_LENGTH};
use cap::{UntypedDescriptor, SetDefault};

pub const PAGE_LENGTH: usize = BASE_PAGE_LENGTH;

impl<T: SetDefault + Any> PageCap<T> {
pub fn retype_from(untyped: &mut UntypedDescriptor) -> Self {
unsafe { Self::bootstrap(unsafe { untyped.allocate(BASE_PAGE_LENGTH, BASE_PAGE_LENGTH) }, untyped) }
Expand Down
2 changes: 2 additions & 0 deletions kernel/src/arch/x86_64/init/interrupt.rs
@@ -1,5 +1,7 @@
use arch::interrupt::{self, IDT, IO_APIC, LOCAL_APIC, disable_pic};

/// Initialize interrupts: disable the legacy PIC, then initialize the
/// APIC together with the keyboard interrupt on the I/O APIC.
pub fn init() {
unsafe { disable_pic() };
IDT.load();
Expand Down
29 changes: 28 additions & 1 deletion kernel/src/arch/x86_64/init/mod.rs
@@ -1,6 +1,14 @@
/// [Multiboot](https://www.gnu.org/software/grub/manual/multiboot/multiboot.html)
/// information parser.
mod multiboot;

/// Paging initialization code.
mod paging;

/// Interrupt initialization code.
mod interrupt;

/// Segmentation initialization code.
mod segmentation;

pub use self::paging::{KERNEL_PML4, KERNEL_PDPT, KERNEL_PD,
Expand All @@ -24,7 +32,9 @@ use core::ops::{Deref};
use common::{PAddr, VAddr, MemoryRegion};

// Symbols provided by the linker; the bootloader is expected to have
// filled them in before the kernel runs.
extern {
/// Multiboot signature exposed by linker.
static multiboot_sig: u32;
/// Multiboot pointer (physical address of the multiboot info
/// structure) exposed by linker.
static multiboot_ptr: u64;
}

Expand All @@ -33,6 +43,8 @@ pub fn multiboot_paddr() -> PAddr {
PAddr::from(multiboot_ptr)
}

/// Iterator over a slice of `Option<MemoryRegion>`. Yields the
/// unwrapped value of each entry, and stops (returns `None`) at the
/// first entry whose inner `Option` is `None`.
pub struct FreeRegionsIterator<'a>(Iter<'a, Option<MemoryRegion>>);

impl<'a> Iterator for FreeRegionsIterator<'a> {
Expand All @@ -53,6 +65,9 @@ impl<'a> Iterator for FreeRegionsIterator<'a> {
}
}

/// Initialization information to be passed to `kmain`. It contains
/// free regions and rinit and kernel memory region information. At
/// most 16 free regions are supported.
#[derive(Debug)]
pub struct InitInfo {
free_regions_size: usize,
Expand All @@ -62,31 +77,41 @@ pub struct InitInfo {
}

impl InitInfo {
    /// Return a [`FreeRegionsIterator`] over all free regions
    /// recorded so far.
    pub fn free_regions(&self) -> FreeRegionsIterator {
        let inner = self.free_regions.iter();
        FreeRegionsIterator(inner)
    }

    /// The kernel memory region.
    pub fn kernel_region(&self) -> MemoryRegion {
        self.kernel_region
    }

    /// The user-space rinit program memory region.
    pub fn rinit_region(&self) -> MemoryRegion {
        self.rinit_region
    }

    /// Build an `InitInfo` from a kernel region and a rinit region,
    /// with no free regions recorded yet.
    pub fn new(kernel_region: MemoryRegion, rinit_region: MemoryRegion) -> InitInfo {
        InitInfo {
            kernel_region: kernel_region,
            rinit_region: rinit_region,
            free_regions: [None; 16],
            free_regions_size: 0,
        }
    }

    /// Record another free region.
    ///
    /// NOTE(review): the array holds at most 16 regions; pushing more
    /// panics on the index — confirm callers respect the limit.
    pub fn push_free_region(&mut self, region: MemoryRegion) {
        let index = self.free_regions_size;
        self.free_regions[index] = Some(region);
        self.free_regions_size = index + 1;
    }
}

/// Read the multiboot structure and construct an `InitInfo` with all
/// free regions. A memory region that will be used for initial memory
/// allocation is returned separately; that region is always the same
/// as the kernel region.
fn bootstrap_archinfo() -> (InitInfo, MemoryRegion) {
let bootinfo = unsafe {
multiboot::Multiboot::new(multiboot_paddr(), |addr, size| {
Expand Down Expand Up @@ -129,7 +154,9 @@ fn bootstrap_archinfo() -> (InitInfo, MemoryRegion) {
(archinfo, alloc_region.unwrap())
}

/// Kernel entrypoint
/// Kernel entrypoint. This function calls `bootstrap_archinfo`, then
/// uses the resulting information to initialize paging, segmentation,
/// interrupts, and the APIC. It then jumps to `kmain`.
#[lang="start"]
#[no_mangle]
pub fn kinit() {
Expand Down
45 changes: 40 additions & 5 deletions kernel/src/arch/x86_64/init/paging.rs
Expand Up @@ -13,42 +13,67 @@ use core::ops::{Deref};
use arch::addr;

// Symbols provided by the linker / bootstrap code.
extern {
/// `init_pd` exposed by linker: the page directory in use during
/// early boot (see `INITIAL_PD` below).
static mut init_pd: PD;
/// `kernel_stack_guard_page` exposed by linker.
static kernel_stack_guard_page: u64;
}

// Below should be used BEFORE switching to new page table structure.
// Each offset below is one 0x1000 (page-sized) step from the previous,
// so the bootstrap structures occupy consecutive pages from the base.
/// Virtual address as the base for all allocations. Those are used
/// before switching to the new page structure.
const INITIAL_ALLOC_START_VADDR: VAddr = VAddr::new(KERNEL_BASE + 0xc00000);
/// Offset for the new PML4 virtual address.
const INITIAL_ALLOC_PML4_OFFSET: usize = 0x0000;
/// Offset for the new PDPT virtual address.
const INITIAL_ALLOC_PDPT_OFFSET: usize = 0x1000;
/// Offset for the new PD virtual address.
const INITIAL_ALLOC_PD_OFFSET: usize = 0x2000;
/// Offset for the new Object Pool PT virtual address.
const INITIAL_ALLOC_OBJECT_POOL_PT_OFFSET: usize = 0x3000;
/// Offset for the first of the new kernel PTs' virtual addresses.
const INITIAL_ALLOC_KERNEL_PT_START_OFFSET: usize = 0x4000;

// Below should be used AFTER switching to new page table structure.
/// Object Pool virtual address after switching to new page table.
pub const OBJECT_POOL_START_VADDR: VAddr = VAddr::new(KERNEL_BASE +
0xe00000);
/// Object Pool size, excluding the recursive Object Pool virtual
/// address, local APIC page address, and I/O APIC page address
/// (i.e. three of the PT's entries are reserved for the addresses
/// below — NOTE(review): confirm against the PT entry count).
pub const OBJECT_POOL_SIZE: usize = 509;
/// Object Pool PT virtual address after switching to new page table.
pub const OBJECT_POOL_PT_VADDR: VAddr = VAddr::new(KERNEL_BASE +
0xfff000);
/// Local APIC page virtual address after switching to new page table.
pub const LOCAL_APIC_PAGE_VADDR: VAddr = VAddr::new(KERNEL_BASE +
0xffe000);
/// I/O APIC page virtual address after switching to new page table.
pub const IO_APIC_PAGE_VADDR: VAddr = VAddr::new(KERNEL_BASE + 0xffd000);

// Variables
/// Initial PD. Invalid after switching to the new page table.
// SAFETY: takes the address of the linker-provided `init_pd` static;
// NOTE(review): confirm `ExternMutex::new`'s contract on the pointer's
// validity period.
static INITIAL_PD: ExternMutex<PD> =
unsafe { ExternMutex::new(Some(&init_pd as *const _)) };

/// Object Pool PT struct. Starts with no backing pointer (`None`);
/// presumably bound during paging initialization — NOTE(review): confirm.
pub static OBJECT_POOL_PT: ExternMutex<[PTEntry; OBJECT_POOL_SIZE]> =
unsafe { ExternMutex::new(None) };
/// Kernel PML4 struct.
pub static KERNEL_PML4: ExternReadonlyObject<PML4> =
unsafe { ExternReadonlyObject::new() };
/// Kernel PDPT struct.
pub static KERNEL_PDPT: ExternReadonlyObject<PDPT> =
unsafe { ExternReadonlyObject::new() };
/// Kernel PD struct.
pub static KERNEL_PD: ExternReadonlyObject<PD> =
unsafe { ExternReadonlyObject::new() };

/// Virtual address of the kernel stack guard page, derived from the
/// linker-provided `kernel_stack_guard_page` symbol. Meaningful after
/// switching to the new page table.
fn kernel_stack_guard_page_vaddr() -> VAddr {
    let sym_ptr = &kernel_stack_guard_page as *const u64;
    VAddr::from(sym_ptr as u64)
}

/// Allocate the kernel PML4 using the given memory region and
/// allocation base.
fn alloc_kernel_pml4(region: &mut MemoryRegion, alloc_base: PAddr) -> Unique<PML4> {
use arch::paging::PML4Entry;

Expand All @@ -71,6 +96,8 @@ fn alloc_kernel_pml4(region: &mut MemoryRegion, alloc_base: PAddr) -> Unique<PML
pml4_unique
}

/// Allocate the kernel PDPT using the given memory region and
/// allocation base.
fn alloc_kernel_pdpt(region: &mut MemoryRegion, pml4: &mut PML4, alloc_base: PAddr) -> Unique<PDPT> {
use arch::paging::{PDPTEntry, PML4Entry, PML4_P, PML4_RW};

Expand All @@ -95,6 +122,8 @@ fn alloc_kernel_pdpt(region: &mut MemoryRegion, pml4: &mut PML4, alloc_base: PAd
pdpt_unique
}

/// Allocate the kernel PD using the given memory region and
/// allocation base.
fn alloc_kernel_pd(region: &mut MemoryRegion, pdpt: &mut PDPT, alloc_base: PAddr) -> Unique<PD> {
use arch::paging::{PDEntry, PDPTEntry, PDPT_P, PDPT_RW};

Expand All @@ -119,6 +148,8 @@ fn alloc_kernel_pd(region: &mut MemoryRegion, pdpt: &mut PDPT, alloc_base: PAddr
pd_unique
}

/// Allocate the object pool PT. It also maps a reverse ObjectPool PT
/// access point, and APIC pages (local and I/O).
fn alloc_object_pool_pt(region: &mut MemoryRegion, pd: &mut PD, alloc_base: PAddr) -> Unique<PT> {
use arch::paging::{PTEntry, PDEntry, PD_P, PD_RW, PT_P, PT_RW, PT_PWT, PT_PCD};

Expand Down Expand Up @@ -164,6 +195,7 @@ fn alloc_object_pool_pt(region: &mut MemoryRegion, pd: &mut PD, alloc_base: PAdd
pt_unique
}

/// Allocate one kernel page using `offset_size`.
fn alloc_kernel_page(pt: &mut PT, offset_size: usize, alloc_base: PAddr) {
use arch::paging::{PT_P, PT_RW};

Expand All @@ -175,6 +207,7 @@ fn alloc_kernel_page(pt: &mut PT, offset_size: usize, alloc_base: PAddr) {
pt[pt_index(vaddr)] = PTEntry::new(paddr, PT_P | PT_RW);
}

/// Allocate the kernel guard page specified by `offset_size`.
fn alloc_kernel_guard_page(pt: &mut PT, offset_size: usize, alloc_base: PAddr) {
use arch::paging::{PT_P, PT_RW};

Expand All @@ -186,6 +219,7 @@ fn alloc_kernel_guard_page(pt: &mut PT, offset_size: usize, alloc_base: PAddr) {
pt[pt_index(vaddr)] = PTEntry::empty();
}

/// Allocate necessary kernel PTs calculated by `block_count`.
fn alloc_kernel_pts(region: &mut MemoryRegion, pd: &mut PD, alloc_base: PAddr) {
use arch::paging::{PDEntry, PD_P, PD_RW};

Expand Down Expand Up @@ -218,7 +252,7 @@ fn alloc_kernel_pts(region: &mut MemoryRegion, pd: &mut PD, alloc_base: PAddr) {
}
}

// This maps 2MB for allocation region
/// Map the initial 2 MiB for allocation region.
fn map_alloc_region(alloc_region: &mut MemoryRegion) -> PAddr {
use arch::paging::{PD_P, PD_RW, PD_PS, PDEntry, flush_all};

Expand All @@ -237,6 +271,7 @@ fn map_alloc_region(alloc_region: &mut MemoryRegion) -> PAddr {
map_alloc_start_paddr
}

/// Main function to initialize paging.
pub fn init(mut alloc_region: &mut MemoryRegion) {
use arch::paging::{switch_to};

Expand Down

0 comments on commit fa09d3b

Please sign in to comment.