Commit c28c147: Update to new dependencies
jackpot51 committed Jun 20, 2018
1 parent fc0db71
Showing 16 changed files with 139 additions and 140 deletions.
138 changes: 71 additions & 67 deletions Cargo.lock

(Large diff not rendered.)

14 changes: 7 additions & 7 deletions Cargo.toml
@@ -9,21 +9,21 @@ path = "src/lib.rs"
 crate-type = ["staticlib"]
 
 [dependencies]
-bitflags = "1"
-clippy = { version = "*", optional = true }
-linked_list_allocator = "0.6"
-raw-cpuid = "3.0"
+bitflags = "1.0.3"
+clippy = { version = "0.0.209", optional = true }
+linked_list_allocator = "0.6.2"
+raw-cpuid = "4.0.0"
 redox_syscall = { path = "syscall" }
 slab_allocator = { path = "slab_allocator", optional = true }
-spin = "0.4"
+spin = "0.4.8"
 
 [dependencies.goblin]
-version = "0.0.10"
+version = "0.0.15"
 default-features = false
 features = ["elf32", "elf64"]
 
 [dependencies.x86]
-version = "0.7"
+version = "0.9.0"
 default-features = false
 
 [features]
8 changes: 4 additions & 4 deletions src/allocator/linked_list.rs
@@ -1,4 +1,4 @@
-use alloc::heap::{AllocErr, GlobalAlloc, Layout, Opaque};
+use core::alloc::{AllocErr, GlobalAlloc, Layout};
 use core::ptr::NonNull;
 use linked_list_allocator::Heap;
 use spin::Mutex;
@@ -16,7 +16,7 @@ impl Allocator {
 }
 
 unsafe impl GlobalAlloc for Allocator {
-    unsafe fn alloc(&self, layout: Layout) -> *mut Opaque {
+    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
         loop {
             let res = if let Some(ref mut heap) = *HEAP.lock() {
                 heap.allocate_first_fit(layout)
@@ -40,12 +40,12 @@ unsafe impl GlobalAlloc for Allocator {
                     panic!("__rust_allocate: heap not initialized");
                 }
             },
-            other => return other.ok().map_or(0 as *mut Opaque, |allocation| allocation.as_ptr()),
+            other => return other.ok().map_or(0 as *mut u8, |allocation| allocation.as_ptr()),
         }
     }
 
-    unsafe fn dealloc(&self, ptr: *mut Opaque, layout: Layout) {
+    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
         if let Some(ref mut heap) = *HEAP.lock() {
             heap.deallocate(NonNull::new_unchecked(ptr), layout)
         } else {
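Note: this file tracks the June 2018 nightly change that removed `alloc::heap::Opaque` and settled `GlobalAlloc` in `core::alloc` with plain `*mut u8` pointers. As a hedged illustration of the trait shape the kernel is adapting to, here is a minimal, self-contained bump allocator; the buffer size and bump logic are assumptions, not the kernel's allocator:

```rust
use core::alloc::{GlobalAlloc, Layout};

const HEAP_SIZE: usize = 64 * 1024;

static mut HEAP: [u8; HEAP_SIZE] = [0; HEAP_SIZE];
static mut NEXT: usize = 0;

struct Bump;

unsafe impl GlobalAlloc for Bump {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // Round the cursor up to the requested alignment.
        let start = (NEXT + layout.align() - 1) & !(layout.align() - 1);
        if start + layout.size() > HEAP_SIZE {
            return core::ptr::null_mut(); // null now signals failure, not Err
        }
        NEXT = start + layout.size();
        HEAP.as_mut_ptr().add(start)
    }

    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
        // A bump allocator never frees; a real allocator returns memory here.
    }
}
```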
2 changes: 1 addition & 1 deletion src/allocator/slab.rs
@@ -1,4 +1,4 @@
-use alloc::heap::{Alloc, AllocErr, Layout};
+use core::alloc::{Alloc, AllocErr, Layout};
 use spin::Mutex;
 use slab_allocator::Heap;
2 changes: 1 addition & 1 deletion src/arch/x86_64/device/cpu.rs
@@ -115,7 +115,7 @@ pub fn cpu_info<W: Write>(w: &mut W) -> Result {
     if info.has_rep_movsb_stosb() { write!(w, " erms")? };
     if info.has_invpcid() { write!(w, " invpcid")? };
     if info.has_rtm() { write!(w, " rtm")? };
-    if info.has_qm() { write!(w, " qm")? };
+    //if info.has_qm() { write!(w, " qm")? };
     if info.has_fpu_cs_ds_deprecated() { write!(w, " fpu_seg")? };
     if info.has_mpx() { write!(w, " mpx")? };
 }
4 changes: 2 additions & 2 deletions src/arch/x86_64/device/local_apic.rs
@@ -1,6 +1,6 @@
 use core::intrinsics::{volatile_load, volatile_store};
-use x86::cpuid::CpuId;
-use x86::msr::*;
+use x86::shared::cpuid::CpuId;
+use x86::shared::msr::*;
 
 use memory::Frame;
 use paging::{ActivePageTable, PhysicalAddress, Page, VirtualAddress};
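Note: the x86 crate reorganized its modules around 0.8/0.9, splitting portable items into `x86::shared` and width-specific ones into `x86::current`, which is what this and the following files adapt to. A small sketch of an MSR read under the new paths; `rdmsr` and `IA32_APIC_BASE` are assumed to be among the items pulled in by the glob import above:

```rust
use x86::shared::msr::{rdmsr, IA32_APIC_BASE};

// Read the local APIC base MSR and strip the low flag bits to recover the
// 4 KiB-aligned physical base. Ring 0 only; illustrative, not kernel code.
unsafe fn apic_base() -> u64 {
    rdmsr(IA32_APIC_BASE) & !0xFFF
}
```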
49 changes: 26 additions & 23 deletions src/arch/x86_64/gdt.rs
@@ -1,9 +1,12 @@
 //! Global descriptor table
 
 use core::mem;
-use x86::dtables::{self, DescriptorTablePointer};
-use x86::segmentation::{self, SegmentSelector};
-use x86::task::{self, TaskStateSegment};
+use x86::current::segmentation::set_cs;
+use x86::current::task::TaskStateSegment;
+use x86::shared::PrivilegeLevel;
+use x86::shared::dtables::{self, DescriptorTablePointer};
+use x86::shared::segmentation::{self, SegmentDescriptor, SegmentSelector};
+use x86::shared::task;
 
 pub const GDT_NULL: usize = 0;
 pub const GDT_KERNEL_CODE: usize = 1;
@@ -33,9 +36,9 @@ pub const GDT_F_PAGE_SIZE: u8 = 1 << 7;
 pub const GDT_F_PROTECTED_MODE: u8 = 1 << 6;
 pub const GDT_F_LONG_MODE: u8 = 1 << 5;
 
-static mut INIT_GDTR: DescriptorTablePointer = DescriptorTablePointer {
+static mut INIT_GDTR: DescriptorTablePointer<SegmentDescriptor> = DescriptorTablePointer {
     limit: 0,
-    base: 0
+    base: 0 as *const SegmentDescriptor
 };
 
 static mut INIT_GDT: [GdtEntry; 4] = [
@@ -50,9 +53,9 @@ static mut INIT_GDT: [GdtEntry; 4] = [
 ];
 
 #[thread_local]
-pub static mut GDTR: DescriptorTablePointer = DescriptorTablePointer {
+pub static mut GDTR: DescriptorTablePointer<SegmentDescriptor> = DescriptorTablePointer {
     limit: 0,
-    base: 0
+    base: 0 as *const SegmentDescriptor
 };
 
 #[thread_local]
@@ -105,18 +108,18 @@ pub unsafe fn init() {
     // Setup the initial GDT with TLS, so we can setup the TLS GDT (a little confusing)
     // This means that each CPU will have its own GDT, but we only need to define it once as a thread local
     INIT_GDTR.limit = (INIT_GDT.len() * mem::size_of::<GdtEntry>() - 1) as u16;
-    INIT_GDTR.base = INIT_GDT.as_ptr() as u64;
+    INIT_GDTR.base = INIT_GDT.as_ptr() as *const SegmentDescriptor;
 
     // Load the initial GDT, before we have access to thread locals
     dtables::lgdt(&INIT_GDTR);
 
     // Load the segment descriptors
-    segmentation::load_cs(SegmentSelector::new(GDT_KERNEL_CODE as u16));
-    segmentation::load_ds(SegmentSelector::new(GDT_KERNEL_DATA as u16));
-    segmentation::load_es(SegmentSelector::new(GDT_KERNEL_DATA as u16));
-    segmentation::load_fs(SegmentSelector::new(GDT_KERNEL_DATA as u16));
-    segmentation::load_gs(SegmentSelector::new(GDT_KERNEL_DATA as u16));
-    segmentation::load_ss(SegmentSelector::new(GDT_KERNEL_DATA as u16));
+    set_cs(SegmentSelector::new(GDT_KERNEL_CODE as u16, PrivilegeLevel::Ring0));
+    segmentation::load_ds(SegmentSelector::new(GDT_KERNEL_DATA as u16, PrivilegeLevel::Ring0));
+    segmentation::load_es(SegmentSelector::new(GDT_KERNEL_DATA as u16, PrivilegeLevel::Ring0));
+    segmentation::load_fs(SegmentSelector::new(GDT_KERNEL_DATA as u16, PrivilegeLevel::Ring0));
+    segmentation::load_gs(SegmentSelector::new(GDT_KERNEL_DATA as u16, PrivilegeLevel::Ring0));
+    segmentation::load_ss(SegmentSelector::new(GDT_KERNEL_DATA as u16, PrivilegeLevel::Ring0));
 }
 
 /// Initialize GDT with TLS
@@ -128,11 +131,11 @@ pub unsafe fn init_paging(tcb_offset: usize, stack_offset: usize) {
     dtables::lgdt(&INIT_GDTR);
 
     // Load the segment descriptors
-    segmentation::load_fs(SegmentSelector::new(GDT_KERNEL_TLS as u16));
+    segmentation::load_fs(SegmentSelector::new(GDT_KERNEL_TLS as u16, PrivilegeLevel::Ring0));
 
     // Now that we have access to thread locals, setup the AP's individual GDT
     GDTR.limit = (GDT.len() * mem::size_of::<GdtEntry>() - 1) as u16;
-    GDTR.base = GDT.as_ptr() as u64;
+    GDTR.base = GDT.as_ptr() as *const SegmentDescriptor;
 
     // Set the TLS segment to the offset of the Thread Control Block
     GDT[GDT_KERNEL_TLS].set_offset(tcb_offset as u32);
@@ -151,15 +154,15 @@ pub unsafe fn init_paging(tcb_offset: usize, stack_offset: usize) {
     dtables::lgdt(&GDTR);
 
     // Reload the segment descriptors
-    segmentation::load_cs(SegmentSelector::new(GDT_KERNEL_CODE as u16));
-    segmentation::load_ds(SegmentSelector::new(GDT_KERNEL_DATA as u16));
-    segmentation::load_es(SegmentSelector::new(GDT_KERNEL_DATA as u16));
-    segmentation::load_fs(SegmentSelector::new(GDT_KERNEL_TLS as u16));
-    segmentation::load_gs(SegmentSelector::new(GDT_KERNEL_DATA as u16));
-    segmentation::load_ss(SegmentSelector::new(GDT_KERNEL_DATA as u16));
+    set_cs(SegmentSelector::new(GDT_KERNEL_CODE as u16, PrivilegeLevel::Ring0));
+    segmentation::load_ds(SegmentSelector::new(GDT_KERNEL_DATA as u16, PrivilegeLevel::Ring0));
+    segmentation::load_es(SegmentSelector::new(GDT_KERNEL_DATA as u16, PrivilegeLevel::Ring0));
+    segmentation::load_fs(SegmentSelector::new(GDT_KERNEL_TLS as u16, PrivilegeLevel::Ring0));
+    segmentation::load_gs(SegmentSelector::new(GDT_KERNEL_DATA as u16, PrivilegeLevel::Ring0));
+    segmentation::load_ss(SegmentSelector::new(GDT_KERNEL_DATA as u16, PrivilegeLevel::Ring0));
 
     // Load the task register
-    task::load_ltr(SegmentSelector::new(GDT_TSS as u16));
+    task::load_tr(SegmentSelector::new(GDT_TSS as u16, PrivilegeLevel::Ring0));
 }
 
 #[derive(Copy, Clone, Debug)]
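Note: in x86 0.9, `SegmentSelector::new` takes an explicit `PrivilegeLevel`, and reloading CS goes through the width-specific `set_cs` instead of a `load_cs` free function. A minimal sketch of the new call shape; the selector indexes are illustrative, not the kernel's GDT layout:

```rust
use x86::current::segmentation::set_cs;
use x86::shared::PrivilegeLevel;
use x86::shared::segmentation::{self, SegmentSelector};

// Reload the code and data segments at ring 0.
unsafe fn load_kernel_segments() {
    let code = SegmentSelector::new(1, PrivilegeLevel::Ring0);
    let data = SegmentSelector::new(2, PrivilegeLevel::Ring0);
    set_cs(code);                // CS needs the dedicated reload path
    segmentation::load_ds(data); // the rest are plain segment moves
    segmentation::load_es(data);
    segmentation::load_ss(data);
}
```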
7 changes: 3 additions & 4 deletions src/arch/x86_64/graphical_debug/display.rs
@@ -1,5 +1,4 @@
-use alloc::allocator::{Alloc, Layout};
-use alloc::heap::Heap;
+use core::alloc::{Alloc, GlobalAlloc, Layout};
 use core::{cmp, slice};
 
 use super::FONT;
@@ -16,7 +15,7 @@ pub struct Display {
 impl Display {
     pub fn new(width: usize, height: usize, onscreen: usize) -> Display {
         let size = width * height;
-        let offscreen = unsafe { Heap.alloc(Layout::from_size_align_unchecked(size * 4, 4096)).unwrap() };
+        let offscreen = unsafe { ::ALLOCATOR.alloc(Layout::from_size_align_unchecked(size * 4, 4096)).unwrap() };
         unsafe { fast_set64(offscreen as *mut u64, 0, size/2) };
         Display {
             width: width,
@@ -145,6 +144,6 @@ impl Display {
 
 impl Drop for Display {
     fn drop(&mut self) {
-        unsafe { Heap.dealloc(self.offscreen.as_mut_ptr() as *mut u8, Layout::from_size_align_unchecked(self.offscreen.len() * 4, 4096)) };
+        unsafe { ::ALLOCATOR.dealloc(self.offscreen.as_mut_ptr() as *mut u8, Layout::from_size_align_unchecked(self.offscreen.len() * 4, 4096)) };
    }
 }
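Note: with `alloc::heap::Heap` gone, call sites invoke the crate's global allocator static through the `GlobalAlloc` trait and get back raw pointers. A hedged sketch of the pattern; the `ALLOCATOR` static is a stand-in for whatever the crate root actually declares with `#[global_allocator]`:

```rust
use core::alloc::{GlobalAlloc, Layout};

// Assumed to be declared at the crate root, e.g.:
//   #[global_allocator]
//   static ALLOCATOR: SomeAllocator = SomeAllocator;
unsafe fn alloc_page_aligned(bytes: usize) -> *mut u8 {
    let layout = Layout::from_size_align_unchecked(bytes, 4096);
    let ptr = ::ALLOCATOR.alloc(layout); // GlobalAlloc::alloc returns *mut u8
    assert!(!ptr.is_null(), "out of memory");
    ptr
}
```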
13 changes: 7 additions & 6 deletions src/arch/x86_64/idt.rs
@@ -1,16 +1,17 @@
 use core::mem;
-use x86::dtables::{self, DescriptorTablePointer};
+use x86::current::irq::IdtEntry as X86IdtEntry;
+use x86::shared::dtables::{self, DescriptorTablePointer};
 
 use interrupt::*;
 
-pub static mut INIT_IDTR: DescriptorTablePointer = DescriptorTablePointer {
+pub static mut INIT_IDTR: DescriptorTablePointer<X86IdtEntry> = DescriptorTablePointer {
     limit: 0,
-    base: 0
+    base: 0 as *const X86IdtEntry
 };
 
-pub static mut IDTR: DescriptorTablePointer = DescriptorTablePointer {
+pub static mut IDTR: DescriptorTablePointer<X86IdtEntry> = DescriptorTablePointer {
     limit: 0,
-    base: 0
+    base: 0 as *const X86IdtEntry
 };
 
 pub static mut IDT: [IdtEntry; 256] = [IdtEntry::new(); 256];
@@ -21,7 +22,7 @@ pub unsafe fn init() {
 
 pub unsafe fn init_paging() {
     IDTR.limit = (IDT.len() * mem::size_of::<IdtEntry>() - 1) as u16;
-    IDTR.base = IDT.as_ptr() as u64;
+    IDTR.base = IDT.as_ptr() as *const X86IdtEntry;
 
     // Set up exceptions
     IDT[0].set_func(exception::divide_by_zero);
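Note: `DescriptorTablePointer` is generic over the entry type in x86 0.9, so `base` is a typed pointer rather than a bare `u64`, here as in gdt.rs above. A minimal sketch of building and loading a typed IDT pointer; the entry slice is assumed to be supplied by the caller, and `lidt` is assumed to mirror the `lgdt` used earlier:

```rust
use core::mem;
use x86::current::irq::IdtEntry as X86IdtEntry;
use x86::shared::dtables::{self, DescriptorTablePointer};

// Build a typed descriptor-table pointer over a static IDT and load it.
// Sketch only: must run in ring 0 with valid, 'static entries.
unsafe fn load_idt(entries: &'static [X86IdtEntry]) {
    let idtr: DescriptorTablePointer<X86IdtEntry> = DescriptorTablePointer {
        limit: (entries.len() * mem::size_of::<X86IdtEntry>() - 1) as u16,
        base: entries.as_ptr(),
    };
    dtables::lidt(&idtr);
}
```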
15 changes: 5 additions & 10 deletions src/arch/x86_64/paging/mod.rs
@@ -3,7 +3,7 @@
 
 use core::{mem, ptr};
 use core::ops::{Deref, DerefMut};
-use x86::{msr, tlb};
+use x86::shared::{control_regs, msr, tlb};
 
 use memory::{allocate_frames, Frame};
 
@@ -283,15 +283,13 @@ impl ActivePageTable {
     }
 
     pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
-        use x86::controlregs;
-
         let old_table = InactivePageTable {
             p4_frame: Frame::containing_address(
-                PhysicalAddress::new(unsafe { controlregs::cr3() } as usize)
+                PhysicalAddress::new(unsafe { control_regs::cr3() } as usize)
             ),
         };
         unsafe {
-            controlregs::cr3_write(new_table.p4_frame.start_address().get() as u64);
+            control_regs::cr3_write(new_table.p4_frame.start_address().get() as u64);
        }
         old_table
     }
@@ -307,10 +305,8 @@ impl ActivePageTable {
     pub fn with<F>(&mut self, table: &mut InactivePageTable, temporary_page: &mut TemporaryPage, f: F)
         where F: FnOnce(&mut Mapper)
     {
-        use x86::controlregs;
-
         {
-            let backup = Frame::containing_address(PhysicalAddress::new(unsafe { controlregs::cr3() as usize }));
+            let backup = Frame::containing_address(PhysicalAddress::new(unsafe { control_regs::cr3() as usize }));
 
             // map temporary_page to current p4 table
             let p4_table = temporary_page.map_table_frame(backup.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE, self);
@@ -331,8 +327,7 @@ impl ActivePageTable {
     }
 
     pub unsafe fn address(&self) -> usize {
-        use x86::controlregs;
-        controlregs::cr3() as usize
+        control_regs::cr3() as usize
     }
 }
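Note: `x86::controlregs` became `x86::shared::control_regs`, letting the function-local imports collapse into the single module-level one. A short sketch of a CR3-based page-table switch with the new paths, assuming `cr3`/`cr3_write` keep the `u64` signatures implied by the casts above:

```rust
use x86::shared::control_regs;

// Swap the active PML4 and return the previous CR3 value.
// Sketch only: `new_p4` must be the physical address of a valid table.
unsafe fn switch_p4(new_p4: u64) -> u64 {
    let old = control_regs::cr3();
    control_regs::cr3_write(new_p4);
    old
}
```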
1 change: 0 additions & 1 deletion src/context/file.rs
@@ -5,7 +5,6 @@ use event;
 use spin::RwLock;
 use scheme::{self, SchemeId};
 use syscall::error::{Result, Error, EBADF};
-use scheme::FileHandle;
 
 /// A file description
 #[derive(Debug)]
5 changes: 2 additions & 3 deletions src/context/list.rs
@@ -1,8 +1,7 @@
-use alloc::allocator::{Alloc, Layout};
 use alloc::arc::Arc;
 use alloc::boxed::Box;
-use alloc::heap::Heap;
 use alloc::BTreeMap;
+use core::alloc::{Alloc, GlobalAlloc, Layout};
 use core::mem;
 use core::sync::atomic::Ordering;
 use paging;
@@ -67,7 +66,7 @@ impl ContextList {
         let context_lock = self.new_context()?;
         {
             let mut context = context_lock.write();
-            let mut fx = unsafe { Box::from_raw(Heap.alloc(Layout::from_size_align_unchecked(512, 16)).unwrap().as_ptr() as *mut [u8; 512]) };
+            let mut fx = unsafe { Box::from_raw(::ALLOCATOR.alloc(Layout::from_size_align_unchecked(512, 16)) as *mut [u8; 512]) };
             for b in fx.iter_mut() {
                 *b = 0;
             }
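Note: the 512-byte FX-save area is allocated raw, zeroed, then wrapped in `Box::from_raw` so it is freed through the allocator when dropped; the same pattern recurs in src/context/mod.rs and src/syscall/process.rs below. A hedged stand-alone sketch, again treating `ALLOCATOR` as the crate's `#[global_allocator]` static:

```rust
use alloc::boxed::Box;
use core::alloc::{GlobalAlloc, Layout};

// Allocate a zeroed FXSAVE area and take ownership as a Box so that
// dropping it returns the memory through the allocator.
unsafe fn new_fx_area() -> Box<[u8; 512]> {
    let layout = Layout::from_size_align_unchecked(512, 16); // FXSAVE: 512 bytes, 16-aligned
    let ptr = ::ALLOCATOR.alloc(layout) as *mut [u8; 512];
    core::ptr::write_bytes(ptr as *mut u8, 0, 512); // zero it, like the loop above
    Box::from_raw(ptr)
}
```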
5 changes: 2 additions & 3 deletions src/context/mod.rs
@@ -1,7 +1,6 @@
 //! Context management
-use alloc::allocator::{Alloc, Layout};
 use alloc::boxed::Box;
-use alloc::heap::Heap;
+use core::alloc::{Alloc, GlobalAlloc, Layout};
 use core::sync::atomic::Ordering;
 use spin::{Once, RwLock, RwLockReadGuard, RwLockWriteGuard};
 
@@ -49,7 +48,7 @@ pub fn init() {
     let mut contexts = contexts_mut();
     let context_lock = contexts.new_context().expect("could not initialize first context");
     let mut context = context_lock.write();
-    let mut fx = unsafe { Box::from_raw(Heap.alloc(Layout::from_size_align_unchecked(512, 16)).unwrap().as_ptr() as *mut [u8; 512]) };
+    let mut fx = unsafe { Box::from_raw(::ALLOCATOR.alloc(Layout::from_size_align_unchecked(512, 16)) as *mut [u8; 512]) };
     for b in fx.iter_mut() {
         *b = 0;
     }
1 change: 1 addition & 0 deletions src/lib.rs
@@ -27,6 +27,7 @@
 #![feature(lang_items)]
 #![feature(naked_functions)]
 #![feature(never_type)]
+#![feature(panic_implementation)]
 #![feature(ptr_internals)]
 #![feature(thread_local)]
 #![feature(unique)]
10 changes: 5 additions & 5 deletions src/panic.rs
@@ -1,18 +1,18 @@
 //! Intrinsics for panic handling
 
+use core::panic::PanicInfo;
+
 use interrupt;
 
 #[lang = "eh_personality"]
 #[no_mangle]
 pub extern "C" fn rust_eh_personality() {}
 
 /// Required to handle panics
-#[lang = "panic_fmt"]
+#[panic_implementation]
 #[no_mangle]
-pub extern "C" fn rust_begin_unwind(fmt: ::core::fmt::Arguments, file: &str, line: u32) -> ! {
-    println!("PANIC: {}", fmt);
-    println!("FILE: {}", file);
-    println!("LINE: {}", line);
+pub extern "C" fn rust_begin_unwind(info: &PanicInfo) -> ! {
+    println!("KERNEL PANIC: {}", info);
 
     unsafe { interrupt::stack_trace(); }
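Note: this nightly replaced the `panic_fmt` lang item with the `#[panic_implementation]` attribute, which hands the handler a single `&PanicInfo` bundling the message, file, and line; the attribute was later stabilized as `#[panic_handler]`. A minimal no_std sketch in the stabilized form:

```rust
#![no_std]

use core::panic::PanicInfo;

// Stabilized successor of #[panic_implementation]: one &PanicInfo argument
// replaces the old (fmt, file, line) triple of the panic_fmt lang item.
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
    let _ = info.location(); // file and line now travel inside PanicInfo
    loop {}
}
```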
5 changes: 2 additions & 3 deletions src/syscall/process.rs
@@ -1,8 +1,7 @@
-use alloc::allocator::{Alloc, Layout};
 use alloc::arc::Arc;
 use alloc::boxed::Box;
-use alloc::heap::Heap;
 use alloc::{BTreeMap, Vec};
+use core::alloc::{Alloc, GlobalAlloc, Layout};
 use core::{intrinsics, mem, str};
 use core::ops::DerefMut;
 use spin::Mutex;
@@ -113,7 +112,7 @@ pub fn clone(flags: usize, stack_base: usize) -> Result<ContextId> {
     arch = context.arch.clone();
 
     if let Some(ref fx) = context.kfx {
-        let mut new_fx = unsafe { Box::from_raw(Heap.alloc(Layout::from_size_align_unchecked(512, 16)).unwrap().as_ptr() as *mut [u8; 512]) };
+        let mut new_fx = unsafe { Box::from_raw(::ALLOCATOR.alloc(Layout::from_size_align_unchecked(512, 16)) as *mut [u8; 512]) };
         for (new_b, b) in new_fx.iter_mut().zip(fx.iter()) {
             *new_b = *b;
         }
