forked from enarx/enarx
/
gdt.rs
184 lines (148 loc) · 5.94 KB
/
gdt.rs
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
// SPDX-License-Identifier: Apache-2.0
//! Global Descriptor Table init
use crate::shim_stack::{init_stack_with_guard, GuardedStack};
use crate::syscall::_syscall_enter;
use core::ops::Deref;
use nbytes::bytes;
use spin::Lazy;
use x86_64::instructions::segmentation::{Segment, Segment64, CS, DS, ES, FS, GS, SS};
use x86_64::instructions::tables::load_tss;
use x86_64::registers::model_specific::{KernelGsBase, LStar, SFMask, Star};
use x86_64::registers::rflags::RFlags;
use x86_64::structures::gdt::{Descriptor, GlobalDescriptorTable, SegmentSelector};
use x86_64::structures::paging::{Page, PageTableFlags, Size2MiB, Size4KiB};
use x86_64::structures::tss::TaskStateSegment;
use x86_64::{align_up, VirtAddr};
/// The virtual address of the main kernel stack
pub const SHIM_STACK_START: u64 = 0xFFFF_FF48_4800_0000;

/// The size of the main kernel stack (2 MiB)
#[allow(clippy::integer_arithmetic)]
pub const SHIM_STACK_SIZE: u64 = bytes![2; MiB];

/// The virtual address where the exception kernel stacks begin
pub const SHIM_EX_STACK_START: u64 = 0xFFFF_FF48_F000_0000;

/// The size of each kernel stack used for exceptions
///
/// With the `gdb` feature the debug stub runs on these stacks and needs far
/// more room (2 MiB) than the plain exception path (32 KiB).
#[allow(clippy::integer_arithmetic)]
pub const SHIM_EX_STACK_SIZE: u64 = {
    if cfg!(feature = "gdb") {
        bytes![2; MiB]
    } else {
        bytes![32; KiB]
    }
};
/// Allocate the guarded main shim stack at [`SHIM_STACK_START`].
#[cfg_attr(coverage, no_coverage)]
fn lazy_initial_stack() -> GuardedStack {
    let stack_base = VirtAddr::new(SHIM_STACK_START);
    init_stack_with_guard(stack_base, SHIM_STACK_SIZE, PageTableFlags::empty())
}

/// The initial shim stack
pub static INITIAL_STACK: Lazy<GuardedStack> = Lazy::new(lazy_initial_stack);
/// Build the Task State Segment: ring-0 privilege stack plus the
/// interrupt stack table (IST) entries for exception handling.
#[cfg_attr(coverage, no_coverage)]
fn lazy_tss() -> TaskStateSegment {
    let mut tss = TaskStateSegment::new();
    tss.privilege_stack_table[0] = INITIAL_STACK.pointer;

    // The interrupt stack table is accessed through a raw pointer with
    // unaligned reads/writes to stay sound regardless of field alignment.
    let ptr_interrupt_stack_table = core::ptr::addr_of_mut!(tss.interrupt_stack_table);
    let mut interrupt_stack_table = unsafe { ptr_interrupt_stack_table.read_unaligned() };

    // Assign the stacks for the exceptions and interrupts.
    // FIX: this gate previously tested `feature = "dbg"`, which does not
    // exist — the size constant `SHIM_EX_STACK_SIZE` is gated on `gdb`.
    // With `gdb` enabled the old check still took the single-stack branch,
    // leaving IST slots 1..7 unassigned. It must match `feature = "gdb"`.
    if !cfg!(feature = "gdb") {
        // Only the vmm_communication_exception is needed
        let start = VirtAddr::new(SHIM_EX_STACK_START);
        let ptr = init_stack_with_guard(start, SHIM_EX_STACK_SIZE, PageTableFlags::empty()).pointer;
        interrupt_stack_table[0] = ptr;
    } else {
        // Allocate all for debug: every IST slot gets its own guarded stack.
        interrupt_stack_table
            .iter_mut()
            .enumerate()
            .for_each(|(idx, p)| {
                // Per-slot stride: stack size plus two guard pages, rounded
                // up to a 2 MiB boundary; checked arithmetic throughout.
                let offset: u64 = align_up(
                    SHIM_EX_STACK_SIZE
                        .checked_add(Page::<Size4KiB>::SIZE.checked_mul(2).unwrap())
                        .unwrap(),
                    Page::<Size2MiB>::SIZE,
                );
                let stack_offset = offset.checked_mul(idx as _).unwrap();
                let start = VirtAddr::new(SHIM_EX_STACK_START.checked_add(stack_offset).unwrap());
                *p = init_stack_with_guard(start, SHIM_EX_STACK_SIZE, PageTableFlags::empty())
                    .pointer;
            });
    }

    unsafe {
        ptr_interrupt_stack_table.write_unaligned(interrupt_stack_table);
    }

    tss
}

/// The global TSS
pub static TSS: Lazy<TaskStateSegment> = Lazy::new(lazy_tss);
/// The Selectors used in the GDT setup
///
/// The field order mirrors the order the descriptors are added to the GDT,
/// which is constrained by the `syscall`/`sysret` STAR MSR layout.
pub struct Selectors {
    /// shim (ring 0) code selector
    pub code: SegmentSelector,
    /// shim (ring 0) data selector
    pub data: SegmentSelector,
    /// userspace exec data selector
    pub user_data: SegmentSelector,
    /// userspace exec code selector
    pub user_code: SegmentSelector,
    /// TSS selector
    pub tss: SegmentSelector,
}
/// Construct the GDT together with the selectors of every entry added.
#[cfg_attr(coverage, no_coverage)]
fn lazy_gdt() -> (GlobalDescriptorTable, Selectors) {
    let mut table = GlobalDescriptorTable::new();

    // `syscall` loads segments from the STAR MSR assuming a data segment
    // directly follows `kernel_code_segment` — keep this ordering, or
    // Star::write() will panic later.
    let kernel_code = table.add_entry(Descriptor::kernel_code_segment());
    let kernel_data = table.add_entry(Descriptor::kernel_data_segment());

    // `sysret` likewise assumes `user_code_segment` directly follows
    // `user_data_segment`; Star::write() enforces this too.
    let ring3_data = table.add_entry(Descriptor::user_data_segment());
    let ring3_code = table.add_entry(Descriptor::user_code_segment());

    // Important: TSS.deref() != &TSS because of lazy_static
    let tss_selector = table.add_entry(Descriptor::tss_segment(TSS.deref()));

    (
        table,
        Selectors {
            code: kernel_code,
            data: kernel_data,
            user_data: ring3_data,
            user_code: ring3_code,
            tss: tss_selector,
        },
    )
}

/// The global GDT
pub static GDT: Lazy<(GlobalDescriptorTable, Selectors)> = Lazy::new(lazy_gdt);
/// Initialize the GDT
///
/// Loads the GDT, programs the segment registers and the TSS, and sets up
/// the MSRs (STAR/LSTAR/SFMASK/KernelGsBase) that the `syscall` instruction
/// relies on. Statement order matters throughout.
///
/// # Safety
///
/// `unsafe` because the caller has to ensure it is only called once
/// and in a single-threaded context.
#[cfg_attr(coverage, no_coverage)]
pub unsafe fn init() {
    #[cfg(debug_assertions)]
    crate::eprintln!("init_gdt");

    GDT.0.load();

    // Setup the segment registers with the corresponding selectors
    CS::set_reg(GDT.1.code);
    SS::set_reg(GDT.1.data);
    load_tss(GDT.1.tss);

    // Clear the other segment registers.
    // NOTE(review): SS was loaded with the kernel data selector just above
    // and is nulled again here; a null SS is legal in 64-bit ring 0 —
    // confirm whether the initial SS load is actually required.
    SS::set_reg(SegmentSelector(0));
    DS::set_reg(SegmentSelector(0));
    ES::set_reg(SegmentSelector(0));
    FS::set_reg(SegmentSelector(0));
    GS::set_reg(SegmentSelector(0));

    // Program STAR with the selectors to be loaded on `syscall`/`sysret`;
    // panics if the GDT entry ordering contract is violated.
    Star::write(GDT.1.user_code, GDT.1.user_data, GDT.1.code, GDT.1.data).unwrap();

    // Set the pointer to the function to be called when userspace uses `syscall`
    LStar::write(VirtAddr::new(_syscall_enter as usize as u64));

    // SFMASK: flags set here are *cleared* in RFLAGS on `syscall` entry, so
    // interrupts and single-step traps are off while entering the kernel.
    SFMask::write(RFlags::INTERRUPT_FLAG | RFlags::TRAP_FLAG);

    // Set the kernel gs base to the TSS to be used in `_syscall_enter`
    // Important: TSS.deref() != &TSS because of lazy_static
    let base = VirtAddr::new(TSS.deref() as *const _ as u64);
    KernelGsBase::write(base);
    GS::write_base(base);
}