sync.rs
use core::time::Duration;

use alloc::{collections::BTreeMap, vec::Vec};
use twizzler_abi::syscall::{
    ThreadSync, ThreadSyncError, ThreadSyncReference, ThreadSyncSleep, ThreadSyncWake,
};
use x86_64::VirtAddr;

use crate::{
    obj::{LookupFlags, ObjectRef},
    once::Once,
    spinlock::Spinlock,
    thread::{current_memory_context, current_thread_ref, CriticalGuard, ThreadRef, ThreadState},
};
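
// Threads that have finished a sync-sleep (for example, woken by the timeout
// callback below) are parked on this list until requeue_all() hands them back
// to the scheduler.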
struct Requeue {
    list: Spinlock<BTreeMap<u64, ThreadRef>>,
}

/* TODO: make this thread-local */
static mut REQUEUE: Once<Requeue> = Once::new();

fn get_requeue_list() -> &'static Requeue {
    unsafe {
        REQUEUE.call_once(|| Requeue {
            list: Spinlock::new(BTreeMap::new()),
        })
    }
}
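
// Drain the requeue list and reschedule every thread that reports its
// sync-sleep as done (reset_sync_sleep_done() returns true).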
pub fn requeue_all() {
    let requeue = get_requeue_list();
    let mut list = requeue.list.lock();
    for (_, thread) in list.drain_filter(|_, v| v.reset_sync_sleep_done()) {
        crate::sched::schedule_thread(thread);
    }
}
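
// Park a thread on the requeue list; it will be picked up by the next requeue_all().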
pub fn add_to_requeue(thread: ThreadRef) {
    let requeue = get_requeue_list();
    requeue.list.lock().insert(thread.id(), thread);
}

// TODO: this is gross, we're manually trading out a critical guard with an interrupt guard because
// we don't want to get interrupted... we need a better way to do this kind of consumable "don't
// schedule until I say so".
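// Block the current thread: mark it Blocked, release the critical guard with
// interrupts disabled, yield to the scheduler, and mark it Running again once
// it is eventually rescheduled and control returns here.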
fn finish_blocking(guard: CriticalGuard) {
    let thread = current_thread_ref().unwrap();
    crate::interrupt::with_disabled(|| {
        thread.set_state(ThreadState::Blocked);
        drop(guard);
        crate::sched::schedule(false);
        thread.set_state(ThreadState::Running);
    });
}
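
// Resolve a virtual address to the object mapped at that address in the calling
// thread's memory context, plus the offset of the address within that object.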
fn get_obj_and_offset(addr: VirtAddr) -> Result<(ObjectRef, usize), ThreadSyncError> {
    // let t = current_thread_ref().unwrap();
    let vmc = current_memory_context().ok_or(ThreadSyncError::Unknown)?;
    let mapping = { vmc.inner().lookup_object(addr) }.ok_or(ThreadSyncError::InvalidReference)?;
    let offset = (addr.as_u64() as usize) % (1024 * 1024 * 1024); //TODO: arch-dep, centralize these calculations somewhere, see PageNumber
    Ok((mapping.obj.clone(), offset))
}
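
// Turn a ThreadSyncReference (an explicit object ID + offset, or a virtual
// address) into an (object, offset) pair.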
fn get_obj(reference: ThreadSyncReference) -> Result<(ObjectRef, usize), ThreadSyncError> {
    Ok(match reference {
        ThreadSyncReference::ObjectRef(id, offset) => {
            let obj = match crate::obj::lookup_object(id, LookupFlags::empty()) {
                crate::obj::LookupResult::Found(o) => o,
                _ => return Err(ThreadSyncError::InvalidReference),
            };
            (obj, offset)
        }
        ThreadSyncReference::Virtual(addr) => get_obj_and_offset(VirtAddr::new(addr as u64))?,
        ThreadSyncReference::Virtual32(addr) => get_obj_and_offset(VirtAddr::new(addr as u64))?,
    })
}
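
// Bookkeeping for one prepared sleep op: the object and offset of the sleep
// word, and whether the thread was actually queued to sleep on it.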
struct SleepEvent {
    obj: ObjectRef,
    offset: usize,
    did_sleep: bool,
}
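
// Prepare a single sleep op: resolve the reference and set up the object's sleep
// word for this thread. The returned did_sleep flag records whether the thread
// was queued (the condition still calls for sleeping) or the op can complete
// immediately.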
fn prep_sleep(sleep: &ThreadSyncSleep, first_sleep: bool) -> Result<SleepEvent, ThreadSyncError> {
    let (obj, offset) = get_obj(sleep.reference)?;
    /*
    logln!(
        "{} sleep {} {:x}",
        current_thread_ref().unwrap().id(),
        obj.id(),
        offset
    );
    if let ThreadSyncReference::Virtual(p) = &sleep.reference {
        logln!(" => {:p} {}", *p, unsafe {
            (**p).load(core::sync::atomic::Ordering::SeqCst)
        });
    }
    */
    let did_sleep = obj.setup_sleep_word(offset, sleep.op, sleep.value, first_sleep);
    Ok(SleepEvent {
        obj,
        offset,
        did_sleep,
    })
}
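
// Undo a prepared sleep by removing the thread from the object's sleep word;
// called for every prepared sleep once sys_thread_sync is done with it.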
fn undo_sleep(sleep: SleepEvent) {
    sleep.obj.remove_from_sleep_word(sleep.offset);
}
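
// Perform a wake op: wake up to wake.count threads sleeping on the referenced
// word, returning the count reported by the object's wakeup_word().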
fn wakeup(wake: &ThreadSyncWake) -> Result<usize, ThreadSyncError> {
    let (obj, offset) = get_obj(wake.reference)?;
    Ok(obj.wakeup_word(offset, wake.count))
}
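
// Timeout callback registered with the clock subsystem: if the thread is still
// in a sync-sleep, move it to the requeue list, then flush the list so it runs.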
fn thread_sync_cb_timeout(thread: ThreadRef) {
    if thread.reset_sync_sleep() {
        add_to_requeue(thread);
    }
    requeue_all();
}
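
// A ThreadSync call with no ops but a timeout is just a timed sleep: register
// the timeout callback and block until it fires.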
fn simple_timed_sleep(timeout: &&mut Duration) {
    let thread = current_thread_ref().unwrap();
    let guard = thread.enter_critical();
    thread.set_sync_sleep();
    crate::clock::register_timeout_callback(
        // TODO: fix all our time types
        timeout.as_nanos() as u64,
        thread_sync_cb_timeout,
        thread.clone(),
    );
    thread.set_sync_sleep_done();
    requeue_all();
    finish_blocking(guard);
}

// TODO: #42 on timeout, try to return Err(Timeout).
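// The thread-sync syscall: prepare every sleep op and perform every wake op,
// recording a per-op result, then block if any sleep was actually queued.
// Returns the number of ops that were immediately ready.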
pub fn sys_thread_sync(
    ops: &mut [ThreadSync],
    timeout: Option<&mut Duration>,
) -> Result<usize, ThreadSyncError> {
    if let Some(ref timeout) = timeout {
        if ops.is_empty() {
            simple_timed_sleep(timeout);
            return Ok(0);
        }
    }
    let mut ready_count = 0;
    let mut unsleeps = Vec::new();
    for op in ops {
        match op {
            ThreadSync::Sleep(sleep, result) => match prep_sleep(sleep, unsleeps.is_empty()) {
                Ok(se) => {
                    *result = Ok(if se.did_sleep { 0 } else { 1 });
                    if se.did_sleep {
                        unsleeps.push(se);
                    } else {
                        ready_count += 1;
                    }
                }
                Err(x) => *result = Err(x),
            },
            ThreadSync::Wake(wake, result) => {
                /*
                if let ThreadSyncReference::Virtual(p) = &wake.reference {
                    logln!(" wake => {:p} {}", *p, unsafe {
                        (**p).load(core::sync::atomic::Ordering::SeqCst)
                    });
                }
                */
                match wakeup(wake) {
                    Ok(count) => {
                        *result = Ok(count);
                        if count > 0 {
                            ready_count += 1;
                        }
                    }
                    Err(x) => {
                        *result = Err(x);
                    }
                }
            }
        }
    }
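
    // All ops have been prepared or performed; decide whether to block. The
    // timeout is armed only if at least one sleep was actually queued, the
    // requeue list is flushed, and then we either block or just drop the guard.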
    let thread = current_thread_ref().unwrap();
    {
        let guard = thread.enter_critical();
        if !unsleeps.is_empty() {
            if let Some(timeout) = timeout {
                crate::clock::register_timeout_callback(
                    // TODO: fix all our time types
                    timeout.as_nanos() as u64,
                    thread_sync_cb_timeout,
                    thread.clone(),
                );
            }
            thread.set_sync_sleep_done();
        }
        requeue_all();
        if !unsleeps.is_empty() {
            finish_blocking(guard);
        } else {
            drop(guard);
        }
    }
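
    // Whether or not we actually blocked, pull this thread off every sleep word
    // it was queued on before returning.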
    for op in unsleeps {
        undo_sleep(op);
    }
    Ok(ready_count)
}