-
-
Notifications
You must be signed in to change notification settings - Fork 379
/
threadpool.rs
289 lines (238 loc) · 8.72 KB
/
threadpool.rs
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
//! A simple adaptive threadpool that returns a oneshot future.
use std::sync::Arc;
use crate::{OneShot, Result};
#[cfg(not(miri))]
mod queue {
use std::{
cell::RefCell,
collections::VecDeque,
sync::{
atomic::{AtomicBool, AtomicUsize, Ordering},
Once,
},
time::{Duration, Instant},
};
use parking_lot::{Condvar, Mutex};
use crate::{debug_delay, Lazy, OneShot};
thread_local! {
    // Set to `true` at the top of `Queue::perform_work`, so tasks spawned
    // from inside a worker thread can be detected (and executed inline)
    // by `spawn_to` to avoid queue-based self-deadlock.
    static WORKER: RefCell<bool> = RefCell::new(false);
}
// Whether the current thread is a threadpool worker thread.
fn is_worker() -> bool {
    WORKER.with(|w| *w.borrow())
}
// One dedicated queue (each serviced by its own long-lived thread, started
// in `spawn_to`) per kind of background work, so that slow work of one
// kind cannot starve the others.
pub(super) static BLOCKING_QUEUE: Lazy<Queue, fn() -> Queue> =
Lazy::new(Default::default);
pub(super) static IO_QUEUE: Lazy<Queue, fn() -> Queue> =
Lazy::new(Default::default);
pub(super) static SNAPSHOT_QUEUE: Lazy<Queue, fn() -> Queue> =
Lazy::new(Default::default);
pub(super) static TRUNCATE_QUEUE: Lazy<Queue, fn() -> Queue> =
Lazy::new(Default::default);
// A queued unit of work: a boxed one-shot closure.
type Work = Box<dyn FnOnce() + Send + 'static>;
/// Submit `work` to `queue`, returning a promise for its result.
///
/// On first use (from any queue) this lazily starts the four long-lived
/// worker threads. If the calling thread is itself a threadpool worker,
/// the task runs inline instead of being enqueued, which avoids the
/// deadlock where a worker blocks on the completion of a task that is
/// stuck behind it in its own queue and would never be scheduled.
pub(super) fn spawn_to<F, R>(work: F, queue: &'static Queue) -> OneShot<R>
where
    F: FnOnce() -> R + Send + 'static,
    R: Send + 'static + Sized,
{
    static START_THREADS: Once = Once::new();
    START_THREADS.call_once(|| {
        // Spawns one long-lived (non-temporary) worker servicing `target`.
        let start = |thread_name: &'static str,
                     target: &'static Queue,
                     elastic: bool,
                     failure: &str| {
            std::thread::Builder::new()
                .name(thread_name.into())
                .spawn(move || target.perform_work(elastic, false))
                .expect(failure);
        };
        start(
            "sled-io-thread",
            &*IO_QUEUE,
            true,
            "failed to spawn critical IO thread",
        );
        start(
            "sled-blocking-thread",
            &*BLOCKING_QUEUE,
            true,
            "failed to spawn critical blocking thread",
        );
        start(
            "sled-snapshot-thread",
            &*SNAPSHOT_QUEUE,
            true,
            "failed to spawn critical snapshot thread",
        );
        start(
            "sled-truncate-thread",
            &*TRUNCATE_QUEUE,
            false,
            "failed to spawn critical truncation thread",
        );
    });
    let (filler, promise) = OneShot::pair();
    let run = move || filler.fill(work());
    if is_worker() {
        // Execute inline on a worker thread — see the deadlock note above.
        run();
    } else {
        queue.send(Box::new(run));
    }
    promise
}
#[derive(Default)]
pub(super) struct Queue {
    // Signaled whenever new work is pushed onto the queue.
    cv: Condvar,
    // FIFO of pending tasks; the mutex paired with `cv`.
    mu: Mutex<VecDeque<Work>>,
    // Count of live "temporary" helper threads for this queue,
    // bounded by MAX_TEMPORARY_THREADS in `perform_work`.
    temporary_threads: AtomicUsize,
    // Guard flag set while a temporary-thread spawn is in flight,
    // preventing concurrent spawn attempts; cleared by the new thread.
    spawning: AtomicBool,
}
#[allow(unsafe_code)]
// NOTE(review): this impl looks redundant — every field of `Queue`
// (Condvar, Mutex<VecDeque<Box<dyn FnOnce() + Send>>>, atomics) is
// already `Send`, so the auto trait should apply without `unsafe`.
// Confirm and consider removing. As written it is sound because `Work`
// requires `Send` on the boxed closure.
unsafe impl Send for Queue {}
impl Queue {
    /// Block until a task is available or `duration` elapses.
    ///
    /// Returns the next task together with the number of tasks still
    /// queued behind it (used as a backlog signal by `perform_work`),
    /// or `None` if the timeout expired with an empty queue.
    fn recv_timeout(&self, duration: Duration) -> Option<(Work, usize)> {
        let mut queue = self.mu.lock();
        let cutoff = Instant::now() + duration;
        // Loop to tolerate spurious wakeups.
        while queue.is_empty() {
            let res = self.cv.wait_until(&mut queue, cutoff);
            if res.timed_out() {
                break;
            }
        }
        queue.pop_front().map(|w| (w, queue.len()))
    }

    /// Enqueue `work` and wake any waiting worker threads.
    /// Returns the queue length after insertion.
    fn send(&self, work: Work) -> usize {
        let mut queue = self.mu.lock();
        queue.push_back(work);
        let len = queue.len();
        // having held the mutex makes this linearized
        // with the notify below.
        drop(queue);
        self.cv.notify_all();
        len
    }

    /// Worker loop servicing this queue.
    ///
    /// Permanent threads (`temporary == false`) loop forever; temporary
    /// helpers exit after ~50 consecutive idle timeouts. When `elastic`
    /// is set and a backlog builds up, up to `MAX_TEMPORARY_THREADS`
    /// extra helpers are spawned to drain it.
    fn perform_work(&'static self, elastic: bool, temporary: bool) {
        const MAX_TEMPORARY_THREADS: usize = 16;
        // Mark this thread as a worker so `spawn_to` runs nested
        // submissions inline instead of enqueueing them.
        WORKER.with(|w| *w.borrow_mut() = true);
        // This thread is now running, so allow further spawn attempts.
        self.spawning.store(false, Ordering::SeqCst);
        let wait_limit = Duration::from_millis(100);
        let mut unemployed_loops = 0;
        while !temporary || unemployed_loops < 50 {
            // take on a bit of GC labor
            let guard = crate::pin();
            guard.flush();
            drop(guard);
            debug_delay();
            let task_opt = self.recv_timeout(wait_limit);
            if let Some((task, outstanding_work)) = task_opt {
                // execute the work sent to this thread
                (task)();
                // spin up some help if we're falling behind
                let temporary_threads =
                    self.temporary_threads.load(Ordering::Acquire);
                if elastic
                    && outstanding_work > 5
                    && temporary_threads < MAX_TEMPORARY_THREADS
                    && self
                        .spawning
                        .compare_exchange(
                            false,
                            true,
                            Ordering::SeqCst,
                            Ordering::SeqCst,
                        )
                        .is_ok()
                {
                    self.temporary_threads.fetch_add(1, Ordering::SeqCst);
                    let spawn_res = std::thread::Builder::new()
                        .name("sled-temporary-thread".into())
                        .spawn(move || self.perform_work(false, true));
                    if let Err(e) = spawn_res {
                        log::error!(
                            "failed to spin-up temporary work thread: {:?}",
                            e
                        );
                        self.temporary_threads
                            .fetch_sub(1, Ordering::SeqCst);
                        // BUGFIX: clear the spawn-in-progress flag on
                        // failure. It is normally cleared by the newly
                        // spawned thread, which never starts here; without
                        // this reset a single failed spawn would leave
                        // `spawning == true` forever and permanently
                        // disable elasticity for this queue.
                        self.spawning.store(false, Ordering::SeqCst);
                    }
                }
                unemployed_loops = 0;
            } else {
                unemployed_loops += 1;
            }
        }
        // Only temporary helpers can fall out of the loop above.
        assert!(temporary);
        self.temporary_threads.fetch_sub(1, Ordering::SeqCst);
    }
}
}
/// Spawn a function on the threadpool.
///
/// Work submitted here lands on the general-purpose blocking queue and
/// resolves the returned [`OneShot`] with the function's result.
pub fn spawn<F, R>(work: F) -> OneShot<R>
where
    F: FnOnce() -> R + Send + 'static,
    R: Send + 'static + Sized,
{
    let target = &queue::BLOCKING_QUEUE;
    spawn_to(work, target)
}
#[cfg(miri)]
mod queue {
    //! Synchronous polyfill used under Miri, which cannot run the real
    //! threadpool: every task is executed inline on the caller's thread.
    // Fixed: the module doc was previously an outer `///` comment
    // attached to the `use` statement (unused_doc_comments), and the
    // unused `Result` import has been dropped.
    use crate::OneShot;

    /// Execute `work` immediately and return an already-filled promise.
    pub(super) fn spawn_to<F, R>(work: F, _: &()) -> OneShot<R>
    where
        F: FnOnce() -> R + Send + 'static,
        R: Send + 'static,
    {
        // Polyfill for platforms other than those we explicitly trust to
        // perform threaded work on. Just execute a task without involving threads.
        let (promise_filler, promise) = OneShot::pair();
        promise_filler.fill((work)());
        promise
    }

    // Unit placeholders so the `&queue::*_QUEUE` call sites type-check
    // identically in both cfg variants.
    pub(super) const IO_QUEUE: () = ();
    pub(super) const BLOCKING_QUEUE: () = ();
    pub(super) const SNAPSHOT_QUEUE: () = ();
    pub(super) const TRUNCATE_QUEUE: () = ();
}
use queue::spawn_to;
pub fn truncate(config: crate::RunningConfig, at: u64) -> OneShot<Result<()>> {
spawn_to(
move || {
log::debug!("truncating file to length {}", at);
let ret: Result<()> = config
.file
.set_len(at)
.and_then(|_| config.file.sync_all())
.map_err(Into::into);
if let Err(e) = &ret {
config.set_global_error(*e);
}
ret
},
&queue::TRUNCATE_QUEUE,
)
}
/// Asynchronously write a fuzzy snapshot of the pagecache.
///
/// Runs on the dedicated snapshot queue; failures are logged and
/// recorded as the global error rather than returned.
pub fn take_fuzzy_snapshot(pc: crate::pagecache::PageCache) -> OneShot<()> {
    let task = move || match pc.take_fuzzy_snapshot() {
        Ok(_) => {}
        Err(e) => {
            log::error!("failed to write snapshot: {:?}", e);
            pc.log.iobufs.set_global_error(e);
        }
    };
    spawn_to(task, &queue::SNAPSHOT_QUEUE)
}
/// Asynchronously flush `iobuf` to the log on the dedicated IO queue.
///
/// Failures are logged and stored as the global error so that threads
/// waiting on the write observe it.
pub(crate) fn write_to_log(
    iobuf: Arc<crate::pagecache::iobuf::IoBuf>,
    iobufs: Arc<crate::pagecache::iobuf::IoBufs>,
) -> OneShot<()> {
    let task = move || {
        // Capture the lsn before `iobuf` is moved into the write call,
        // so it is still available for the error log line.
        let lsn = iobuf.lsn;
        match iobufs.write_to_log(iobuf) {
            Ok(_) => {}
            Err(e) => {
                log::error!(
                    "hit error while writing iobuf with lsn {}: {:?}",
                    lsn,
                    e
                );
                // store error before notifying so that waiting threads will see
                // it
                iobufs.set_global_error(e);
            }
        }
    };
    spawn_to(task, &queue::IO_QUEUE)
}