Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Use a single ReentrantMutex implementation on all platforms. #96042

Merged
merged 2 commits into from
Apr 18, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 0 additions & 36 deletions library/std/src/sys/hermit/mutex.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
use crate::cell::UnsafeCell;
use crate::collections::VecDeque;
use crate::ffi::c_void;
use crate::hint;
use crate::ops::{Deref, DerefMut, Drop};
use crate::ptr;
Expand Down Expand Up @@ -220,38 +219,3 @@ impl Mutex {
#[inline]
pub unsafe fn destroy(&self) {}
}

// Hermit's recursive (reentrant) mutex: a thin wrapper around the kernel's
// `recmutex_*` ABI. The opaque handle is created lazily by `init`.
pub struct ReentrantMutex {
// Opaque kernel mutex handle; null until `init` has been called.
inner: *const c_void,
}

impl ReentrantMutex {
// Construct without creating the kernel object. `init` must be called
// before any lock/unlock, otherwise the null handle is passed to the ABI.
pub const unsafe fn uninitialized() -> ReentrantMutex {
ReentrantMutex { inner: ptr::null() }
}

#[inline]
pub unsafe fn init(&self) {
// Writes the freshly created handle through a shared reference by
// casting away constness; the ABI result is deliberately ignored.
let _ = abi::recmutex_init(&self.inner as *const *const c_void as *mut _);
}

#[inline]
pub unsafe fn lock(&self) {
// Recursive acquire; errors from the ABI call are ignored.
let _ = abi::recmutex_lock(self.inner);
}

#[inline]
pub unsafe fn try_lock(&self) -> bool {
// NOTE(review): always reports success rather than querying the kernel —
// presumably acceptable for this platform's callers; confirm before reuse.
true
}

#[inline]
pub unsafe fn unlock(&self) {
// Release one level of the recursive lock; result ignored.
let _ = abi::recmutex_unlock(self.inner);
}

#[inline]
pub unsafe fn destroy(&self) {
// Free the kernel mutex object; result ignored.
let _ = abi::recmutex_destroy(self.inner);
}
}
93 changes: 0 additions & 93 deletions library/std/src/sys/itron/mutex.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@ use super::{
error::{expect_success, expect_success_aborting, fail, ItronError},
spin::SpinIdOnceCell,
};
use crate::cell::UnsafeCell;

pub struct Mutex {
/// The ID of the underlying mutex object
Expand Down Expand Up @@ -89,95 +88,3 @@ impl Drop for MutexGuard<'_> {
unsafe { self.0.unlock() };
}
}

// Reentrant mutex backed by an ITRON priority-inheritance kernel mutex.
// Recursion is tracked manually in `count`: the kernel reports `E_OBJ`
// when the calling task already owns the mutex, and we bump the counter
// instead of acquiring again.
// (The previous comment claiming these were "empty stubs" was inaccurate —
// this implementation performs real kernel calls.)
pub struct ReentrantMutex {
/// The ID of the underlying mutex object
mtx: abi::ID,
/// The lock count.
// Counts *extra* (recursive) acquisitions beyond the first; only read or
// written while the kernel mutex is held by the current task.
count: UnsafeCell<usize>,
}

// SAFETY: the kernel mutex serializes all access; `count` is only touched
// by the task that currently owns the mutex, so no data races are possible.
unsafe impl Send for ReentrantMutex {}
unsafe impl Sync for ReentrantMutex {}

impl ReentrantMutex {
// Construct without creating the kernel object; `init` must run first.
pub const unsafe fn uninitialized() -> ReentrantMutex {
ReentrantMutex { mtx: 0, count: UnsafeCell::new(0) }
}

// Create the kernel mutex. `expect_success` aborts on failure.
pub unsafe fn init(&mut self) {
self.mtx = expect_success(
unsafe {
abi::acre_mtx(&abi::T_CMTX {
// Priority inheritance mutex
mtxatr: abi::TA_INHERIT,
// Unused
ceilpri: 0,
})
},
&"acre_mtx",
);
}

// Block until the mutex is held, allowing recursive acquisition.
pub unsafe fn lock(&self) {
match unsafe { abi::loc_mtx(self.mtx) } {
abi::E_OBJ => {
// Recursive lock
unsafe {
let count = &mut *self.count.get();
if let Some(new_count) = count.checked_add(1) {
*count = new_count;
} else {
// counter overflow
rtabort!("lock count overflow");
}
}
}
er => {
// First (non-recursive) acquisition, or a hard error (aborts).
expect_success(er, &"loc_mtx");
}
}
}

// Release one level; only the outermost unlock releases the kernel mutex.
pub unsafe fn unlock(&self) {
unsafe {
let count = &mut *self.count.get();
if *count > 0 {
// Still recursively held: just drop one nesting level.
*count -= 1;
return;
}
}

// Outermost unlock: release the kernel mutex for real.
expect_success_aborting(unsafe { abi::unl_mtx(self.mtx) }, &"unl_mtx");
}

// Non-blocking acquire; returns whether the lock was obtained.
pub unsafe fn try_lock(&self) -> bool {
let er = unsafe { abi::ploc_mtx(self.mtx) };
if er == abi::E_OBJ {
// Recursive lock
unsafe {
let count = &mut *self.count.get();
if let Some(new_count) = count.checked_add(1) {
*count = new_count;
} else {
// counter overflow
rtabort!("lock count overflow");
}
}
true
} else if er == abi::E_TMOUT {
// Locked by another thread
false
} else {
expect_success(er, &"ploc_mtx");
// Top-level lock by the current thread
true
}
}

// Delete the kernel mutex object (aborts on failure).
pub unsafe fn destroy(&self) {
expect_success_aborting(unsafe { abi::del_mtx(self.mtx) }, &"del_mtx");
}
}
87 changes: 1 addition & 86 deletions library/std/src/sys/sgx/mutex.rs
Original file line number Diff line number Diff line change
@@ -1,8 +1,4 @@
use fortanix_sgx_abi::Tcs;

use super::abi::thread;

use super::waitqueue::{try_lock_or_false, NotifiedTcs, SpinMutex, WaitQueue, WaitVariable};
use super::waitqueue::{try_lock_or_false, SpinMutex, WaitQueue, WaitVariable};

pub struct Mutex {
inner: SpinMutex<WaitVariable<bool>>,
Expand Down Expand Up @@ -60,84 +56,3 @@ impl Mutex {
#[inline]
pub unsafe fn destroy(&self) {}
}

// Bookkeeping for the SGX reentrant mutex: which TCS (thread) currently
// owns the lock, and how many times that thread has acquired it.
struct ReentrantLock {
owner: Option<Tcs>,
count: usize,
}

// Reentrant mutex built on the enclave wait-queue primitives: a spin mutex
// guards the owner/count state, and contending threads park on the
// WaitQueue until ownership is handed to them directly.
pub struct ReentrantMutex {
inner: SpinMutex<WaitVariable<ReentrantLock>>,
}

impl ReentrantMutex {
// Usable immediately after construction; `init` is a no-op here.
pub const fn uninitialized() -> ReentrantMutex {
ReentrantMutex {
inner: SpinMutex::new(WaitVariable::new(ReentrantLock { owner: None, count: 0 })),
}
}

#[inline]
pub unsafe fn init(&self) {}

#[inline]
pub unsafe fn lock(&self) {
let mut guard = self.inner.lock();
match guard.lock_var().owner {
Some(tcs) if tcs != thread::current() => {
// Another thread has the lock, wait
WaitQueue::wait(guard, || {});
// Another thread has passed the lock to us
}
_ => {
// We are just now obtaining the lock
// (either unowned, or a recursive acquisition by this thread)
guard.lock_var_mut().owner = Some(thread::current());
guard.lock_var_mut().count += 1;
}
}
}

#[inline]
pub unsafe fn unlock(&self) {
let mut guard = self.inner.lock();
if guard.lock_var().count > 1 {
// Still recursively held: just drop one nesting level.
guard.lock_var_mut().count -= 1;
} else {
match WaitQueue::notify_one(guard) {
Err(mut guard) => {
// No other waiters, unlock
guard.lock_var_mut().count = 0;
guard.lock_var_mut().owner = None;
}
Ok(mut guard) => {
// There was a thread waiting, just pass the lock
if let NotifiedTcs::Single(tcs) = guard.notified_tcs() {
guard.lock_var_mut().owner = Some(tcs)
} else {
unreachable!() // called notify_one
}
}
}
}
}

#[inline]
pub unsafe fn try_lock(&self) -> bool {
// `try_lock_or_false!` returns false if the spin lock itself is busy.
let mut guard = try_lock_or_false!(self.inner);
match guard.lock_var().owner {
Some(tcs) if tcs != thread::current() => {
// Another thread has the lock
false
}
_ => {
// We are just now obtaining the lock
guard.lock_var_mut().owner = Some(thread::current());
guard.lock_var_mut().count += 1;
true
}
}
}

#[inline]
pub unsafe fn destroy(&self) {}
}
98 changes: 1 addition & 97 deletions library/std/src/sys/unix/locks/futex.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
use crate::cell::UnsafeCell;
use crate::sync::atomic::{
AtomicU32, AtomicUsize,
AtomicU32,
Ordering::{Acquire, Relaxed, Release},
};
use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all};
Expand Down Expand Up @@ -163,98 +162,3 @@ impl Condvar {
r
}
}

/// A reentrant mutex. Used by stdout().lock() and friends.
///
/// The 'owner' field tracks which thread has locked the mutex.
///
/// We use current_thread_unique_ptr() as the thread identifier,
/// which is just the address of a thread local variable.
///
/// If `owner` is set to the identifier of the current thread,
/// we assume the mutex is already locked and instead of locking it again,
/// we increment `lock_count`.
///
/// When unlocking, we decrement `lock_count`, and only unlock the mutex when
/// it reaches zero.
///
/// `lock_count` is protected by the mutex and only accessed by the thread that has
/// locked the mutex, so needs no synchronization.
///
/// `owner` can be checked by other threads that want to see if they already
/// hold the lock, so needs to be atomic. If it compares equal, we're on the
/// same thread that holds the mutex and memory access can use relaxed ordering
/// since we're not dealing with multiple threads. If it compares unequal,
/// synchronization is left to the mutex, making relaxed memory ordering for
/// the `owner` field fine in all cases.
pub struct ReentrantMutex {
mutex: Mutex,
// 0 means "unowned"; otherwise the owning thread's unique address.
owner: AtomicUsize,
// Nesting depth; see the type-level docs for why this is unsynchronized.
lock_count: UnsafeCell<u32>,
}

// SAFETY: `lock_count` is only accessed by the thread holding `mutex`
// (see the type-level docs), so sharing across threads is sound.
unsafe impl Send for ReentrantMutex {}
unsafe impl Sync for ReentrantMutex {}

impl ReentrantMutex {
#[inline]
pub const unsafe fn uninitialized() -> Self {
Self { mutex: Mutex::new(), owner: AtomicUsize::new(0), lock_count: UnsafeCell::new(0) }
}

#[inline]
pub unsafe fn init(&self) {}

#[inline]
pub unsafe fn destroy(&self) {}

// Non-blocking acquire: recursion succeeds immediately; otherwise defer
// to the inner mutex's try_lock.
pub unsafe fn try_lock(&self) -> bool {
let this_thread = current_thread_unique_ptr();
if self.owner.load(Relaxed) == this_thread {
self.increment_lock_count();
true
} else if self.mutex.try_lock() {
self.owner.store(this_thread, Relaxed);
debug_assert_eq!(*self.lock_count.get(), 0);
*self.lock_count.get() = 1;
true
} else {
false
}
}

// Blocking acquire; recursive acquisition only bumps the counter.
pub unsafe fn lock(&self) {
let this_thread = current_thread_unique_ptr();
if self.owner.load(Relaxed) == this_thread {
self.increment_lock_count();
} else {
self.mutex.lock();
self.owner.store(this_thread, Relaxed);
debug_assert_eq!(*self.lock_count.get(), 0);
*self.lock_count.get() = 1;
}
}

// Aborts (via expect) rather than wrapping if nesting exceeds u32::MAX.
unsafe fn increment_lock_count(&self) {
*self.lock_count.get() = (*self.lock_count.get())
.checked_add(1)
.expect("lock count overflow in reentrant mutex")
}

// Release one level; only the outermost unlock clears the owner and
// releases the inner mutex.
pub unsafe fn unlock(&self) {
*self.lock_count.get() -= 1;
if *self.lock_count.get() == 0 {
self.owner.store(0, Relaxed);
self.mutex.unlock();
}
}
}

/// Returns an address that is unique to the running thread.
///
/// The value is a non-null, usize-sized identifier: the address of a
/// thread-local variable, which necessarily differs between live threads.
pub fn current_thread_unique_ptr() -> usize {
// A non-drop type keeps the TLS slot usable even during thread destruction.
thread_local! { static X: u8 = const { 0 } }
X.with(|slot| {
let ptr: *const u8 = slot;
ptr.addr()
})
}
4 changes: 1 addition & 3 deletions library/std/src/sys/unix/locks/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,15 +5,13 @@ cfg_if::cfg_if! {
))] {
mod futex;
mod futex_rwlock;
pub use futex::{Mutex, MovableMutex, Condvar, MovableCondvar, ReentrantMutex};
pub use futex::{Mutex, MovableMutex, Condvar, MovableCondvar};
pub use futex_rwlock::{RwLock, MovableRwLock};
} else {
mod pthread_mutex;
mod pthread_remutex;
mod pthread_rwlock;
mod pthread_condvar;
pub use pthread_mutex::{Mutex, MovableMutex};
pub use pthread_remutex::ReentrantMutex;
pub use pthread_rwlock::{RwLock, MovableRwLock};
pub use pthread_condvar::{Condvar, MovableCondvar};
}
Expand Down
Loading