Auto merge of #76645 - fusion-engineering-forks:windows-lock, r=kennytm
 Small cleanups in Windows Mutex.

 - Move `held` into the boxed part, since the SRW lock implementation does not use this field. This makes the Mutex 50% smaller: one `usize` word instead of two (16 bytes down to 8 on 64-bit targets).
 - Use `Cell` instead of `UnsafeCell` for `held`, so that `.replace()` can be used; see the sketch after this list.
 - Add some comments.
 - Avoid creating multiple `&mut`s to the critical section object in `ReentrantMutex`.
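The `.replace()` mentioned in the second bullet swaps a new value into the `Cell` and returns the old one, so "was this already held?" becomes a single safe call. A minimal standalone sketch of that pattern (the `Lock` type here is a hypothetical stand-in, not the actual std internals):

    use std::cell::Cell;

    // Hypothetical stand-in for the boxed `Inner` state in the real patch.
    struct Lock {
        held: Cell<bool>,
    }

    impl Lock {
        // Returns true if we just became the holder, false if the lock
        // was already held (i.e. a recursive acquisition).
        fn try_enter(&self) -> bool {
            // replace(true) sets the flag and hands back the previous value
            // in one call; no unsafe access to the cell's interior needed.
            !self.held.replace(true)
        }

        fn exit(&self) {
            self.held.set(false);
        }
    }

    fn main() {
        let lock = Lock { held: Cell::new(false) };
        assert!(lock.try_enter());  // first acquisition succeeds
        assert!(!lock.try_enter()); // recursive acquisition is detected
        lock.exit();
    }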
bors committed Sep 17, 2020
2 parents 7bdb5de + 0bb96e7 commit f3c923a
Showing 2 changed files with 42 additions and 43 deletions.
1 change: 1 addition & 0 deletions library/std/src/lib.rs
@@ -315,6 +315,7 @@
 #![feature(try_reserve)]
 #![feature(unboxed_closures)]
 #![feature(unsafe_block_in_unsafe_fn)]
+#![feature(unsafe_cell_raw_get)]
 #![feature(untagged_unions)]
 #![feature(unwind_attributes)]
 #![feature(vec_into_raw_parts)]
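The new feature gate is for `UnsafeCell::raw_get`, which turns a `*const UnsafeCell<T>` into a `*mut T` to its contents without ever creating a `&` or `&mut` reference; the `ReentrantMutex` changes below rely on it. A small illustrative sketch, not part of the patch (the API was nightly-only at the time of this commit and has since been stabilized):

    use std::cell::UnsafeCell;

    fn main() {
        let cell = UnsafeCell::new(7_i32);
        // A raw pointer to the cell itself...
        let cell_ptr: *const UnsafeCell<i32> = &cell;
        // ...becomes a raw pointer to the value inside, with no
        // intermediate reference materialized.
        let value_ptr: *mut i32 = UnsafeCell::raw_get(cell_ptr);
        unsafe { *value_ptr = 8 };
        assert_eq!(cell.into_inner(), 8);
    }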
84 changes: 41 additions & 43 deletions library/std/src/sys/windows/mutex.rs
@@ -19,20 +19,25 @@
 //! CriticalSection is used and we keep track of who's holding the mutex to
 //! detect recursive locks.
 
-use crate::cell::UnsafeCell;
+use crate::cell::{Cell, UnsafeCell};
 use crate::mem::{self, MaybeUninit};
 use crate::sync::atomic::{AtomicUsize, Ordering};
 use crate::sys::c;
 use crate::sys::compat;
 
 pub struct Mutex {
+    // This is either directly an SRWLOCK (if supported), or a Box<Inner> otherwise.
     lock: AtomicUsize,
-    held: UnsafeCell<bool>,
 }
 
 unsafe impl Send for Mutex {}
 unsafe impl Sync for Mutex {}
 
+struct Inner {
+    remutex: ReentrantMutex,
+    held: Cell<bool>,
+}
+
 #[derive(Clone, Copy)]
 enum Kind {
     SRWLock = 1,
@@ -51,7 +56,6 @@ impl Mutex {
             // This works because SRWLOCK_INIT is 0 (wrapped in a struct), so we are also properly
             // initializing an SRWLOCK here.
             lock: AtomicUsize::new(0),
-            held: UnsafeCell::new(false),
         }
     }
     #[inline]
@@ -60,10 +64,11 @@ impl Mutex {
         match kind() {
             Kind::SRWLock => c::AcquireSRWLockExclusive(raw(self)),
             Kind::CriticalSection => {
-                let re = self.remutex();
-                (*re).lock();
-                if !self.flag_locked() {
-                    (*re).unlock();
+                let inner = &*self.inner();
+                inner.remutex.lock();
+                if inner.held.replace(true) {
+                    // It was already locked, so we got a recursive lock which we do not want.
+                    inner.remutex.unlock();
                     panic!("cannot recursively lock a mutex");
                 }
             }
@@ -73,62 +78,55 @@ impl Mutex {
         match kind() {
             Kind::SRWLock => c::TryAcquireSRWLockExclusive(raw(self)) != 0,
             Kind::CriticalSection => {
-                let re = self.remutex();
-                if !(*re).try_lock() {
+                let inner = &*self.inner();
+                if !inner.remutex.try_lock() {
                     false
-                } else if self.flag_locked() {
-                    true
-                } else {
-                    (*re).unlock();
+                } else if inner.held.replace(true) {
+                    // It was already locked, so we got a recursive lock which we do not want.
+                    inner.remutex.unlock();
                     false
+                } else {
+                    true
                 }
             }
         }
     }
     pub unsafe fn unlock(&self) {
-        *self.held.get() = false;
         match kind() {
             Kind::SRWLock => c::ReleaseSRWLockExclusive(raw(self)),
-            Kind::CriticalSection => (*self.remutex()).unlock(),
+            Kind::CriticalSection => {
+                let inner = &*(self.lock.load(Ordering::SeqCst) as *const Inner);
+                inner.held.set(false);
+                inner.remutex.unlock();
+            }
         }
     }
     pub unsafe fn destroy(&self) {
         match kind() {
             Kind::SRWLock => {}
             Kind::CriticalSection => match self.lock.load(Ordering::SeqCst) {
                 0 => {}
-                n => {
-                    Box::from_raw(n as *mut ReentrantMutex).destroy();
-                }
+                n => Box::from_raw(n as *mut Inner).remutex.destroy(),
             },
         }
     }
 
-    unsafe fn remutex(&self) -> *mut ReentrantMutex {
+    unsafe fn inner(&self) -> *const Inner {
         match self.lock.load(Ordering::SeqCst) {
             0 => {}
-            n => return n as *mut _,
+            n => return n as *const _,
         }
-        let re = box ReentrantMutex::uninitialized();
-        re.init();
-        let re = Box::into_raw(re);
-        match self.lock.compare_and_swap(0, re as usize, Ordering::SeqCst) {
-            0 => re,
+        let inner = box Inner { remutex: ReentrantMutex::uninitialized(), held: Cell::new(false) };
+        inner.remutex.init();
+        let inner = Box::into_raw(inner);
+        match self.lock.compare_and_swap(0, inner as usize, Ordering::SeqCst) {
+            0 => inner,
             n => {
-                Box::from_raw(re).destroy();
-                n as *mut _
+                Box::from_raw(inner).remutex.destroy();
+                n as *const _
             }
         }
     }
-
-    unsafe fn flag_locked(&self) -> bool {
-        if *self.held.get() {
-            false
-        } else {
-            *self.held.get() = true;
-            true
-        }
-    }
 }
 
 fn kind() -> Kind {
@@ -150,35 +148,35 @@ fn kind() -> Kind {
 }
 
 pub struct ReentrantMutex {
-    inner: UnsafeCell<MaybeUninit<c::CRITICAL_SECTION>>,
+    inner: MaybeUninit<UnsafeCell<c::CRITICAL_SECTION>>,
 }
 
 unsafe impl Send for ReentrantMutex {}
 unsafe impl Sync for ReentrantMutex {}
 
 impl ReentrantMutex {
     pub const fn uninitialized() -> ReentrantMutex {
-        ReentrantMutex { inner: UnsafeCell::new(MaybeUninit::uninit()) }
+        ReentrantMutex { inner: MaybeUninit::uninit() }
     }
 
     pub unsafe fn init(&self) {
-        c::InitializeCriticalSection((&mut *self.inner.get()).as_mut_ptr());
+        c::InitializeCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
     }
 
     pub unsafe fn lock(&self) {
-        c::EnterCriticalSection((&mut *self.inner.get()).as_mut_ptr());
+        c::EnterCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
     }
 
     #[inline]
     pub unsafe fn try_lock(&self) -> bool {
-        c::TryEnterCriticalSection((&mut *self.inner.get()).as_mut_ptr()) != 0
+        c::TryEnterCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr())) != 0
    }
 
     pub unsafe fn unlock(&self) {
-        c::LeaveCriticalSection((&mut *self.inner.get()).as_mut_ptr());
+        c::LeaveCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
     }
 
     pub unsafe fn destroy(&self) {
-        c::DeleteCriticalSection((&mut *self.inner.get()).as_mut_ptr());
+        c::DeleteCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
     }
 }
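The `inner()` method in the diff above is an instance of a common lazy-initialization pattern: a single atomic word is either 0 (uninitialized) or a pointer to boxed state, installed with a compare-and-swap so that the loser of an initialization race frees its allocation. A simplified sketch of that pattern, assuming the modern `compare_exchange` in place of the `compare_and_swap` used at the time (the `Lazy` type and its payload are illustrative only):

    use std::sync::atomic::{AtomicUsize, Ordering};

    struct Lazy {
        // 0 means uninitialized; any other value is a Box<u64> pointer.
        state: AtomicUsize,
    }

    impl Lazy {
        const fn new() -> Lazy {
            Lazy { state: AtomicUsize::new(0) }
        }

        fn get(&self) -> *const u64 {
            match self.state.load(Ordering::SeqCst) {
                0 => {}
                n => return n as *const u64,
            }
            // Allocate optimistically, then try to install the pointer.
            let new = Box::into_raw(Box::new(42_u64));
            match self.state.compare_exchange(0, new as usize, Ordering::SeqCst, Ordering::SeqCst) {
                // We won the race: our allocation is now the shared state.
                Ok(_) => new as *const u64,
                // Another thread beat us: free ours and use theirs.
                Err(existing) => {
                    unsafe { drop(Box::from_raw(new)) };
                    existing as *const u64
                }
            }
        }
    }

    fn main() {
        let lazy = Lazy::new();
        unsafe { assert_eq!(*lazy.get(), 42) };
    }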
