Fix Stacked Borrows UB (and format the code) (#136)
* Fixed stacked borrows UB

* fmt
zesterer committed Feb 7, 2023
1 parent 7cd5133 commit 14ecef6
Showing 9 changed files with 258 additions and 164 deletions.
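
For context on the commit title: Stacked Borrows is the experimental aliasing model that Miri enforces for `unsafe` Rust, and running the test suite under `cargo +nightly miri test` is the usual way such UB is detected. The sketch below is purely illustrative (it is not code from this commit) and shows the kind of raw-pointer/reference misuse that Stacked Borrows rejects:

```rust
// Illustrative only: a classic Stacked Borrows violation, unrelated to the
// exact pattern fixed in this commit.
fn main() {
    let mut value = 0u32;
    let raw = &mut value as *mut u32; // raw pointer derived from `value`

    let reborrow = unsafe { &mut *raw }; // unique reference reborrowed from `raw`
    unsafe { *raw += 1 }; // using `raw` again invalidates `reborrow`'s borrow...

    // ...so this later use of `reborrow` is UB under Stacked Borrows. The
    // program may appear to work when run normally, but Miri reports an error.
    *reborrow += 1;
}
```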
16 changes: 11 additions & 5 deletions src/barrier.rs
@@ -115,8 +115,7 @@ impl<R: RelaxStrategy> Barrier<R> {
// not the leader
let local_gen = lock.generation_id;

- while local_gen == lock.generation_id &&
- lock.count < self.num_threads {
+ while local_gen == lock.generation_id && lock.count < self.num_threads {
drop(lock);
R::relax();
lock = self.lock.lock();
@@ -176,7 +175,9 @@ impl BarrierWaitResult {
/// let barrier_wait_result = barrier.wait();
/// println!("{:?}", barrier_wait_result.is_leader());
/// ```
- pub fn is_leader(&self) -> bool { self.0 }
+ pub fn is_leader(&self) -> bool {
+ self.0
+ }
}

#[cfg(test)]
@@ -192,12 +193,13 @@ mod tests {
fn use_barrier(n: usize, barrier: Arc<Barrier>) {
let (tx, rx) = channel();

+ let mut ts = Vec::new();
for _ in 0..n - 1 {
let c = barrier.clone();
let tx = tx.clone();
- thread::spawn(move|| {
+ ts.push(thread::spawn(move || {
tx.send(c.wait().is_leader()).unwrap();
- });
+ }));
}

// At this point, all spawned threads should be blocked,
@@ -217,6 +219,10 @@
}
}
assert!(leader_found);

+ for t in ts {
+ t.join().unwrap();
+ }
}

#[test]
12 changes: 9 additions & 3 deletions src/lazy.rs
@@ -3,8 +3,8 @@
//! Implementation adapted from the `SyncLazy` type of the standard library. See:
//! <https://doc.rust-lang.org/std/lazy/struct.SyncLazy.html>

- use core::{cell::Cell, fmt, ops::Deref};
use crate::{once::Once, RelaxStrategy, Spin};
+ use core::{cell::Cell, fmt, ops::Deref};

/// A value which is initialized on the first access.
///
@@ -45,7 +45,10 @@ pub struct Lazy<T, F = fn() -> T, R = Spin> {

impl<T: fmt::Debug, F, R> fmt::Debug for Lazy<T, F, R> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Lazy").field("cell", &self.cell).field("init", &"..").finish()
f.debug_struct("Lazy")
.field("cell", &self.cell)
.field("init", &"..")
.finish()
}
}

@@ -61,7 +64,10 @@ impl<T, F, R> Lazy<T, F, R> {
/// Creates a new lazy value with the given initializing
/// function.
pub const fn new(f: F) -> Self {
- Self { cell: Once::new(), init: Cell::new(Some(f)) }
+ Self {
+ cell: Once::new(),
+ init: Cell::new(Some(f)),
+ }
}
/// Retrieves a mutable pointer to the inner data.
///
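
The `lazy.rs` hunks above are formatting-only; for readers unfamiliar with the type they touch, here is a minimal usage sketch of `Lazy` (illustrative, not part of this diff), based on its doc comment "a value which is initialized on the first access":

```rust
use spin::Lazy;

// The closure runs at most once, on first access; later accesses reuse the stored value.
static NUMBERS: Lazy<Vec<u32>> = Lazy::new(|| vec![1, 2, 3]);

fn main() {
    // Dereferencing performs (or skips, if already done) the one-time initialization.
    assert_eq!(NUMBERS.len(), 3);
}
```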
20 changes: 10 additions & 10 deletions src/lib.rs
@@ -54,7 +54,7 @@
//! - `lock_api` enables support for [`lock_api`](https://crates.io/crates/lock_api)
//!
//! - `ticket_mutex` uses a ticket lock for the implementation of `Mutex`
- //!
+ //!
//! - `fair_mutex` enables a fairer implementation of `Mutex` that uses eventual fairness to avoid
//! starvation
//!
@@ -66,10 +66,10 @@ extern crate core;
#[cfg(feature = "portable_atomic")]
extern crate portable_atomic;

#[cfg(feature = "portable_atomic")]
use portable_atomic as atomic;
#[cfg(not(feature = "portable_atomic"))]
use core::sync::atomic;
#[cfg(feature = "portable_atomic")]
use portable_atomic as atomic;

#[cfg(feature = "barrier")]
#[cfg_attr(docsrs, doc(cfg(feature = "barrier")))]
@@ -83,21 +83,21 @@ pub mod mutex;
#[cfg(feature = "once")]
#[cfg_attr(docsrs, doc(cfg(feature = "once")))]
pub mod once;
+ pub mod relax;
#[cfg(feature = "rwlock")]
#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
pub mod rwlock;
- pub mod relax;

#[cfg(feature = "mutex")]
#[cfg_attr(docsrs, doc(cfg(feature = "mutex")))]
pub use mutex::MutexGuard;
#[cfg(feature = "rwlock")]
#[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
pub use rwlock::RwLockReadGuard;
pub use relax::{Spin, RelaxStrategy};
#[cfg(feature = "std")]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
pub use relax::Yield;
+ pub use relax::{RelaxStrategy, Spin};
+ #[cfg(feature = "rwlock")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "rwlock")))]
+ pub use rwlock::RwLockReadGuard;

// Avoid confusing inference errors by aliasing away the relax strategy parameter. Users that need to use a different
// relax strategy can do so by accessing the types through their fully-qualified path. This is a little bit horrible
@@ -198,7 +198,7 @@ pub mod lock_api {

/// In the event of an invalid operation, it's best to abort the current process.
#[cfg(feature = "fair_mutex")]
- fn abort() -> !{
+ fn abort() -> ! {
#[cfg(not(feature = "std"))]
{
// Panicking while panicking is defined by Rust to result in an abort.
@@ -218,4 +218,4 @@ fn abort() -> !{
{
std::process::abort();
}
- }
+ }
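
The `abort` hunks above only adjust formatting, but the comment they carry ("Panicking while panicking is defined by Rust to result in an abort") refers to a well-known `no_std` trick; a hedged sketch of that pattern (not necessarily the crate's exact implementation) is:

```rust
// Sketch: abort without `std` by panicking during unwinding. A panic raised
// from a destructor that runs while another panic is unwinding aborts the process.
#[cfg(not(feature = "std"))]
fn abort() -> ! {
    struct DoublePanic;

    impl Drop for DoublePanic {
        fn drop(&mut self) {
            panic!("second panic while unwinding forces an abort");
        }
    }

    let _guard = DoublePanic;
    panic!("first panic");
}
```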
13 changes: 11 additions & 2 deletions src/mutex.rs
@@ -34,11 +34,11 @@ pub mod fair;
#[cfg_attr(docsrs, doc(cfg(feature = "fair_mutex")))]
pub use self::fair::{FairMutex, FairMutexGuard, Starvation};

+ use crate::{RelaxStrategy, Spin};
use core::{
fmt,
ops::{Deref, DerefMut},
};
- use crate::{RelaxStrategy, Spin};

#[cfg(all(not(feature = "spin_mutex"), not(feature = "use_ticket_mutex")))]
compile_error!("The `mutex` feature flag was used (perhaps through another feature?) without either `spin_mutex` or `use_ticket_mutex`. One of these is required.");
@@ -85,9 +85,11 @@ type InnerMutexGuard<'a, T> = self::ticket::TicketMutexGuard<'a, T>;
/// // We use a barrier to ensure the readout happens after all writing
/// let barrier = Arc::new(Barrier::new(thread_count + 1));
///
+ /// # let mut ts = Vec::new();
/// for _ in (0..thread_count) {
/// let my_barrier = barrier.clone();
/// let my_lock = spin_mutex.clone();
+ /// # let t =
/// std::thread::spawn(move || {
/// let mut guard = my_lock.lock();
/// *guard += 1;
@@ -96,12 +98,17 @@ type InnerMutexGuard<'a, T> = self::ticket::TicketMutexGuard<'a, T>;
/// drop(guard);
/// my_barrier.wait();
/// });
+ /// # ts.push(t);
/// }
///
/// barrier.wait();
///
/// let answer = { *spin_mutex.lock() };
/// assert_eq!(answer, thread_count);
+ ///
+ /// # for t in ts {
+ /// # t.join().unwrap();
+ /// # }
/// ```
pub struct Mutex<T: ?Sized, R = Spin> {
inner: InnerMutex<T, R>,
@@ -139,7 +146,9 @@ impl<T, R> Mutex<T, R> {
/// ```
#[inline(always)]
pub const fn new(value: T) -> Self {
- Self { inner: InnerMutex::new(value) }
+ Self {
+ inner: InnerMutex::new(value),
+ }
}

/// Consumes this [`Mutex`] and unwraps the underlying data.
