diff --git a/conc/src/atomic.rs b/conc/src/atomic.rs
index f299daa..a4fcaa7 100644
--- a/conc/src/atomic.rs
+++ b/conc/src/atomic.rs
@@ -31,11 +31,27 @@ impl<T> Atomic<T> {
     }
 
     /// Get an immutable reference to the underlying `std::sync::AtomicPtr`.
+    ///
+    /// There is no overhead in this.
+    ///
+    /// # Safety
+    ///
+    /// This is unsafe as you can easily invalidate the invariants. When using it, you must
+    /// ensure that, if you drop, there are no existing readers/hazards of the `Atomic`, and
+    /// that, if you mutate, the value you change to is valid.
     pub unsafe fn get_inner(&self) -> &AtomicPtr<T> {
         &self.inner
     }
 
     /// Get a mutable reference to the underlying `std::sync::AtomicPtr`.
+    ///
+    /// There is no overhead in this.
+    ///
+    /// # Safety
+    ///
+    /// This is unsafe as you can easily invalidate the invariants. When using it, you must
+    /// ensure that, if you drop, there are no existing readers/hazards of the `Atomic`, and
+    /// that, if you mutate, the value you change to is valid.
     pub unsafe fn get_inner_mut(&mut self) -> &mut AtomicPtr<T> {
         &mut self.inner
     }
diff --git a/conc/src/sync/treiber.rs b/conc/src/sync/treiber.rs
index dd72c4d..6e7112f 100644
--- a/conc/src/sync/treiber.rs
+++ b/conc/src/sync/treiber.rs
@@ -17,14 +17,6 @@ pub struct Treiber<T> {
     head: Atomic<Node<T>>,
 }
 
-/// A node in the stack.
-struct Node<T> {
-    /// The data this node holds.
-    data: T,
-    /// The next node.
-    next: *const Node<T>,
-}
-
 impl<T> Treiber<T> {
     /// Create a new, empty Treiber stack.
     pub fn new() -> Treiber<T> {
@@ -34,6 +26,7 @@
     }
 
     /// Pop an item from the stack.
+    // TODO: Change this return type.
     pub fn pop(&self) -> Option<Guard<T>> {
         // TODO: Use `catch {}` here when it lands.
         // Read the head snapshot.
@@ -105,9 +98,26 @@
 
     }
 }
 
-impl<T> Drop for Treiber<T> {
-    fn drop(&mut self) {
+/// A node in the stack.
+struct Node<T> {
+    /// The data this node holds.
+    data: T,
+    /// The next node.
+    next: *const Node<T>,
+}
+impl<T> Drop for Node<T> {
+    fn drop(&mut self) {
+        // FIXME: Since this is recursive (and although it is likely optimized out), there might
+        // be cases where this leads to a stack overflow, depending on the compilation flags and
+        // given sufficiently many elements.
+
+        // Recursively drop the next node, if it exists.
+        if !self.next.is_null() {
+            unsafe {
+                drop(Box::from_raw(self.next as *mut Node<T>));
+            }
+        }
     }
 }
 
@@ -116,6 +126,18 @@ mod tests {
     use super::*;
     use std::thread;
     use std::sync::Arc;
+    use std::sync::atomic::{self, AtomicUsize};
+
+    #[derive(Clone)]
+    struct Dropper {
+        d: Arc<AtomicUsize>,
+    }
+
+    impl Drop for Dropper {
+        fn drop(&mut self) {
+            self.d.fetch_add(1, atomic::Ordering::Relaxed);
+        }
+    }
 
     #[test]
     fn simple1() {
@@ -272,4 +294,43 @@
         }
         assert_eq!(sum, 10000);
     }
+
+    #[test]
+    fn drop_elements() {
+        let drops = Arc::new(AtomicUsize::default());
+        let stack = Arc::new(Treiber::new());
+
+        let d = Dropper {
+            d: drops.clone(),
+        };
+
+        let mut j = Vec::new();
+        for _ in 0..16 {
+            let d = d.clone();
+            let stack = stack.clone();
+
+            j.push(thread::spawn(move || {
+                for _ in 0..20 {
+                    stack.push(d.clone());
+                }
+
+                stack.pop();
+                stack.pop();
+            }));
+        }
+
+        for i in j {
+            i.join().unwrap();
+        }
+
+        ::gc();
+        // The 16 extra drops are the `d` clones captured by the threads above.
+        assert_eq!(drops.load(atomic::Ordering::Relaxed), 32 + 16);
+
+        // Drop the last arc, destroying the remaining 16 * 20 - 32 = 288 elements.
+        drop(stack);
+        ::gc();
+
+        assert_eq!(drops.load(atomic::Ordering::Relaxed), 320 + 16);
+    }
 }
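
Note on the FIXME above: the recursive `Drop for Node<T>` can, in principle, overflow the call stack when the stack holds sufficiently many elements. Below is a minimal sketch of an iterative alternative, assuming the `Node<T>` layout from this patch (the struct is repeated only to keep the snippet self-contained); it is an illustration, not the committed implementation.

```rust
use std::ptr;

/// Mirrors the `Node<T>` introduced in the patch (repeated for self-containment).
struct Node<T> {
    data: T,
    next: *const Node<T>,
}

impl<T> Drop for Node<T> {
    fn drop(&mut self) {
        // Walk the chain with a loop instead of recursion, so dropping a very
        // long stack cannot overflow the call stack.
        let mut next = self.next as *mut Node<T>;
        while !next.is_null() {
            // Take ownership of the next node so it is freed at the end of
            // this iteration.
            let mut node = unsafe { Box::from_raw(next) };
            next = node.next as *mut Node<T>;
            // Detach the tail first: with a null `next`, this node's own
            // destructor will not walk (or free) the rest of the chain.
            node.next = ptr::null();
            // `node` (and its `data`) is dropped here.
        }
    }
}
```

Nulling `next` before each box goes out of scope is what breaks the recursion: every node's destructor then frees only itself, and the loop frees the rest.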