Skip to content

Commit

Permalink
Check for leaks (#73)
Browse files Browse the repository at this point in the history
A first pass at adding some leak checking to loom + other improvements.

`Arc` is promoted to a full "object". This allows optimizing the behavior of
the mocked arc with the loom scheduler. Now, using a mocked `Arc` requires
fewer permutations as loom is able to "see" through it. Also, by promoting it to
an object, at the end of each execution, all arcs are checked to see if they were
correctly freed. In the event that they were not freed, the execution fails.

The waker behind `block_on` is updated to use a mocked `Arc`. This allows
catching leaking wakers.

Mocked versions of `std::alloc::{alloc, dealloc}` are added. These do not impact
the scheduler or permutations, but it allows tracking correct usage of
allocation & deallocation within the context of an execution. If an allocation leaks,
loom fails the execution.

A `loom::alloc::Track<T>` type is added to allow tracking arbitrary values and
ensure that the drop handlers are called. This functions in a similar way to
allocation tracking.

Finally, `Notify` is improved to permute on spurious wake ups. When waiting on
a notify, loom will first try the execution without any spurious wake ups, then it
will try the execution with a spurious wake up.

`futures::block_on` is updated to remove the double poll simulating spurious
wake ups as doing so could hide bugs.
  • Loading branch information
carllerche committed Oct 5, 2019
1 parent 92943fd commit a857427
Show file tree
Hide file tree
Showing 25 changed files with 675 additions and 172 deletions.
50 changes: 50 additions & 0 deletions src/alloc.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
//! Memory allocation APIs

use crate::rt;

pub use std::alloc::Layout;

/// Allocate memory with the global allocator.
///
/// The returned pointer is registered with the current loom execution; if it
/// is never passed back to [`dealloc`], the execution fails as a leak.
pub unsafe fn alloc(layout: Layout) -> *mut u8 {
    let p = std::alloc::alloc(layout);
    rt::alloc(p);
    p
}

/// Deallocate memory with the global allocator.
///
/// The pointer is unregistered from the execution's leak tracking *before*
/// the underlying memory is freed. Passing a pointer that was never returned
/// by [`alloc`] panics inside `rt::dealloc` ("pointer not tracked").
pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) {
    rt::dealloc(ptr);
    std::alloc::dealloc(ptr, layout)
}

/// Track allocations, detecting leaks
///
/// Pairs a value with an `rt::Allocation` token; at the end of each execution
/// the runtime verifies that every tracked value was dropped.
#[derive(Debug)]
pub struct Track<T> {
    // The tracked value
    value: T,
    // Leak-tracking token; its `Drop` marks the allocation as freed
    obj: rt::Allocation,
}

impl<T> Track<T> {
    /// Begin leak-tracking `value`.
    ///
    /// The execution fails if the returned `Track` is leaked instead of being
    /// dropped (or consumed via [`Track::into_inner`]).
    pub fn new(value: T) -> Self {
        let obj = rt::Allocation::new();
        Track { value, obj }
    }

    /// Borrow the tracked value.
    pub fn get_ref(&self) -> &T {
        &self.value
    }

    /// Mutably borrow the tracked value.
    pub fn get_mut(&mut self) -> &mut T {
        &mut self.value
    }

    /// Consume the wrapper, ending leak tracking and yielding the value.
    pub fn into_inner(self) -> T {
        self.value
    }
}
File renamed without changes.
5 changes: 5 additions & 0 deletions src/cell/mod.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
//! Shareable mutable containers.

mod causal;

pub use self::causal::{CausalCell, CausalCheck};
63 changes: 40 additions & 23 deletions src/future/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,12 +5,12 @@ mod atomic_waker;
pub use self::atomic_waker::AtomicWaker;

use crate::rt;
use crate::sync::Arc;

use futures_util::pin_mut;
use futures_util::task::{self, ArcWake};
use std::future::Future;
use std::sync::Arc;
use std::task::{Context, Poll};
use std::mem;
use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};

/// Block the current thread, driving `f` to completion.
pub fn block_on<F>(f: F) -> F::Output
Expand All @@ -19,11 +19,12 @@ where
{
pin_mut!(f);

let notify = Arc::new(NotifyWaker {
notify: rt::Notify::new(false)
});
let notify = Arc::new(rt::Notify::new(false, true));

let mut waker = unsafe {
mem::ManuallyDrop::new(Waker::from_raw(RawWaker::new(&*notify as *const _ as *const (), waker_vtable())))
};

let mut waker = task::waker(notify.clone());
let mut cx = Context::from_waker(&mut waker);

loop {
Expand All @@ -32,26 +33,42 @@ where
Poll::Pending => {}
}

// Simulate spurious wakeups by running again
match f.as_mut().poll(&mut cx) {
Poll::Ready(val) => return val,
Poll::Pending => {}
}

notify.notify.wait();
notify.wait();
}
}

struct NotifyWaker {
notify: rt::Notify,
/// Returns the vtable used to build `RawWaker`s over an `Arc<rt::Notify>`.
///
/// The table is stored in a named `static` so every waker created by
/// `block_on` shares the same `'static` vtable.
pub(super) fn waker_vtable() -> &'static RawWakerVTable {
    static VTABLE: RawWakerVTable = RawWakerVTable::new(
        clone_arc_raw,
        wake_arc_raw,
        wake_by_ref_arc_raw,
        drop_arc_raw,
    );
    &VTABLE
}

impl ArcWake for NotifyWaker {
fn wake_by_ref(me: &Arc<Self>) {
me.notify.notify();
}
/// Increment the strong count of the `Arc<rt::Notify>` behind `data` without
/// releasing the caller's reference.
///
/// NOTE(review): assumes `data` is the data pointer of a live mocked
/// `Arc<rt::Notify>` (as handed to `RawWaker::new` in `block_on`) — confirm
/// the mocked `Arc::from_raw` accepts that pointer form.
unsafe fn increase_refcount(data: *const ()) {
    // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
    let arc = mem::ManuallyDrop::new(Arc::<rt::Notify>::from_raw(data as *const _));
    // Now increase refcount, but don't drop new refcount either
    let _arc_clone: mem::ManuallyDrop<_> = arc.clone();
}

// `Notify` is only !Send & !Sync to prevent logic errors, not memory unsafety.
unsafe impl Send for NotifyWaker {}
unsafe impl Sync for NotifyWaker {}
/// `RawWakerVTable` clone entry: bump the refcount and return a new
/// `RawWaker` sharing the same `Arc<rt::Notify>` data pointer.
unsafe fn clone_arc_raw(data: *const ()) -> RawWaker {
    increase_refcount(data);
    RawWaker::new(data, waker_vtable())
}

/// `RawWakerVTable` wake-by-value entry: notifies the waiter, then releases
/// the waker's reference (the reconstructed `Arc` is dropped at end of scope).
unsafe fn wake_arc_raw(data: *const ()) {
    let notify: Arc<rt::Notify> = Arc::from_raw(data as *const _);
    notify.notify();
}

/// `RawWakerVTable` wake-by-reference entry: notifies the waiter without
/// consuming the caller's reference (refcount untouched via `ManuallyDrop`).
unsafe fn wake_by_ref_arc_raw(data: *const ()) {
    // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
    let arc = mem::ManuallyDrop::new(Arc::<rt::Notify>::from_raw(data as *const _));
    arc.notify();
}

/// `RawWakerVTable` drop entry: releases one reference to the
/// `Arc<rt::Notify>` behind `data`.
unsafe fn drop_arc_raw(data: *const ()) {
    drop(Arc::<rt::Notify>::from_raw(data as *const _))
}
2 changes: 2 additions & 0 deletions src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -139,6 +139,8 @@ macro_rules! dbg {
};
}

pub mod alloc;
pub mod cell;
pub mod model;
mod rt;
pub mod sync;
Expand Down
2 changes: 2 additions & 0 deletions src/model.rs
Original file line number Diff line number Diff line change
Expand Up @@ -171,6 +171,8 @@ impl Builder {
rt::thread_done();
});

execution.check_for_leaks();

if let Some(next) = execution.step() {
execution = next;
} else {
Expand Down
64 changes: 64 additions & 0 deletions src/rt/alloc.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
use crate::rt;
use crate::rt::object::Object;

/// Tracks an allocation
///
/// Holds the runtime object registered for this allocation; dropping an
/// `Allocation` marks it as freed (see the `Drop` impl below).
#[derive(Debug)]
pub(crate) struct Allocation {
    // Handle into the execution's object store
    obj: Object,
}

/// Per-allocation runtime state.
#[derive(Debug)]
pub(super) struct State {
    // Set to `true` by `Allocation`'s `Drop`; verified by `check_for_leaks`
    // at the end of the execution
    is_dropped: bool,
}

/// Track a raw allocation
///
/// Registers `ptr` with the current execution so the end-of-execution leak
/// check can verify it was deallocated. Panics if the same pointer is already
/// being tracked.
pub(crate) fn alloc(ptr: *mut u8) {
    rt::execution(|execution| {
        let obj = execution.objects.insert_alloc(State { is_dropped: false });
        let prev = execution
            .raw_allocations
            .insert(ptr as usize, Allocation { obj });
        assert!(prev.is_none(), "pointer already tracked");
    });
}

/// Track a raw deallocation
///
/// Removes `ptr` from the execution's tracked allocations. Panics with
/// "pointer not tracked" if it was never registered via [`alloc`].
pub(crate) fn dealloc(ptr: *mut u8) {
    let allocation = rt::execution(|execution| {
        execution
            .raw_allocations
            .remove(&(ptr as usize))
            .expect("pointer not tracked")
    });

    // Drop outside of the `rt::execution` block — `Allocation`'s `Drop`
    // enters `rt::execution` itself, so it must not run inside the closure.
    drop(allocation);
}

impl Allocation {
    /// Register a new tracked object with the current execution.
    pub(crate) fn new() -> Allocation {
        rt::execution(|execution| Allocation {
            obj: execution.objects.insert_alloc(State { is_dropped: false }),
        })
    }
}

impl Drop for Allocation {
    // Mark the tracked object as freed so the end-of-execution leak check
    // (`State::check_for_leaks`) passes for this allocation.
    fn drop(&mut self) {
        rt::execution(|execution| {
            let state = self.obj.alloc(&mut execution.objects);
            state.is_dropped = true;
        });
    }
}

impl State {
    /// End-of-execution check: fails the execution with "object leaked" if
    /// this allocation was never dropped.
    pub(super) fn check_for_leaks(&self) {
        if !self.is_dropped {
            panic!("object leaked");
        }
    }
}
121 changes: 121 additions & 0 deletions src/rt/arc.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,121 @@
#![allow(warnings)]
use crate::rt::object::Object;
use crate::rt::{self, Access, Synchronize};

use std::sync::atomic::Ordering::{Acquire, Release};

/// Handle to the runtime state backing a mocked `Arc`.
///
/// `Copy` because it is only an id into the execution's object store; the
/// actual reference count lives in `State`.
#[derive(Debug, Copy, Clone)]
pub(crate) struct Arc {
    obj: Object,
}

#[derive(Debug)]
pub(super) struct State {
    /// Reference count
    ref_cnt: usize,

    /// Causality transfers between threads
    ///
    /// Only updated on ref dec and acquired before drop
    synchronize: Synchronize,

    /// Tracks access to the arc object
    last_ref_inc: Option<Access>,
    last_ref_dec: Option<Access>,
}

/// Actions performed on the Arc
///
/// Clones are only dependent with inspections. Drops are dependent between each
/// other. (See `State::last_dependent_accesses` below for how this drives
/// permutation exploration.)
#[derive(Debug, Copy, Clone)]
pub(super) enum Action {
    /// Clone the arc
    RefInc,

    /// Drop the Arc
    RefDec,
    /*
    /// Inspect internals (such as get ref count). This is done with SeqCst
    /// causality
    Inspect,
    */
}

impl Arc {
    /// Register a new mocked `Arc` with the current execution, starting with
    /// a reference count of one.
    pub(crate) fn new() -> Arc {
        rt::execution(|execution| {
            let obj = execution.objects.insert_arc(State {
                ref_cnt: 1,
                synchronize: Synchronize::new(execution.max_threads),
                last_ref_inc: None,
                last_ref_dec: None,
            });

            Arc { obj }
        })
    }

    /// Increment the reference count (mocked `Arc::clone`).
    pub(crate) fn ref_inc(self) {
        // NOTE(review): `branch` appears to register a scheduling point for
        // this action — confirm against `rt::object::Object::branch`.
        self.obj.branch(Action::RefInc);

        rt::execution(|execution| {
            let state = self.obj.arc_mut(&mut execution.objects);
            // checked_add so a refcount overflow fails loudly instead of wrapping
            state.ref_cnt = state.ref_cnt.checked_add(1).expect("overflow");
        })
    }

    /// Returns true if the memory should be dropped.
    pub(crate) fn ref_dec(self) -> bool {
        self.obj.branch(Action::RefDec);

        rt::execution(|execution| {
            let state = self.obj.arc_mut(&mut execution.objects);

            assert!(state.ref_cnt >= 1, "Arc is already released");

            // Decrement the ref count
            state.ref_cnt -= 1;

            // Synchronize the threads.
            state
                .synchronize
                .sync_store(&mut execution.threads, Release);

            if state.ref_cnt == 0 {
                // Final ref count, the arc will be dropped. This requires
                // acquiring the causality
                //
                // In the real implementation, this is done with a fence.
                state.synchronize.sync_load(&mut execution.threads, Acquire);
                true
            } else {
                false
            }
        })
    }
}

impl State {
    /// End-of-execution check: a non-zero reference count means a mocked
    /// `Arc` handle was leaked.
    pub(super) fn check_for_leaks(&self) {
        assert_eq!(0, self.ref_cnt, "Arc leaked");
    }

    /// Returns the prior accesses the given action must be ordered against
    /// when exploring execution permutations.
    pub(super) fn last_dependent_accesses<'a>(
        &'a self,
        action: Action,
    ) -> Box<dyn Iterator<Item = &'a Access> + 'a> {
        match action {
            // RefIncs are not dependent w/ RefDec, only inspections.
            // `iter::empty()` is explicit about the item type; `[].into_iter()`
            // is edition-sensitive (yields by value on edition 2021+).
            Action::RefInc => Box::new(std::iter::empty()),
            Action::RefDec => Box::new(self.last_ref_dec.iter()),
        }
    }

    /// Record the most recent access for the given action kind.
    pub(super) fn set_last_access(&mut self, action: Action, access: Access) {
        match action {
            Action::RefInc => self.last_ref_inc = Some(access),
            Action::RefDec => self.last_ref_dec = Some(access),
        }
    }
}
Loading

0 comments on commit a857427

Please sign in to comment.