Auto merge of #29031 - cristicbz:mtx_inner, r=alexcrichton
The implementation of `into_inner` ended up a bit more complex than I had hoped for: is there any simpler, less unsafe way of getting around the fact that one can't move out of a struct that implements `Drop`?

See #28968 and rust-lang/rfcs#1269.
bors committed Oct 15, 2015
2 parents be3d390 + 90ccefd commit 6cdf31b
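
The core trick in this patch is the standard pattern for moving a field out of a type that implements `Drop`: bitwise-copy the field out with `ptr::read`, then `mem::forget` the container so its destructor never runs on the moved-from value. Below is a minimal, self-contained sketch of that pattern; the `Wrapper` type and `into_value` method are hypothetical names used only for illustration.

use std::{mem, ptr};

struct Wrapper {
    value: String,
}

impl Drop for Wrapper {
    fn drop(&mut self) {
        println!("dropping wrapper");
    }
}

impl Wrapper {
    // Move `value` out even though `Wrapper: Drop` forbids destructuring.
    fn into_value(self) -> String {
        unsafe {
            // Copy the field out bitwise; `self` still nominally owns it here.
            let value = ptr::read(&self.value);
            // Suppress `Wrapper`'s destructor so `value` is not dropped twice.
            mem::forget(self);
            value
        }
    }
}

fn main() {
    let w = Wrapper { value: String::from("hello") };
    assert_eq!(w.into_value(), "hello");
}

In the diff below, the same two steps appear as the `ptr::read` of `inner` and `data` followed by `mem::forget(self)`, with the one extra obligation that the destructor's `lock.destroy()` call must be replicated by hand.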
Showing 2 changed files with 233 additions and 0 deletions.
115 changes: 115 additions & 0 deletions src/libstd/sync/mutex.rs
@@ -13,7 +13,9 @@ use prelude::v1::*;
use cell::UnsafeCell;
use fmt;
use marker;
use mem;
use ops::{Deref, DerefMut};
use ptr;
use sys_common::mutex as sys;
use sys_common::poison::{self, TryLockError, TryLockResult, LockResult};

@@ -243,6 +245,50 @@ impl<T: ?Sized> Mutex<T> {
pub fn is_poisoned(&self) -> bool {
self.inner.poison.get()
}

/// Consumes this mutex, returning the underlying data.
///
/// # Failure
///
/// If another user of this mutex panicked while holding the mutex, then
/// this call will return an error instead.
#[unstable(feature = "mutex_into_inner", reason = "recently added", issue = "28968")]
pub fn into_inner(self) -> LockResult<T> where T: Sized {
// We know statically that there are no outstanding references to
// `self` so there's no need to lock the inner StaticMutex.
//
// To get the inner value, we'd like to call `data.into_inner()`,
// but because `Mutex` impl-s `Drop`, we can't move out of it, so
// we'll have to destructure it manually instead.
unsafe {
// Like `let Mutex { inner, data } = self`.
let (inner, data) = {
let Mutex { ref inner, ref data } = self;
(ptr::read(inner), ptr::read(data))
};
mem::forget(self);
inner.lock.destroy(); // Keep in sync with the `Drop` impl.

poison::map_result(inner.poison.borrow(), |_| data.into_inner())
}
}

/// Returns a mutable reference to the underlying data.
///
/// Since this call borrows the `Mutex` mutably, no actual locking needs to
/// take place---the mutable borrow statically guarantees no locks exist.
///
/// # Failure
///
/// If another user of this mutex panicked while holding the mutex, then
/// this call will return an error instead.
#[unstable(feature = "mutex_get_mut", reason = "recently added", issue = "28968")]
pub fn get_mut(&mut self) -> LockResult<&mut T> {
// We know statically that there are no other references to `self`, so
// there's no need to lock the inner StaticMutex.
let data = unsafe { &mut *self.data.get() };
poison::map_result(self.inner.poison.borrow(), |_| data)
}
}

#[stable(feature = "rust1", since = "1.0.0")]
@@ -251,6 +297,8 @@ impl<T: ?Sized> Drop for Mutex<T> {
// This is actually safe b/c we know that there is no further usage of
// this mutex (it's up to the user to arrange for a mutex to get
// dropped, that's not our job)
//
// IMPORTANT: This code must be kept in sync with `Mutex::into_inner`.
unsafe { self.inner.lock.destroy() }
}
}
@@ -371,10 +419,14 @@ mod tests {

use sync::mpsc::channel;
use sync::{Arc, Mutex, StaticMutex, Condvar};
use sync::atomic::{AtomicUsize, Ordering};
use thread;

struct Packet<T>(Arc<(Mutex<T>, Condvar)>);

#[derive(Eq, PartialEq, Debug)]
struct NonCopy(i32);

unsafe impl<T: Send> Send for Packet<T> {}
unsafe impl<T> Sync for Packet<T> {}

@@ -435,6 +487,69 @@ mod tests {
*m.try_lock().unwrap() = ();
}

#[test]
fn test_into_inner() {
let m = Mutex::new(NonCopy(10));
assert_eq!(m.into_inner().unwrap(), NonCopy(10));
}

#[test]
fn test_into_inner_drop() {
struct Foo(Arc<AtomicUsize>);
impl Drop for Foo {
fn drop(&mut self) {
self.0.fetch_add(1, Ordering::SeqCst);
}
}
let num_drops = Arc::new(AtomicUsize::new(0));
let m = Mutex::new(Foo(num_drops.clone()));
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
{
let _inner = m.into_inner().unwrap();
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
}
assert_eq!(num_drops.load(Ordering::SeqCst), 1);
}

#[test]
fn test_into_inner_poison() {
let m = Arc::new(Mutex::new(NonCopy(10)));
let m2 = m.clone();
let _ = thread::spawn(move || {
let _lock = m2.lock().unwrap();
panic!("test panic in inner thread to poison mutex");
}).join();

assert!(m.is_poisoned());
match Arc::try_unwrap(m).unwrap().into_inner() {
Err(e) => assert_eq!(e.into_inner(), NonCopy(10)),
Ok(x) => panic!("into_inner of poisoned Mutex is Ok: {:?}", x),
}
}

#[test]
fn test_get_mut() {
let mut m = Mutex::new(NonCopy(10));
*m.get_mut().unwrap() = NonCopy(20);
assert_eq!(m.into_inner().unwrap(), NonCopy(20));
}

#[test]
fn test_get_mut_poison() {
let m = Arc::new(Mutex::new(NonCopy(10)));
let m2 = m.clone();
let _ = thread::spawn(move || {
let _lock = m2.lock().unwrap();
panic!("test panic in inner thread to poison mutex");
}).join();

assert!(m.is_poisoned());
match Arc::try_unwrap(m).unwrap().get_mut() {
Err(e) => assert_eq!(*e.into_inner(), NonCopy(10)),
Ok(x) => panic!("get_mut of poisoned Mutex is Ok: {:?}", x),
}
}

#[test]
fn test_mutex_arc_condvar() {
let packet = Packet(Arc::new((Mutex::new(false), Condvar::new())));
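
As a usage sketch (not part of the diff): the new `Mutex` methods above would be exercised roughly as follows, assuming a nightly toolchain from around this commit where the unstable `mutex_into_inner` and `mutex_get_mut` feature gates named in the attributes are required.

#![feature(mutex_into_inner, mutex_get_mut)]

use std::sync::Mutex;

fn main() {
    let mut m = Mutex::new(vec![1, 2, 3]);

    // `get_mut` needs no locking: the `&mut Mutex` guarantees exclusive access.
    m.get_mut().unwrap().push(4);

    // `into_inner` consumes the mutex and hands the data back by value.
    let v = m.into_inner().unwrap();
    assert_eq!(v, vec![1, 2, 3, 4]);
}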
118 changes: 118 additions & 0 deletions src/libstd/sync/rwlock.rs
@@ -13,7 +13,9 @@ use prelude::v1::*;
use cell::UnsafeCell;
use fmt;
use marker;
use mem;
use ops::{Deref, DerefMut};
use ptr;
use sys_common::poison::{self, LockResult, TryLockError, TryLockResult};
use sys_common::rwlock as sys;

@@ -260,11 +262,60 @@ impl<T: ?Sized> RwLock<T> {
pub fn is_poisoned(&self) -> bool {
self.inner.poison.get()
}

/// Consumes this `RwLock`, returning the underlying data.
///
/// # Failure
///
/// This function will return an error if the RwLock is poisoned. An RwLock
/// is poisoned whenever a writer panics while holding an exclusive lock. An
/// error will only be returned if the lock would have otherwise been
/// acquired.
#[unstable(feature = "rwlock_into_inner", reason = "recently added", issue = "28968")]
pub fn into_inner(self) -> LockResult<T> where T: Sized {
// We know statically that there are no outstanding references to
// `self` so there's no need to lock the inner StaticRwLock.
//
// To get the inner value, we'd like to call `data.into_inner()`,
// but because `RwLock` impl-s `Drop`, we can't move out of it, so
// we'll have to destructure it manually instead.
unsafe {
// Like `let RwLock { inner, data } = self`.
let (inner, data) = {
let RwLock { ref inner, ref data } = self;
(ptr::read(inner), ptr::read(data))
};
mem::forget(self);
inner.lock.destroy(); // Keep in sync with the `Drop` impl.

poison::map_result(inner.poison.borrow(), |_| data.into_inner())
}
}

/// Returns a mutable reference to the underlying data.
///
/// Since this call borrows the `RwLock` mutably, no actual locking needs to
/// take place---the mutable borrow statically guarantees no locks exist.
///
/// # Failure
///
/// This function will return an error if the RwLock is poisoned. An RwLock
/// is poisoned whenever a writer panics while holding an exclusive lock. An
/// error will only be returned if the lock would have otherwise been
/// acquired.
#[unstable(feature = "rwlock_get_mut", reason = "recently added", issue = "28968")]
pub fn get_mut(&mut self) -> LockResult<&mut T> {
// We know statically that there are no other references to `self`, so
// there's no need to lock the inner StaticRwLock.
let data = unsafe { &mut *self.data.get() };
poison::map_result(self.inner.poison.borrow(), |_| data)
}
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Drop for RwLock<T> {
fn drop(&mut self) {
// IMPORTANT: This code needs to be kept in sync with `RwLock::into_inner`.
unsafe { self.inner.lock.destroy() }
}
}
@@ -426,6 +477,10 @@ mod tests {
use sync::mpsc::channel;
use thread;
use sync::{Arc, RwLock, StaticRwLock, TryLockError};
use sync::atomic::{AtomicUsize, Ordering};

#[derive(Eq, PartialEq, Debug)]
struct NonCopy(i32);

#[test]
fn smoke() {
@@ -606,4 +661,67 @@ mod tests {

drop(read_guard);
}

#[test]
fn test_into_inner() {
let m = RwLock::new(NonCopy(10));
assert_eq!(m.into_inner().unwrap(), NonCopy(10));
}

#[test]
fn test_into_inner_drop() {
struct Foo(Arc<AtomicUsize>);
impl Drop for Foo {
fn drop(&mut self) {
self.0.fetch_add(1, Ordering::SeqCst);
}
}
let num_drops = Arc::new(AtomicUsize::new(0));
let m = RwLock::new(Foo(num_drops.clone()));
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
{
let _inner = m.into_inner().unwrap();
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
}
assert_eq!(num_drops.load(Ordering::SeqCst), 1);
}

#[test]
fn test_into_inner_poison() {
let m = Arc::new(RwLock::new(NonCopy(10)));
let m2 = m.clone();
let _ = thread::spawn(move || {
let _lock = m2.write().unwrap();
panic!("test panic in inner thread to poison RwLock");
}).join();

assert!(m.is_poisoned());
match Arc::try_unwrap(m).unwrap().into_inner() {
Err(e) => assert_eq!(e.into_inner(), NonCopy(10)),
Ok(x) => panic!("into_inner of poisoned RwLock is Ok: {:?}", x),
}
}

#[test]
fn test_get_mut() {
let mut m = RwLock::new(NonCopy(10));
*m.get_mut().unwrap() = NonCopy(20);
assert_eq!(m.into_inner().unwrap(), NonCopy(20));
}

#[test]
fn test_get_mut_poison() {
let m = Arc::new(RwLock::new(NonCopy(10)));
let m2 = m.clone();
let _ = thread::spawn(move || {
let _lock = m2.write().unwrap();
panic!("test panic in inner thread to poison RwLock");
}).join();

assert!(m.is_poisoned());
match Arc::try_unwrap(m).unwrap().get_mut() {
Err(e) => assert_eq!(*e.into_inner(), NonCopy(10)),
Ok(x) => panic!("get_mut of poisoned RwLock is Ok: {:?}", x),
}
}
}

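Likewise, a hedged usage sketch (not part of the diff) for the `RwLock` counterpart, exercising the poisoning path the doc comments above describe. It assumes a nightly from around this commit with the unstable `rwlock_into_inner` feature gate enabled; even when the lock is poisoned, the returned error still carries the data out.

#![feature(rwlock_into_inner)]

use std::sync::{Arc, RwLock};
use std::thread;

fn main() {
    let lock = Arc::new(RwLock::new(String::from("data")));
    let lock2 = lock.clone();

    // Poison the lock by panicking while holding the write guard.
    let _ = thread::spawn(move || {
        let _guard = lock2.write().unwrap();
        panic!("poison the RwLock");
    }).join();

    assert!(lock.is_poisoned());

    // `into_inner` reports the poisoning, but the error wraps the data.
    let inner = match Arc::try_unwrap(lock).unwrap().into_inner() {
        Ok(s) => s,
        Err(poisoned) => poisoned.into_inner(),
    };
    assert_eq!(inner, "data");
}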