@@ -84,15 +84,15 @@ pub type Callback = fn(msg: &(Any + Send), file: &'static str, line: uint);
// For more information, see below.
const MAX_CALLBACKS: uint = 16;
static CALLBACKS: [atomic::AtomicUint; MAX_CALLBACKS] =
[atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT];
static CALLBACK_CNT: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
[atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT,
atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT,
atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT,
atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT,
atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT,
atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT,
atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT,
atomic::ATOMIC_UINT_INIT, atomic::ATOMIC_UINT_INIT];
static CALLBACK_CNT: atomic::AtomicUint = atomic::ATOMIC_UINT_INIT;

thread_local! { static PANICKING: Cell<bool> = Cell::new(false) }
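The `CALLBACKS` array above stores registered panic callbacks as `uint`-encoded function pointers, with `CALLBACK_CNT` handing out slot indices. A minimal sketch of claiming a slot under that scheme; `register_sketch` is illustrative and the actual `register` in this module may differ in detail:

```rust
// Illustrative only: stores a `Callback` into one of the AtomicUint slots.
fn register_sketch(f: Callback) -> bool {
    // Reserve the next free index.
    let idx = CALLBACK_CNT.fetch_add(1, atomic::SeqCst);
    if idx >= MAX_CALLBACKS {
        // No free slots: clamp the counter and report failure.
        CALLBACK_CNT.store(MAX_CALLBACKS, atomic::SeqCst);
        return false;
    }
    // Function pointers travel through the atomics as plain machine words.
    CALLBACKS[idx].store(f as uint, atomic::SeqCst);
    true
}
```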

@@ -544,7 +544,7 @@ fn begin_unwind_inner(msg: Box<Any + Send>, file_line: &(&'static str, uint)) ->
// Make sure the default failure handler is registered before we look at the
// callbacks.
static INIT: Once = ONCE_INIT;
INIT.doit(|| unsafe { register(failure::on_fail); });
INIT.call_once(|| unsafe { register(failure::on_fail); });

// First, invoke the user-defined callbacks triggered on thread panic.
//
@@ -47,7 +47,7 @@ pub fn limit_thread_creation_due_to_osx_and_valgrind() -> bool {
}

pub fn min_stack() -> uint {
static MIN: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
static MIN: atomic::AtomicUint = atomic::ATOMIC_UINT_INIT;
match MIN.load(atomic::SeqCst) {
0 => {}
n => return n - 1,
@@ -86,22 +86,23 @@
//! Keep a global count of live tasks:
//!
//! ```
//! use std::sync::atomic::{AtomicUint, SeqCst, INIT_ATOMIC_UINT};
//! use std::sync::atomic::{AtomicUint, SeqCst, ATOMIC_UINT_INIT};
//!
//! static GLOBAL_TASK_COUNT: AtomicUint = INIT_ATOMIC_UINT;
//! static GLOBAL_TASK_COUNT: AtomicUint = ATOMIC_UINT_INIT;
//!
//! let old_task_count = GLOBAL_TASK_COUNT.fetch_add(1, SeqCst);
//! println!("live tasks: {}", old_task_count + 1);
//! ```

#![allow(deprecated)]
#![stable]

use alloc::boxed::Box;
use core::mem;
use core::prelude::{Send, Drop, None, Option, Some};

pub use core::atomic::{AtomicBool, AtomicInt, AtomicUint, AtomicPtr};
pub use core::atomic::{INIT_ATOMIC_BOOL, INIT_ATOMIC_INT, INIT_ATOMIC_UINT};
pub use core::atomic::{ATOMIC_BOOL_INIT, ATOMIC_INT_INIT, ATOMIC_UINT_INIT};
pub use core::atomic::fence;
pub use core::atomic::Ordering::{mod, Relaxed, Release, Acquire, AcqRel, SeqCst};

@@ -116,6 +117,7 @@ pub struct AtomicOption<T> {
p: AtomicUint,
}

#[allow(deprecated)]
impl<T: Send> AtomicOption<T> {
/// Create a new `AtomicOption`
pub fn new(p: Box<T>) -> AtomicOption<T> {
@@ -8,7 +8,6 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use kinds::{Send, Sync};
use sync::{Mutex, Condvar};

/// A barrier enables multiple tasks to synchronize the beginning
@@ -30,29 +29,32 @@ use sync::{Mutex, Condvar};
/// }).detach();
/// }
/// ```
#[stable]
pub struct Barrier {
lock: Mutex<BarrierState>,
cvar: Condvar,
num_threads: uint,
}

unsafe impl Send for Barrier {}
unsafe impl Sync for Barrier {}

// The inner state of a double barrier
struct BarrierState {
count: uint,
generation_id: uint,
}

unsafe impl Send for BarrierState {}
unsafe impl Sync for BarrierState {}
/// A result returned from wait.
///
/// Currently this opaque structure only has one method, `.is_leader()`. Only
/// one thread will receive a result that will return `true` from this function.
#[allow(missing_copy_implementations)]
pub struct BarrierWaitResult(bool);

impl Barrier {
/// Create a new barrier that can block a given number of threads.
///
/// A barrier will block `n`-1 threads which call `wait` and then wake up
/// all threads at once when the `n`th thread calls `wait`.
#[stable]
pub fn new(n: uint) -> Barrier {
Barrier {
lock: Mutex::new(BarrierState {
@@ -68,7 +70,13 @@ impl Barrier {
///
/// Barriers are re-usable after all threads have rendezvoused once, and can
/// be used continuously.
pub fn wait(&self) {
///
/// A single (arbitrary) thread will receive a `BarrierWaitResult` that
/// returns `true` from `is_leader` when returning from this function, and
/// all other threads will receive a result that will return `false` from
/// `is_leader`.
#[stable]
pub fn wait(&self) -> BarrierWaitResult {
let mut lock = self.lock.lock().unwrap();
let local_gen = lock.generation_id;
lock.count += 1;
@@ -79,32 +87,44 @@ impl Barrier {
lock.count < self.num_threads {
lock = self.cvar.wait(lock).unwrap();
}
BarrierWaitResult(false)
} else {
lock.count = 0;
lock.generation_id += 1;
self.cvar.notify_all();
BarrierWaitResult(true)
}
}
}

impl BarrierWaitResult {
/// Returns whether this thread from `wait` is the "leader thread".
///
/// Only one thread will have `true` returned from its result; all other
/// threads will have `false` returned.
#[stable]
pub fn is_leader(&self) -> bool { self.0 }
}
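A minimal usage sketch of the new return value, written against the pre-1.0 APIs used elsewhere in this patch (`uint`, `range`, `Thread::spawn` returning a joinable guard): all `n` threads rendezvous at the barrier, and exactly one observes `is_leader() == true`.

```rust
use std::sync::{Arc, Barrier};
use std::thread::Thread;

fn main() {
    let n = 4u;
    let barrier = Arc::new(Barrier::new(n));
    let guards: Vec<_> = range(0u, n).map(|_| {
        let b = barrier.clone();
        // Each thread blocks in `wait` until all `n` have arrived.
        Thread::spawn(move|| b.wait().is_leader())
    }).collect();
    // Exactly one of the waiters is reported as the leader.
    let leaders = guards.into_iter()
                        .map(|g| g.join().ok().unwrap())
                        .filter(|&is_leader| is_leader)
                        .count();
    assert_eq!(leaders, 1);
}
```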

#[cfg(test)]
mod tests {
use prelude::*;

use sync::{Arc, Barrier};
use comm::Empty;
use sync::{Arc, Barrier};

#[test]
fn test_barrier() {
let barrier = Arc::new(Barrier::new(10));
const N: uint = 10;

let barrier = Arc::new(Barrier::new(N));
let (tx, rx) = channel();

for _ in range(0u, 9) {
for _ in range(0u, N - 1) {
let c = barrier.clone();
let tx = tx.clone();
spawn(move|| {
c.wait();
tx.send(true);
tx.send(c.wait().is_leader());
});
}

@@ -115,10 +135,15 @@ mod tests {
_ => false,
});

barrier.wait();
let mut leader_found = barrier.wait().is_leader();

// Now, the barrier is cleared and we should get data.
for _ in range(0u, 9) {
rx.recv();
for _ in range(0u, N - 1) {
if rx.recv() {
assert!(!leader_found);
leader_found = true;
}
}
assert!(leader_found);
}
}
@@ -88,7 +88,7 @@ unsafe impl Sync for StaticCondvar {}
#[unstable = "may be merged with Condvar in the future"]
pub const CONDVAR_INIT: StaticCondvar = StaticCondvar {
inner: sys::CONDVAR_INIT,
mutex: atomic::INIT_ATOMIC_UINT,
mutex: atomic::ATOMIC_UINT_INIT,
};

impl Condvar {
@@ -8,8 +8,8 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! A type representing values that may be computed concurrently and operations for working with
//! them.
//! A type representing values that may be computed concurrently and operations
//! for working with them.
//!
//! # Example
//!
@@ -23,6 +23,9 @@
//! ```

#![allow(missing_docs)]
#![unstable = "futures as-is have yet to be deeply reevaluated with recent \
core changes to Rust's synchronization story, and will likely \
become stable in the future but are unstable until that time"]

use core::prelude::*;
use core::mem::replace;
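For context on the type being marked unstable here: a `Future` wraps a value that may still be computing on another task. A minimal sketch of the API as the benchmark code later in this patch used it before being migrated away (`spawn` starts the work, `get` blocks for the result); `expensive_computation` is an illustrative name:

```rust
use std::sync::Future;

fn expensive_computation() -> uint { 40 + 2 }

fn main() {
    // `spawn` kicks off the computation and returns immediately.
    let mut delayed = Future::spawn(move|| expensive_computation());
    // ... other work can proceed here ...
    // `get` blocks until the value is ready.
    println!("result: {}", delayed.get());
}
```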
@@ -26,7 +26,7 @@ pub use self::rwlock::{RWLockReadGuard, RWLockWriteGuard};
pub use self::condvar::{Condvar, StaticCondvar, CONDVAR_INIT};
pub use self::once::{Once, ONCE_INIT};
pub use self::semaphore::{Semaphore, SemaphoreGuard};
pub use self::barrier::Barrier;
pub use self::barrier::{Barrier, BarrierWaitResult};
pub use self::poison::{PoisonError, TryLockError, TryLockResult, LockResult};

pub use self::future::Future;
@@ -32,10 +32,11 @@ use sync::{StaticMutex, MUTEX_INIT};
///
/// static START: Once = ONCE_INIT;
///
/// START.doit(|| {
/// START.call_once(|| {
/// // run initialization here
/// });
/// ```
#[stable]
pub struct Once {
mutex: StaticMutex,
cnt: atomic::AtomicInt,
@@ -45,23 +46,25 @@ pub struct Once {
unsafe impl Sync for Once {}

/// Initialization value for static `Once` values.
#[stable]
pub const ONCE_INIT: Once = Once {
mutex: MUTEX_INIT,
cnt: atomic::INIT_ATOMIC_INT,
lock_cnt: atomic::INIT_ATOMIC_INT,
cnt: atomic::ATOMIC_INT_INIT,
lock_cnt: atomic::ATOMIC_INT_INIT,
};

impl Once {
/// Perform an initialization routine once and only once. The given closure
/// will be executed if this is the first time `doit` has been called, and
/// otherwise the routine will *not* be invoked.
/// will be executed if this is the first time `call_once` has been called,
/// and otherwise the routine will *not* be invoked.
///
/// This method will block the calling task if another initialization
/// routine is currently running.
///
/// When this function returns, it is guaranteed that some initialization
/// has run and completed (it may not be the closure specified).
pub fn doit<F>(&'static self, f: F) where F: FnOnce() {
#[stable]
pub fn call_once<F>(&'static self, f: F) where F: FnOnce() {
// Optimize common path: load is much cheaper than fetch_add.
if self.cnt.load(atomic::SeqCst) < 0 {
return
@@ -91,13 +94,13 @@ impl Once {
//
// It is crucial that the negative value is swapped in *after* the
// initialization routine has completed because otherwise new threads
// calling `doit` will return immediately before the initialization has
// completed.
// calling `call_once` will return immediately before the initialization
// has completed.

let prev = self.cnt.fetch_add(1, atomic::SeqCst);
if prev < 0 {
// Make sure we never overflow, we'll never have int::MIN
// simultaneous calls to `doit` to make this value go back to 0
// simultaneous calls to `call_once` to make this value go back to 0
self.cnt.store(int::MIN, atomic::SeqCst);
return
}
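The comments above describe a negative-sentinel protocol: a non-negative `cnt` means callers are still racing toward (or through) initialization, and a negative value, stored only after the closure finishes, lets later callers take the cheap `load` fast path. A stripped-down sketch of that idea in isolation, assuming a simple spin on the slow path instead of the mutex the real type uses:

```rust
use std::sync::atomic::{AtomicInt, ATOMIC_INT_INIT, SeqCst};
use std::int;

static STATE: AtomicInt = ATOMIC_INT_INIT;

fn run_once_sketch<F>(f: F) where F: FnOnce() {
    // Fast path: a negative value means initialization already completed.
    if STATE.load(SeqCst) < 0 { return }

    let prev = STATE.fetch_add(1, SeqCst);
    if prev < 0 {
        // Initialization finished between the load and the fetch_add; pull
        // the counter back down so it can never drift toward zero.
        STATE.store(int::MIN, SeqCst);
    } else if prev == 0 {
        // First arrival runs the closure, then publishes completion.
        f();
        STATE.store(int::MIN, SeqCst);
    } else {
        // Someone else is initializing; wait until completion is published.
        while STATE.load(SeqCst) >= 0 {}
    }
}
```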
@@ -118,6 +121,10 @@ impl Once {
unsafe { self.mutex.destroy() }
}
}

/// Deprecated
#[deprecated = "renamed to `call_once`"]
pub fn doit<F>(&'static self, f: F) where F: FnOnce() { self.call_once(f) }
}
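The deprecated forwarder above keeps old callers compiling while they migrate; a minimal sketch of what that looks like at a call site (`init_logging` is an illustrative name):

```rust
use std::sync::{Once, ONCE_INIT};

static INIT: Once = ONCE_INIT;

fn init_logging() { /* one-time setup */ }

fn ensure_initialized() {
    // Still compiles, but now warns: deprecated, renamed to `call_once`.
    // INIT.doit(|| init_logging());

    // New spelling; the closure runs at most once across all threads.
    INIT.call_once(|| init_logging());
}
```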

#[cfg(test)]
@@ -131,9 +138,9 @@ mod test {
fn smoke_once() {
static O: Once = ONCE_INIT;
let mut a = 0i;
O.doit(|| a += 1);
O.call_once(|| a += 1);
assert_eq!(a, 1);
O.doit(|| a += 1);
O.call_once(|| a += 1);
assert_eq!(a, 1);
}

@@ -148,7 +155,7 @@ mod test {
spawn(move|| {
for _ in range(0u, 4) { Thread::yield_now() }
unsafe {
O.doit(|| {
O.call_once(|| {
assert!(!run);
run = true;
});
@@ -159,7 +166,7 @@ mod test {
}

unsafe {
O.doit(|| {
O.call_once(|| {
assert!(!run);
run = true;
});
@@ -8,6 +8,9 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![unstable = "the interaction between semaphores and the acquisition/release \
of resources is currently unclear"]

use ops::Drop;
use sync::{Mutex, Condvar};

@@ -10,6 +10,11 @@

//! Abstraction of a thread pool for basic parallelism.

#![unstable = "the semantics of a failing task and whether a thread is \
re-attached to a thread pool are somewhat unclear, and the \
utility of this type in `std::sync` is questionable with \
respect to the jobs of other primitives"]

use core::prelude::*;

use thread::Thread;
@@ -137,7 +137,7 @@ pub const INIT: StaticKey = StaticKey {
///
/// This value allows specific configuration of the destructor for a TLS key.
pub const INIT_INNER: StaticKeyInner = StaticKeyInner {
key: atomic::INIT_ATOMIC_UINT,
key: atomic::ATOMIC_UINT_INIT,
};

static INIT_KEYS: Once = ONCE_INIT;
@@ -18,7 +18,7 @@ use io::{IoError, IoResult};
use libc::{mod, c_int, c_char, c_void};
use path::BytesContainer;
use ptr;
use sync::atomic::{AtomicInt, INIT_ATOMIC_INT, SeqCst};
use sync::atomic::{AtomicInt, SeqCst};
use sys::fs::FileDesc;
use os;

@@ -8,13 +8,14 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use prelude::*;

use alloc::arc::Arc;
use libc;
use c_str::CString;
use mem;
use sync::{atomic, Mutex};
use io::{mod, IoResult, IoError};
use prelude::*;

use sys::{mod, timer, retry, c, set_nonblocking, wouldblock};
use sys::fs::{fd_t, FileDesc};
@@ -117,9 +118,6 @@ pub struct UnixStream {
write_deadline: u64,
}

unsafe impl Send for UnixStream {}
unsafe impl Sync for UnixStream {}

impl UnixStream {
pub fn connect(addr: &CString,
timeout: Option<u64>) -> IoResult<UnixStream> {
@@ -218,6 +216,7 @@ pub struct UnixListener {
path: CString,
}

// we currently own the CString, so these impls should be safe
unsafe impl Send for UnixListener {}
unsafe impl Sync for UnixListener {}

@@ -265,9 +264,6 @@ struct AcceptorInner {
closed: atomic::AtomicBool,
}

unsafe impl Send for AcceptorInner {}
unsafe impl Sync for AcceptorInner {}

impl UnixAcceptor {
pub fn fd(&self) -> fd_t { self.inner.listener.fd() }

@@ -211,7 +211,7 @@ impl Timer {
// instead of ()
HELPER.boot(|| {}, helper);

static ID: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
static ID: atomic::AtomicUint = atomic::ATOMIC_UINT_INIT;
let id = ID.fetch_add(1, atomic::Relaxed);
Ok(Timer {
id: id,
@@ -171,7 +171,7 @@ pub fn init_net() {
unsafe {
static START: Once = ONCE_INIT;

START.doit(|| {
START.call_once(|| {
let mut data: c::WSADATA = mem::zeroed();
let ret = c::WSAStartup(0x202, // version 2.2
&mut data);
@@ -20,7 +20,7 @@ const SPIN_COUNT: DWORD = 4000;

pub struct Mutex { inner: atomic::AtomicUint }

pub const MUTEX_INIT: Mutex = Mutex { inner: atomic::INIT_ATOMIC_UINT };
pub const MUTEX_INIT: Mutex = Mutex { inner: atomic::ATOMIC_UINT_INIT };

unsafe impl Sync for Mutex {}

@@ -45,7 +45,7 @@ fn precise_time_ns() -> u64 {
denom: 0 };
static ONCE: sync::Once = sync::ONCE_INIT;
unsafe {
ONCE.doit(|| {
ONCE.call_once(|| {
imp::mach_timebase_info(&mut TIMEBASE);
});
let time = imp::mach_absolute_time();
@@ -198,7 +198,7 @@ pub fn precise_time_ns() -> u64 {
denom: 0 };
static ONCE: std::sync::Once = std::sync::ONCE_INIT;
unsafe {
ONCE.doit(|| {
ONCE.call_once(|| {
imp::mach_timebase_info(&mut TIMEBASE);
});
let time = imp::mach_absolute_time();
@@ -11,12 +11,12 @@
use std::sync::atomic;

pub const C1: uint = 1;
pub const C2: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
pub const C2: atomic::AtomicUint = atomic::ATOMIC_UINT_INIT;
pub const C3: fn() = foo;
pub const C4: uint = C1 * C1 + C1 / C1;
pub const C5: &'static uint = &C4;

pub static S1: uint = 3;
pub static S2: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
pub static S2: atomic::AtomicUint = atomic::ATOMIC_UINT_INIT;

fn foo() {}
@@ -41,7 +41,7 @@
extern crate arena;

use std::iter::range_step;
use std::sync::Future;
use std::thread::Thread;
use arena::TypedArena;

enum Tree<'a> {
@@ -95,7 +95,7 @@ fn main() {
let mut messages = range_step(min_depth, max_depth + 1, 2).map(|depth| {
use std::num::Int;
let iterations = 2i.pow((max_depth - depth + min_depth) as uint);
Future::spawn(move|| {
Thread::spawn(move|| {
let mut chk = 0;
for i in range(1, iterations + 1) {
let arena = TypedArena::new();
@@ -106,10 +106,10 @@ fn main() {
format!("{}\t trees of depth {}\t check: {}",
iterations * 2, depth, chk)
})
}).collect::<Vec<Future<String>>>();
}).collect::<Vec<_>>();

for message in messages.iter_mut() {
println!("{}", *message.get_ref());
for message in messages.into_iter() {
println!("{}", message.join().ok().unwrap());
}

println!("long lived tree of depth {}\t check: {}",
@@ -41,7 +41,7 @@
#![feature(slicing_syntax)]

use std::{cmp, iter, mem};
use std::sync::Future;
use std::thread::Thread;

fn rotate(x: &mut [i32]) {
let mut prev = x[0];
@@ -168,15 +168,15 @@ fn fannkuch(n: i32) -> (i32, i32) {
for (i, j) in range(0, N).zip(iter::count(0, k)) {
let max = cmp::min(j+k, perm.max());

futures.push(Future::spawn(move|| {
futures.push(Thread::spawn(move|| {
work(perm, j as uint, max as uint)
}))
}

let mut checksum = 0;
let mut maxflips = 0;
for fut in futures.iter_mut() {
let (cs, mf) = fut.get();
for fut in futures.into_iter() {
let (cs, mf) = fut.join().ok().unwrap();
checksum += cs;
maxflips = cmp::max(maxflips, mf);
}
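Both benchmark changes above are the same mechanical migration: `Future::spawn` plus a later `get()` becomes `Thread::spawn` plus `join()`, whose `Result` is then unwrapped. A minimal sketch of the pattern with an illustrative `work` function:

```rust
use std::thread::Thread;

fn work(i: uint) -> uint { i * i }

fn main() {
    // Before: futures.push(Future::spawn(move|| work(i))), then fut.get().
    // After: spawn joinable threads and harvest the results with `join`.
    let workers: Vec<_> = range(0u, 8).map(|i| {
        Thread::spawn(move|| work(i))
    }).collect();

    let total = workers.into_iter()
                       .map(|t| t.join().ok().unwrap())
                       .fold(0u, |acc, v| acc + v);
    println!("total: {}", total);
}
```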
@@ -16,11 +16,11 @@ use std::sync::atomic::*;
use std::ptr;

fn main() {
let x = INIT_ATOMIC_BOOL;
let x = ATOMIC_BOOL_INIT;
let x = *&x; //~ ERROR: cannot move out of dereference
let x = INIT_ATOMIC_INT;
let x = ATOMIC_INT_INIT;
let x = *&x; //~ ERROR: cannot move out of dereference
let x = INIT_ATOMIC_UINT;
let x = ATOMIC_UINT_INIT;
let x = *&x; //~ ERROR: cannot move out of dereference
let x: AtomicPtr<uint> = AtomicPtr::new(ptr::null_mut());
let x = *&x; //~ ERROR: cannot move out of dereference
@@ -15,7 +15,7 @@ extern crate "issue-17718" as other;
use std::sync::atomic;

const C1: uint = 1;
const C2: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
const C2: atomic::AtomicUint = atomic::ATOMIC_UINT_INIT;
const C3: fn() = foo;
const C4: uint = C1 * C1 + C1 / C1;
const C5: &'static uint = &C4;
@@ -25,7 +25,7 @@ const C6: uint = {
};

static S1: uint = 3;
static S2: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
static S2: atomic::AtomicUint = atomic::ATOMIC_UINT_INIT;

mod test {
static A: uint = 4;
@@ -9,26 +9,26 @@
// except according to those terms.

use std::task;
use std::sync::atomic::{AtomicUint, INIT_ATOMIC_UINT, Relaxed};
use std::sync::atomic::{AtomicUint, ATOMIC_UINT_INIT, Relaxed};
use std::rand::{thread_rng, Rng, Rand};

const REPEATS: uint = 5;
const MAX_LEN: uint = 32;
static drop_counts: [AtomicUint; MAX_LEN] =
// FIXME #5244: AtomicUint is not Copy.
[
INIT_ATOMIC_UINT, INIT_ATOMIC_UINT, INIT_ATOMIC_UINT, INIT_ATOMIC_UINT,
INIT_ATOMIC_UINT, INIT_ATOMIC_UINT, INIT_ATOMIC_UINT, INIT_ATOMIC_UINT,
INIT_ATOMIC_UINT, INIT_ATOMIC_UINT, INIT_ATOMIC_UINT, INIT_ATOMIC_UINT,
INIT_ATOMIC_UINT, INIT_ATOMIC_UINT, INIT_ATOMIC_UINT, INIT_ATOMIC_UINT,
ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT,
ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT,
ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT,
ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT,

INIT_ATOMIC_UINT, INIT_ATOMIC_UINT, INIT_ATOMIC_UINT, INIT_ATOMIC_UINT,
INIT_ATOMIC_UINT, INIT_ATOMIC_UINT, INIT_ATOMIC_UINT, INIT_ATOMIC_UINT,
INIT_ATOMIC_UINT, INIT_ATOMIC_UINT, INIT_ATOMIC_UINT, INIT_ATOMIC_UINT,
INIT_ATOMIC_UINT, INIT_ATOMIC_UINT, INIT_ATOMIC_UINT, INIT_ATOMIC_UINT,
ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT,
ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT,
ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT,
ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT, ATOMIC_UINT_INIT,
];

static creation_count: AtomicUint = INIT_ATOMIC_UINT;
static creation_count: AtomicUint = ATOMIC_UINT_INIT;

#[deriving(Clone, PartialEq, PartialOrd, Eq, Ord)]
struct DropCounter { x: uint, creation_id: uint }