diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs
index 80f0f2acc99a2..7e3e2fb38b13e 100644
--- a/library/alloc/src/sync.rs
+++ b/library/alloc/src/sync.rs
@@ -233,7 +233,7 @@ macro_rules! acquire {
 ///     let val = Arc::clone(&val);
 ///
 ///     thread::spawn(move || {
-///         let v = val.fetch_add(1, Ordering::SeqCst);
+///         let v = val.fetch_add(1, Ordering::Relaxed);
 ///         println!("{v:?}");
 ///     });
 /// }
diff --git a/library/core/src/alloc/global.rs b/library/core/src/alloc/global.rs
index a1fff6707bd0f..8df3ace54ffe1 100644
--- a/library/core/src/alloc/global.rs
+++ b/library/core/src/alloc/global.rs
@@ -24,10 +24,7 @@ use crate::ptr;
 /// use std::alloc::{GlobalAlloc, Layout};
 /// use std::cell::UnsafeCell;
 /// use std::ptr::null_mut;
-/// use std::sync::atomic::{
-///     AtomicUsize,
-///     Ordering::{Acquire, SeqCst},
-/// };
+/// use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
 ///
 /// const ARENA_SIZE: usize = 128 * 1024;
 /// const MAX_SUPPORTED_ALIGN: usize = 4096;
@@ -61,7 +58,7 @@ use crate::ptr;
 ///         let mut allocated = 0;
 ///         if self
 ///             .remaining
-///             .fetch_update(SeqCst, SeqCst, |mut remaining| {
+///             .fetch_update(Relaxed, Relaxed, |mut remaining| {
 ///                 if size > remaining {
 ///                     return None;
 ///                 }
@@ -81,7 +78,7 @@ use crate::ptr;
 ///
 /// fn main() {
 ///     let _s = format!("allocating a string!");
-///     let currently = ALLOCATOR.remaining.load(Acquire);
+///     let currently = ALLOCATOR.remaining.load(Relaxed);
 ///     println!("allocated so far: {}", ARENA_SIZE - currently);
 /// }
 /// ```
diff --git a/library/panic_unwind/src/emcc.rs b/library/panic_unwind/src/emcc.rs
index af18e19337c7a..fed4c52e83c5f 100644
--- a/library/panic_unwind/src/emcc.rs
+++ b/library/panic_unwind/src/emcc.rs
@@ -84,7 +84,7 @@ pub unsafe fn cleanup(ptr: *mut u8) -> Box<dyn Any + Send> {
         super::__rust_foreign_exception();
     }
 
-    let was_caught = (*adjusted_ptr).caught.swap(true, Ordering::SeqCst);
+    let was_caught = (*adjusted_ptr).caught.swap(true, Ordering::Relaxed);
     if was_caught {
         // Since cleanup() isn't allowed to panic, we just abort instead.
         intrinsics::abort();
diff --git a/library/proc_macro/src/bridge/handle.rs b/library/proc_macro/src/bridge/handle.rs
index 894acae217e44..8c53bb609f60c 100644
--- a/library/proc_macro/src/bridge/handle.rs
+++ b/library/proc_macro/src/bridge/handle.rs
@@ -21,7 +21,7 @@ impl<T: 'static> OwnedStore<T> {
     pub(super) fn new(counter: &'static AtomicU32) -> Self {
         // Ensure the handle counter isn't 0, which would panic later,
         // when `NonZero::new` (aka `Handle::new`) is called in `alloc`.
-        assert_ne!(counter.load(Ordering::SeqCst), 0);
+        assert_ne!(counter.load(Ordering::Relaxed), 0);
         OwnedStore { counter, data: BTreeMap::new() }
     }
 
@@ -29,7 +29,7 @@ impl<T: 'static> OwnedStore<T> {
     pub(super) fn alloc(&mut self, x: T) -> Handle {
-        let counter = self.counter.fetch_add(1, Ordering::SeqCst);
+        let counter = self.counter.fetch_add(1, Ordering::Relaxed);
         let handle = Handle::new(counter).expect("`proc_macro` handle counter overflowed");
         assert!(self.data.insert(handle, x).is_none());
         handle
diff --git a/library/std/src/alloc.rs b/library/std/src/alloc.rs
index a834b36697c00..dc0e302a81088 100644
--- a/library/std/src/alloc.rs
+++ b/library/std/src/alloc.rs
@@ -329,7 +329,7 @@ static HOOK: AtomicPtr<()> = AtomicPtr::new(ptr::null_mut());
 /// ```
 #[unstable(feature = "alloc_error_hook", issue = "51245")]
 pub fn set_alloc_error_hook(hook: fn(Layout)) {
-    HOOK.store(hook as *mut (), Ordering::SeqCst);
+    HOOK.store(hook as *mut (), Ordering::Release);
 }
 
 /// Unregisters the current allocation error hook, returning it.
@@ -339,7 +339,7 @@ pub fn set_alloc_error_hook(hook: fn(Layout)) {
 /// If no custom hook is registered, the default hook will be returned.
 #[unstable(feature = "alloc_error_hook", issue = "51245")]
 pub fn take_alloc_error_hook() -> fn(Layout) {
-    let hook = HOOK.swap(ptr::null_mut(), Ordering::SeqCst);
+    let hook = HOOK.swap(ptr::null_mut(), Ordering::Acquire);
     if hook.is_null() { default_alloc_error_hook } else { unsafe { mem::transmute(hook) } }
 }
 
@@ -362,7 +362,7 @@ fn default_alloc_error_hook(layout: Layout) {
 #[alloc_error_handler]
 #[unstable(feature = "alloc_internals", issue = "none")]
 pub fn rust_oom(layout: Layout) -> ! {
-    let hook = HOOK.load(Ordering::SeqCst);
+    let hook = HOOK.load(Ordering::Acquire);
     let hook: fn(Layout) =
         if hook.is_null() { default_alloc_error_hook } else { unsafe { mem::transmute(hook) } };
     hook(layout);
diff --git a/library/std/src/net/test.rs b/library/std/src/net/test.rs
index 37937b5ea9541..d318d457f3569 100644
--- a/library/std/src/net/test.rs
+++ b/library/std/src/net/test.rs
@@ -7,12 +7,12 @@ use crate::sync::atomic::{AtomicUsize, Ordering};
 static PORT: AtomicUsize = AtomicUsize::new(0);
 
 pub fn next_test_ip4() -> SocketAddr {
-    let port = PORT.fetch_add(1, Ordering::SeqCst) as u16 + base_port();
+    let port = PORT.fetch_add(1, Ordering::Relaxed) as u16 + base_port();
     SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), port))
 }
 
 pub fn next_test_ip6() -> SocketAddr {
-    let port = PORT.fetch_add(1, Ordering::SeqCst) as u16 + base_port();
+    let port = PORT.fetch_add(1, Ordering::Relaxed) as u16 + base_port();
     SocketAddr::V6(SocketAddrV6::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), port, 0, 0))
 }
diff --git a/library/std/src/panicking.rs b/library/std/src/panicking.rs
index 464a46264cbdd..b0bcab7994c76 100644
--- a/library/std/src/panicking.rs
+++ b/library/std/src/panicking.rs
@@ -272,7 +272,7 @@ fn default_hook(info: &PanicInfo<'_>) {
             drop(backtrace::print(err, crate::backtrace_rs::PrintFmt::Full))
         }
         Some(BacktraceStyle::Off) => {
-            if FIRST_PANIC.swap(false, Ordering::SeqCst) {
+            if FIRST_PANIC.swap(false, Ordering::Relaxed) {
                 let _ = writeln!(
                     err,
                     "note: run with `RUST_BACKTRACE=1` environment variable to display a \
diff --git a/library/std/src/sync/condvar/tests.rs b/library/std/src/sync/condvar/tests.rs
index 24f467f0b03d7..12d13a6b20be3 100644
--- a/library/std/src/sync/condvar/tests.rs
+++ b/library/std/src/sync/condvar/tests.rs
@@ -170,14 +170,14 @@ fn wait_timeout_wake() {
         let t = thread::spawn(move || {
             let _g = m2.lock().unwrap();
             thread::sleep(Duration::from_millis(1));
-            notified_copy.store(true, Ordering::SeqCst);
+            notified_copy.store(true, Ordering::Relaxed);
             c2.notify_one();
         });
         let (g, timeout_res) = c.wait_timeout(g, Duration::from_millis(u64::MAX)).unwrap();
         assert!(!timeout_res.timed_out());
         // spurious wakeups mean this isn't necessarily true
         // so execute test again, if not notified
-        if !notified.load(Ordering::SeqCst) {
+        if !notified.load(Ordering::Relaxed) {
             t.join().unwrap();
             continue;
         }
diff --git a/library/std/src/sys/pal/unix/thread_parking/pthread.rs b/library/std/src/sys/pal/unix/thread_parking/pthread.rs
index ae805d8439945..bb79cf9548e68 100644
--- a/library/std/src/sys/pal/unix/thread_parking/pthread.rs
+++ b/library/std/src/sys/pal/unix/thread_parking/pthread.rs
@@ -5,7 +5,7 @@ use crate::marker::PhantomPinned;
 use crate::pin::Pin;
 use crate::ptr::addr_of_mut;
 use crate::sync::atomic::AtomicUsize;
-use crate::sync::atomic::Ordering::SeqCst;
+use crate::sync::atomic::Ordering::{Acquire, Relaxed, Release};
 #[cfg(not(target_os = "nto"))]
 use crate::sys::time::TIMESPEC_MAX;
 #[cfg(target_os = "nto")]
@@ -150,16 +150,18 @@ impl Parker {
     // This implementation doesn't require `unsafe`, but other implementations
     // may assume this is only called by the thread that owns the Parker.
+    //
+    // For memory ordering, see std/src/sys_common/thread_parking/futex.rs
     pub unsafe fn park(self: Pin<&Self>) {
         // If we were previously notified then we consume this notification and
         // return quickly.
-        if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() {
+        if self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Relaxed).is_ok() {
             return;
         }
 
         // Otherwise we need to coordinate going to sleep
         lock(self.lock.get());
-        match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
+        match self.state.compare_exchange(EMPTY, PARKED, Relaxed, Relaxed) {
             Ok(_) => {}
             Err(NOTIFIED) => {
                 // We must read here, even though we know it will be `NOTIFIED`.
@@ -168,7 +170,7 @@ impl Parker {
                 // acquire operation that synchronizes with that `unpark` to observe
                 // any writes it made before the call to unpark. To do that we must
                 // read from the write it made to `state`.
-                let old = self.state.swap(EMPTY, SeqCst);
+                let old = self.state.swap(EMPTY, Acquire);
 
                 unlock(self.lock.get());
 
@@ -185,7 +187,7 @@ impl Parker {
         loop {
             wait(self.cvar.get(), self.lock.get());
 
-            match self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) {
+            match self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Relaxed) {
                 Ok(_) => break, // got a notification
                 Err(_) => {}    // spurious wakeup, go back to sleep
             }
@@ -201,16 +203,16 @@ impl Parker {
         // Like `park` above we have a fast path for an already-notified thread, and
         // afterwards we start coordinating for a sleep.
         // return quickly.
-        if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() {
+        if self.state.compare_exchange(NOTIFIED, EMPTY, Acquire, Relaxed).is_ok() {
             return;
         }
 
         lock(self.lock.get());
-        match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) {
+        match self.state.compare_exchange(EMPTY, PARKED, Relaxed, Relaxed) {
             Ok(_) => {}
             Err(NOTIFIED) => {
                 // We must read again here, see `park`.
-                let old = self.state.swap(EMPTY, SeqCst);
+                let old = self.state.swap(EMPTY, Acquire);
                 unlock(self.lock.get());
 
                 assert_eq!(old, NOTIFIED, "park state changed unexpectedly");
@@ -228,7 +230,7 @@ impl Parker {
         // parked.
         wait_timeout(self.cvar.get(), self.lock.get(), dur);
 
-        match self.state.swap(EMPTY, SeqCst) {
+        match self.state.swap(EMPTY, Acquire) {
             NOTIFIED => unlock(self.lock.get()), // got a notification, hurray!
             PARKED => unlock(self.lock.get()),   // no notification, alas
             n => {
@@ -245,7 +247,7 @@ impl Parker {
         // `state` is already `NOTIFIED`. That is why this must be a swap
         // rather than a compare-and-swap that returns if it reads `NOTIFIED`
         // on failure.
-        match self.state.swap(NOTIFIED, SeqCst) {
+        match self.state.swap(NOTIFIED, Release) {
             EMPTY => return,    // no one was waiting
             NOTIFIED => return, // already unparked
             PARKED => {}        // gotta go wake someone up
diff --git a/library/std/src/sys/pal/wasm/alloc.rs b/library/std/src/sys/pal/wasm/alloc.rs
index 6dceb1689a8b7..b74ce0d47425a 100644
--- a/library/std/src/sys/pal/wasm/alloc.rs
+++ b/library/std/src/sys/pal/wasm/alloc.rs
@@ -57,7 +57,10 @@ unsafe impl GlobalAlloc for System {
 
 #[cfg(target_feature = "atomics")]
 mod lock {
-    use crate::sync::atomic::{AtomicI32, Ordering::SeqCst};
+    use crate::sync::atomic::{
+        AtomicI32,
+        Ordering::{Acquire, Release},
+    };
 
     static LOCKED: AtomicI32 = AtomicI32::new(0);
 
@@ -65,7 +68,7 @@ mod lock {
     pub fn lock() -> DropLock {
         loop {
-            if LOCKED.swap(1, SeqCst) == 0 {
+            if LOCKED.swap(1, Acquire) == 0 {
                 return DropLock;
             }
             // Ok so here's where things get a little depressing. At this point
@@ -143,7 +146,7 @@ mod lock {
     impl Drop for DropLock {
         fn drop(&mut self) {
-            let r = LOCKED.swap(0, SeqCst);
+            let r = LOCKED.swap(0, Release);
             debug_assert_eq!(r, 1);
 
             // Note that due to the above logic we don't actually need to wake
diff --git a/library/std/src/sys/pal/windows/pipe.rs b/library/std/src/sys/pal/windows/pipe.rs
index 013f588676ae8..dfa938d4d5769 100644
--- a/library/std/src/sys/pal/windows/pipe.rs
+++ b/library/std/src/sys/pal/windows/pipe.rs
@@ -7,7 +7,7 @@ use crate::path::Path;
 use crate::ptr;
 use crate::slice;
 use crate::sync::atomic::AtomicUsize;
-use crate::sync::atomic::Ordering::SeqCst;
+use crate::sync::atomic::Ordering::Relaxed;
 use crate::sys::c;
 use crate::sys::fs::{File, OpenOptions};
 use crate::sys::handle::Handle;
@@ -214,11 +214,11 @@ pub fn spawn_pipe_relay(
 fn random_number() -> usize {
     static N: AtomicUsize = AtomicUsize::new(0);
     loop {
-        if N.load(SeqCst) != 0 {
-            return N.fetch_add(1, SeqCst);
+        if N.load(Relaxed) != 0 {
+            return N.fetch_add(1, Relaxed);
         }
 
-        N.store(hashmap_random_keys().0 as usize, SeqCst);
+        N.store(hashmap_random_keys().0 as usize, Relaxed);
     }
 }
diff --git a/library/std/src/sys/pal/xous/alloc.rs b/library/std/src/sys/pal/xous/alloc.rs
index 0d540e9552072..601411173aacb 100644
--- a/library/std/src/sys/pal/xous/alloc.rs
+++ b/library/std/src/sys/pal/xous/alloc.rs
@@ -46,7 +46,10 @@ unsafe impl GlobalAlloc for System {
 }
 
 mod lock {
-    use crate::sync::atomic::{AtomicI32, Ordering::SeqCst};
+    use crate::sync::atomic::{
+        AtomicI32,
+        Ordering::{Acquire, Release},
+    };
 
     static LOCKED: AtomicI32 = AtomicI32::new(0);
 
@@ -54,7 +57,7 @@ mod lock {
     pub fn lock() -> DropLock {
         loop {
-            if LOCKED.swap(1, SeqCst) == 0 {
+            if LOCKED.swap(1, Acquire) == 0 {
                 return DropLock;
             }
             crate::os::xous::ffi::do_yield();
@@ -63,7 +66,7 @@ mod lock {
     impl Drop for DropLock {
         fn drop(&mut self) {
-            let r = LOCKED.swap(0, SeqCst);
+            let r = LOCKED.swap(0, Release);
             debug_assert_eq!(r, 1);
         }
     }
diff --git a/library/std/src/sys/pal/xous/net/tcpstream.rs b/library/std/src/sys/pal/xous/net/tcpstream.rs
index 7149678118ab6..aebef02acdad5 100644
--- a/library/std/src/sys/pal/xous/net/tcpstream.rs
+++ b/library/std/src/sys/pal/xous/net/tcpstream.rs
@@ -406,7 +406,7 @@ impl TcpStream {
     }
 
     pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
-        self.nonblocking.store(nonblocking, Ordering::SeqCst);
+        self.nonblocking.store(nonblocking, Ordering::Relaxed);
         Ok(())
     }
 }
diff --git a/library/std/src/sys/pal/xous/thread_local_key.rs b/library/std/src/sys/pal/xous/thread_local_key.rs
index 59a668c3df6ff..2aaf46d0244a7 100644
--- a/library/std/src/sys/pal/xous/thread_local_key.rs
+++ b/library/std/src/sys/pal/xous/thread_local_key.rs
@@ -2,7 +2,7 @@ use crate::mem::ManuallyDrop;
 use crate::ptr;
 use crate::sync::atomic::AtomicPtr;
 use crate::sync::atomic::AtomicUsize;
-use crate::sync::atomic::Ordering::SeqCst;
+use crate::sync::atomic::Ordering::{Acquire, Relaxed, Release};
 use core::arch::asm;
 
 use crate::os::xous::ffi::{map_memory, unmap_memory, MemoryFlags};
@@ -92,7 +92,7 @@ fn tls_table() -> &'static mut [*mut u8] {
 pub unsafe fn create(dtor: Option<Dtor>) -> Key {
     // Allocate a new TLS key. These keys are shared among all threads.
     #[allow(unused_unsafe)]
-    let key = unsafe { TLS_KEY_INDEX.fetch_add(1, SeqCst) };
+    let key = unsafe { TLS_KEY_INDEX.fetch_add(1, Relaxed) };
     if let Some(f) = dtor {
         unsafe { register_dtor(key, f) };
     }
@@ -154,11 +154,11 @@ unsafe fn register_dtor(key: Key, dtor: Dtor) {
     let mut node = ManuallyDrop::new(Box::new(Node { key, dtor, next: ptr::null_mut() }));
     #[allow(unused_unsafe)]
-    let mut head = unsafe { DTORS.load(SeqCst) };
+    let mut head = unsafe { DTORS.load(Acquire) };
     loop {
         node.next = head;
         #[allow(unused_unsafe)]
-        match unsafe { DTORS.compare_exchange(head, &mut **node, SeqCst, SeqCst) } {
+        match unsafe { DTORS.compare_exchange(head, &mut **node, Release, Acquire) } {
             Ok(_) => return, // nothing to drop, we successfully added the node to the list
             Err(cur) => head = cur,
         }
@@ -199,7 +199,7 @@ unsafe fn run_dtors() {
         }
         any_run = false;
         #[allow(unused_unsafe)]
-        let mut cur = unsafe { DTORS.load(SeqCst) };
+        let mut cur = unsafe { DTORS.load(Acquire) };
         while !cur.is_null() {
             let ptr = unsafe { get((*cur).key) };
diff --git a/library/std/src/sys/sync/mutex/xous.rs b/library/std/src/sys/sync/mutex/xous.rs
index a8c9518ff0bcf..1426e48f8b7af 100644
--- a/library/std/src/sys/sync/mutex/xous.rs
+++ b/library/std/src/sys/sync/mutex/xous.rs
@@ -1,6 +1,9 @@
 use crate::os::xous::ffi::{blocking_scalar, do_yield};
 use crate::os::xous::services::{ticktimer_server, TicktimerScalar};
-use crate::sync::atomic::{AtomicBool, AtomicUsize, Ordering::Relaxed, Ordering::SeqCst};
+use crate::sync::atomic::{
+    AtomicBool, AtomicUsize,
+    Ordering::{Acquire, Relaxed, Release},
+};
 
 pub struct Mutex {
     /// The "locked" value indicates how many threads are waiting on this
@@ -68,7 +71,7 @@ impl Mutex {
     #[inline]
     pub unsafe fn unlock(&self) {
-        let prev = self.locked.fetch_sub(1, SeqCst);
+        let prev = self.locked.fetch_sub(1, Release);
 
         // If the previous value was 1, then this was a "fast path" unlock, so no
         // need to involve the Ticktimer server
@@ -89,12 +92,12 @@ impl Mutex {
     #[inline]
     pub unsafe fn try_lock(&self) -> bool {
-        self.locked.compare_exchange(0, 1, SeqCst, SeqCst).is_ok()
+        self.locked.compare_exchange(0, 1, Acquire, Relaxed).is_ok()
     }
 
     #[inline]
     pub unsafe fn try_lock_or_poison(&self) -> bool {
-        self.locked.fetch_add(1, SeqCst) == 0
+        self.locked.fetch_add(1, Acquire) == 0
     }
 }
diff --git a/library/std/src/sys_common/thread_local_key.rs b/library/std/src/sys_common/thread_local_key.rs
index 204834984a227..7dcc114109958 100644
--- a/library/std/src/sys_common/thread_local_key.rs
+++ b/library/std/src/sys_common/thread_local_key.rs
@@ -128,7 +128,7 @@ impl StaticKey {
     #[inline]
     unsafe fn key(&self) -> imp::Key {
-        match self.key.load(Ordering::Relaxed) {
+        match self.key.load(Ordering::Acquire) {
             KEY_SENTVAL => self.lazy_init() as imp::Key,
             n => n as imp::Key,
         }
@@ -156,8 +156,8 @@ impl StaticKey {
         match self.key.compare_exchange(
             KEY_SENTVAL,
             key as usize,
-            Ordering::SeqCst,
-            Ordering::SeqCst,
+            Ordering::Release,
+            Ordering::Acquire,
         ) {
             // The CAS succeeded, so we've created the actual key
             Ok(_) => key as usize,
diff --git a/library/std/src/thread/local/tests.rs b/library/std/src/thread/local/tests.rs
index 964c7fc5b0ca2..25019b554bb6a 100644
--- a/library/std/src/thread/local/tests.rs
+++ b/library/std/src/thread/local/tests.rs
@@ -255,6 +255,9 @@ fn join_orders_after_tls_destructors() {
     // observe the channel in the `THREAD1_WAITING` state. If this does occur,
     // we switch to the “poison” state `THREAD2_JOINED` and panic all around.
     // (This is equivalent to “sending” from an alternate producer thread.)
+    //
+    // Relaxed memory ordering is fine because spawn()/join() already provide all the
+    // synchronization we need here.
     const FRESH: u8 = 0;
     const THREAD2_LAUNCHED: u8 = 1;
     const THREAD1_WAITING: u8 = 2;
@@ -263,7 +266,7 @@
     static SYNC_STATE: AtomicU8 = AtomicU8::new(FRESH);
 
     for _ in 0..10 {
-        SYNC_STATE.store(FRESH, Ordering::SeqCst);
+        SYNC_STATE.store(FRESH, Ordering::Relaxed);
 
         let jh = thread::Builder::new()
             .name("thread1".into())
@@ -272,7 +275,7 @@
                 impl Drop for TlDrop {
                     fn drop(&mut self) {
-                        let mut sync_state = SYNC_STATE.swap(THREAD1_WAITING, Ordering::SeqCst);
+                        let mut sync_state = SYNC_STATE.swap(THREAD1_WAITING, Ordering::Relaxed);
                         loop {
                             match sync_state {
                                 THREAD2_LAUNCHED | THREAD1_WAITING => thread::yield_now(),
@@ -282,7 +285,7 @@
                                 ),
                                 v => unreachable!("sync state: {}", v),
                             }
-                            sync_state = SYNC_STATE.load(Ordering::SeqCst);
+                            sync_state = SYNC_STATE.load(Ordering::Relaxed);
                         }
                     }
                 }
@@ -294,7 +297,7 @@
                 TL_DROP.with(|_| {});
 
                 loop {
-                    match SYNC_STATE.load(Ordering::SeqCst) {
+                    match SYNC_STATE.load(Ordering::Relaxed) {
                         FRESH => thread::yield_now(),
                         THREAD2_LAUNCHED => break,
                         v => unreachable!("sync state: {}", v),
@@ -306,9 +309,9 @@
         let jh2 = thread::Builder::new()
             .name("thread2".into())
             .spawn(move || {
-                assert_eq!(SYNC_STATE.swap(THREAD2_LAUNCHED, Ordering::SeqCst), FRESH);
+                assert_eq!(SYNC_STATE.swap(THREAD2_LAUNCHED, Ordering::Relaxed), FRESH);
                 jh.join().unwrap();
-                match SYNC_STATE.swap(THREAD2_JOINED, Ordering::SeqCst) {
+                match SYNC_STATE.swap(THREAD2_JOINED, Ordering::Relaxed) {
                     MAIN_THREAD_RENDEZVOUS => return,
                     THREAD2_LAUNCHED | THREAD1_WAITING => {
                         panic!("Thread 2 running after thread 1 join before main thread rendezvous")
@@ -322,8 +325,8 @@
         match SYNC_STATE.compare_exchange(
             THREAD1_WAITING,
             MAIN_THREAD_RENDEZVOUS,
-            Ordering::SeqCst,
-            Ordering::SeqCst,
+            Ordering::Relaxed,
+            Ordering::Relaxed,
         ) {
             Ok(_) => break,
             Err(FRESH) => thread::yield_now(),
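
A few closing notes on the ordering patterns this patch leans on, each with a stand-alone sketch. None of the snippets below are part of the diff; every name in them is invented for illustration.

The simplest case is the pure counter: the handle counter in `bridge/handle.rs`, the test-port counter in `net/test.rs`, and the pipe-name counter in `windows/pipe.rs` only need uniqueness, not synchronization. `fetch_add` is a single atomic read-modify-write, so each caller gets a distinct value even under `Relaxed`; the weaker ordering only gives up using the counter as a synchronization point for unrelated memory, which none of these call sites rely on. A minimal sketch of that pattern:

```rust
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
use std::thread;

// Hypothetical global ID generator in the style of the counters above.
static NEXT_ID: AtomicUsize = AtomicUsize::new(1);

fn next_id() -> usize {
    // A Relaxed RMW is still atomic: no two callers can observe the
    // same value, they just don't order any *other* memory accesses.
    NEXT_ID.fetch_add(1, Relaxed)
}

fn main() {
    let handles: Vec<_> = (0..4).map(|_| thread::spawn(next_id)).collect();
    for h in handles {
        println!("unique id: {}", h.join().unwrap());
    }
}
```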
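
The `HOOK` changes in `std/src/alloc.rs` follow the classic publish/consume pairing: a `Release` store is paired with an `Acquire` load, so any thread that observes the published pointer also observes everything written before the store. A sketch of the same shape, under the assumption that the published value is heap-allocated and intentionally leaked to keep its lifetime `'static`:

```rust
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering::{Acquire, Release}};

// Hypothetical publication slot; not the real HOOK machinery.
static CONFIG: AtomicPtr<String> = AtomicPtr::new(ptr::null_mut());

fn publish(value: String) {
    let raw = Box::into_raw(Box::new(value));
    // Release: the String is fully initialized before the pointer is visible.
    CONFIG.store(raw, Release);
}

fn current() -> Option<&'static String> {
    // Acquire pairs with the Release store above.
    unsafe { CONFIG.load(Acquire).as_ref() }
}

fn main() {
    publish(String::from("hello"));
    assert_eq!(current().map(String::as_str), Some("hello"));
}
```

For `HOOK` itself the stored value is a plain `fn` pointer with no pointed-to data, so `Relaxed` would arguably also be defensible; `Release`/`Acquire` is the conservative choice.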
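
The pthread parker changes are the subtlest part. The notifier hands over a token with a `Release` swap; the parked thread consumes it with an `Acquire` operation, which is exactly the synchronizes-with edge the in-code comment describes. The failure ordering of the consuming `compare_exchange` can be `Relaxed` because a failed exchange means there was no notification, so there are no prior writes to observe. A hypothetical distillation of just that handoff (the `EMPTY`/`NOTIFIED` values mirror the real constants; everything else is invented):

```rust
use std::sync::atomic::{AtomicUsize, Ordering::{Acquire, Relaxed, Release}};

const EMPTY: usize = 0;
const NOTIFIED: usize = 1;

fn notify(state: &AtomicUsize) {
    // Release: writes made before notifying become visible to the consumer.
    state.swap(NOTIFIED, Release);
}

fn try_consume(state: &AtomicUsize) -> bool {
    // Acquire on success synchronizes with the Release swap above;
    // Relaxed on failure, since there is nothing to synchronize with.
    state.compare_exchange(NOTIFIED, EMPTY, Acquire, Relaxed).is_ok()
}

fn main() {
    let state = AtomicUsize::new(EMPTY);
    assert!(!try_consume(&state)); // nothing to consume yet
    notify(&state);
    assert!(try_consume(&state)); // token consumed exactly once
}
```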
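
The allocator locks in `wasm/alloc.rs` and `xous/alloc.rs` are swap-based spinlocks, and lock acquire/release is precisely what `Acquire`/`Release` model: the acquiring `swap(1, Acquire)` synchronizes with the previous holder's `swap(0, Release)`, so everything done inside the critical section is visible to the next holder. A self-contained sketch of the pattern, with a spin loop standing in for the platform-specific wait:

```rust
use std::sync::atomic::{AtomicI32, Ordering::{Acquire, Release}};

static LOCKED: AtomicI32 = AtomicI32::new(0);

struct DropLock;

impl Drop for DropLock {
    fn drop(&mut self) {
        // Release: hand our critical-section writes to the next acquirer.
        let r = LOCKED.swap(0, Release);
        debug_assert_eq!(r, 1);
    }
}

fn lock() -> DropLock {
    loop {
        // Acquire: see everything the previous holder wrote.
        if LOCKED.swap(1, Acquire) == 0 {
            return DropLock;
        }
        std::hint::spin_loop(); // stand-in for memory.atomic.wait / do_yield
    }
}

fn main() {
    let _guard = lock();
    // ...critical section...
} // `_guard` dropped here, unlocking
```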
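
The `DTORS` list in the xous `thread_local_key.rs` is a lock-free stack push (Treiber style): `Release` on success publishes the node's contents to whoever later traverses the list with an `Acquire` load, while `Acquire` on failure lets the retry observe the node a competing `Release` push just installed. A hypothetical single-threaded sketch of the structure (nodes are leaked to keep it short):

```rust
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering::{Acquire, Release}};

struct Node {
    value: u32,
    next: *mut Node,
}

static HEAD: AtomicPtr<Node> = AtomicPtr::new(ptr::null_mut());

fn push(value: u32) {
    let node = Box::into_raw(Box::new(Node { value, next: ptr::null_mut() }));
    let mut head = HEAD.load(Acquire);
    loop {
        unsafe { (*node).next = head };
        match HEAD.compare_exchange(head, node, Release, Acquire) {
            Ok(_) => return,
            Err(current) => head = current, // lost the race; retry with new head
        }
    }
}

fn main() {
    push(1);
    push(2);
    // Walk the list: most recently pushed node first.
    let mut cur = HEAD.load(Acquire);
    while let Some(node) = unsafe { cur.as_ref() } {
        println!("{}", node.value);
        cur = node.next;
    }
}
```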
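
Finally, the `thread/local/tests.rs` change rests on the fact that `thread::spawn` happens-before everything in the new thread, and everything a thread does happens-before the return of `join()`, so the flag itself never has to carry ordering. A minimal sketch of why that makes `Relaxed` sufficient there:

```rust
use std::sync::atomic::{AtomicU8, Ordering::Relaxed};
use std::thread;

static FLAG: AtomicU8 = AtomicU8::new(0);

fn main() {
    FLAG.store(1, Relaxed); // visible to the spawned thread via spawn()
    let jh = thread::spawn(|| {
        assert_eq!(FLAG.load(Relaxed), 1);
        FLAG.store(2, Relaxed);
    });
    jh.join().unwrap(); // join() publishes the thread's writes back to us
    assert_eq!(FLAG.load(Relaxed), 2);
}
```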