From f238513efac3193ed7d05222d03b8052690de0a3 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Fri, 1 Jul 2022 16:21:46 -0400 Subject: [PATCH 1/3] rename some data_race types for more clarity --- src/concurrency/data_race.rs | 56 +++++----- src/concurrency/weak_memory.rs | 24 ++-- src/lib.rs | 2 +- src/shims/intrinsics.rs | 198 ++++++++++++++++----------------- src/shims/unix/linux/sync.rs | 6 +- src/shims/unix/sync.rs | 28 ++--- src/shims/windows/sync.rs | 4 +- 7 files changed, 159 insertions(+), 159 deletions(-) diff --git a/src/concurrency/data_race.rs b/src/concurrency/data_race.rs index 36178269e0..ef0920d969 100644 --- a/src/concurrency/data_race.rs +++ b/src/concurrency/data_race.rs @@ -62,9 +62,9 @@ use super::weak_memory::EvalContextExt as _; pub type AllocExtra = VClockAlloc; -/// Valid atomic read-write operations, alias of atomic::Ordering (not non-exhaustive). +/// Valid atomic read-write orderings, alias of atomic::Ordering (not non-exhaustive). #[derive(Copy, Clone, PartialEq, Eq, Debug)] -pub enum AtomicRwOp { +pub enum AtomicRwOrd { Relaxed, Acquire, Release, @@ -72,25 +72,25 @@ pub enum AtomicRwOp { SeqCst, } -/// Valid atomic read operations, subset of atomic::Ordering. +/// Valid atomic read orderings, subset of atomic::Ordering. #[derive(Copy, Clone, PartialEq, Eq, Debug)] -pub enum AtomicReadOp { +pub enum AtomicReadOrd { Relaxed, Acquire, SeqCst, } -/// Valid atomic write operations, subset of atomic::Ordering. +/// Valid atomic write orderings, subset of atomic::Ordering. #[derive(Copy, Clone, PartialEq, Eq, Debug)] -pub enum AtomicWriteOp { +pub enum AtomicWriteOrd { Relaxed, Release, SeqCst, } -/// Valid atomic fence operations, subset of atomic::Ordering. +/// Valid atomic fence orderings, subset of atomic::Ordering. 
#[derive(Copy, Clone, PartialEq, Eq, Debug)] -pub enum AtomicFenceOp { +pub enum AtomicFenceOrd { Acquire, Release, AcqRel, @@ -486,7 +486,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> { op: &OpTy<'tcx, Tag>, offset: u64, layout: TyAndLayout<'tcx>, - atomic: AtomicReadOp, + atomic: AtomicReadOrd, ) -> InterpResult<'tcx, ScalarMaybeUninit> { let this = self.eval_context_ref(); let value_place = this.deref_operand_and_offset(op, offset, layout)?; @@ -500,7 +500,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> { offset: u64, value: impl Into>, layout: TyAndLayout<'tcx>, - atomic: AtomicWriteOp, + atomic: AtomicWriteOrd, ) -> InterpResult<'tcx> { let this = self.eval_context_mut(); let value_place = this.deref_operand_and_offset(op, offset, layout)?; @@ -511,7 +511,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> { fn read_scalar_atomic( &self, place: &MPlaceTy<'tcx, Tag>, - atomic: AtomicReadOp, + atomic: AtomicReadOrd, ) -> InterpResult<'tcx, ScalarMaybeUninit> { let this = self.eval_context_ref(); // This will read from the last store in the modification order of this location. 
In case @@ -531,7 +531,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> { &mut self, val: ScalarMaybeUninit, dest: &MPlaceTy<'tcx, Tag>, - atomic: AtomicWriteOp, + atomic: AtomicWriteOrd, ) -> InterpResult<'tcx> { let this = self.eval_context_mut(); this.validate_overlapping_atomic(dest)?; @@ -552,7 +552,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> { rhs: &ImmTy<'tcx, Tag>, op: mir::BinOp, neg: bool, - atomic: AtomicRwOp, + atomic: AtomicRwOrd, ) -> InterpResult<'tcx, ImmTy<'tcx, Tag>> { let this = self.eval_context_mut(); @@ -581,7 +581,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> { &mut self, place: &MPlaceTy<'tcx, Tag>, new: ScalarMaybeUninit, - atomic: AtomicRwOp, + atomic: AtomicRwOrd, ) -> InterpResult<'tcx, ScalarMaybeUninit> { let this = self.eval_context_mut(); @@ -602,7 +602,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> { place: &MPlaceTy<'tcx, Tag>, rhs: ImmTy<'tcx, Tag>, min: bool, - atomic: AtomicRwOp, + atomic: AtomicRwOrd, ) -> InterpResult<'tcx, ImmTy<'tcx, Tag>> { let this = self.eval_context_mut(); @@ -642,8 +642,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> { place: &MPlaceTy<'tcx, Tag>, expect_old: &ImmTy<'tcx, Tag>, new: ScalarMaybeUninit, - success: AtomicRwOp, - fail: AtomicReadOp, + success: AtomicRwOrd, + fail: AtomicReadOrd, can_fail_spuriously: bool, ) -> InterpResult<'tcx, Immediate> { use rand::Rng as _; @@ -696,7 +696,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> { fn validate_atomic_load( &self, place: &MPlaceTy<'tcx, Tag>, - atomic: AtomicReadOp, + atomic: AtomicReadOrd, ) -> InterpResult<'tcx> { let this = self.eval_context_ref(); this.validate_overlapping_atomic(place)?; @@ -705,7 +705,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> { atomic, "Atomic Load", move |memory, clocks, index, atomic| { - if 
atomic == AtomicReadOp::Relaxed { + if atomic == AtomicReadOrd::Relaxed { memory.load_relaxed(&mut *clocks, index) } else { memory.load_acquire(&mut *clocks, index) @@ -719,7 +719,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> { fn validate_atomic_store( &mut self, place: &MPlaceTy<'tcx, Tag>, - atomic: AtomicWriteOp, + atomic: AtomicWriteOrd, ) -> InterpResult<'tcx> { let this = self.eval_context_mut(); this.validate_overlapping_atomic(place)?; @@ -728,7 +728,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> { atomic, "Atomic Store", move |memory, clocks, index, atomic| { - if atomic == AtomicWriteOp::Relaxed { + if atomic == AtomicWriteOrd::Relaxed { memory.store_relaxed(clocks, index) } else { memory.store_release(clocks, index) @@ -742,9 +742,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> { fn validate_atomic_rmw( &mut self, place: &MPlaceTy<'tcx, Tag>, - atomic: AtomicRwOp, + atomic: AtomicRwOrd, ) -> InterpResult<'tcx> { - use AtomicRwOp::*; + use AtomicRwOrd::*; let acquire = matches!(atomic, Acquire | AcqRel | SeqCst); let release = matches!(atomic, Release | AcqRel | SeqCst); let this = self.eval_context_mut(); @@ -764,7 +764,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> { } /// Update the data-race detector for an atomic fence on the current thread. - fn validate_atomic_fence(&mut self, atomic: AtomicFenceOp) -> InterpResult<'tcx> { + fn validate_atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx> { let this = self.eval_context_mut(); if let Some(data_race) = &mut this.machine.data_race { data_race.maybe_perform_sync_operation(|index, mut clocks| { @@ -773,22 +773,22 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> { // Apply data-race detection for the current fences // this treats AcqRel and SeqCst as the same as an acquire // and release fence applied in the same timestamp. 
- if atomic != AtomicFenceOp::Release { + if atomic != AtomicFenceOrd::Release { // Either Acquire | AcqRel | SeqCst clocks.apply_acquire_fence(); } - if atomic != AtomicFenceOp::Acquire { + if atomic != AtomicFenceOrd::Acquire { // Either Release | AcqRel | SeqCst clocks.apply_release_fence(); } - if atomic == AtomicFenceOp::SeqCst { + if atomic == AtomicFenceOrd::SeqCst { data_race.last_sc_fence.borrow_mut().set_at_index(&clocks.clock, index); clocks.fence_seqcst.join(&data_race.last_sc_fence.borrow()); clocks.write_seqcst.join(&data_race.last_sc_write.borrow()); } // Increment timestamp in case of release semantics. - Ok(atomic != AtomicFenceOp::Acquire) + Ok(atomic != AtomicFenceOrd::Acquire) }) } else { Ok(()) diff --git a/src/concurrency/weak_memory.rs b/src/concurrency/weak_memory.rs index e5f58ee5dd..28a54c2e3b 100644 --- a/src/concurrency/weak_memory.rs +++ b/src/concurrency/weak_memory.rs @@ -82,7 +82,7 @@ use rustc_const_eval::interpret::{ }; use rustc_data_structures::fx::FxHashMap; -use crate::{AtomicReadOp, AtomicRwOp, AtomicWriteOp, Tag, VClock, VTimestamp, VectorIdx}; +use crate::{AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, Tag, VClock, VTimestamp, VectorIdx}; use super::{ data_race::{GlobalState, ThreadClockSet}, @@ -443,7 +443,7 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>: &mut self, new_val: ScalarMaybeUninit, place: &MPlaceTy<'tcx, Tag>, - atomic: AtomicRwOp, + atomic: AtomicRwOrd, init: ScalarMaybeUninit, ) -> InterpResult<'tcx> { let this = self.eval_context_mut(); @@ -453,14 +453,14 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>: crate::Evaluator { data_race: Some(global), .. }, ) = this.get_alloc_extra_mut(alloc_id)? 
{ - if atomic == AtomicRwOp::SeqCst { + if atomic == AtomicRwOrd::SeqCst { global.sc_read(); global.sc_write(); } let range = alloc_range(base_offset, place.layout.size); let buffer = alloc_buffers.get_or_create_store_buffer_mut(range, init)?; buffer.read_from_last_store(global); - buffer.buffered_write(new_val, global, atomic == AtomicRwOp::SeqCst)?; + buffer.buffered_write(new_val, global, atomic == AtomicRwOrd::SeqCst)?; } Ok(()) } @@ -468,7 +468,7 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>: fn buffered_atomic_read( &self, place: &MPlaceTy<'tcx, Tag>, - atomic: AtomicReadOp, + atomic: AtomicReadOrd, latest_in_mo: ScalarMaybeUninit, validate: impl FnOnce() -> InterpResult<'tcx>, ) -> InterpResult<'tcx, ScalarMaybeUninit> { @@ -476,7 +476,7 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>: if let Some(global) = &this.machine.data_race { let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?; if let Some(alloc_buffers) = this.get_alloc_extra(alloc_id)?.weak_memory.as_ref() { - if atomic == AtomicReadOp::SeqCst { + if atomic == AtomicReadOrd::SeqCst { global.sc_read(); } let mut rng = this.machine.rng.borrow_mut(); @@ -486,7 +486,7 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>: )?; let loaded = buffer.buffered_read( global, - atomic == AtomicReadOp::SeqCst, + atomic == AtomicReadOrd::SeqCst, &mut *rng, validate, )?; @@ -504,7 +504,7 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>: &mut self, val: ScalarMaybeUninit, dest: &MPlaceTy<'tcx, Tag>, - atomic: AtomicWriteOp, + atomic: AtomicWriteOrd, init: ScalarMaybeUninit, ) -> InterpResult<'tcx> { let this = self.eval_context_mut(); @@ -514,7 +514,7 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>: crate::Evaluator { data_race: Some(global), .. }, ) = this.get_alloc_extra_mut(alloc_id)? 
{ - if atomic == AtomicWriteOp::SeqCst { + if atomic == AtomicWriteOrd::SeqCst { global.sc_write(); } @@ -535,7 +535,7 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>: buffer.buffer.pop_front(); } - buffer.buffered_write(val, global, atomic == AtomicWriteOp::SeqCst)?; + buffer.buffered_write(val, global, atomic == AtomicWriteOrd::SeqCst)?; } // Caller should've written to dest with the vanilla scalar write, we do nothing here @@ -548,13 +548,13 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>: fn perform_read_on_buffered_latest( &self, place: &MPlaceTy<'tcx, Tag>, - atomic: AtomicReadOp, + atomic: AtomicReadOrd, init: ScalarMaybeUninit, ) -> InterpResult<'tcx> { let this = self.eval_context_ref(); if let Some(global) = &this.machine.data_race { - if atomic == AtomicReadOp::SeqCst { + if atomic == AtomicReadOrd::SeqCst { global.sc_read(); } let size = place.layout.size; diff --git a/src/lib.rs b/src/lib.rs index e199fae31e..b3d408a6dc 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -70,7 +70,7 @@ pub use crate::shims::tls::{EvalContextExt as _, TlsData}; pub use crate::shims::EvalContextExt as _; pub use crate::concurrency::data_race::{ - AtomicFenceOp, AtomicReadOp, AtomicRwOp, AtomicWriteOp, + AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, EvalContextExt as DataRaceEvalContextExt, }; pub use crate::diagnostics::{ diff --git a/src/shims/intrinsics.rs b/src/shims/intrinsics.rs index 5926832011..9705f56cd1 100644 --- a/src/shims/intrinsics.rs +++ b/src/shims/intrinsics.rs @@ -864,216 +864,216 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx } // Atomic operations - "atomic_load_seqcst" => this.atomic_load(args, dest, AtomicReadOp::SeqCst)?, - "atomic_load_relaxed" => this.atomic_load(args, dest, AtomicReadOp::Relaxed)?, - "atomic_load_acquire" => this.atomic_load(args, dest, AtomicReadOp::Acquire)?, + "atomic_load_seqcst" => this.atomic_load(args, dest, AtomicReadOrd::SeqCst)?, + "atomic_load_relaxed" => 
this.atomic_load(args, dest, AtomicReadOrd::Relaxed)?, + "atomic_load_acquire" => this.atomic_load(args, dest, AtomicReadOrd::Acquire)?, - "atomic_store_seqcst" => this.atomic_store(args, AtomicWriteOp::SeqCst)?, - "atomic_store_relaxed" => this.atomic_store(args, AtomicWriteOp::Relaxed)?, - "atomic_store_release" => this.atomic_store(args, AtomicWriteOp::Release)?, + "atomic_store_seqcst" => this.atomic_store(args, AtomicWriteOrd::SeqCst)?, + "atomic_store_relaxed" => this.atomic_store(args, AtomicWriteOrd::Relaxed)?, + "atomic_store_release" => this.atomic_store(args, AtomicWriteOrd::Release)?, - "atomic_fence_acquire" => this.atomic_fence(args, AtomicFenceOp::Acquire)?, - "atomic_fence_release" => this.atomic_fence(args, AtomicFenceOp::Release)?, - "atomic_fence_acqrel" => this.atomic_fence(args, AtomicFenceOp::AcqRel)?, - "atomic_fence_seqcst" => this.atomic_fence(args, AtomicFenceOp::SeqCst)?, + "atomic_fence_acquire" => this.atomic_fence(args, AtomicFenceOrd::Acquire)?, + "atomic_fence_release" => this.atomic_fence(args, AtomicFenceOrd::Release)?, + "atomic_fence_acqrel" => this.atomic_fence(args, AtomicFenceOrd::AcqRel)?, + "atomic_fence_seqcst" => this.atomic_fence(args, AtomicFenceOrd::SeqCst)?, "atomic_singlethreadfence_acquire" => - this.compiler_fence(args, AtomicFenceOp::Acquire)?, + this.compiler_fence(args, AtomicFenceOrd::Acquire)?, "atomic_singlethreadfence_release" => - this.compiler_fence(args, AtomicFenceOp::Release)?, + this.compiler_fence(args, AtomicFenceOrd::Release)?, "atomic_singlethreadfence_acqrel" => - this.compiler_fence(args, AtomicFenceOp::AcqRel)?, + this.compiler_fence(args, AtomicFenceOrd::AcqRel)?, "atomic_singlethreadfence_seqcst" => - this.compiler_fence(args, AtomicFenceOp::SeqCst)?, + this.compiler_fence(args, AtomicFenceOrd::SeqCst)?, - "atomic_xchg_seqcst" => this.atomic_exchange(args, dest, AtomicRwOp::SeqCst)?, - "atomic_xchg_acquire" => this.atomic_exchange(args, dest, AtomicRwOp::Acquire)?, - "atomic_xchg_release" => 
this.atomic_exchange(args, dest, AtomicRwOp::Release)?, - "atomic_xchg_acqrel" => this.atomic_exchange(args, dest, AtomicRwOp::AcqRel)?, - "atomic_xchg_relaxed" => this.atomic_exchange(args, dest, AtomicRwOp::Relaxed)?, + "atomic_xchg_seqcst" => this.atomic_exchange(args, dest, AtomicRwOrd::SeqCst)?, + "atomic_xchg_acquire" => this.atomic_exchange(args, dest, AtomicRwOrd::Acquire)?, + "atomic_xchg_release" => this.atomic_exchange(args, dest, AtomicRwOrd::Release)?, + "atomic_xchg_acqrel" => this.atomic_exchange(args, dest, AtomicRwOrd::AcqRel)?, + "atomic_xchg_relaxed" => this.atomic_exchange(args, dest, AtomicRwOrd::Relaxed)?, #[rustfmt::skip] "atomic_cxchg_seqcst_seqcst" => - this.atomic_compare_exchange(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::SeqCst)?, + this.atomic_compare_exchange(args, dest, AtomicRwOrd::SeqCst, AtomicReadOrd::SeqCst)?, #[rustfmt::skip] "atomic_cxchg_acquire_acquire" => - this.atomic_compare_exchange(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Acquire)?, + this.atomic_compare_exchange(args, dest, AtomicRwOrd::Acquire, AtomicReadOrd::Acquire)?, #[rustfmt::skip] "atomic_cxchg_release_relaxed" => - this.atomic_compare_exchange(args, dest, AtomicRwOp::Release, AtomicReadOp::Relaxed)?, + this.atomic_compare_exchange(args, dest, AtomicRwOrd::Release, AtomicReadOrd::Relaxed)?, #[rustfmt::skip] "atomic_cxchg_acqrel_acquire" => - this.atomic_compare_exchange(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Acquire)?, + this.atomic_compare_exchange(args, dest, AtomicRwOrd::AcqRel, AtomicReadOrd::Acquire)?, #[rustfmt::skip] "atomic_cxchg_relaxed_relaxed" => - this.atomic_compare_exchange(args, dest, AtomicRwOp::Relaxed, AtomicReadOp::Relaxed)?, + this.atomic_compare_exchange(args, dest, AtomicRwOrd::Relaxed, AtomicReadOrd::Relaxed)?, #[rustfmt::skip] "atomic_cxchg_acquire_relaxed" => - this.atomic_compare_exchange(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Relaxed)?, + this.atomic_compare_exchange(args, dest, AtomicRwOrd::Acquire, 
AtomicReadOrd::Relaxed)?, #[rustfmt::skip] "atomic_cxchg_acqrel_relaxed" => - this.atomic_compare_exchange(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Relaxed)?, + this.atomic_compare_exchange(args, dest, AtomicRwOrd::AcqRel, AtomicReadOrd::Relaxed)?, #[rustfmt::skip] "atomic_cxchg_seqcst_relaxed" => - this.atomic_compare_exchange(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Relaxed)?, + this.atomic_compare_exchange(args, dest, AtomicRwOrd::SeqCst, AtomicReadOrd::Relaxed)?, #[rustfmt::skip] "atomic_cxchg_seqcst_acquire" => - this.atomic_compare_exchange(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Acquire)?, + this.atomic_compare_exchange(args, dest, AtomicRwOrd::SeqCst, AtomicReadOrd::Acquire)?, #[rustfmt::skip] "atomic_cxchgweak_seqcst_seqcst" => - this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::SeqCst)?, + this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::SeqCst, AtomicReadOrd::SeqCst)?, #[rustfmt::skip] "atomic_cxchgweak_acquire_acquire" => - this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Acquire)?, + this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::Acquire, AtomicReadOrd::Acquire)?, #[rustfmt::skip] "atomic_cxchgweak_release_relaxed" => - this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Release, AtomicReadOp::Relaxed)?, + this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::Release, AtomicReadOrd::Relaxed)?, #[rustfmt::skip] "atomic_cxchgweak_acqrel_acquire" => - this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Acquire)?, + this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::AcqRel, AtomicReadOrd::Acquire)?, #[rustfmt::skip] "atomic_cxchgweak_relaxed_relaxed" => - this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Relaxed, AtomicReadOp::Relaxed)?, + this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::Relaxed, AtomicReadOrd::Relaxed)?, #[rustfmt::skip] "atomic_cxchgweak_acquire_relaxed" => - 
this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Relaxed)?, + this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::Acquire, AtomicReadOrd::Relaxed)?, #[rustfmt::skip] "atomic_cxchgweak_acqrel_relaxed" => - this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Relaxed)?, + this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::AcqRel, AtomicReadOrd::Relaxed)?, #[rustfmt::skip] "atomic_cxchgweak_seqcst_relaxed" => - this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Relaxed)?, + this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::SeqCst, AtomicReadOrd::Relaxed)?, #[rustfmt::skip] "atomic_cxchgweak_seqcst_acquire" => - this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Acquire)?, + this.atomic_compare_exchange_weak(args, dest, AtomicRwOrd::SeqCst, AtomicReadOrd::Acquire)?, #[rustfmt::skip] "atomic_or_seqcst" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::SeqCst)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOrd::SeqCst)?, #[rustfmt::skip] "atomic_or_acquire" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::Acquire)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOrd::Acquire)?, #[rustfmt::skip] "atomic_or_release" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::Release)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOrd::Release)?, #[rustfmt::skip] "atomic_or_acqrel" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::AcqRel)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOrd::AcqRel)?, #[rustfmt::skip] "atomic_or_relaxed" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::Relaxed)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), 
AtomicRwOrd::Relaxed)?, #[rustfmt::skip] "atomic_xor_seqcst" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::SeqCst)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOrd::SeqCst)?, #[rustfmt::skip] "atomic_xor_acquire" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::Acquire)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOrd::Acquire)?, #[rustfmt::skip] "atomic_xor_release" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::Release)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOrd::Release)?, #[rustfmt::skip] "atomic_xor_acqrel" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::AcqRel)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOrd::AcqRel)?, #[rustfmt::skip] "atomic_xor_relaxed" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::Relaxed)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOrd::Relaxed)?, #[rustfmt::skip] "atomic_and_seqcst" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::SeqCst)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOrd::SeqCst)?, #[rustfmt::skip] "atomic_and_acquire" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::Acquire)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOrd::Acquire)?, #[rustfmt::skip] "atomic_and_release" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::Release)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOrd::Release)?, #[rustfmt::skip] "atomic_and_acqrel" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::AcqRel)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, 
false), AtomicRwOrd::AcqRel)?, #[rustfmt::skip] "atomic_and_relaxed" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::Relaxed)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOrd::Relaxed)?, #[rustfmt::skip] "atomic_nand_seqcst" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::SeqCst)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOrd::SeqCst)?, #[rustfmt::skip] "atomic_nand_acquire" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::Acquire)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOrd::Acquire)?, #[rustfmt::skip] "atomic_nand_release" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::Release)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOrd::Release)?, #[rustfmt::skip] "atomic_nand_acqrel" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::AcqRel)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOrd::AcqRel)?, #[rustfmt::skip] "atomic_nand_relaxed" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::Relaxed)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOrd::Relaxed)?, #[rustfmt::skip] "atomic_xadd_seqcst" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::SeqCst)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOrd::SeqCst)?, #[rustfmt::skip] "atomic_xadd_acquire" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::Acquire)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOrd::Acquire)?, #[rustfmt::skip] "atomic_xadd_release" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::Release)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), 
AtomicRwOrd::Release)?, #[rustfmt::skip] "atomic_xadd_acqrel" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::AcqRel)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOrd::AcqRel)?, #[rustfmt::skip] "atomic_xadd_relaxed" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::Relaxed)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOrd::Relaxed)?, #[rustfmt::skip] "atomic_xsub_seqcst" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::SeqCst)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOrd::SeqCst)?, #[rustfmt::skip] "atomic_xsub_acquire" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::Acquire)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOrd::Acquire)?, #[rustfmt::skip] "atomic_xsub_release" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::Release)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOrd::Release)?, #[rustfmt::skip] "atomic_xsub_acqrel" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::AcqRel)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOrd::AcqRel)?, #[rustfmt::skip] "atomic_xsub_relaxed" => - this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::Relaxed)?, - "atomic_min_seqcst" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::SeqCst)?, + this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOrd::Relaxed)?, + "atomic_min_seqcst" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::SeqCst)?, "atomic_min_acquire" => - this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Acquire)?, + this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::Acquire)?, "atomic_min_release" => - this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Release)?, - "atomic_min_acqrel" 
=> this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::AcqRel)?, + this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::Release)?, + "atomic_min_acqrel" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::AcqRel)?, "atomic_min_relaxed" => - this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Relaxed)?, - "atomic_max_seqcst" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::SeqCst)?, + this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::Relaxed)?, + "atomic_max_seqcst" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::SeqCst)?, "atomic_max_acquire" => - this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Acquire)?, + this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::Acquire)?, "atomic_max_release" => - this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Release)?, - "atomic_max_acqrel" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::AcqRel)?, + this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::Release)?, + "atomic_max_acqrel" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::AcqRel)?, "atomic_max_relaxed" => - this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Relaxed)?, + this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::Relaxed)?, "atomic_umin_seqcst" => - this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::SeqCst)?, + this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::SeqCst)?, "atomic_umin_acquire" => - this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Acquire)?, + this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::Acquire)?, "atomic_umin_release" => - this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Release)?, + this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::Release)?, "atomic_umin_acqrel" => - this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::AcqRel)?, + this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::AcqRel)?, "atomic_umin_relaxed" => - this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Relaxed)?, + this.atomic_op(args, dest, AtomicOp::Min, 
AtomicRwOrd::Relaxed)?, "atomic_umax_seqcst" => - this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::SeqCst)?, + this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::SeqCst)?, "atomic_umax_acquire" => - this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Acquire)?, + this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::Acquire)?, "atomic_umax_release" => - this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Release)?, + this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::Release)?, "atomic_umax_acqrel" => - this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::AcqRel)?, + this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::AcqRel)?, "atomic_umax_relaxed" => - this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Relaxed)?, + this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::Relaxed)?, // Other "exact_div" => { @@ -1101,7 +1101,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx &mut self, args: &[OpTy<'tcx, Tag>], dest: &PlaceTy<'tcx, Tag>, - atomic: AtomicReadOp, + atomic: AtomicReadOrd, ) -> InterpResult<'tcx> { let this = self.eval_context_mut(); @@ -1129,7 +1129,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn atomic_store( &mut self, args: &[OpTy<'tcx, Tag>], - atomic: AtomicWriteOp, + atomic: AtomicWriteOrd, ) -> InterpResult<'tcx> { let this = self.eval_context_mut(); @@ -1156,7 +1156,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn compiler_fence( &mut self, args: &[OpTy<'tcx, Tag>], - atomic: AtomicFenceOp, + atomic: AtomicFenceOrd, ) -> InterpResult<'tcx> { let [] = check_arg_count(args)?; let _ = atomic; @@ -1167,7 +1167,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn atomic_fence( &mut self, args: &[OpTy<'tcx, Tag>], - atomic: AtomicFenceOp, + atomic: AtomicFenceOrd, ) -> InterpResult<'tcx> { let this = self.eval_context_mut(); let [] = check_arg_count(args)?; @@ -1180,7 +1180,7 
@@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx args: &[OpTy<'tcx, Tag>], dest: &PlaceTy<'tcx, Tag>, atomic_op: AtomicOp, - atomic: AtomicRwOp, + atomic: AtomicRwOrd, ) -> InterpResult<'tcx> { let this = self.eval_context_mut(); @@ -1226,7 +1226,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx &mut self, args: &[OpTy<'tcx, Tag>], dest: &PlaceTy<'tcx, Tag>, - atomic: AtomicRwOp, + atomic: AtomicRwOrd, ) -> InterpResult<'tcx> { let this = self.eval_context_mut(); @@ -1254,8 +1254,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx &mut self, args: &[OpTy<'tcx, Tag>], dest: &PlaceTy<'tcx, Tag>, - success: AtomicRwOp, - fail: AtomicReadOp, + success: AtomicRwOrd, + fail: AtomicReadOrd, can_fail_spuriously: bool, ) -> InterpResult<'tcx> { let this = self.eval_context_mut(); @@ -1294,8 +1294,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx &mut self, args: &[OpTy<'tcx, Tag>], dest: &PlaceTy<'tcx, Tag>, - success: AtomicRwOp, - fail: AtomicReadOp, + success: AtomicRwOrd, + fail: AtomicReadOrd, ) -> InterpResult<'tcx> { self.atomic_compare_exchange_impl(args, dest, success, fail, false) } @@ -1304,8 +1304,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx &mut self, args: &[OpTy<'tcx, Tag>], dest: &PlaceTy<'tcx, Tag>, - success: AtomicRwOp, - fail: AtomicReadOp, + success: AtomicRwOrd, + fail: AtomicReadOrd, ) -> InterpResult<'tcx> { self.atomic_compare_exchange_impl(args, dest, success, fail, true) } diff --git a/src/shims/unix/linux/sync.rs b/src/shims/unix/linux/sync.rs index 6be1e672f8..a0e35c730c 100644 --- a/src/shims/unix/linux/sync.rs +++ b/src/shims/unix/linux/sync.rs @@ -169,7 +169,7 @@ pub fn futex<'tcx>( // // Thankfully, preemptions cannot happen inside a Miri shim, so we do not need to // do anything special to guarantee fence-load-comparison atomicity. 
- this.atomic_fence(&[], AtomicFenceOp::SeqCst)?; + this.atomic_fence(&[], AtomicFenceOrd::SeqCst)?; // Read an `i32` through the pointer, regardless of any wrapper types. // It's not uncommon for `addr` to be passed as another type than `*mut i32`, such as `*const AtomicI32`. let futex_val = this @@ -177,7 +177,7 @@ pub fn futex<'tcx>( &addr.into(), 0, this.machine.layouts.i32, - AtomicReadOp::Relaxed, + AtomicReadOrd::Relaxed, )? .to_i32()?; if val == futex_val { @@ -240,7 +240,7 @@ pub fn futex<'tcx>( // Together with the SeqCst fence in futex_wait, this makes sure that futex_wait // will see the latest value on addr which could be changed by our caller // before doing the syscall. - this.atomic_fence(&[], AtomicFenceOp::SeqCst)?; + this.atomic_fence(&[], AtomicFenceOrd::SeqCst)?; let mut n = 0; for _ in 0..val { if let Some(thread) = this.futex_wake(addr_usize, bitset) { diff --git a/src/shims/unix/sync.rs b/src/shims/unix/sync.rs index 373996312e..ae63907c2c 100644 --- a/src/shims/unix/sync.rs +++ b/src/shims/unix/sync.rs @@ -68,7 +68,7 @@ fn mutex_get_kind<'mir, 'tcx: 'mir>( mutex_op, offset, ecx.machine.layouts.i32, - AtomicReadOp::Relaxed, + AtomicReadOrd::Relaxed, ) } @@ -83,7 +83,7 @@ fn mutex_set_kind<'mir, 'tcx: 'mir>( offset, kind, layout_of_maybe_uninit(ecx.tcx, ecx.tcx.types.i32), - AtomicWriteOp::Relaxed, + AtomicWriteOrd::Relaxed, ) } @@ -91,7 +91,7 @@ fn mutex_get_id<'mir, 'tcx: 'mir>( ecx: &MiriEvalContext<'mir, 'tcx>, mutex_op: &OpTy<'tcx, Tag>, ) -> InterpResult<'tcx, ScalarMaybeUninit> { - ecx.read_scalar_at_offset_atomic(mutex_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Relaxed) + ecx.read_scalar_at_offset_atomic(mutex_op, 4, ecx.machine.layouts.u32, AtomicReadOrd::Relaxed) } fn mutex_set_id<'mir, 'tcx: 'mir>( @@ -104,7 +104,7 @@ fn mutex_set_id<'mir, 'tcx: 'mir>( 4, id, layout_of_maybe_uninit(ecx.tcx, ecx.tcx.types.u32), - AtomicWriteOp::Relaxed, + AtomicWriteOrd::Relaxed, ) } @@ -120,8 +120,8 @@ fn mutex_get_or_create_id<'mir, 'tcx: 
'mir>( &value_place, &ImmTy::from_uint(0u32, ecx.machine.layouts.u32), next_id.to_u32_scalar().into(), - AtomicRwOp::Relaxed, - AtomicReadOp::Relaxed, + AtomicRwOrd::Relaxed, + AtomicReadOrd::Relaxed, false, )? .to_scalar_pair() @@ -147,7 +147,7 @@ fn rwlock_get_id<'mir, 'tcx: 'mir>( ecx: &MiriEvalContext<'mir, 'tcx>, rwlock_op: &OpTy<'tcx, Tag>, ) -> InterpResult<'tcx, ScalarMaybeUninit> { - ecx.read_scalar_at_offset_atomic(rwlock_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Relaxed) + ecx.read_scalar_at_offset_atomic(rwlock_op, 4, ecx.machine.layouts.u32, AtomicReadOrd::Relaxed) } fn rwlock_set_id<'mir, 'tcx: 'mir>( @@ -160,7 +160,7 @@ fn rwlock_set_id<'mir, 'tcx: 'mir>( 4, id, layout_of_maybe_uninit(ecx.tcx, ecx.tcx.types.u32), - AtomicWriteOp::Relaxed, + AtomicWriteOrd::Relaxed, ) } @@ -176,8 +176,8 @@ fn rwlock_get_or_create_id<'mir, 'tcx: 'mir>( &value_place, &ImmTy::from_uint(0u32, ecx.machine.layouts.u32), next_id.to_u32_scalar().into(), - AtomicRwOp::Relaxed, - AtomicReadOp::Relaxed, + AtomicRwOrd::Relaxed, + AtomicReadOrd::Relaxed, false, )? .to_scalar_pair() @@ -231,7 +231,7 @@ fn cond_get_id<'mir, 'tcx: 'mir>( ecx: &MiriEvalContext<'mir, 'tcx>, cond_op: &OpTy<'tcx, Tag>, ) -> InterpResult<'tcx, ScalarMaybeUninit> { - ecx.read_scalar_at_offset_atomic(cond_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Relaxed) + ecx.read_scalar_at_offset_atomic(cond_op, 4, ecx.machine.layouts.u32, AtomicReadOrd::Relaxed) } fn cond_set_id<'mir, 'tcx: 'mir>( @@ -244,7 +244,7 @@ fn cond_set_id<'mir, 'tcx: 'mir>( 4, id, layout_of_maybe_uninit(ecx.tcx, ecx.tcx.types.u32), - AtomicWriteOp::Relaxed, + AtomicWriteOrd::Relaxed, ) } @@ -260,8 +260,8 @@ fn cond_get_or_create_id<'mir, 'tcx: 'mir>( &value_place, &ImmTy::from_uint(0u32, ecx.machine.layouts.u32), next_id.to_u32_scalar().into(), - AtomicRwOp::Relaxed, - AtomicReadOp::Relaxed, + AtomicRwOrd::Relaxed, + AtomicReadOrd::Relaxed, false, )? 
.to_scalar_pair() diff --git a/src/shims/windows/sync.rs b/src/shims/windows/sync.rs index 6a6b2269e6..35603f7f38 100644 --- a/src/shims/windows/sync.rs +++ b/src/shims/windows/sync.rs @@ -15,8 +15,8 @@ fn srwlock_get_or_create_id<'mir, 'tcx: 'mir>( &value_place, &ImmTy::from_uint(0u32, ecx.machine.layouts.u32), next_id.to_u32_scalar().into(), - AtomicRwOp::Relaxed, - AtomicReadOp::Relaxed, + AtomicRwOrd::Relaxed, + AtomicReadOrd::Relaxed, false, )? .to_scalar_pair() From dfdedae840a3703fc8fe4e7c958645f416087625 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Fri, 1 Jul 2022 17:07:29 -0400 Subject: [PATCH 2/3] avoid copying thread manager state in data race detector --- src/concurrency/data_race.rs | 224 +++++++++++++++------------------ src/concurrency/weak_memory.rs | 54 ++++---- src/machine.rs | 28 ++++- src/shims/intrinsics.rs | 12 +- src/thread.rs | 47 ++++--- 5 files changed, 186 insertions(+), 179 deletions(-) diff --git a/src/concurrency/data_race.rs b/src/concurrency/data_race.rs index ef0920d969..205b56ca4c 100644 --- a/src/concurrency/data_race.rs +++ b/src/concurrency/data_race.rs @@ -39,11 +39,6 @@ //! so some atomic operations that only perform acquires do not increment the timestamp. Due to shared //! code some atomic operations may increment the timestamp when not necessary but this has no effect //! on the data-race detection code. -//! -//! FIXME: -//! currently we have our own local copy of the currently active thread index and names, this is due -//! in part to the inability to access the current location of threads.active_thread inside the AllocExtra -//! read, write and deallocate functions and should be cleaned up in the future. 
use std::{ cell::{Cell, Ref, RefCell, RefMut}, @@ -767,7 +762,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> { fn validate_atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx> { let this = self.eval_context_mut(); if let Some(data_race) = &mut this.machine.data_race { - data_race.maybe_perform_sync_operation(|index, mut clocks| { + data_race.maybe_perform_sync_operation(&this.machine.threads, |index, mut clocks| { log::trace!("Atomic fence on {:?} with ordering {:?}", index, atomic); // Apply data-race detection for the current fences @@ -807,6 +802,7 @@ impl VClockAlloc { /// Create a new data-race detector for newly allocated memory. pub fn new_allocation( global: &GlobalState, + thread_mgr: &ThreadManager<'_, '_>, len: Size, kind: MemoryKind, ) -> VClockAlloc { @@ -816,7 +812,7 @@ impl VClockAlloc { MiriMemoryKind::Rust | MiriMemoryKind::C | MiriMemoryKind::WinHeap, ) | MemoryKind::Stack => { - let (alloc_index, clocks) = global.current_thread_state(); + let (alloc_index, clocks) = global.current_thread_state(thread_mgr); let alloc_timestamp = clocks.clock[alloc_index]; (alloc_timestamp, alloc_index) } @@ -878,12 +874,13 @@ impl VClockAlloc { #[inline(never)] fn report_data_race<'tcx>( global: &GlobalState, + thread_mgr: &ThreadManager<'_, '_>, range: &MemoryCellClocks, action: &str, is_atomic: bool, ptr_dbg: Pointer, ) -> InterpResult<'tcx> { - let (current_index, current_clocks) = global.current_thread_state(); + let (current_index, current_clocks) = global.current_thread_state(thread_mgr); let write_clock; let (other_action, other_thread, other_clock) = if range.write > current_clocks.clock[range.write_index] @@ -918,8 +915,8 @@ impl VClockAlloc { }; // Load elaborated thread information about the racing thread actions. 
- let current_thread_info = global.print_thread_metadata(current_index); - let other_thread_info = global.print_thread_metadata(other_thread); + let current_thread_info = global.print_thread_metadata(thread_mgr, current_index); + let other_thread_info = global.print_thread_metadata(thread_mgr, other_thread); // Throw the data-race detection. throw_ub_format!( @@ -936,9 +933,14 @@ impl VClockAlloc { /// Detect racing atomic read and writes (not data races) /// on every byte of the current access range - pub(super) fn race_free_with_atomic(&self, range: AllocRange, global: &GlobalState) -> bool { + pub(super) fn race_free_with_atomic( + &self, + range: AllocRange, + global: &GlobalState, + thread_mgr: &ThreadManager<'_, '_>, + ) -> bool { if global.race_detecting() { - let (_, clocks) = global.current_thread_state(); + let (_, clocks) = global.current_thread_state(thread_mgr); let alloc_ranges = self.alloc_ranges.borrow(); for (_, range) in alloc_ranges.iter(range.start, range.size) { if !range.race_free_with_atomic(&clocks) { @@ -959,15 +961,17 @@ impl VClockAlloc { alloc_id: AllocId, range: AllocRange, global: &GlobalState, + thread_mgr: &ThreadManager<'_, '_>, ) -> InterpResult<'tcx> { if global.race_detecting() { - let (index, clocks) = global.current_thread_state(); + let (index, clocks) = global.current_thread_state(thread_mgr); let mut alloc_ranges = self.alloc_ranges.borrow_mut(); for (offset, range) in alloc_ranges.iter_mut(range.start, range.size) { if let Err(DataRace) = range.read_race_detect(&clocks, index) { // Report data-race. 
return Self::report_data_race( global, + thread_mgr, range, "Read", false, @@ -988,14 +992,16 @@ impl VClockAlloc { range: AllocRange, write_type: WriteType, global: &mut GlobalState, + thread_mgr: &ThreadManager<'_, '_>, ) -> InterpResult<'tcx> { if global.race_detecting() { - let (index, clocks) = global.current_thread_state(); + let (index, clocks) = global.current_thread_state(thread_mgr); for (offset, range) in self.alloc_ranges.get_mut().iter_mut(range.start, range.size) { if let Err(DataRace) = range.write_race_detect(&clocks, index, write_type) { // Report data-race return Self::report_data_race( global, + thread_mgr, range, write_type.get_descriptor(), false, @@ -1018,8 +1024,9 @@ impl VClockAlloc { alloc_id: AllocId, range: AllocRange, global: &mut GlobalState, + thread_mgr: &ThreadManager<'_, '_>, ) -> InterpResult<'tcx> { - self.unique_access(alloc_id, range, WriteType::Write, global) + self.unique_access(alloc_id, range, WriteType::Write, global, thread_mgr) } /// Detect data-races for an unsynchronized deallocate operation, will not perform @@ -1031,8 +1038,9 @@ impl VClockAlloc { alloc_id: AllocId, range: AllocRange, global: &mut GlobalState, + thread_mgr: &ThreadManager<'_, '_>, ) -> InterpResult<'tcx> { - self.unique_access(alloc_id, range, WriteType::Deallocate, global) + self.unique_access(alloc_id, range, WriteType::Deallocate, global, thread_mgr) } } @@ -1068,26 +1076,30 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> { ); // Perform the atomic operation. 
- data_race.maybe_perform_sync_operation(|index, mut clocks| { - for (offset, range) in - alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size) - { - if let Err(DataRace) = op(range, &mut clocks, index, atomic) { - mem::drop(clocks); - return VClockAlloc::report_data_race( - data_race, - range, - description, - true, - Pointer::new(alloc_id, offset), - ) - .map(|_| true); + data_race.maybe_perform_sync_operation( + &this.machine.threads, + |index, mut clocks| { + for (offset, range) in + alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size) + { + if let Err(DataRace) = op(range, &mut clocks, index, atomic) { + mem::drop(clocks); + return VClockAlloc::report_data_race( + data_race, + &this.machine.threads, + range, + description, + true, + Pointer::new(alloc_id, offset), + ) + .map(|_| true); + } } - } - // This conservatively assumes all operations have release semantics - Ok(true) - })?; + // This conservatively assumes all operations have release semantics + Ok(true) + }, + )?; // Log changes to atomic memory. if log::log_enabled!(log::Level::Trace) { @@ -1117,11 +1129,6 @@ struct ThreadExtraState { /// read during data-race reporting. vector_index: Option, - /// The name of the thread, updated for better - /// diagnostics when reporting detected data - /// races. - thread_name: Option>, - /// Thread termination vector clock, this /// is set on thread termination and is used /// for joining on threads since the vector_index @@ -1161,9 +1168,6 @@ pub struct GlobalState { /// The mapping of a given thread to associated thread metadata. thread_info: RefCell>, - /// The current vector index being executed. - current_index: Cell, - /// Potential vector indices that could be re-used on thread creation /// values are inserted here on after the thread has terminated and /// been joined with, and hence may potentially become free @@ -1173,12 +1177,6 @@ pub struct GlobalState { /// active vector-clocks catch up with the threads timestamp. 
reuse_candidates: RefCell>, - /// Counts the number of threads that are currently active - /// if the number of active threads reduces to 1 and then - /// a join operation occurs with the remaining main thread - /// then multi-threaded execution may be disabled. - active_thread_count: Cell, - /// This contains threads that have terminated, but not yet joined /// and so cannot become re-use candidates until a join operation /// occurs. @@ -1203,8 +1201,6 @@ impl GlobalState { vector_clocks: RefCell::new(IndexVec::new()), vector_info: RefCell::new(IndexVec::new()), thread_info: RefCell::new(IndexVec::new()), - current_index: Cell::new(VectorIdx::new(0)), - active_thread_count: Cell::new(1), reuse_candidates: RefCell::new(FxHashSet::default()), terminated_threads: RefCell::new(FxHashMap::default()), last_sc_fence: RefCell::new(VClock::default()), @@ -1216,11 +1212,10 @@ impl GlobalState { // the main-thread a name of "main". let index = global_state.vector_clocks.get_mut().push(ThreadClockSet::default()); global_state.vector_info.get_mut().push(ThreadId::new(0)); - global_state.thread_info.get_mut().push(ThreadExtraState { - vector_index: Some(index), - thread_name: Some("main".to_string().into_boxed_str()), - termination_vector_clock: None, - }); + global_state + .thread_info + .get_mut() + .push(ThreadExtraState { vector_index: Some(index), termination_vector_clock: None }); global_state } @@ -1274,14 +1269,10 @@ impl GlobalState { // Hook for thread creation, enabled multi-threaded execution and marks // the current thread timestamp as happening-before the current thread. #[inline] - pub fn thread_created(&mut self, thread: ThreadId) { - let current_index = self.current_index(); - - // Increment the number of active threads. 
- let active_threads = self.active_thread_count.get(); - self.active_thread_count.set(active_threads + 1); + pub fn thread_created(&mut self, thread_mgr: &ThreadManager<'_, '_>, thread: ThreadId) { + let current_index = self.current_index(thread_mgr); - // Enable multi-threaded execution, there are now two threads + // Enable multi-threaded execution, there are now at least two threads // so data-races are now possible. self.multi_threaded.set(true); @@ -1339,21 +1330,27 @@ impl GlobalState { created.increment_clock(created_index); } - /// Hook on a thread join to update the implicit happens-before relation - /// between the joined thread and the current thread. + /// Hook on a thread join to update the implicit happens-before relation between the joined + /// thread (the joinee, the thread that someone waited on) and the current thread (the joiner, + /// the thread who was waiting). #[inline] - pub fn thread_joined(&mut self, current_thread: ThreadId, join_thread: ThreadId) { + pub fn thread_joined( + &mut self, + thread_mgr: &ThreadManager<'_, '_>, + joiner: ThreadId, + joinee: ThreadId, + ) { let clocks_vec = self.vector_clocks.get_mut(); let thread_info = self.thread_info.get_mut(); // Load the vector clock of the current thread. - let current_index = thread_info[current_thread] + let current_index = thread_info[joiner] .vector_index .expect("Performed thread join on thread with no assigned vector"); let current = &mut clocks_vec[current_index]; // Load the associated vector clock for the terminated thread. - let join_clock = thread_info[join_thread] + let join_clock = thread_info[joinee] .termination_vector_clock .as_ref() .expect("Joined with thread but thread has not terminated"); @@ -1363,10 +1360,9 @@ impl GlobalState { // Is not a release operation so the clock is not incremented. 
current.clock.join(join_clock); - // Check the number of active threads, if the value is 1 + // Check the number of live threads, if the value is 1 // then test for potentially disabling multi-threaded execution. - let active_threads = self.active_thread_count.get(); - if active_threads == 1 { + if thread_mgr.get_live_thread_count() == 1 { // May potentially be able to disable multi-threaded execution. let current_clock = &clocks_vec[current_index]; if clocks_vec @@ -1383,7 +1379,7 @@ impl GlobalState { // If the thread is marked as terminated but not joined // then move the thread to the re-use set. let termination = self.terminated_threads.get_mut(); - if let Some(index) = termination.remove(&join_thread) { + if let Some(index) = termination.remove(&joinee) { let reuse = self.reuse_candidates.get_mut(); reuse.insert(index); } @@ -1397,8 +1393,8 @@ impl GlobalState { /// This should be called strictly before any calls to /// `thread_joined`. #[inline] - pub fn thread_terminated(&mut self) { - let current_index = self.current_index(); + pub fn thread_terminated(&mut self, thread_mgr: &ThreadManager<'_, '_>) { + let current_index = self.current_index(thread_mgr); // Increment the clock to a unique termination timestamp. let vector_clocks = self.vector_clocks.get_mut(); @@ -1420,35 +1416,6 @@ impl GlobalState { // occurs. let termination = self.terminated_threads.get_mut(); termination.insert(current_thread, current_index); - - // Reduce the number of active threads, now that a thread has - // terminated. - let mut active_threads = self.active_thread_count.get(); - active_threads -= 1; - self.active_thread_count.set(active_threads); - } - - /// Hook for updating the local tracker of the currently - /// enabled thread, should always be updated whenever - /// `active_thread` in thread.rs is updated. 
- #[inline] - pub fn thread_set_active(&self, thread: ThreadId) { - let thread_info = self.thread_info.borrow(); - let vector_idx = thread_info[thread] - .vector_index - .expect("Setting thread active with no assigned vector"); - self.current_index.set(vector_idx); - } - - /// Hook for updating the local tracker of the threads name - /// this should always mirror the local value in thread.rs - /// the thread name is used for improved diagnostics - /// during a data-race. - #[inline] - pub fn thread_set_name(&mut self, thread: ThreadId, name: String) { - let name = name.into_boxed_str(); - let thread_info = self.thread_info.get_mut(); - thread_info[thread].thread_name = Some(name); } /// Attempt to perform a synchronized operation, this @@ -1460,12 +1427,13 @@ impl GlobalState { /// operation may create. fn maybe_perform_sync_operation<'tcx>( &self, + thread_mgr: &ThreadManager<'_, '_>, op: impl FnOnce(VectorIdx, RefMut<'_, ThreadClockSet>) -> InterpResult<'tcx, bool>, ) -> InterpResult<'tcx> { if self.multi_threaded.get() { - let (index, clocks) = self.current_thread_state_mut(); + let (index, clocks) = self.current_thread_state_mut(thread_mgr); if op(index, clocks)? { - let (_, mut clocks) = self.current_thread_state_mut(); + let (_, mut clocks) = self.current_thread_state_mut(thread_mgr); clocks.increment_clock(index); } } @@ -1474,15 +1442,18 @@ impl GlobalState { /// Internal utility to identify a thread stored internally /// returns the id and the name for better diagnostics. 
- fn print_thread_metadata(&self, vector: VectorIdx) -> String { + fn print_thread_metadata( + &self, + thread_mgr: &ThreadManager<'_, '_>, + vector: VectorIdx, + ) -> String { let thread = self.vector_info.borrow()[vector]; - let thread_name = &self.thread_info.borrow()[thread].thread_name; - if let Some(name) = thread_name { - let name: &str = name; - format!("Thread(id = {:?}, name = {:?})", thread.to_u32(), name) - } else { - format!("Thread(id = {:?})", thread.to_u32()) - } + let thread_name = thread_mgr.get_thread_name(); + format!( + "Thread(id = {:?}, name = {:?})", + thread.to_u32(), + String::from_utf8_lossy(thread_name) + ) } /// Acquire a lock, express that the previous call of @@ -1534,8 +1505,11 @@ impl GlobalState { /// Load the current vector clock in use and the current set of thread clocks /// in use for the vector. #[inline] - pub(super) fn current_thread_state(&self) -> (VectorIdx, Ref<'_, ThreadClockSet>) { - let index = self.current_index(); + pub(super) fn current_thread_state( + &self, + thread_mgr: &ThreadManager<'_, '_>, + ) -> (VectorIdx, Ref<'_, ThreadClockSet>) { + let index = self.current_index(thread_mgr); let ref_vector = self.vector_clocks.borrow(); let clocks = Ref::map(ref_vector, |vec| &vec[index]); (index, clocks) @@ -1544,8 +1518,11 @@ impl GlobalState { /// Load the current vector clock in use and the current set of thread clocks /// in use for the vector mutably for modification. 
#[inline] - pub(super) fn current_thread_state_mut(&self) -> (VectorIdx, RefMut<'_, ThreadClockSet>) { - let index = self.current_index(); + pub(super) fn current_thread_state_mut( + &self, + thread_mgr: &ThreadManager<'_, '_>, + ) -> (VectorIdx, RefMut<'_, ThreadClockSet>) { + let index = self.current_index(thread_mgr); let ref_vector = self.vector_clocks.borrow_mut(); let clocks = RefMut::map(ref_vector, |vec| &mut vec[index]); (index, clocks) @@ -1554,19 +1531,22 @@ impl GlobalState { /// Return the current thread, should be the same /// as the data-race active thread. #[inline] - fn current_index(&self) -> VectorIdx { - self.current_index.get() + fn current_index(&self, thread_mgr: &ThreadManager<'_, '_>) -> VectorIdx { + let active_thread_id = thread_mgr.get_active_thread_id(); + self.thread_info.borrow()[active_thread_id] + .vector_index + .expect("active thread has no assigned vector") } // SC ATOMIC STORE rule in the paper. - pub(super) fn sc_write(&self) { - let (index, clocks) = self.current_thread_state(); + pub(super) fn sc_write(&self, thread_mgr: &ThreadManager<'_, '_>) { + let (index, clocks) = self.current_thread_state(thread_mgr); self.last_sc_write.borrow_mut().set_at_index(&clocks.clock, index); } // SC ATOMIC READ rule in the paper. 
- pub(super) fn sc_read(&self) { - let (.., mut clocks) = self.current_thread_state_mut(); + pub(super) fn sc_read(&self, thread_mgr: &ThreadManager<'_, '_>) { + let (.., mut clocks) = self.current_thread_state_mut(thread_mgr); clocks.read_seqcst.join(&self.last_sc_fence.borrow()); } } diff --git a/src/concurrency/weak_memory.rs b/src/concurrency/weak_memory.rs index 28a54c2e3b..e7ed9ea09a 100644 --- a/src/concurrency/weak_memory.rs +++ b/src/concurrency/weak_memory.rs @@ -82,10 +82,12 @@ use rustc_const_eval::interpret::{ }; use rustc_data_structures::fx::FxHashMap; -use crate::{AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, Tag, VClock, VTimestamp, VectorIdx}; +use crate::{ + AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, Tag, ThreadManager, VClock, VTimestamp, VectorIdx, +}; use super::{ - data_race::{GlobalState, ThreadClockSet}, + data_race::{GlobalState as DataRaceState, ThreadClockSet}, range_object_map::{AccessType, RangeObjectMap}, }; @@ -149,7 +151,7 @@ impl StoreBufferAlloc { /// before without data race, we can determine that the non-atomic access fully happens /// after all the prior atomic accesses so the location no longer needs to exhibit /// any weak memory behaviours until further atomic accesses. 
- pub fn memory_accessed(&self, range: AllocRange, global: &GlobalState) { + pub fn memory_accessed(&self, range: AllocRange, global: &DataRaceState) { if !global.ongoing_action_data_race_free() { let mut buffers = self.store_buffers.borrow_mut(); let access_type = buffers.access_type(range); @@ -236,17 +238,18 @@ impl<'mir, 'tcx: 'mir> StoreBuffer { } /// Reads from the last store in modification order - fn read_from_last_store(&self, global: &GlobalState) { + fn read_from_last_store(&self, global: &DataRaceState, thread_mgr: &ThreadManager<'_, '_>) { let store_elem = self.buffer.back(); if let Some(store_elem) = store_elem { - let (index, clocks) = global.current_thread_state(); + let (index, clocks) = global.current_thread_state(thread_mgr); store_elem.load_impl(index, &clocks); } } fn buffered_read( &self, - global: &GlobalState, + global: &DataRaceState, + thread_mgr: &ThreadManager<'_, '_>, is_seqcst: bool, rng: &mut (impl rand::Rng + ?Sized), validate: impl FnOnce() -> InterpResult<'tcx>, @@ -257,7 +260,7 @@ impl<'mir, 'tcx: 'mir> StoreBuffer { let store_elem = { // The `clocks` we got here must be dropped before calling validate_atomic_load // as the race detector will update it - let (.., clocks) = global.current_thread_state(); + let (.., clocks) = global.current_thread_state(thread_mgr); // Load from a valid entry in the store buffer self.fetch_store(is_seqcst, &clocks, &mut *rng) }; @@ -268,7 +271,7 @@ impl<'mir, 'tcx: 'mir> StoreBuffer { // requires access to ThreadClockSet.clock, which is updated by the race detector validate()?; - let (index, clocks) = global.current_thread_state(); + let (index, clocks) = global.current_thread_state(thread_mgr); let loaded = store_elem.load_impl(index, &clocks); Ok(loaded) } @@ -276,10 +279,11 @@ impl<'mir, 'tcx: 'mir> StoreBuffer { fn buffered_write( &mut self, val: ScalarMaybeUninit, - global: &GlobalState, + global: &DataRaceState, + thread_mgr: &ThreadManager<'_, '_>, is_seqcst: bool, ) -> InterpResult<'tcx> { - 
let (index, clocks) = global.current_thread_state(); + let (index, clocks) = global.current_thread_state(thread_mgr); self.store_impl(val, index, &clocks.clock, is_seqcst); Ok(()) @@ -428,8 +432,11 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>: { let range = alloc_range(base_offset, place.layout.size); if alloc_buffers.is_overlapping(range) - && !alloc_clocks - .race_free_with_atomic(range, this.machine.data_race.as_ref().unwrap()) + && !alloc_clocks.race_free_with_atomic( + range, + this.machine.data_race.as_ref().unwrap(), + &this.machine.threads, + ) { throw_unsup_format!( "racy imperfectly overlapping atomic access is not possible in the C++20 memory model, and not supported by Miri's weak memory emulation" @@ -450,17 +457,17 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>: let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?; if let ( crate::AllocExtra { weak_memory: Some(alloc_buffers), .. }, - crate::Evaluator { data_race: Some(global), .. }, + crate::Evaluator { data_race: Some(global), threads, .. }, ) = this.get_alloc_extra_mut(alloc_id)? { if atomic == AtomicRwOrd::SeqCst { - global.sc_read(); - global.sc_write(); + global.sc_read(threads); + global.sc_write(threads); } let range = alloc_range(base_offset, place.layout.size); let buffer = alloc_buffers.get_or_create_store_buffer_mut(range, init)?; - buffer.read_from_last_store(global); - buffer.buffered_write(new_val, global, atomic == AtomicRwOrd::SeqCst)?; + buffer.read_from_last_store(global, threads); + buffer.buffered_write(new_val, global, threads, atomic == AtomicRwOrd::SeqCst)?; } Ok(()) } @@ -477,7 +484,7 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>: let (alloc_id, base_offset, ..) 
= this.ptr_get_alloc_id(place.ptr)?; if let Some(alloc_buffers) = this.get_alloc_extra(alloc_id)?.weak_memory.as_ref() { if atomic == AtomicReadOrd::SeqCst { - global.sc_read(); + global.sc_read(&this.machine.threads); } let mut rng = this.machine.rng.borrow_mut(); let buffer = alloc_buffers.get_or_create_store_buffer( @@ -486,6 +493,7 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>: )?; let loaded = buffer.buffered_read( global, + &this.machine.threads, atomic == AtomicReadOrd::SeqCst, &mut *rng, validate, @@ -511,11 +519,11 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>: let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(dest.ptr)?; if let ( crate::AllocExtra { weak_memory: Some(alloc_buffers), .. }, - crate::Evaluator { data_race: Some(global), .. }, + crate::Evaluator { data_race: Some(global), threads, .. }, ) = this.get_alloc_extra_mut(alloc_id)? { if atomic == AtomicWriteOrd::SeqCst { - global.sc_write(); + global.sc_write(threads); } // UGLY HACK: in write_scalar_atomic() we don't know the value before our write, @@ -535,7 +543,7 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>: buffer.buffer.pop_front(); } - buffer.buffered_write(val, global, atomic == AtomicWriteOrd::SeqCst)?; + buffer.buffered_write(val, global, threads, atomic == AtomicWriteOrd::SeqCst)?; } // Caller should've written to dest with the vanilla scalar write, we do nothing here @@ -555,14 +563,14 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>: if let Some(global) = &this.machine.data_race { if atomic == AtomicReadOrd::SeqCst { - global.sc_read(); + global.sc_read(&this.machine.threads); } let size = place.layout.size; let (alloc_id, base_offset, ..) 
= this.ptr_get_alloc_id(place.ptr)?; if let Some(alloc_buffers) = this.get_alloc_extra(alloc_id)?.weak_memory.as_ref() { let buffer = alloc_buffers .get_or_create_store_buffer(alloc_range(base_offset, size), init)?; - buffer.read_from_last_store(global); + buffer.read_from_last_store(global, &this.machine.threads); } } Ok(()) diff --git a/src/machine.rs b/src/machine.rs index abc55cde73..86b174182c 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -647,7 +647,12 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> { None }; let race_alloc = if let Some(data_race) = &ecx.machine.data_race { - Some(data_race::AllocExtra::new_allocation(data_race, alloc.size(), kind)) + Some(data_race::AllocExtra::new_allocation( + data_race, + &ecx.machine.threads, + alloc.size(), + kind, + )) } else { None }; @@ -756,7 +761,12 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> { range: AllocRange, ) -> InterpResult<'tcx> { if let Some(data_race) = &alloc_extra.data_race { - data_race.read(alloc_id, range, machine.data_race.as_ref().unwrap())?; + data_race.read( + alloc_id, + range, + machine.data_race.as_ref().unwrap(), + &machine.threads, + )?; } if let Some(stacked_borrows) = &alloc_extra.stacked_borrows { stacked_borrows.borrow_mut().memory_read( @@ -782,7 +792,12 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> { range: AllocRange, ) -> InterpResult<'tcx> { if let Some(data_race) = &mut alloc_extra.data_race { - data_race.write(alloc_id, range, machine.data_race.as_mut().unwrap())?; + data_race.write( + alloc_id, + range, + machine.data_race.as_mut().unwrap(), + &machine.threads, + )?; } if let Some(stacked_borrows) = &mut alloc_extra.stacked_borrows { stacked_borrows.get_mut().memory_written( @@ -811,7 +826,12 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> { register_diagnostic(NonHaltingDiagnostic::FreedAlloc(alloc_id)); } if let Some(data_race) = &mut alloc_extra.data_race { - data_race.deallocate(alloc_id, 
range, machine.data_race.as_mut().unwrap())?; + data_race.deallocate( + alloc_id, + range, + machine.data_race.as_mut().unwrap(), + &machine.threads, + )?; } if let Some(stacked_borrows) = &mut alloc_extra.stacked_borrows { stacked_borrows.get_mut().memory_deallocated( diff --git a/src/shims/intrinsics.rs b/src/shims/intrinsics.rs index 9705f56cd1..d8f6292e9d 100644 --- a/src/shims/intrinsics.rs +++ b/src/shims/intrinsics.rs @@ -1038,20 +1038,24 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx #[rustfmt::skip] "atomic_xsub_relaxed" => this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOrd::Relaxed)?, - "atomic_min_seqcst" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::SeqCst)?, + "atomic_min_seqcst" => + this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::SeqCst)?, "atomic_min_acquire" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::Acquire)?, "atomic_min_release" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::Release)?, - "atomic_min_acqrel" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::AcqRel)?, + "atomic_min_acqrel" => + this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::AcqRel)?, "atomic_min_relaxed" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOrd::Relaxed)?, - "atomic_max_seqcst" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::SeqCst)?, + "atomic_max_seqcst" => + this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::SeqCst)?, "atomic_max_acquire" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::Acquire)?, "atomic_max_release" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::Release)?, - "atomic_max_acqrel" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::AcqRel)?, + "atomic_max_acqrel" => + this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::AcqRel)?, "atomic_max_relaxed" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOrd::Relaxed)?, "atomic_umin_seqcst" => diff --git a/src/thread.rs b/src/thread.rs 
index 2135806de3..7327f2b811 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -289,15 +289,21 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { } /// Get the id of the currently active thread. - fn get_active_thread_id(&self) -> ThreadId { + pub fn get_active_thread_id(&self) -> ThreadId { self.active_thread } /// Get the total number of threads that were ever spawn by this program. - fn get_total_thread_count(&self) -> usize { + pub fn get_total_thread_count(&self) -> usize { self.threads.len() } + /// Get the total number of threads that are currently live, i.e., not yet terminated. + /// (They might be blocked.) + pub fn get_live_thread_count(&self) -> usize { + self.threads.iter().filter(|t| !matches!(t.state, ThreadState::Terminated)).count() + } + /// Has the given thread terminated? fn has_terminated(&self, thread_id: ThreadId) -> bool { self.threads[thread_id].state == ThreadState::Terminated @@ -366,7 +372,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { } else { // The thread has already terminated - mark join happens-before if let Some(data_race) = data_race { - data_race.thread_joined(self.active_thread, joined_thread_id); + data_race.thread_joined(self, self.active_thread, joined_thread_id); } } Ok(()) @@ -378,7 +384,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { } /// Get the name of the active thread. - fn get_thread_name(&self) -> &[u8] { + pub fn get_thread_name(&self) -> &[u8] { self.active_thread_ref().thread_name() } @@ -460,21 +466,25 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { false }); } - // Set the thread into a terminated state in the data-race detector + // Set the thread into a terminated state in the data-race detector. if let Some(ref mut data_race) = data_race { - data_race.thread_terminated(); + data_race.thread_terminated(self); } // Check if we need to unblock any threads. 
+ let mut joined_threads = vec![]; // store which threads joined, we'll need it for (i, thread) in self.threads.iter_enumerated_mut() { if thread.state == ThreadState::BlockedOnJoin(self.active_thread) { // The thread has terminated, mark happens-before edge to joining thread - if let Some(ref mut data_race) = data_race { - data_race.thread_joined(i, self.active_thread); + if let Some(_) = data_race { + joined_threads.push(i); } trace!("unblocking {:?} because {:?} terminated", i, self.active_thread); thread.state = ThreadState::Enabled; } } + for &i in &joined_threads { + data_race.as_mut().unwrap().thread_joined(self, i, self.active_thread); + } free_tls_statics } @@ -484,10 +494,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { /// used in stateless model checkers such as Loom: run the active thread as /// long as we can and switch only when we have to (the active thread was /// blocked, terminated, or has explicitly asked to be preempted). - fn schedule( - &mut self, - data_race: &Option, - ) -> InterpResult<'tcx, SchedulingAction> { + fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> { // Check whether the thread has **just** terminated (`check_terminated` // checks whether the thread has popped all its stack and if yes, sets // the thread state to terminated). 
@@ -535,9 +542,6 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { debug_assert_ne!(self.active_thread, id); if thread.state == ThreadState::Enabled { self.active_thread = id; - if let Some(data_race) = data_race { - data_race.thread_set_active(self.active_thread); - } break; } } @@ -598,7 +602,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let this = self.eval_context_mut(); let id = this.machine.threads.create_thread(); if let Some(data_race) = &mut this.machine.data_race { - data_race.thread_created(id); + data_race.thread_created(&this.machine.threads, id); } id } @@ -619,9 +623,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx #[inline] fn set_active_thread(&mut self, thread_id: ThreadId) -> ThreadId { let this = self.eval_context_mut(); - if let Some(data_race) = &this.machine.data_race { - data_race.thread_set_active(thread_id); - } this.machine.threads.set_active_thread_id(thread_id) } @@ -682,11 +683,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx #[inline] fn set_active_thread_name(&mut self, new_thread_name: Vec) { let this = self.eval_context_mut(); - if let Some(data_race) = &mut this.machine.data_race { - if let Ok(string) = String::from_utf8(new_thread_name.clone()) { - data_race.thread_set_name(this.machine.threads.active_thread, string); - } - } this.machine.threads.set_thread_name(new_thread_name); } @@ -776,8 +772,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx #[inline] fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> { let this = self.eval_context_mut(); - let data_race = &this.machine.data_race; - this.machine.threads.schedule(data_race) + this.machine.threads.schedule() } /// Handles thread termination of the active thread: wakes up threads joining on this one, From d09db1660b0c7baca06029369bc48b8d9c604f0a Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Fri, 1 Jul 2022 17:33:17 
-0400 Subject: [PATCH 3/3] fix and slightly improve data race reports --- src/concurrency/data_race.rs | 17 ++++--------- src/concurrency/weak_memory.rs | 2 +- src/thread.rs | 25 ++++++++++++++----- tests/compiletest.rs | 2 -- tests/fail/data_race/alloc_read_race.rs | 2 +- tests/fail/data_race/alloc_read_race.stderr | 4 +-- tests/fail/data_race/alloc_write_race.rs | 2 +- tests/fail/data_race/alloc_write_race.stderr | 4 +-- .../data_race/atomic_read_na_write_race1.rs | 2 +- .../atomic_read_na_write_race1.stderr | 4 +-- .../data_race/atomic_read_na_write_race2.rs | 2 +- .../atomic_read_na_write_race2.stderr | 4 +-- .../data_race/atomic_write_na_read_race1.rs | 2 +- .../atomic_write_na_read_race1.stderr | 4 +-- .../data_race/atomic_write_na_read_race2.rs | 2 +- .../atomic_write_na_read_race2.stderr | 4 +-- .../data_race/atomic_write_na_write_race1.rs | 2 +- .../atomic_write_na_write_race1.stderr | 4 +-- .../data_race/atomic_write_na_write_race2.rs | 2 +- .../atomic_write_na_write_race2.stderr | 4 +-- .../data_race/dangling_thread_async_race.rs | 2 +- .../dangling_thread_async_race.stderr | 4 +-- tests/fail/data_race/dangling_thread_race.rs | 2 +- .../data_race/dangling_thread_race.stderr | 4 +-- tests/fail/data_race/dealloc_read_race1.rs | 2 +- .../fail/data_race/dealloc_read_race1.stderr | 4 +-- tests/fail/data_race/dealloc_read_race2.rs | 2 +- .../fail/data_race/dealloc_read_race_stack.rs | 2 +- .../data_race/dealloc_read_race_stack.stderr | 4 +-- tests/fail/data_race/dealloc_write_race1.rs | 2 +- .../fail/data_race/dealloc_write_race1.stderr | 4 +-- tests/fail/data_race/dealloc_write_race2.rs | 2 +- .../data_race/dealloc_write_race_stack.rs | 2 +- .../data_race/dealloc_write_race_stack.stderr | 4 +-- .../data_race/enable_after_join_to_main.rs | 2 +- .../enable_after_join_to_main.stderr | 4 +-- tests/fail/data_race/fence_after_load.rs | 2 +- tests/fail/data_race/fence_after_load.stderr | 4 +-- tests/fail/data_race/read_write_race.rs | 2 +- 
tests/fail/data_race/read_write_race.stderr | 4 +-- tests/fail/data_race/read_write_race_stack.rs | 2 +- .../data_race/read_write_race_stack.stderr | 4 +-- tests/fail/data_race/relax_acquire_race.rs | 2 +- .../fail/data_race/relax_acquire_race.stderr | 4 +-- tests/fail/data_race/release_seq_race.rs | 2 +- tests/fail/data_race/release_seq_race.stderr | 4 +-- .../data_race/release_seq_race_same_thread.rs | 2 +- .../release_seq_race_same_thread.stderr | 4 +-- tests/fail/data_race/rmw_race.rs | 2 +- tests/fail/data_race/rmw_race.stderr | 4 +-- tests/fail/data_race/write_write_race.rs | 2 +- tests/fail/data_race/write_write_race.stderr | 4 +-- .../fail/data_race/write_write_race_stack.rs | 2 +- .../data_race/write_write_race_stack.stderr | 4 +-- tests/pass/libc.rs | 3 ++- 55 files changed, 101 insertions(+), 96 deletions(-) diff --git a/src/concurrency/data_race.rs b/src/concurrency/data_race.rs index 205b56ca4c..4b402b51fc 100644 --- a/src/concurrency/data_race.rs +++ b/src/concurrency/data_race.rs @@ -882,7 +882,7 @@ impl VClockAlloc { ) -> InterpResult<'tcx> { let (current_index, current_clocks) = global.current_thread_state(thread_mgr); let write_clock; - let (other_action, other_thread, other_clock) = if range.write + let (other_action, other_thread, _other_clock) = if range.write > current_clocks.clock[range.write_index] { // Convert the write action into the vector clock it @@ -920,14 +920,12 @@ impl VClockAlloc { // Throw the data-race detection. 
throw_ub_format!( - "Data race detected between {} on {} and {} on {} at {:?} (current vector clock = {:?}, conflicting timestamp = {:?})", + "Data race detected between {} on {} and {} on {} at {:?}", action, current_thread_info, other_action, other_thread_info, ptr_dbg, - current_clocks.clock, - other_clock ) } @@ -1208,8 +1206,7 @@ impl GlobalState { }; // Setup the main-thread since it is not explicitly created: - // uses vector index and thread-id 0, also the rust runtime gives - // the main-thread a name of "main". + // uses vector index and thread-id 0. let index = global_state.vector_clocks.get_mut().push(ThreadClockSet::default()); global_state.vector_info.get_mut().push(ThreadId::new(0)); global_state @@ -1448,12 +1445,8 @@ impl GlobalState { vector: VectorIdx, ) -> String { let thread = self.vector_info.borrow()[vector]; - let thread_name = thread_mgr.get_thread_name(); - format!( - "Thread(id = {:?}, name = {:?})", - thread.to_u32(), - String::from_utf8_lossy(thread_name) - ) + let thread_name = thread_mgr.get_thread_name(thread); + format!("thread `{}`", String::from_utf8_lossy(thread_name)) } /// Acquire a lock, express that the previous call of diff --git a/src/concurrency/weak_memory.rs b/src/concurrency/weak_memory.rs index e7ed9ea09a..f7cc9c4732 100644 --- a/src/concurrency/weak_memory.rs +++ b/src/concurrency/weak_memory.rs @@ -9,7 +9,7 @@ //! Note that this implementation does not take into account of C++20's memory model revision to SC accesses //! and fences introduced by P0668 (). //! This implementation is not fully correct under the revised C++20 model and may generate behaviours C++20 -//! disallows. +//! disallows (). //! //! Rust follows the C++20 memory model (except for the Consume ordering and some operations not performable through C++'s //! std::atomic API). 
It is therefore possible for this implementation to generate behaviours never observable when the diff --git a/src/thread.rs b/src/thread.rs index 7327f2b811..420eeb810f 100644 --- a/src/thread.rs +++ b/src/thread.rs @@ -170,6 +170,14 @@ impl<'mir, 'tcx> Default for Thread<'mir, 'tcx> { } } +impl<'mir, 'tcx> Thread<'mir, 'tcx> { + fn new(name: &str) -> Self { + let mut thread = Thread::default(); + thread.thread_name = Some(Vec::from(name.as_bytes())); + thread + } +} + /// A specific moment in time. #[derive(Debug)] pub enum Time { @@ -230,7 +238,7 @@ impl<'mir, 'tcx> Default for ThreadManager<'mir, 'tcx> { fn default() -> Self { let mut threads = IndexVec::new(); // Create the main thread and add it to the list of threads. - let mut main_thread = Thread::default(); + let mut main_thread = Thread::new("main"); // The main thread can *not* be joined on. main_thread.join_status = ThreadJoinStatus::Detached; threads.push(main_thread); @@ -379,15 +387,20 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { } /// Set the name of the active thread. - fn set_thread_name(&mut self, new_thread_name: Vec) { + fn set_active_thread_name(&mut self, new_thread_name: Vec) { self.active_thread_mut().thread_name = Some(new_thread_name); } /// Get the name of the active thread. - pub fn get_thread_name(&self) -> &[u8] { + pub fn get_active_thread_name(&self) -> &[u8] { self.active_thread_ref().thread_name() } + /// Get the name of the given thread. + pub fn get_thread_name(&self, thread: ThreadId) -> &[u8] { + self.threads[thread].thread_name() + } + /// Put the thread into the blocked state. 
fn block_thread(&mut self, thread: ThreadId) { let state = &mut self.threads[thread].state; @@ -475,7 +488,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> { for (i, thread) in self.threads.iter_enumerated_mut() { if thread.state == ThreadState::BlockedOnJoin(self.active_thread) { // The thread has terminated, mark happens-before edge to joining thread - if let Some(_) = data_race { + if data_race.is_some() { joined_threads.push(i); } trace!("unblocking {:?} because {:?} terminated", i, self.active_thread); @@ -683,7 +696,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx #[inline] fn set_active_thread_name(&mut self, new_thread_name: Vec) { let this = self.eval_context_mut(); - this.machine.threads.set_thread_name(new_thread_name); + this.machine.threads.set_active_thread_name(new_thread_name); } #[inline] @@ -692,7 +705,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx 'mir: 'c, { let this = self.eval_context_ref(); - this.machine.threads.get_thread_name() + this.machine.threads.get_active_thread_name() } #[inline] diff --git a/tests/compiletest.rs b/tests/compiletest.rs index e4d8511b51..754fccd63b 100644 --- a/tests/compiletest.rs +++ b/tests/compiletest.rs @@ -94,8 +94,6 @@ regexes! { "([0-9]+: ) +0x[0-9a-f]+ - (.*)" => "$1$2", // erase long hexadecimals r"0x[0-9a-fA-F]+[0-9a-fA-F]{2,2}" => "$$HEX", - // erase clocks - r"VClock\(\[[^\]]+\]\)" => "VClock", // erase specific alignments "alignment [0-9]+" => "alignment ALIGN", // erase thread caller ids diff --git a/tests/fail/data_race/alloc_read_race.rs b/tests/fail/data_race/alloc_read_race.rs index 12c1b6ec87..1eac8ce0f2 100644 --- a/tests/fail/data_race/alloc_read_race.rs +++ b/tests/fail/data_race/alloc_read_race.rs @@ -38,7 +38,7 @@ pub fn main() { let pointer = &*ptr.0; // Note: could also error due to reading uninitialized memory, but the data-race detector triggers first. 
- *pointer.load(Ordering::Relaxed) //~ ERROR Data race detected between Read on Thread(id = 2) and Allocate on Thread(id = 1) + *pointer.load(Ordering::Relaxed) //~ ERROR Data race detected between Read on thread `` and Allocate on thread `` }); j1.join().unwrap(); diff --git a/tests/fail/data_race/alloc_read_race.stderr b/tests/fail/data_race/alloc_read_race.stderr index 52004f2d2d..2049e4f4a1 100644 --- a/tests/fail/data_race/alloc_read_race.stderr +++ b/tests/fail/data_race/alloc_read_race.stderr @@ -1,8 +1,8 @@ -error: Undefined Behavior: Data race detected between Read on Thread(id = 2) and Allocate on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) +error: Undefined Behavior: Data race detected between Read on thread `` and Allocate on thread `` at ALLOC --> $DIR/alloc_read_race.rs:LL:CC | LL | *pointer.load(Ordering::Relaxed) - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between Read on Thread(id = 2) and Allocate on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between Read on thread `` and Allocate on thread `` at ALLOC | = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information diff --git a/tests/fail/data_race/alloc_write_race.rs b/tests/fail/data_race/alloc_write_race.rs index c050d24bee..e618b72a82 100644 --- a/tests/fail/data_race/alloc_write_race.rs +++ b/tests/fail/data_race/alloc_write_race.rs @@ -36,7 +36,7 @@ pub fn main() { let j2 = spawn(move || { let pointer = &*ptr.0; - *pointer.load(Ordering::Relaxed) = 2; //~ ERROR Data race detected between Write on Thread(id = 2) and Allocate on Thread(id = 1) + *pointer.load(Ordering::Relaxed) = 2; //~ ERROR Data race detected between Write on thread `` and Allocate on thread `` }); 
j1.join().unwrap(); diff --git a/tests/fail/data_race/alloc_write_race.stderr b/tests/fail/data_race/alloc_write_race.stderr index b6c05b3407..82e3d92479 100644 --- a/tests/fail/data_race/alloc_write_race.stderr +++ b/tests/fail/data_race/alloc_write_race.stderr @@ -1,8 +1,8 @@ -error: Undefined Behavior: Data race detected between Write on Thread(id = 2) and Allocate on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) +error: Undefined Behavior: Data race detected between Write on thread `` and Allocate on thread `` at ALLOC --> $DIR/alloc_write_race.rs:LL:CC | LL | *pointer.load(Ordering::Relaxed) = 2; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between Write on Thread(id = 2) and Allocate on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between Write on thread `` and Allocate on thread `` at ALLOC | = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information diff --git a/tests/fail/data_race/atomic_read_na_write_race1.rs b/tests/fail/data_race/atomic_read_na_write_race1.rs index 5cf2f26bf1..3b948eea98 100644 --- a/tests/fail/data_race/atomic_read_na_write_race1.rs +++ b/tests/fail/data_race/atomic_read_na_write_race1.rs @@ -22,7 +22,7 @@ pub fn main() { let j2 = spawn(move || { //Equivalent to: (&*c.0).load(Ordering::SeqCst) - intrinsics::atomic_load_seqcst(c.0 as *mut usize) //~ ERROR Data race detected between Atomic Load on Thread(id = 2) and Write on Thread(id = 1) + intrinsics::atomic_load_seqcst(c.0 as *mut usize) //~ ERROR Data race detected between Atomic Load on thread `` and Write on thread `` }); j1.join().unwrap(); diff --git a/tests/fail/data_race/atomic_read_na_write_race1.stderr 
b/tests/fail/data_race/atomic_read_na_write_race1.stderr index 51cdb23950..4b5355e866 100644 --- a/tests/fail/data_race/atomic_read_na_write_race1.stderr +++ b/tests/fail/data_race/atomic_read_na_write_race1.stderr @@ -1,8 +1,8 @@ -error: Undefined Behavior: Data race detected between Atomic Load on Thread(id = 2) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) +error: Undefined Behavior: Data race detected between Atomic Load on thread `` and Write on thread `` at ALLOC --> $DIR/atomic_read_na_write_race1.rs:LL:CC | LL | intrinsics::atomic_load_seqcst(c.0 as *mut usize) - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between Atomic Load on Thread(id = 2) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between Atomic Load on thread `` and Write on thread `` at ALLOC | = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information diff --git a/tests/fail/data_race/atomic_read_na_write_race2.rs b/tests/fail/data_race/atomic_read_na_write_race2.rs index 1c0146367a..44b4eebee8 100644 --- a/tests/fail/data_race/atomic_read_na_write_race2.rs +++ b/tests/fail/data_race/atomic_read_na_write_race2.rs @@ -22,7 +22,7 @@ pub fn main() { let j2 = spawn(move || { let atomic_ref = &mut *c.0; - *atomic_ref.get_mut() = 32; //~ ERROR Data race detected between Write on Thread(id = 2) and Atomic Load on Thread(id = 1) + *atomic_ref.get_mut() = 32; //~ ERROR Data race detected between Write on thread `` and Atomic Load on thread `` }); j1.join().unwrap(); diff --git a/tests/fail/data_race/atomic_read_na_write_race2.stderr b/tests/fail/data_race/atomic_read_na_write_race2.stderr index 9a432c586a..3eb9f17bae 100644 --- 
a/tests/fail/data_race/atomic_read_na_write_race2.stderr +++ b/tests/fail/data_race/atomic_read_na_write_race2.stderr @@ -1,8 +1,8 @@ -error: Undefined Behavior: Data race detected between Write on Thread(id = 2) and Atomic Load on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) +error: Undefined Behavior: Data race detected between Write on thread `` and Atomic Load on thread `` at ALLOC --> $DIR/atomic_read_na_write_race2.rs:LL:CC | LL | *atomic_ref.get_mut() = 32; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between Write on Thread(id = 2) and Atomic Load on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between Write on thread `` and Atomic Load on thread `` at ALLOC | = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information diff --git a/tests/fail/data_race/atomic_write_na_read_race1.rs b/tests/fail/data_race/atomic_write_na_read_race1.rs index a63aafb045..44dc1a9084 100644 --- a/tests/fail/data_race/atomic_write_na_read_race1.rs +++ b/tests/fail/data_race/atomic_write_na_read_race1.rs @@ -22,7 +22,7 @@ pub fn main() { let j2 = spawn(move || { let atomic_ref = &mut *c.0; - *atomic_ref.get_mut() //~ ERROR Data race detected between Read on Thread(id = 2) and Atomic Store on Thread(id = 1) + *atomic_ref.get_mut() //~ ERROR Data race detected between Read on thread `` and Atomic Store on thread `` }); j1.join().unwrap(); diff --git a/tests/fail/data_race/atomic_write_na_read_race1.stderr b/tests/fail/data_race/atomic_write_na_read_race1.stderr index 8280f43b51..810fa54d41 100644 --- a/tests/fail/data_race/atomic_write_na_read_race1.stderr +++ b/tests/fail/data_race/atomic_write_na_read_race1.stderr @@ -1,8 +1,8 @@ -error: Undefined Behavior: Data race detected 
between Read on Thread(id = 2) and Atomic Store on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) +error: Undefined Behavior: Data race detected between Read on thread `` and Atomic Store on thread `` at ALLOC --> $DIR/atomic_write_na_read_race1.rs:LL:CC | LL | *atomic_ref.get_mut() - | ^^^^^^^^^^^^^^^^^^^^^ Data race detected between Read on Thread(id = 2) and Atomic Store on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) + | ^^^^^^^^^^^^^^^^^^^^^ Data race detected between Read on thread `` and Atomic Store on thread `` at ALLOC | = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information diff --git a/tests/fail/data_race/atomic_write_na_read_race2.rs b/tests/fail/data_race/atomic_write_na_read_race2.rs index 0b055c9b96..b4b21b64fc 100644 --- a/tests/fail/data_race/atomic_write_na_read_race2.rs +++ b/tests/fail/data_race/atomic_write_na_read_race2.rs @@ -22,7 +22,7 @@ pub fn main() { let j2 = spawn(move || { //Equivalent to: (&*c.0).store(32, Ordering::SeqCst) - atomic_store(c.0 as *mut usize, 32); //~ ERROR Data race detected between Atomic Store on Thread(id = 2) and Read on Thread(id = 1) + atomic_store(c.0 as *mut usize, 32); //~ ERROR Data race detected between Atomic Store on thread `` and Read on thread `` }); j1.join().unwrap(); diff --git a/tests/fail/data_race/atomic_write_na_read_race2.stderr b/tests/fail/data_race/atomic_write_na_read_race2.stderr index 63d0f5814e..77f69e2bc3 100644 --- a/tests/fail/data_race/atomic_write_na_read_race2.stderr +++ b/tests/fail/data_race/atomic_write_na_read_race2.stderr @@ -1,8 +1,8 @@ -error: Undefined Behavior: Data race detected between Atomic Store on Thread(id = 2) and Read on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) +error: 
Undefined Behavior: Data race detected between Atomic Store on thread `` and Read on thread `` at ALLOC --> $DIR/atomic_write_na_read_race2.rs:LL:CC | LL | atomic_store(c.0 as *mut usize, 32); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between Atomic Store on Thread(id = 2) and Read on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between Atomic Store on thread `` and Read on thread `` at ALLOC | = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information diff --git a/tests/fail/data_race/atomic_write_na_write_race1.rs b/tests/fail/data_race/atomic_write_na_write_race1.rs index 8268924e3c..b1a4cfb98b 100644 --- a/tests/fail/data_race/atomic_write_na_write_race1.rs +++ b/tests/fail/data_race/atomic_write_na_write_race1.rs @@ -22,7 +22,7 @@ pub fn main() { let j2 = spawn(move || { //Equivalent to: (&*c.0).store(64, Ordering::SeqCst) - atomic_store(c.0 as *mut usize, 64); //~ ERROR Data race detected between Atomic Store on Thread(id = 2) and Write on Thread(id = 1) + atomic_store(c.0 as *mut usize, 64); //~ ERROR Data race detected between Atomic Store on thread `` and Write on thread `` }); j1.join().unwrap(); diff --git a/tests/fail/data_race/atomic_write_na_write_race1.stderr b/tests/fail/data_race/atomic_write_na_write_race1.stderr index 332be7406c..8e70de5e4a 100644 --- a/tests/fail/data_race/atomic_write_na_write_race1.stderr +++ b/tests/fail/data_race/atomic_write_na_write_race1.stderr @@ -1,8 +1,8 @@ -error: Undefined Behavior: Data race detected between Atomic Store on Thread(id = 2) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) +error: Undefined Behavior: Data race detected between Atomic Store on thread `` and Write on thread 
`` at ALLOC --> $DIR/atomic_write_na_write_race1.rs:LL:CC | LL | atomic_store(c.0 as *mut usize, 64); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between Atomic Store on Thread(id = 2) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between Atomic Store on thread `` and Write on thread `` at ALLOC | = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information diff --git a/tests/fail/data_race/atomic_write_na_write_race2.rs b/tests/fail/data_race/atomic_write_na_write_race2.rs index 440c72a059..dbdce8f623 100644 --- a/tests/fail/data_race/atomic_write_na_write_race2.rs +++ b/tests/fail/data_race/atomic_write_na_write_race2.rs @@ -22,7 +22,7 @@ pub fn main() { let j2 = spawn(move || { let atomic_ref = &mut *c.0; - *atomic_ref.get_mut() = 32; //~ ERROR Data race detected between Write on Thread(id = 2) and Atomic Store on Thread(id = 1) + *atomic_ref.get_mut() = 32; //~ ERROR Data race detected between Write on thread `` and Atomic Store on thread `` }); j1.join().unwrap(); diff --git a/tests/fail/data_race/atomic_write_na_write_race2.stderr b/tests/fail/data_race/atomic_write_na_write_race2.stderr index 024f525b12..310c2ed7df 100644 --- a/tests/fail/data_race/atomic_write_na_write_race2.stderr +++ b/tests/fail/data_race/atomic_write_na_write_race2.stderr @@ -1,8 +1,8 @@ -error: Undefined Behavior: Data race detected between Write on Thread(id = 2) and Atomic Store on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) +error: Undefined Behavior: Data race detected between Write on thread `` and Atomic Store on thread `` at ALLOC --> $DIR/atomic_write_na_write_race2.rs:LL:CC | LL | *atomic_ref.get_mut() = 32; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^ Data 
race detected between Write on Thread(id = 2) and Atomic Store on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ Data race detected between Write on thread `` and Atomic Store on thread `` at ALLOC | = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information diff --git a/tests/fail/data_race/dangling_thread_async_race.rs b/tests/fail/data_race/dangling_thread_async_race.rs index 2656f4b7af..65325b60f2 100644 --- a/tests/fail/data_race/dangling_thread_async_race.rs +++ b/tests/fail/data_race/dangling_thread_async_race.rs @@ -34,7 +34,7 @@ fn main() { let join2 = unsafe { spawn(move || { - *c.0 = 64; //~ ERROR Data race detected between Write on Thread(id = 3) and Write on Thread(id = 1) + *c.0 = 64; //~ ERROR Data race detected between Write on thread `` and Write on thread `` }) }; diff --git a/tests/fail/data_race/dangling_thread_async_race.stderr b/tests/fail/data_race/dangling_thread_async_race.stderr index 6d31e3971e..efdc913ce2 100644 --- a/tests/fail/data_race/dangling_thread_async_race.stderr +++ b/tests/fail/data_race/dangling_thread_async_race.stderr @@ -1,8 +1,8 @@ -error: Undefined Behavior: Data race detected between Write on Thread(id = 3) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) +error: Undefined Behavior: Data race detected between Write on thread `` and Write on thread `` at ALLOC --> $DIR/dangling_thread_async_race.rs:LL:CC | LL | *c.0 = 64; - | ^^^^^^^^^ Data race detected between Write on Thread(id = 3) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) + | ^^^^^^^^^ Data race detected between Write on thread `` and Write on thread `` at ALLOC | = help: this indicates a bug in the program: it performed an 
invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information diff --git a/tests/fail/data_race/dangling_thread_race.rs b/tests/fail/data_race/dangling_thread_race.rs index f1174d8ff6..09e7032c93 100644 --- a/tests/fail/data_race/dangling_thread_race.rs +++ b/tests/fail/data_race/dangling_thread_race.rs @@ -33,6 +33,6 @@ fn main() { spawn(|| ()).join().unwrap(); unsafe { - *c.0 = 64; //~ ERROR Data race detected between Write on Thread(id = 0, name = "main") and Write on Thread(id = 1) + *c.0 = 64; //~ ERROR Data race detected between Write on thread `main` and Write on thread `` } } diff --git a/tests/fail/data_race/dangling_thread_race.stderr b/tests/fail/data_race/dangling_thread_race.stderr index ba1ef2760f..899cfdd095 100644 --- a/tests/fail/data_race/dangling_thread_race.stderr +++ b/tests/fail/data_race/dangling_thread_race.stderr @@ -1,8 +1,8 @@ -error: Undefined Behavior: Data race detected between Write on Thread(id = 0, name = "main") and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) +error: Undefined Behavior: Data race detected between Write on thread `main` and Write on thread `` at ALLOC --> $DIR/dangling_thread_race.rs:LL:CC | LL | *c.0 = 64; - | ^^^^^^^^^ Data race detected between Write on Thread(id = 0, name = "main") and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) + | ^^^^^^^^^ Data race detected between Write on thread `main` and Write on thread `` at ALLOC | = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information diff --git a/tests/fail/data_race/dealloc_read_race1.rs b/tests/fail/data_race/dealloc_read_race1.rs index 555700a75d..ff2ac8ca52 100644 --- 
a/tests/fail/data_race/dealloc_read_race1.rs +++ b/tests/fail/data_race/dealloc_read_race1.rs @@ -24,7 +24,7 @@ pub fn main() { let j2 = spawn(move || { __rust_dealloc( - //~^ ERROR Data race detected between Deallocate on Thread(id = 2) and Read on Thread(id = 1) + //~^ ERROR Data race detected between Deallocate on thread `` and Read on thread `` ptr.0 as *mut _, std::mem::size_of::(), std::mem::align_of::(), diff --git a/tests/fail/data_race/dealloc_read_race1.stderr b/tests/fail/data_race/dealloc_read_race1.stderr index 91a681e744..9e35fb7b6b 100644 --- a/tests/fail/data_race/dealloc_read_race1.stderr +++ b/tests/fail/data_race/dealloc_read_race1.stderr @@ -1,4 +1,4 @@ -error: Undefined Behavior: Data race detected between Deallocate on Thread(id = 2) and Read on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) +error: Undefined Behavior: Data race detected between Deallocate on thread `` and Read on thread `` at ALLOC --> $DIR/dealloc_read_race1.rs:LL:CC | LL | / __rust_dealloc( @@ -7,7 +7,7 @@ LL | | ptr.0 as *mut _, LL | | std::mem::size_of::(), LL | | std::mem::align_of::(), LL | | ); - | |_____________^ Data race detected between Deallocate on Thread(id = 2) and Read on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) + | |_____________^ Data race detected between Deallocate on thread `` and Read on thread `` at ALLOC | = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information diff --git a/tests/fail/data_race/dealloc_read_race2.rs b/tests/fail/data_race/dealloc_read_race2.rs index 984268dca1..4bb6444f6a 100644 --- a/tests/fail/data_race/dealloc_read_race2.rs +++ b/tests/fail/data_race/dealloc_read_race2.rs @@ -27,7 +27,7 @@ pub fn main() { }); let j2 = spawn(move || { - // Also an error of the form: Data race 
detected between Read on Thread(id = 2) and Deallocate on Thread(id = 1) + // Also an error of the form: Data race detected between Read on thread `` and Deallocate on thread `` // but the invalid allocation is detected first. *ptr.0 //~ ERROR dereferenced after this allocation got freed }); diff --git a/tests/fail/data_race/dealloc_read_race_stack.rs b/tests/fail/data_race/dealloc_read_race_stack.rs index cdb6c18230..e079581a0d 100644 --- a/tests/fail/data_race/dealloc_read_race_stack.rs +++ b/tests/fail/data_race/dealloc_read_race_stack.rs @@ -36,7 +36,7 @@ pub fn main() { sleep(Duration::from_millis(200)); // Now `stack_var` gets deallocated. - } //~ ERROR Data race detected between Deallocate on Thread(id = 1) and Read on Thread(id = 2) + } //~ ERROR Data race detected between Deallocate on thread `` and Read on thread `` }); let j2 = spawn(move || { diff --git a/tests/fail/data_race/dealloc_read_race_stack.stderr b/tests/fail/data_race/dealloc_read_race_stack.stderr index 1275d1290b..b1e7d3649e 100644 --- a/tests/fail/data_race/dealloc_read_race_stack.stderr +++ b/tests/fail/data_race/dealloc_read_race_stack.stderr @@ -1,8 +1,8 @@ -error: Undefined Behavior: Data race detected between Deallocate on Thread(id = 1) and Read on Thread(id = 2) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) +error: Undefined Behavior: Data race detected between Deallocate on thread `` and Read on thread `` at ALLOC --> $DIR/dealloc_read_race_stack.rs:LL:CC | LL | } - | ^ Data race detected between Deallocate on Thread(id = 1) and Read on Thread(id = 2) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) + | ^ Data race detected between Deallocate on thread `` and Read on thread `` at ALLOC | = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information diff --git 
a/tests/fail/data_race/dealloc_write_race1.rs b/tests/fail/data_race/dealloc_write_race1.rs index 44078a044a..9cd0ebc642 100644 --- a/tests/fail/data_race/dealloc_write_race1.rs +++ b/tests/fail/data_race/dealloc_write_race1.rs @@ -23,7 +23,7 @@ pub fn main() { let j2 = spawn(move || { __rust_dealloc( - //~^ ERROR Data race detected between Deallocate on Thread(id = 2) and Write on Thread(id = 1) + //~^ ERROR Data race detected between Deallocate on thread `` and Write on thread `` ptr.0 as *mut _, std::mem::size_of::(), std::mem::align_of::(), diff --git a/tests/fail/data_race/dealloc_write_race1.stderr b/tests/fail/data_race/dealloc_write_race1.stderr index dc1a6ed267..a9ac03eb31 100644 --- a/tests/fail/data_race/dealloc_write_race1.stderr +++ b/tests/fail/data_race/dealloc_write_race1.stderr @@ -1,4 +1,4 @@ -error: Undefined Behavior: Data race detected between Deallocate on Thread(id = 2) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) +error: Undefined Behavior: Data race detected between Deallocate on thread `` and Write on thread `` at ALLOC --> $DIR/dealloc_write_race1.rs:LL:CC | LL | / __rust_dealloc( @@ -7,7 +7,7 @@ LL | | ptr.0 as *mut _, LL | | std::mem::size_of::(), LL | | std::mem::align_of::(), LL | | ); - | |_____________^ Data race detected between Deallocate on Thread(id = 2) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) + | |_____________^ Data race detected between Deallocate on thread `` and Write on thread `` at ALLOC | = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information diff --git a/tests/fail/data_race/dealloc_write_race2.rs b/tests/fail/data_race/dealloc_write_race2.rs index 2f4b9a194c..9b1b8f0614 100644 --- a/tests/fail/data_race/dealloc_write_race2.rs +++ 
b/tests/fail/data_race/dealloc_write_race2.rs @@ -26,7 +26,7 @@ pub fn main() { }); let j2 = spawn(move || { - // Also an error of the form: Data race detected between Write on Thread(id = 2) and Deallocate on Thread(id = 1) + // Also an error of the form: Data race detected between Write on thread `` and Deallocate on thread `` // but the invalid allocation is detected first. *ptr.0 = 2; //~ ERROR dereferenced after this allocation got freed }); diff --git a/tests/fail/data_race/dealloc_write_race_stack.rs b/tests/fail/data_race/dealloc_write_race_stack.rs index a209a2cd7d..2f12570892 100644 --- a/tests/fail/data_race/dealloc_write_race_stack.rs +++ b/tests/fail/data_race/dealloc_write_race_stack.rs @@ -36,7 +36,7 @@ pub fn main() { sleep(Duration::from_millis(200)); // Now `stack_var` gets deallocated. - } //~ ERROR Data race detected between Deallocate on Thread(id = 1) and Write on Thread(id = 2) + } //~ ERROR Data race detected between Deallocate on thread `` and Write on thread `` }); let j2 = spawn(move || { diff --git a/tests/fail/data_race/dealloc_write_race_stack.stderr b/tests/fail/data_race/dealloc_write_race_stack.stderr index 28a131aac0..622ac25189 100644 --- a/tests/fail/data_race/dealloc_write_race_stack.stderr +++ b/tests/fail/data_race/dealloc_write_race_stack.stderr @@ -1,8 +1,8 @@ -error: Undefined Behavior: Data race detected between Deallocate on Thread(id = 1) and Write on Thread(id = 2) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) +error: Undefined Behavior: Data race detected between Deallocate on thread `` and Write on thread `` at ALLOC --> $DIR/dealloc_write_race_stack.rs:LL:CC | LL | } - | ^ Data race detected between Deallocate on Thread(id = 1) and Write on Thread(id = 2) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) + | ^ Data race detected between Deallocate on thread `` and Write on thread `` at ALLOC | = help: this indicates a bug in the program: it performed an invalid 
operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information diff --git a/tests/fail/data_race/enable_after_join_to_main.rs b/tests/fail/data_race/enable_after_join_to_main.rs index 832158a34a..6f0735fac8 100644 --- a/tests/fail/data_race/enable_after_join_to_main.rs +++ b/tests/fail/data_race/enable_after_join_to_main.rs @@ -29,7 +29,7 @@ pub fn main() { }); let j2 = spawn(move || { - *c.0 = 64; //~ ERROR Data race detected between Write on Thread(id = 6) and Write on Thread(id = 5) + *c.0 = 64; //~ ERROR Data race detected between Write on thread `` and Write on thread `` }); j1.join().unwrap(); diff --git a/tests/fail/data_race/enable_after_join_to_main.stderr b/tests/fail/data_race/enable_after_join_to_main.stderr index db7577b096..4426952e44 100644 --- a/tests/fail/data_race/enable_after_join_to_main.stderr +++ b/tests/fail/data_race/enable_after_join_to_main.stderr @@ -1,8 +1,8 @@ -error: Undefined Behavior: Data race detected between Write on Thread(id = 6) and Write on Thread(id = 5) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) +error: Undefined Behavior: Data race detected between Write on thread `` and Write on thread `` at ALLOC --> $DIR/enable_after_join_to_main.rs:LL:CC | LL | *c.0 = 64; - | ^^^^^^^^^ Data race detected between Write on Thread(id = 6) and Write on Thread(id = 5) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) + | ^^^^^^^^^ Data race detected between Write on thread `` and Write on thread `` at ALLOC | = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information diff --git a/tests/fail/data_race/fence_after_load.rs b/tests/fail/data_race/fence_after_load.rs index c209ef1812..5a8c2e585f 100644 --- 
a/tests/fail/data_race/fence_after_load.rs +++ b/tests/fail/data_race/fence_after_load.rs @@ -21,5 +21,5 @@ fn main() { // The fence is useless, since it did not happen-after the `store` in the other thread. // Hence this is a data race. // Also see https://github.com/rust-lang/miri/issues/2192. - unsafe { V = 2 } //~ERROR Data race detected + unsafe { V = 2 } //~ERROR Data race detected between Write on thread `main` and Write on thread `` } diff --git a/tests/fail/data_race/fence_after_load.stderr b/tests/fail/data_race/fence_after_load.stderr index 17cc6a82a1..b9cffeda27 100644 --- a/tests/fail/data_race/fence_after_load.stderr +++ b/tests/fail/data_race/fence_after_load.stderr @@ -1,8 +1,8 @@ -error: Undefined Behavior: Data race detected between Write on Thread(id = 0, name = "main") and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) +error: Undefined Behavior: Data race detected between Write on thread `main` and Write on thread `` at ALLOC --> $DIR/fence_after_load.rs:LL:CC | LL | unsafe { V = 2 } - | ^^^^^ Data race detected between Write on Thread(id = 0, name = "main") and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) + | ^^^^^ Data race detected between Write on thread `main` and Write on thread `` at ALLOC | = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information diff --git a/tests/fail/data_race/read_write_race.rs b/tests/fail/data_race/read_write_race.rs index 9197912ef2..eeb49bb42a 100644 --- a/tests/fail/data_race/read_write_race.rs +++ b/tests/fail/data_race/read_write_race.rs @@ -18,7 +18,7 @@ pub fn main() { }); let j2 = spawn(move || { - *c.0 = 64; //~ ERROR Data race detected between Write on Thread(id = 2) and Read on Thread(id = 1) + *c.0 = 64; //~ ERROR Data race detected 
between Write on thread `` and Read on thread `` }); j1.join().unwrap(); diff --git a/tests/fail/data_race/read_write_race.stderr b/tests/fail/data_race/read_write_race.stderr index b775e2b6fd..a65e7006cf 100644 --- a/tests/fail/data_race/read_write_race.stderr +++ b/tests/fail/data_race/read_write_race.stderr @@ -1,8 +1,8 @@ -error: Undefined Behavior: Data race detected between Write on Thread(id = 2) and Read on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) +error: Undefined Behavior: Data race detected between Write on thread `` and Read on thread `` at ALLOC --> $DIR/read_write_race.rs:LL:CC | LL | *c.0 = 64; - | ^^^^^^^^^ Data race detected between Write on Thread(id = 2) and Read on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) + | ^^^^^^^^^ Data race detected between Write on thread `` and Read on thread `` at ALLOC | = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information diff --git a/tests/fail/data_race/read_write_race_stack.rs b/tests/fail/data_race/read_write_race_stack.rs index 00c36176a9..124f12d1ec 100644 --- a/tests/fail/data_race/read_write_race_stack.rs +++ b/tests/fail/data_race/read_write_race_stack.rs @@ -43,7 +43,7 @@ pub fn main() { sleep(Duration::from_millis(200)); - stack_var //~ ERROR Data race detected between Read on Thread(id = 1) and Write on Thread(id = 2) + stack_var //~ ERROR Data race detected between Read on thread `` and Write on thread `` }); let j2 = spawn(move || { diff --git a/tests/fail/data_race/read_write_race_stack.stderr b/tests/fail/data_race/read_write_race_stack.stderr index 0f5f4956ff..390b3ab38e 100644 --- a/tests/fail/data_race/read_write_race_stack.stderr +++ b/tests/fail/data_race/read_write_race_stack.stderr @@ -1,8 +1,8 @@ -error: Undefined Behavior: Data race 
detected between Read on Thread(id = 1) and Write on Thread(id = 2) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) +error: Undefined Behavior: Data race detected between Read on thread `` and Write on thread `` at ALLOC --> $DIR/read_write_race_stack.rs:LL:CC | LL | stack_var - | ^^^^^^^^^ Data race detected between Read on Thread(id = 1) and Write on Thread(id = 2) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) + | ^^^^^^^^^ Data race detected between Read on thread `` and Write on thread `` at ALLOC | = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information diff --git a/tests/fail/data_race/relax_acquire_race.rs b/tests/fail/data_race/relax_acquire_race.rs index 3b350f5c89..faa23a150e 100644 --- a/tests/fail/data_race/relax_acquire_race.rs +++ b/tests/fail/data_race/relax_acquire_race.rs @@ -38,7 +38,7 @@ pub fn main() { let j3 = spawn(move || { if SYNC.load(Ordering::Acquire) == 2 { - *c.0 //~ ERROR Data race detected between Read on Thread(id = 3) and Write on Thread(id = 1) + *c.0 //~ ERROR Data race detected between Read on thread `` and Write on thread `` } else { 0 } diff --git a/tests/fail/data_race/relax_acquire_race.stderr b/tests/fail/data_race/relax_acquire_race.stderr index fb376b58f2..85de60c026 100644 --- a/tests/fail/data_race/relax_acquire_race.stderr +++ b/tests/fail/data_race/relax_acquire_race.stderr @@ -1,8 +1,8 @@ -error: Undefined Behavior: Data race detected between Read on Thread(id = 3) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) +error: Undefined Behavior: Data race detected between Read on thread `` and Write on thread `` at ALLOC --> $DIR/relax_acquire_race.rs:LL:CC | LL | *c.0 - | ^^^^ Data race detected between Read on Thread(id = 3) and Write on Thread(id = 1) 
at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) + | ^^^^ Data race detected between Read on thread `` and Write on thread `` at ALLOC | = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information diff --git a/tests/fail/data_race/release_seq_race.rs b/tests/fail/data_race/release_seq_race.rs index ec03888c76..ab6926102a 100644 --- a/tests/fail/data_race/release_seq_race.rs +++ b/tests/fail/data_race/release_seq_race.rs @@ -42,7 +42,7 @@ pub fn main() { let j3 = spawn(move || { sleep(Duration::from_millis(500)); if SYNC.load(Ordering::Acquire) == 3 { - *c.0 //~ ERROR Data race detected between Read on Thread(id = 3) and Write on Thread(id = 1) + *c.0 //~ ERROR Data race detected between Read on thread `` and Write on thread `` } else { 0 } diff --git a/tests/fail/data_race/release_seq_race.stderr b/tests/fail/data_race/release_seq_race.stderr index 1de9c0ac1c..db333d756f 100644 --- a/tests/fail/data_race/release_seq_race.stderr +++ b/tests/fail/data_race/release_seq_race.stderr @@ -1,8 +1,8 @@ -error: Undefined Behavior: Data race detected between Read on Thread(id = 3) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) +error: Undefined Behavior: Data race detected between Read on thread `` and Write on thread `` at ALLOC --> $DIR/release_seq_race.rs:LL:CC | LL | *c.0 - | ^^^^ Data race detected between Read on Thread(id = 3) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) + | ^^^^ Data race detected between Read on thread `` and Write on thread `` at ALLOC | = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further 
information diff --git a/tests/fail/data_race/release_seq_race_same_thread.rs b/tests/fail/data_race/release_seq_race_same_thread.rs index 1876238289..d3d18f0e25 100644 --- a/tests/fail/data_race/release_seq_race_same_thread.rs +++ b/tests/fail/data_race/release_seq_race_same_thread.rs @@ -38,7 +38,7 @@ pub fn main() { let j2 = spawn(move || { if SYNC.load(Ordering::Acquire) == 2 { - *c.0 //~ ERROR Data race detected between Read on Thread(id = 2) and Write on Thread(id = 1) + *c.0 //~ ERROR Data race detected between Read on thread `` and Write on thread `` } else { 0 } diff --git a/tests/fail/data_race/release_seq_race_same_thread.stderr b/tests/fail/data_race/release_seq_race_same_thread.stderr index 9bbdd9a475..f4c38d5315 100644 --- a/tests/fail/data_race/release_seq_race_same_thread.stderr +++ b/tests/fail/data_race/release_seq_race_same_thread.stderr @@ -1,8 +1,8 @@ -error: Undefined Behavior: Data race detected between Read on Thread(id = 2) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) +error: Undefined Behavior: Data race detected between Read on thread `` and Write on thread `` at ALLOC --> $DIR/release_seq_race_same_thread.rs:LL:CC | LL | *c.0 - | ^^^^ Data race detected between Read on Thread(id = 2) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) + | ^^^^ Data race detected between Read on thread `` and Write on thread `` at ALLOC | = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information diff --git a/tests/fail/data_race/rmw_race.rs b/tests/fail/data_race/rmw_race.rs index 51577b3b7b..800b1043c0 100644 --- a/tests/fail/data_race/rmw_race.rs +++ b/tests/fail/data_race/rmw_race.rs @@ -39,7 +39,7 @@ pub fn main() { let j3 = spawn(move || { if SYNC.load(Ordering::Acquire) == 3 { - 
*c.0 //~ ERROR Data race detected between Read on Thread(id = 3) and Write on Thread(id = 1) + *c.0 //~ ERROR Data race detected between Read on thread `` and Write on thread `` } else { 0 } diff --git a/tests/fail/data_race/rmw_race.stderr b/tests/fail/data_race/rmw_race.stderr index 10d3291fa7..346fcc31b9 100644 --- a/tests/fail/data_race/rmw_race.stderr +++ b/tests/fail/data_race/rmw_race.stderr @@ -1,8 +1,8 @@ -error: Undefined Behavior: Data race detected between Read on Thread(id = 3) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) +error: Undefined Behavior: Data race detected between Read on thread `` and Write on thread `` at ALLOC --> $DIR/rmw_race.rs:LL:CC | LL | *c.0 - | ^^^^ Data race detected between Read on Thread(id = 3) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) + | ^^^^ Data race detected between Read on thread `` and Write on thread `` at ALLOC | = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information diff --git a/tests/fail/data_race/write_write_race.rs b/tests/fail/data_race/write_write_race.rs index 61909eda86..989ae31a6d 100644 --- a/tests/fail/data_race/write_write_race.rs +++ b/tests/fail/data_race/write_write_race.rs @@ -18,7 +18,7 @@ pub fn main() { }); let j2 = spawn(move || { - *c.0 = 64; //~ ERROR Data race detected between Write on Thread(id = 2) and Write on Thread(id = 1) + *c.0 = 64; //~ ERROR Data race detected between Write on thread `` and Write on thread `` }); j1.join().unwrap(); diff --git a/tests/fail/data_race/write_write_race.stderr b/tests/fail/data_race/write_write_race.stderr index 0054f5bf63..e6254281ae 100644 --- a/tests/fail/data_race/write_write_race.stderr +++ b/tests/fail/data_race/write_write_race.stderr @@ -1,8 +1,8 @@ -error: Undefined 
Behavior: Data race detected between Write on Thread(id = 2) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) +error: Undefined Behavior: Data race detected between Write on thread `` and Write on thread `` at ALLOC --> $DIR/write_write_race.rs:LL:CC | LL | *c.0 = 64; - | ^^^^^^^^^ Data race detected between Write on Thread(id = 2) and Write on Thread(id = 1) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) + | ^^^^^^^^^ Data race detected between Write on thread `` and Write on thread `` at ALLOC | = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information diff --git a/tests/fail/data_race/write_write_race_stack.rs b/tests/fail/data_race/write_write_race_stack.rs index 49de5db43b..3c1eabbf25 100644 --- a/tests/fail/data_race/write_write_race_stack.rs +++ b/tests/fail/data_race/write_write_race_stack.rs @@ -40,7 +40,7 @@ pub fn main() { sleep(Duration::from_millis(200)); - stack_var = 1usize; //~ ERROR Data race detected between Write on Thread(id = 1) and Write on Thread(id = 2) + stack_var = 1usize; //~ ERROR Data race detected between Write on thread `` and Write on thread `` // read to silence errors stack_var diff --git a/tests/fail/data_race/write_write_race_stack.stderr b/tests/fail/data_race/write_write_race_stack.stderr index 2012643431..1f7318e6f9 100644 --- a/tests/fail/data_race/write_write_race_stack.stderr +++ b/tests/fail/data_race/write_write_race_stack.stderr @@ -1,8 +1,8 @@ -error: Undefined Behavior: Data race detected between Write on Thread(id = 1) and Write on Thread(id = 2) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) +error: Undefined Behavior: Data race detected between Write on thread `` and Write on thread `` at ALLOC --> $DIR/write_write_race_stack.rs:LL:CC | LL | 
stack_var = 1usize; - | ^^^^^^^^^^^^^^^^^^ Data race detected between Write on Thread(id = 1) and Write on Thread(id = 2) at ALLOC (current vector clock = VClock, conflicting timestamp = VClock) + | ^^^^^^^^^^^^^^^^^^ Data race detected between Write on thread `` and Write on thread `` at ALLOC | = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information diff --git a/tests/pass/libc.rs b/tests/pass/libc.rs index b108a01dae..bf7a59da97 100644 --- a/tests/pass/libc.rs +++ b/tests/pass/libc.rs @@ -218,7 +218,8 @@ fn test_prctl_thread_name() { libc::prctl(libc::PR_GET_NAME, buf.as_mut_ptr(), 0 as c_long, 0 as c_long, 0 as c_long), 0, ); - assert_eq!(b"\0", &buf); + // Rust runtime might set thread name, so we allow two options here. + assert!(&buf[..10] == b"\0" || &buf[..5] == b"main\0"); let thread_name = CString::new("hello").expect("CString::new failed"); assert_eq!( libc::prctl(