
Commit

Auto merge of #2302 - RalfJung:datarace, r=RalfJung
avoid copying thread manager state in data race detector

When doing #2047, I did not realize that there is some redundant state here that we can now remove from the data race detector.

Also, this removes the vector clocks from the data race errors, since those don't really help diagnose the problem.
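The gist of the refactor can be illustrated with a small, self-contained sketch. The names below (ThreadManager, DataRaceState, ThreadClockSet, current_thread_state) mirror identifiers that appear in the diff, but the bodies are simplified, hypothetical stand-ins rather than Miri's actual implementation; the point is only that the detector no longer caches which thread is active and instead asks the thread manager that callers already hold.

// Minimal sketch of the refactoring pattern; all types are stand-ins, not Miri's real code.
#[derive(Clone, Copy, Debug)]
struct ThreadId(usize);

/// Stand-in for Miri's `ThreadManager`: the single source of truth
/// for the currently active thread.
struct ThreadManager {
    active_thread: ThreadId,
}

/// Stand-in for the per-thread vector-clock state kept by the race detector.
#[derive(Default, Debug)]
struct ThreadClockSet {
    clock: u64, // the real detector stores a vector clock; a counter suffices here
}

/// Stand-in for the data-race detector's global state. Before the change it
/// also mirrored "which thread is current" and had to be kept in sync with
/// the thread manager; after the change it only stores per-thread clocks.
struct DataRaceState {
    per_thread: Vec<ThreadClockSet>,
}

impl DataRaceState {
    /// The current thread is looked up from the thread manager the caller
    /// already has, instead of from duplicated state.
    fn current_thread_state(&self, threads: &ThreadManager) -> (ThreadId, &ThreadClockSet) {
        let idx = threads.active_thread;
        (idx, &self.per_thread[idx.0])
    }
}

fn main() {
    let threads = ThreadManager { active_thread: ThreadId(1) };
    let race = DataRaceState {
        per_thread: vec![ThreadClockSet::default(), ThreadClockSet { clock: 3 }],
    };
    let (idx, clocks) = race.current_thread_state(&threads);
    println!("thread {:?} runs with clock {:?}", idx, clocks.clock);
}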
bors committed Jul 1, 2022
2 parents 70b97a5 + 86a2bf4 commit 2d528ed
Showing 61 changed files with 431 additions and 419 deletions.
285 changes: 129 additions & 156 deletions src/concurrency/data_race.rs

Large diffs are not rendered by default.

74 changes: 41 additions & 33 deletions src/concurrency/weak_memory.rs
@@ -9,7 +9,7 @@
 //! Note that this implementation does not take into account of C++20's memory model revision to SC accesses
 //! and fences introduced by P0668 (<https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0668r5.html>).
 //! This implementation is not fully correct under the revised C++20 model and may generate behaviours C++20
-//! disallows.
+//! disallows (<https://github.com/rust-lang/miri/issues/2301>).
 //!
 //! Rust follows the C++20 memory model (except for the Consume ordering and some operations not performable through C++'s
 //! std::atomic<T> API). It is therefore possible for this implementation to generate behaviours never observable when the
@@ -82,10 +82,12 @@ use rustc_const_eval::interpret::{
 };
 use rustc_data_structures::fx::FxHashMap;
 
-use crate::{AtomicReadOp, AtomicRwOp, AtomicWriteOp, Tag, VClock, VTimestamp, VectorIdx};
+use crate::{
+    AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, Tag, ThreadManager, VClock, VTimestamp, VectorIdx,
+};
 
 use super::{
-    data_race::{GlobalState, ThreadClockSet},
+    data_race::{GlobalState as DataRaceState, ThreadClockSet},
     range_object_map::{AccessType, RangeObjectMap},
 };

@@ -149,7 +151,7 @@ impl StoreBufferAlloc {
     /// before without data race, we can determine that the non-atomic access fully happens
     /// after all the prior atomic accesses so the location no longer needs to exhibit
     /// any weak memory behaviours until further atomic accesses.
-    pub fn memory_accessed(&self, range: AllocRange, global: &GlobalState) {
+    pub fn memory_accessed(&self, range: AllocRange, global: &DataRaceState) {
         if !global.ongoing_action_data_race_free() {
             let mut buffers = self.store_buffers.borrow_mut();
             let access_type = buffers.access_type(range);
@@ -236,17 +238,18 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
     }
 
     /// Reads from the last store in modification order
-    fn read_from_last_store(&self, global: &GlobalState) {
+    fn read_from_last_store(&self, global: &DataRaceState, thread_mgr: &ThreadManager<'_, '_>) {
         let store_elem = self.buffer.back();
         if let Some(store_elem) = store_elem {
-            let (index, clocks) = global.current_thread_state();
+            let (index, clocks) = global.current_thread_state(thread_mgr);
             store_elem.load_impl(index, &clocks);
         }
     }
 
     fn buffered_read(
         &self,
-        global: &GlobalState,
+        global: &DataRaceState,
+        thread_mgr: &ThreadManager<'_, '_>,
         is_seqcst: bool,
         rng: &mut (impl rand::Rng + ?Sized),
         validate: impl FnOnce() -> InterpResult<'tcx>,
@@ -257,7 +260,7 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
         let store_elem = {
             // The `clocks` we got here must be dropped before calling validate_atomic_load
             // as the race detector will update it
-            let (.., clocks) = global.current_thread_state();
+            let (.., clocks) = global.current_thread_state(thread_mgr);
             // Load from a valid entry in the store buffer
             self.fetch_store(is_seqcst, &clocks, &mut *rng)
         };
@@ -268,18 +271,19 @@ impl<'mir, 'tcx: 'mir> StoreBuffer {
         // requires access to ThreadClockSet.clock, which is updated by the race detector
         validate()?;
 
-        let (index, clocks) = global.current_thread_state();
+        let (index, clocks) = global.current_thread_state(thread_mgr);
         let loaded = store_elem.load_impl(index, &clocks);
         Ok(loaded)
     }
 
     fn buffered_write(
         &mut self,
         val: ScalarMaybeUninit<Tag>,
-        global: &GlobalState,
+        global: &DataRaceState,
+        thread_mgr: &ThreadManager<'_, '_>,
         is_seqcst: bool,
     ) -> InterpResult<'tcx> {
-        let (index, clocks) = global.current_thread_state();
+        let (index, clocks) = global.current_thread_state(thread_mgr);
 
         self.store_impl(val, index, &clocks.clock, is_seqcst);
         Ok(())
@@ -428,8 +432,11 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
         {
             let range = alloc_range(base_offset, place.layout.size);
             if alloc_buffers.is_overlapping(range)
-                && !alloc_clocks
-                    .race_free_with_atomic(range, this.machine.data_race.as_ref().unwrap())
+                && !alloc_clocks.race_free_with_atomic(
+                    range,
+                    this.machine.data_race.as_ref().unwrap(),
+                    &this.machine.threads,
+                )
             {
                 throw_unsup_format!(
                     "racy imperfectly overlapping atomic access is not possible in the C++20 memory model, and not supported by Miri's weak memory emulation"
@@ -443,41 +450,41 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
         &mut self,
         new_val: ScalarMaybeUninit<Tag>,
         place: &MPlaceTy<'tcx, Tag>,
-        atomic: AtomicRwOp,
+        atomic: AtomicRwOrd,
         init: ScalarMaybeUninit<Tag>,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?;
         if let (
             crate::AllocExtra { weak_memory: Some(alloc_buffers), .. },
-            crate::Evaluator { data_race: Some(global), .. },
+            crate::Evaluator { data_race: Some(global), threads, .. },
         ) = this.get_alloc_extra_mut(alloc_id)?
         {
-            if atomic == AtomicRwOp::SeqCst {
-                global.sc_read();
-                global.sc_write();
+            if atomic == AtomicRwOrd::SeqCst {
+                global.sc_read(threads);
+                global.sc_write(threads);
             }
             let range = alloc_range(base_offset, place.layout.size);
             let buffer = alloc_buffers.get_or_create_store_buffer_mut(range, init)?;
-            buffer.read_from_last_store(global);
-            buffer.buffered_write(new_val, global, atomic == AtomicRwOp::SeqCst)?;
+            buffer.read_from_last_store(global, threads);
+            buffer.buffered_write(new_val, global, threads, atomic == AtomicRwOrd::SeqCst)?;
         }
         Ok(())
     }
 
     fn buffered_atomic_read(
         &self,
         place: &MPlaceTy<'tcx, Tag>,
-        atomic: AtomicReadOp,
+        atomic: AtomicReadOrd,
         latest_in_mo: ScalarMaybeUninit<Tag>,
         validate: impl FnOnce() -> InterpResult<'tcx>,
     ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
         let this = self.eval_context_ref();
         if let Some(global) = &this.machine.data_race {
             let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?;
             if let Some(alloc_buffers) = this.get_alloc_extra(alloc_id)?.weak_memory.as_ref() {
-                if atomic == AtomicReadOp::SeqCst {
-                    global.sc_read();
+                if atomic == AtomicReadOrd::SeqCst {
+                    global.sc_read(&this.machine.threads);
                 }
                 let mut rng = this.machine.rng.borrow_mut();
                 let buffer = alloc_buffers.get_or_create_store_buffer(
@@ -486,7 +493,8 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
                 )?;
                 let loaded = buffer.buffered_read(
                     global,
-                    atomic == AtomicReadOp::SeqCst,
+                    &this.machine.threads,
+                    atomic == AtomicReadOrd::SeqCst,
                     &mut *rng,
                     validate,
                 )?;
@@ -504,18 +512,18 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
         &mut self,
         val: ScalarMaybeUninit<Tag>,
         dest: &MPlaceTy<'tcx, Tag>,
-        atomic: AtomicWriteOp,
+        atomic: AtomicWriteOrd,
         init: ScalarMaybeUninit<Tag>,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(dest.ptr)?;
         if let (
             crate::AllocExtra { weak_memory: Some(alloc_buffers), .. },
-            crate::Evaluator { data_race: Some(global), .. },
+            crate::Evaluator { data_race: Some(global), threads, .. },
         ) = this.get_alloc_extra_mut(alloc_id)?
         {
-            if atomic == AtomicWriteOp::SeqCst {
-                global.sc_write();
+            if atomic == AtomicWriteOrd::SeqCst {
+                global.sc_write(threads);
             }
 
             // UGLY HACK: in write_scalar_atomic() we don't know the value before our write,
@@ -535,7 +543,7 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
                 buffer.buffer.pop_front();
             }
 
-            buffer.buffered_write(val, global, atomic == AtomicWriteOp::SeqCst)?;
+            buffer.buffered_write(val, global, threads, atomic == AtomicWriteOrd::SeqCst)?;
         }
 
         // Caller should've written to dest with the vanilla scalar write, we do nothing here
@@ -548,21 +556,21 @@ pub(super) trait EvalContextExt<'mir, 'tcx: 'mir>:
     fn perform_read_on_buffered_latest(
         &self,
         place: &MPlaceTy<'tcx, Tag>,
-        atomic: AtomicReadOp,
+        atomic: AtomicReadOrd,
         init: ScalarMaybeUninit<Tag>,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_ref();
 
         if let Some(global) = &this.machine.data_race {
-            if atomic == AtomicReadOp::SeqCst {
-                global.sc_read();
+            if atomic == AtomicReadOrd::SeqCst {
+                global.sc_read(&this.machine.threads);
            }
             let size = place.layout.size;
             let (alloc_id, base_offset, ..) = this.ptr_get_alloc_id(place.ptr)?;
             if let Some(alloc_buffers) = this.get_alloc_extra(alloc_id)?.weak_memory.as_ref() {
                 let buffer = alloc_buffers
                     .get_or_create_store_buffer(alloc_range(base_offset, size), init)?;
-                buffer.read_from_last_store(global);
+                buffer.read_from_last_store(global, &this.machine.threads);
             }
         }
         Ok(())
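A side note on the scoping that the comments in buffered_read above insist on: the clocks obtained from current_thread_state have to be dropped before validate() runs, because the race detector then updates that same per-thread state. In Rust terms this is the usual shared-vs-mutable RefCell borrow conflict. The following standalone sketch (plain RefCell, hypothetical names, not Miri's types) shows why the ordering matters:

use std::cell::RefCell;

// Hypothetical stand-in for per-thread clock state kept behind a RefCell.
struct Detector {
    clocks: RefCell<u64>,
}

impl Detector {
    // Analogue of `current_thread_state`: hands out a shared borrow.
    fn current(&self) -> std::cell::Ref<'_, u64> {
        self.clocks.borrow()
    }
    // Analogue of the race detector updating the clocks: needs a mutable borrow.
    fn validate(&self) {
        *self.clocks.borrow_mut() += 1;
    }
}

fn main() {
    let d = Detector { clocks: RefCell::new(0) };
    {
        // Copy what we need out of the borrow and drop it *before* validate(),
        // mirroring the scoping used in `buffered_read`.
        let snapshot = *d.current();
        println!("clock before validate: {}", snapshot);
    } // shared borrow dropped here
    d.validate(); // would panic at runtime if the shared borrow were still live
    println!("clock after validate: {}", *d.current());
}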
2 changes: 1 addition & 1 deletion src/lib.rs
@@ -70,7 +70,7 @@ pub use crate::shims::tls::{EvalContextExt as _, TlsData};
 pub use crate::shims::EvalContextExt as _;
 
 pub use crate::concurrency::data_race::{
-    AtomicFenceOp, AtomicReadOp, AtomicRwOp, AtomicWriteOp,
+    AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd,
     EvalContextExt as DataRaceEvalContextExt,
 };
 pub use crate::diagnostics::{
28 changes: 24 additions & 4 deletions src/machine.rs
@@ -647,7 +647,12 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
             None
         };
         let race_alloc = if let Some(data_race) = &ecx.machine.data_race {
-            Some(data_race::AllocExtra::new_allocation(data_race, alloc.size(), kind))
+            Some(data_race::AllocExtra::new_allocation(
+                data_race,
+                &ecx.machine.threads,
+                alloc.size(),
+                kind,
+            ))
         } else {
             None
         };
@@ -756,7 +761,12 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
         range: AllocRange,
     ) -> InterpResult<'tcx> {
         if let Some(data_race) = &alloc_extra.data_race {
-            data_race.read(alloc_id, range, machine.data_race.as_ref().unwrap())?;
+            data_race.read(
+                alloc_id,
+                range,
+                machine.data_race.as_ref().unwrap(),
+                &machine.threads,
+            )?;
         }
         if let Some(stacked_borrows) = &alloc_extra.stacked_borrows {
             stacked_borrows.borrow_mut().memory_read(
@@ -782,7 +792,12 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
         range: AllocRange,
     ) -> InterpResult<'tcx> {
         if let Some(data_race) = &mut alloc_extra.data_race {
-            data_race.write(alloc_id, range, machine.data_race.as_mut().unwrap())?;
+            data_race.write(
+                alloc_id,
+                range,
+                machine.data_race.as_mut().unwrap(),
+                &machine.threads,
+            )?;
         }
         if let Some(stacked_borrows) = &mut alloc_extra.stacked_borrows {
             stacked_borrows.get_mut().memory_written(
@@ -811,7 +826,12 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
             register_diagnostic(NonHaltingDiagnostic::FreedAlloc(alloc_id));
         }
         if let Some(data_race) = &mut alloc_extra.data_race {
-            data_race.deallocate(alloc_id, range, machine.data_race.as_mut().unwrap())?;
+            data_race.deallocate(
+                alloc_id,
+                range,
+                machine.data_race.as_mut().unwrap(),
+                &machine.threads,
+            )?;
         }
         if let Some(stacked_borrows) = &mut alloc_extra.stacked_borrows {
             stacked_borrows.get_mut().memory_deallocated(
