From 6faae4d36bd2fa02ef31b480703bfdb0cc4746fc Mon Sep 17 00:00:00 2001 From: Dylan MacKenzie Date: Sun, 8 Dec 2019 11:33:36 -0800 Subject: [PATCH 01/10] Add `erase_alloc_id` method to various Miri primitives This method is based on `erase_tag`, and replaces `AllocId`s with `()`. --- src/librustc/mir/interpret/pointer.rs | 7 +++++++ src/librustc/mir/interpret/value.rs | 20 ++++++++++++++++++++ src/librustc_mir/interpret/operand.rs | 21 +++++++++++++++++++++ src/librustc_mir/interpret/place.rs | 11 +++++++++++ 4 files changed, 59 insertions(+) diff --git a/src/librustc/mir/interpret/pointer.rs b/src/librustc/mir/interpret/pointer.rs index 0b27f512e55b8..df346e477e8d4 100644 --- a/src/librustc/mir/interpret/pointer.rs +++ b/src/librustc/mir/interpret/pointer.rs @@ -201,6 +201,13 @@ impl<'tcx, Tag> Pointer { Pointer { alloc_id: self.alloc_id, offset: self.offset, tag: () } } + /// Replace any allocation IDs in this type with `()`. + /// + /// Used when comparing heap snapshots. + pub fn erase_alloc_id(self) -> Pointer { + Pointer { alloc_id: (), offset: self.offset, tag: self.tag } + } + /// Test if the pointer is "inbounds" of an allocation of the given size. /// A pointer is "inbounds" even if its offset is equal to the size; this is /// a "one-past-the-end" pointer. diff --git a/src/librustc/mir/interpret/value.rs b/src/librustc/mir/interpret/value.rs index a038ca23ae92d..205410303c8a2 100644 --- a/src/librustc/mir/interpret/value.rs +++ b/src/librustc/mir/interpret/value.rs @@ -153,6 +153,16 @@ impl<'tcx, Tag> Scalar { } } + /// Replace any allocation IDs in this type with `()`. + /// + /// Used when comparing heap snapshots. 
+ pub fn erase_alloc_id(self) -> Scalar { + match self { + Scalar::Ptr(ptr) => Scalar::Ptr(ptr.erase_alloc_id()), + Scalar::Raw { data, size } => Scalar::Raw { data, size }, + } + } + #[inline] pub fn ptr_null(cx: &impl HasDataLayout) -> Self { Scalar::Raw { @@ -509,6 +519,16 @@ impl<'tcx, Tag> ScalarMaybeUndef { } } + /// Replace any allocation IDs in this type with `()`. + /// + /// Used when comparing heap snapshots. + pub fn erase_alloc_id(self) -> ScalarMaybeUndef { + match self { + ScalarMaybeUndef::Scalar(s) => ScalarMaybeUndef::Scalar(s.erase_alloc_id()), + ScalarMaybeUndef::Undef => ScalarMaybeUndef::Undef, + } + } + #[inline] pub fn not_undef(self) -> InterpResult<'static, Scalar> { match self { diff --git a/src/librustc_mir/interpret/operand.rs b/src/librustc_mir/interpret/operand.rs index 48e7193ec39d4..3ed877c6cd9f0 100644 --- a/src/librustc_mir/interpret/operand.rs +++ b/src/librustc_mir/interpret/operand.rs @@ -91,6 +91,17 @@ impl<'tcx, Tag> Immediate { Immediate::ScalarPair(a, b) => Ok((a.not_undef()?, b.not_undef()?)) } } + + /// Replace any allocation IDs in this type with `()`. + /// + /// Used when comparing heap snapshots. + pub(super) fn erase_alloc_id(self) -> Immediate { + match self { + Immediate::Scalar(a) => Immediate::Scalar(a.erase_alloc_id()), + Immediate::ScalarPair(a, b) + => Immediate::ScalarPair(a.erase_alloc_id(), b.erase_alloc_id()), + } + } } // ScalarPair needs a type to interpret, so we often have an immediate and a type together @@ -176,6 +187,16 @@ impl Operand { } } + + /// Replace any allocation IDs in this type with `()`. + /// + /// Used when comparing heap snapshots. 
+ pub(super) fn erase_alloc_id(self) -> Operand { + match self { + Operand::Immediate(imm) => Operand::Immediate(imm.erase_alloc_id()), + Operand::Indirect(place) => Operand::Indirect(place.erase_alloc_id()), + } + } } #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs index a600eb11e1d03..d272e14570d77 100644 --- a/src/librustc_mir/interpret/place.rs +++ b/src/librustc_mir/interpret/place.rs @@ -105,6 +105,17 @@ impl MemPlace { } } + /// Replace any allocation IDs in this type with `()`. + /// + /// Used when comparing heap snapshots. + pub fn erase_alloc_id(self) -> MemPlace { + MemPlace { + ptr: self.ptr.erase_alloc_id(), + align: self.align, + meta: self.meta.map(Scalar::erase_alloc_id), + } + } + #[inline(always)] pub fn from_scalar_ptr(ptr: Scalar, align: Align) -> Self { MemPlace { From f0148a5bdaa17db7481d79a9b811fd04cb146503 Mon Sep 17 00:00:00 2001 From: Dylan MacKenzie Date: Sun, 8 Dec 2019 11:33:45 -0800 Subject: [PATCH 02/10] Derive `Hash` and `Eq` for some Miri primitives --- src/librustc_mir/interpret/eval_context.rs | 2 +- src/librustc_mir/interpret/memory.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs index 653718c462f07..69ca0c2b734e4 100644 --- a/src/librustc_mir/interpret/eval_context.rs +++ b/src/librustc_mir/interpret/eval_context.rs @@ -121,7 +121,7 @@ pub struct LocalState<'tcx, Tag=(), Id=AllocId> { } /// Current value of a local variable -#[derive(Clone, PartialEq, Eq, Debug, HashStable)] // Miri debug-prints these +#[derive(Clone, PartialEq, Eq, Debug, Hash, HashStable)] // Miri debug-prints these pub enum LocalValue { /// This local is not currently alive, and cannot be used at all. 
Dead, diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs index 8f177ad122580..ce784d4565489 100644 --- a/src/librustc_mir/interpret/memory.rs +++ b/src/librustc_mir/interpret/memory.rs @@ -22,7 +22,7 @@ use super::{ Machine, AllocMap, MayLeak, ErrorHandled, CheckInAllocMsg, }; -#[derive(Debug, PartialEq, Copy, Clone)] +#[derive(Debug, PartialEq, Eq, Copy, Clone, Hash)] pub enum MemoryKind { /// Stack memory. Error if deallocated except during a stack pop. Stack, From 840e323965be8c269278f76c1d1fb434df75f89a Mon Sep 17 00:00:00 2001 From: Dylan MacKenzie Date: Sun, 8 Dec 2019 11:33:48 -0800 Subject: [PATCH 03/10] Add DFS iterator for `Memory` --- src/librustc_mir/interpret/memory.rs | 53 ++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs index ce784d4565489..a9c220d72643f 100644 --- a/src/librustc_mir/interpret/memory.rs +++ b/src/librustc_mir/interpret/memory.rs @@ -950,3 +950,56 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { } } } + +/// A depth-first search over the allocation graph. +/// +/// This is based on the DFS in `rustc_data_structures`, which we cannot use directly because +/// `AllocId` does not implement `Idx`. +pub struct DepthFirstSearch<'mem, 'mir, 'tcx, M: Machine<'mir, 'tcx>> { + memory: &'mem Memory<'mir, 'tcx, M>, + visited: FxHashSet, + stack: Vec, +} + +impl> DepthFirstSearch<'mem, 'mir, 'tcx, M> { + /// Returns a new DFS iterator that will traverse all allocations reachable from the given + /// `AllocId`s. + /// + /// The first node in `roots` will be the first node visited by the DFS. 
+ pub fn with_roots( + memory: &'mem Memory<'mir, 'tcx, M>, + roots: impl IntoIterator, + ) -> Self { + let mut stack: Vec<_> = roots.into_iter().collect(); + stack.reverse(); + + DepthFirstSearch { + memory, + visited: stack.iter().copied().collect(), + stack, + } + } +} + +impl> Iterator for DepthFirstSearch<'mem, 'mir, 'tcx, M> { + type Item = (AllocId, InterpResult<'tcx, &'mem Allocation>); + + fn next(&mut self) -> Option { + let DepthFirstSearch { stack, visited, memory } = self; + + let id = stack.pop()?; + let alloc = memory.get_raw(id); + + if let Ok(alloc) = alloc { + let new_pointers = alloc + .relocations() + .values() + .map(|&(_, id)| id) + .filter(|id| visited.insert(*id)); + + stack.extend(new_pointers); + } + + Some((id, alloc)) + } +} From fa5ff3afa6657d0a9832a5f1616edd4a2a401ea7 Mon Sep 17 00:00:00 2001 From: Dylan MacKenzie Date: Sun, 8 Dec 2019 11:33:51 -0800 Subject: [PATCH 04/10] Remove unused `Clone` impl for `Memory` --- src/librustc_mir/interpret/memory.rs | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs index a9c220d72643f..0bccdf92e1ed1 100644 --- a/src/librustc_mir/interpret/memory.rs +++ b/src/librustc_mir/interpret/memory.rs @@ -114,24 +114,6 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for Memory<'mir, 'tcx, M> } } -// FIXME: Really we shouldn't clone memory, ever. Snapshot machinery should instead -// carefully copy only the reachable parts. 
-impl<'mir, 'tcx, M> Clone for Memory<'mir, 'tcx, M> -where - M: Machine<'mir, 'tcx, PointerTag = (), AllocExtra = (), MemoryExtra = ()>, - M::MemoryMap: AllocMap, Allocation)>, -{ - fn clone(&self) -> Self { - Memory { - alloc_map: self.alloc_map.clone(), - extra_fn_ptr_map: self.extra_fn_ptr_map.clone(), - dead_alloc_map: self.dead_alloc_map.clone(), - extra: (), - tcx: self.tcx, - } - } -} - impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { pub fn new(tcx: TyCtxtAt<'tcx>, extra: M::MemoryExtra) -> Self { Memory { From 37844e573ae799b4c668aa13a894e3c2a7006114 Mon Sep 17 00:00:00 2001 From: Dylan MacKenzie Date: Sun, 8 Dec 2019 11:33:54 -0800 Subject: [PATCH 05/10] Reimplement CTFE snapshot comparison using a DFS --- src/librustc_mir/const_eval.rs | 15 +- src/librustc_mir/interpret/memory.rs | 4 +- src/librustc_mir/interpret/snapshot.rs | 708 ++++++++++++++----------- 3 files changed, 417 insertions(+), 310 deletions(-) diff --git a/src/librustc_mir/const_eval.rs b/src/librustc_mir/const_eval.rs index b219fec31dc59..3cacdbfe34dd1 100644 --- a/src/librustc_mir/const_eval.rs +++ b/src/librustc_mir/const_eval.rs @@ -514,13 +514,16 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, } } + // Emit a warning if this is the first time we have triggered the loop detector. This means + // we have been executing for quite some time. 
let span = ecx.frame().span; - ecx.machine.loop_detector.observe_and_analyze( - *ecx.tcx, - span, - &ecx.memory, - &ecx.stack[..], - ) + if ecx.machine.loop_detector.is_empty() { + // FIXME(#49980): make this warning a lint + ecx.tcx.sess.span_warn(span, + "Constant evaluating a complex constant, this might take some time"); + } + + ecx.machine.loop_detector.observe_and_analyze(&ecx.stack, &ecx.memory) } #[inline(always)] diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs index 0bccdf92e1ed1..7e13c2f346a12 100644 --- a/src/librustc_mir/interpret/memory.rs +++ b/src/librustc_mir/interpret/memory.rs @@ -935,8 +935,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { /// A depth-first search over the allocation graph. /// -/// This is based on the DFS in `rustc_data_structures`, which we cannot use directly because -/// `AllocId` does not implement `Idx`. +/// This is based on the DFS iterator in `rustc_data_structures`, which we cannot use directly +/// because `AllocId` does not implement `Idx` (it is not dense). pub struct DepthFirstSearch<'mem, 'mir, 'tcx, M: Machine<'mir, 'tcx>> { memory: &'mem Memory<'mir, 'tcx, M>, visited: FxHashSet, diff --git a/src/librustc_mir/interpret/snapshot.rs b/src/librustc_mir/interpret/snapshot.rs index 3ea00d6922186..94456e722d08c 100644 --- a/src/librustc_mir/interpret/snapshot.rs +++ b/src/librustc_mir/interpret/snapshot.rs @@ -1,31 +1,72 @@ -//! This module contains the machinery necessary to detect infinite loops -//! during const-evaluation by taking snapshots of the state of the interpreter -//! at regular intervals. - -// This lives in `interpret` because it needs access to all sots of private state. However, -// it is not used by the general miri engine, just by CTFE. - +//! Capture and compare snapshots of the compile-time interpreter state to detect when a program +//! will loop infinitely. +//! +//! 
This lives in `interpret` because it needs access to all sorts of private state. However,
+//! it is not used by the general miri engine, just by CTFE.
+//!
+//! # Comparing interpreter state
+//!
+//! If we observe the same interpreter state at two different points in time during const-eval, we
+//! can be certain that the program is in an infinite loop. For `CompileTimeInterpreter`, the
+//! relevant bits of interpreter state are the stack and the heap. We refer to a copy of this state
+//! as a "snapshot".
+//!
+//! While comparing stacks between snapshots is straightforward, comparing heaps between snapshots
+//! is not. This is because `AllocId`s, which determine the target of a pointer, are frequently
+//! thrown away and recreated, even if the actual value in memory did not change. Consider the
+//! following code:
+//!
+//! ```rust,ignore(const-loop)
+//! const fn inf_loop() {
+//! // Function call to prevent promotion.
+//! const fn zeros() -> [isize; 4] {
+//! [0; 4]
+//! }
+//!
+//! loop {
+//! let arr = &zeros();
+//! if false {
+//! break;
+//! }
+//! }
+//! }
+//! ```
+//!
+//! Although this program will loop indefinitely, a new `AllocId` will be created for the value
+//! pointed to by `arr` at each iteration of the loop. A naive method for comparing snapshots, one
+//! that considered the numeric value of `AllocId`s, would cause this class of infinite loops to be
+//! missed. See [#52475](https://github.com/rust-lang/rust/issues/52475).
+//!
+//! Instead, we assign a new, linear index, one that is used only while comparing snapshots, to
+//! each allocation. This index, hereafter referred to as the "DFS index", is the order that each
+//! allocation would be encountered during a depth-first search of all allocations reachable from
+//! the stack via a pointer. See below for an example of this numbering scheme.
+//!
+//! ```text
+//! Stack pointers: x x (DFS starts with leftmost pointer on top of stack)
+//! | |
+//! Heap allocs: 1 o o 3
+//! | /|
+//! 
|/ | +//! 2 o o 4 +//! ``` +//! +//! Instead of comparing `AllocId`s between snapshots, we first map `AllocId`s in each snapshot to +//! their DFS index, then compare those instead. + +use std::convert::TryFrom; use std::hash::{Hash, Hasher}; -use rustc::ich::StableHashingContextProvider; use rustc::mir; -use rustc::mir::interpret::{ - AllocId, Pointer, Scalar, - Relocations, Allocation, UndefMask, InterpResult, -}; - -use rustc::ty::{self, TyCtxt}; -use rustc::ty::layout::{Align, Size}; -use rustc_data_structures::fx::FxHashSet; -use rustc_index::vec::IndexVec; -use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; -use rustc_macros::HashStable; -use syntax::ast::Mutability; -use syntax::source_map::Span; - -use super::eval_context::{LocalState, StackPopCleanup}; -use super::{Frame, Memory, Operand, MemPlace, Place, Immediate, ScalarMaybeUndef, LocalValue}; +use rustc::mir::interpret::{Allocation, InterpResult}; +use rustc::ty::layout::Size; +use rustc_data_structures::fx::{FxHasher, FxHashMap, FxHashSet}; + +use super::{memory, AllocId, Frame, Immediate, MemPlace, Operand}; use crate::const_eval::CompileTimeInterpreter; +use crate::interpret::{LocalState, LocalValue}; + +type CtfeMemory<'mir, 'tcx> = Memory<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>; #[derive(Default)] pub(crate) struct InfiniteLoopDetector<'mir, 'tcx> { @@ -46,32 +87,22 @@ pub(crate) struct InfiniteLoopDetector<'mir, 'tcx> { impl<'mir, 'tcx> InfiniteLoopDetector<'mir, 'tcx> { pub fn observe_and_analyze( &mut self, - tcx: TyCtxt<'tcx>, - span: Span, - memory: &Memory<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>, stack: &[Frame<'mir, 'tcx>], + memory: &CtfeMemory<'mir, 'tcx>, ) -> InterpResult<'tcx, ()> { - // Compute stack's hash before copying anything - let mut hcx = tcx.get_stable_hashing_context(); - let mut hasher = StableHasher::new(); - stack.hash_stable(&mut hcx, &mut hasher); - let hash = hasher.finish::(); - - // Check if we know that hash already - if 
self.hashes.is_empty() { - // FIXME(#49980): make this warning a lint - tcx.sess.span_warn(span, - "Constant evaluating a complex constant, this might take some time"); - } + let snapshot = InterpRef::new(stack, memory); + + let mut hasher = FxHasher::default(); + snapshot.hash(&mut hasher); + let hash = hasher.finish(); + if self.hashes.insert(hash) { // No collision return Ok(()) } - // We need to make a full copy. NOW things that to get really expensive. - info!("snapshotting the state of the interpreter"); - - if self.snapshots.insert(InterpSnapshot::new(memory, stack)) { + // We need to make a full copy. NOW things start to get really expensive. + if self.snapshots.insert(InterpSnapshot::capture(stack, memory)) { // Spurious collision or first cycle return Ok(()) } @@ -79,328 +110,401 @@ impl<'mir, 'tcx> InfiniteLoopDetector<'mir, 'tcx> { // Second cycle throw_exhaust!(InfiniteLoop) } -} -trait SnapshotContext<'a> { - fn resolve(&'a self, id: &AllocId) -> Option<&'a Allocation>; + /// Returns `true` if the `InfiniteLoopDetector` has not yet been invoked. + pub fn is_empty(&self) -> bool { + self.hashes.is_empty() + } } -/// Taking a snapshot of the evaluation context produces a view of -/// the state of the interpreter that is invariant to `AllocId`s. -trait Snapshot<'a, Ctx: SnapshotContext<'a>> { - type Item; - fn snapshot(&self, ctx: &'a Ctx) -> Self::Item; +/// A copy of the virtual machine state at a certain point in time during const-evaluation. +struct InterpSnapshot<'mir, 'tcx> { + stack: Vec>, + memory: CtfeMemory<'mir, 'tcx>, } -macro_rules! 
__impl_snapshot_field { - ($field:ident, $ctx:expr) => ($field.snapshot($ctx)); - ($field:ident, $ctx:expr, $delegate:expr) => ($delegate); -} +impl InterpSnapshot<'mir, 'tcx> { + fn capture(stack: &'a [Frame<'mir, 'tcx>], memory: &'a CtfeMemory<'mir, 'tcx>) -> Self { + info!("snapshotting the state of the interpreter"); -// This assumes the type has two type parameters, first for the tag (set to `()`), -// then for the id -macro_rules! impl_snapshot_for { - (enum $enum_name:ident { - $( $variant:ident $( ( $($field:ident $(-> $delegate:expr)?),* ) )? ),* $(,)? - }) => { - - impl<'a, Ctx> self::Snapshot<'a, Ctx> for $enum_name - where Ctx: self::SnapshotContext<'a>, - { - type Item = $enum_name<(), AllocIdSnapshot<'a>>; - - #[inline] - fn snapshot(&self, __ctx: &'a Ctx) -> Self::Item { - match *self { - $( - $enum_name::$variant $( ( $(ref $field),* ) )? => { - $enum_name::$variant $( - ( $( __impl_snapshot_field!($field, __ctx $(, $delegate)?) ),* ) - )? - } - )* - } + // Copy all reachable allocations that exist in `memory.alloc_map` into a new `alloc_map`. + // There's no need to copy allocations in `tcx`, since these are never mutated during + // execution. + let mut heap_snapshot = CtfeMemory::new(memory.tcx, ()); + for (id, _) in InterpRef::new(stack, memory).live_allocs() { + if let Some(alloc) = memory.alloc_map.get(&id) { + heap_snapshot.alloc_map.insert(id, alloc.clone()); + continue; } } - }; - - (struct $struct_name:ident { $($field:ident $(-> $delegate:expr)?),* $(,)? }) => { - impl<'a, Ctx> self::Snapshot<'a, Ctx> for $struct_name - where Ctx: self::SnapshotContext<'a>, - { - type Item = $struct_name<(), AllocIdSnapshot<'a>>; - - #[inline] - fn snapshot(&self, __ctx: &'a Ctx) -> Self::Item { - let $struct_name { - $(ref $field),* - } = *self; - - $struct_name { - $( $field: __impl_snapshot_field!($field, __ctx $(, $delegate)?) 
),* - } - } - } - }; -} -impl<'a, Ctx, T> Snapshot<'a, Ctx> for Option - where Ctx: SnapshotContext<'a>, - T: Snapshot<'a, Ctx> -{ - type Item = Option<>::Item>; + InterpSnapshot { + stack: stack.into(), + memory: heap_snapshot, + } + } - fn snapshot(&self, ctx: &'a Ctx) -> Self::Item { - match self { - Some(x) => Some(x.snapshot(ctx)), - None => None, + fn as_ref(&self) -> InterpRef<'_, 'mir, 'tcx> { + InterpRef { + stack: &self.stack, + memory: &self.memory, } } } -#[derive(Eq, PartialEq)] -struct AllocIdSnapshot<'a>(Option>); - -impl<'a, Ctx> Snapshot<'a, Ctx> for AllocId - where Ctx: SnapshotContext<'a>, -{ - type Item = AllocIdSnapshot<'a>; - - fn snapshot(&self, ctx: &'a Ctx) -> Self::Item { - AllocIdSnapshot(ctx.resolve(self).map(|alloc| alloc.snapshot(ctx))) +impl Eq for InterpSnapshot<'mir, 'tcx> {} +impl PartialEq for InterpSnapshot<'mir, 'tcx> { + fn eq(&self, other: &Self) -> bool { + self.as_ref() == other.as_ref() } } -impl_snapshot_for!(struct Pointer { - alloc_id, - offset -> *offset, // just copy offset verbatim - tag -> *tag, // just copy tag -}); - -impl<'a, Ctx> Snapshot<'a, Ctx> for Scalar - where Ctx: SnapshotContext<'a>, -{ - type Item = Scalar<(), AllocIdSnapshot<'a>>; - - fn snapshot(&self, ctx: &'a Ctx) -> Self::Item { - match self { - Scalar::Ptr(p) => Scalar::Ptr(p.snapshot(ctx)), - Scalar::Raw{ size, data } => Scalar::Raw { - data: *data, - size: *size, - }, - } +impl Hash for InterpSnapshot<'mir, 'tcx> { + fn hash(&self, state: &mut H) { + self.as_ref().hash(state) } } -impl_snapshot_for!(enum ScalarMaybeUndef { - Scalar(s), - Undef, -}); - -impl_snapshot_for!(struct MemPlace { - ptr, - meta, - align -> *align, // just copy alignment verbatim -}); - -impl<'a, Ctx> Snapshot<'a, Ctx> for Place - where Ctx: SnapshotContext<'a>, -{ - type Item = Place<(), AllocIdSnapshot<'a>>; - - fn snapshot(&self, ctx: &'a Ctx) -> Self::Item { - match self { - Place::Ptr(p) => Place::Ptr(p.snapshot(ctx)), - - Place::Local{ frame, local } => Place::Local{ 
- frame: *frame, - local: *local, - }, +/// A reference to the subset of the interpreter state that can change during execution. +/// +/// Having a separate "reference" data type allows us to compare snapshots to the current +/// interpreter state without actually creating a snapshot of the current state. +// +// FIXME(ecstaticmorse): We could do away with this if we refactored the mutable parts of +// `InterpCx` into a single struct, e.g., an `InterpVm` inside of `InterpCx`. `InterpRef` would +// then become `IgnoreAllocId<&InterpVm>` and `InterpSnapshot` would be `IgnoreAllocId`. +struct InterpRef<'a, 'mir, 'tcx> { + stack: &'a [Frame<'mir, 'tcx>], + memory: &'a CtfeMemory<'mir, 'tcx> +} + +impl InterpRef<'a, 'mir, 'tcx> { + fn new(stack: &'a [Frame<'mir, 'tcx>], memory: &'a CtfeMemory<'mir, 'tcx>) -> Self { + InterpRef { + stack, + memory, } } -} -impl_snapshot_for!(enum Immediate { - Scalar(s), - ScalarPair(s, t), -}); - -impl_snapshot_for!(enum Operand { - Immediate(v), - Indirect(m), -}); - -impl_snapshot_for!(enum LocalValue { - Dead, - Uninitialized, - Live(v), -}); - -impl<'a, Ctx> Snapshot<'a, Ctx> for Relocations - where Ctx: SnapshotContext<'a>, -{ - type Item = Relocations<(), AllocIdSnapshot<'a>>; - - fn snapshot(&self, ctx: &'a Ctx) -> Self::Item { - Relocations::from_presorted(self.iter() - .map(|(size, ((), id))| (*size, ((), id.snapshot(ctx)))) - .collect()) + /// Returns an iterator over all memory allocations reachable from anywhere on the stack. + /// + /// The order of iteration is predictable: Calling this method twice on a single + /// `InterpRef` will yield the same `Allocation`s in the same order. 
+ fn live_allocs(&self) + -> memory::DepthFirstSearch<'a, 'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>> + { + memory::DepthFirstSearch::with_roots(self.memory, stack_roots(self.stack).into_iter()) } } -#[derive(Eq, PartialEq)] -struct AllocationSnapshot<'a> { - bytes: &'a [u8], - relocations: Relocations<(), AllocIdSnapshot<'a>>, - undef_mask: &'a UndefMask, - align: &'a Align, - size: &'a Size, - mutability: &'a Mutability, -} +impl PartialEq for InterpRef<'_, 'mir, 'tcx> { + fn eq(&self, other: &Self) -> bool { + let is_stack_eq = self.stack.iter().map(IgnoreAllocId) + .eq(other.stack.iter().map(IgnoreAllocId)); -impl<'a, Ctx> Snapshot<'a, Ctx> for &'a Allocation - where Ctx: SnapshotContext<'a>, -{ - type Item = AllocationSnapshot<'a>; + if !is_stack_eq { + return false; + } - fn snapshot(&self, ctx: &'a Ctx) -> Self::Item { - let Allocation { - size, - align, - mutability, - extra: (), - .. - } = self; + // Map each `AllocId` to its DFS index as we traverse the allocation graph. + let mut dfs_index = DfsIndexForAllocId::from_stack_roots(self.stack); + let mut other_dfs_index = DfsIndexForAllocId::from_stack_roots(other.stack); - let all_bytes = 0..self.len(); - // This 'inspect' is okay since following access respects undef and relocations. This does - // influence interpreter exeuction, but only to detect the error of cycles in evalution - // dependencies. - let bytes = self.inspect_with_undef_and_ptr_outside_interpreter(all_bytes); + for (a, b) in self.live_allocs().zip(other.live_allocs()) { + let (a, b) = match (a.1, b.1) { + (Ok(a), Ok(b)) => (a, b), - let undef_mask = self.undef_mask(); - let relocations = self.relocations(); + // All pointers that cannot be dereferenced are considered equal. We don't + // differentiate between null, dangling, etc. 
+ (Err(_), Err(_)) => continue, - AllocationSnapshot { - bytes, - undef_mask, - align, - size, - mutability, - relocations: relocations.snapshot(ctx), + _ => return false, + }; + + // Compare the size, value, and number of pointers in both allocations, but not the + // `AllocId`s. + if IgnoreAllocId(a) != IgnoreAllocId(b) { + return false; + } + + // Check to see if each pointer in allocation `a` points to the allocation with the same + // DFS index as the corresponding pointer in allocation `b`. + let is_isomorphic = a.relocations().values() + .zip(b.relocations().values()) + .all(|(&(_, pa), &(_, pb))| { + dfs_index.mark_visited(pa); + other_dfs_index.mark_visited(pb); + + dfs_index.get(pa).unwrap() == other_dfs_index.get(pb).unwrap() + }); + + if !is_isomorphic { + return false + } } + + true } } -#[derive(Eq, PartialEq)] -struct FrameSnapshot<'a, 'tcx> { - instance: ty::Instance<'tcx>, - span: Span, - return_to_block: &'a StackPopCleanup, - return_place: Option>>, - locals: IndexVec>>, - block: Option, - stmt: usize, +impl Hash for InterpRef<'_, 'mir, 'tcx> { + fn hash(&self, state: &mut H) { + for frame in self.stack { + IgnoreAllocId(frame).hash(state); + } + + // FIXME(ecstaticmorse): Incorporate the DFS index for each pointer into the hash? + for (_, alloc) in self.live_allocs() { + alloc.ok().map(IgnoreAllocId).hash(state); + } + } } -impl<'a, 'mir, 'tcx, Ctx> Snapshot<'a, Ctx> for &'a Frame<'mir, 'tcx> - where Ctx: SnapshotContext<'a>, -{ - type Item = FrameSnapshot<'a, 'tcx>; +/// A wrapper type for the various interpreter data structures that compares them without +/// considering the numeric value of `AllocId`s. +struct IgnoreAllocId<'a, T>(&'a T); - fn snapshot(&self, ctx: &'a Ctx) -> Self::Item { - let Frame { - body: _, +impl PartialEq for IgnoreAllocId<'_, Frame<'mir, 'tcx>> { + fn eq(&self, IgnoreAllocId(other): &Self) -> bool { + // This *must* remain exhaustive to ensure that all fields are considered for equality. 
+ let super::Frame { + body, instance, - span, - return_to_block, - return_place, locals, block, stmt, - extra: _, - } = self; - - FrameSnapshot { - instance: *instance, - span: *span, - return_to_block, - block: *block, - stmt: *stmt, - return_place: return_place.map(|r| r.snapshot(ctx)), - locals: locals.iter().map(|local| local.snapshot(ctx)).collect(), - } + span, // Not really necessary, but cheap to compare. + + // No need to check these, since when comparing snapshots we have to check whether + // we are at the same statement in the caller's frame. + return_to_block: _, + return_place: _, + + extra: (), + } = self.0; + + std::ptr::eq::>(*body, other.body) + && instance == &other.instance + && block == &other.block + && stmt == &other.stmt + && span == &other.span + && locals.iter().map(IgnoreAllocId) + .eq(other.locals.iter().map(IgnoreAllocId)) } } -impl<'a, 'tcx, Ctx> Snapshot<'a, Ctx> for &'a LocalState<'tcx> - where Ctx: SnapshotContext<'a>, -{ - type Item = LocalValue<(), AllocIdSnapshot<'a>>; +impl<'mir, 'tcx> Hash for IgnoreAllocId<'_, Frame<'mir, 'tcx>> { + fn hash(&self, state: &mut H) { + // See `PartialEq` impl above for description of ignored fields. 
+ let super::Frame { + body, + instance, + locals, + block, + stmt, + span, + return_to_block: _, + return_place: _, + extra: (), + } = &self.0; + + std::ptr::hash::, _>(*body, state); + instance.hash(state); + block.hash(state); + stmt.hash(state); + span.hash(state); - fn snapshot(&self, ctx: &'a Ctx) -> Self::Item { - let LocalState { value, layout: _ } = self; - value.snapshot(ctx) + for local in locals { + IgnoreAllocId(local).hash(state); + } } } -impl<'b, 'mir, 'tcx> SnapshotContext<'b> - for Memory<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>> -{ - fn resolve(&'b self, id: &AllocId) -> Option<&'b Allocation> { - self.get_raw(*id).ok() +impl PartialEq for IgnoreAllocId<'_, LocalState<'tcx>> { + fn eq(&self, IgnoreAllocId(other): &Self) -> bool { + // This *must* remain exhaustive to ensure that all fields are considered for equality. + let super::LocalState { + value, + layout: _, // Memoized result of the layout query. + } = self.0; + + match (value, &other.value) { + // Compare both `Operand`s with any `AllocId`s erased. + (LocalValue::Live(op), LocalValue::Live(other_op)) + => op.erase_alloc_id() == other_op.erase_alloc_id(), + + // Otherwise we can use the `PartialEq` impl of `LocalValue` directly. + _ => value == &other.value, + } } } -/// The virtual machine state during const-evaluation at a given point in time. -/// We assume the `CompileTimeInterpreter` has no interesting extra state that -/// is worth considering here. -#[derive(HashStable)] -struct InterpSnapshot<'mir, 'tcx> { - // Not hashing memory: Avoid hashing memory all the time during execution - #[stable_hasher(ignore)] - memory: Memory<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>, - stack: Vec>, +impl Hash for IgnoreAllocId<'_, LocalState<'tcx>> { + fn hash(&self, state: &mut H) { + // See `PartialEq` impl above for description of ignored fields. 
+ let super::LocalState { + value, + layout: _, + } = self.0; + + match value { + LocalValue::Live(op) => op.erase_alloc_id().hash(state), + LocalValue::Dead | LocalValue::Uninitialized => value.hash(state), + } + } } -impl InterpSnapshot<'mir, 'tcx> { - fn new( - memory: &Memory<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>, - stack: &[Frame<'mir, 'tcx>], - ) -> Self { - InterpSnapshot { - memory: memory.clone(), - stack: stack.into(), +impl PartialEq for IgnoreAllocId<'_, Allocation> { + fn eq(&self, IgnoreAllocId(other): &Self) -> bool { + // `Allocation` has private fields, so we cannot exhaustively match on it. If `Allocation` + // is updated, make sure to update the `IgnoreAllocId` impls for `PartialEq` and `Hash` + // as well. + let super::Allocation { + size, + align, + mutability, + extra: (), + .. + } = *self.0; + + let undef_mask = self.0.undef_mask(); + let relocations = self.0.relocations(); + let is_eq = align == other.align + && size == other.size + && mutability == other.mutability + && undef_mask == other.undef_mask() + && relocations.keys().eq(other.relocations().keys()); + + if !is_eq { + return false; } - } - // Used to compare two snapshots - fn snapshot(&'b self) - -> Vec> - { - // Start with the stack, iterate and recursively snapshot - self.stack.iter().map(|frame| frame.snapshot(&self.memory)).collect() + // We now know that the two allocations have identical sizes and undef masks. We rely + // on this below. + debug_assert_eq!(undef_mask, other.undef_mask()); + debug_assert_eq!(size, other.size); + + let len = self.0.len(); + let bytes = self.0.inspect_with_undef_and_ptr_outside_interpreter(0..len); + let other_bytes = other.inspect_with_undef_and_ptr_outside_interpreter(0..len); + + // In the common case, neither allocation will have undefined bytes, and we can compare + // the bytes directly for equality. 
+ if undef_mask.is_range_defined(Size::ZERO, size).is_ok() { + return bytes == other_bytes; + } + + // Otherwise, we only compare the bytes that are defined. + // + // FIXME(ecstaticmorse): This is slow. Add a method to `UndefMask` to iterate over each + // range of defined bytes. + (0..len) + .filter(|&i| undef_mask.get(Size::from_bytes(u64::try_from(i).unwrap()))) + .all(|i| bytes[i] == other_bytes[i]) } } -impl<'mir, 'tcx> Hash for InterpSnapshot<'mir, 'tcx> { +impl Hash for IgnoreAllocId<'_, Allocation> { fn hash(&self, state: &mut H) { - // Implement in terms of hash stable, so that k1 == k2 -> hash(k1) == hash(k2) - let mut hcx = self.memory.tcx.get_stable_hashing_context(); - let mut hasher = StableHasher::new(); - self.hash_stable(&mut hcx, &mut hasher); - hasher.finish::().hash(state) + // `Allocation` has private fields, so we cannot exhaustively match on it. If `Allocation` + // is updated, make sure to update the `IgnoreAllocId` impls for `PartialEq` and `Hash` + // as well. + let super::Allocation { + size, + align, + mutability, + extra: (), + .. + } = *self.0; + + let undef_mask = self.0.undef_mask(); + let relocations = self.0.relocations(); + + size.hash(state); + align.hash(state); + mutability.hash(state); + undef_mask.hash(state); + + // Hash the number of pointers in the `Allocation` as well as the offsets of those pointers + // within the allocation. + relocations.len().hash(state); + relocations.keys().for_each(|pos| pos.hash(state)); + + let len = self.0.len(); + let bytes = self.0.inspect_with_undef_and_ptr_outside_interpreter(0..len); + + // See `PartialEq` impl above. + if undef_mask.is_range_defined(Size::ZERO, size).is_ok() { + bytes.hash(state); + return; + } + + for i in 0..len { + if undef_mask.get(Size::from_bytes(u64::try_from(i).unwrap())) { + bytes[i].hash(state) + } + } + } +} + +/// Returns a `Vec` containing each `AllocId` that appears in a `Local` on the stack. +/// +/// A single `AllocId` may appear more than once. 
+fn stack_roots(stack: &[Frame<'_, '_>]) -> Vec<AllocId> {
+    let operands = stack
+        .iter()
+        .flat_map(|frame| frame.locals.iter())
+        .filter_map(|local| match local.value {
+            LocalValue::Live(op) => Some(op),
+            LocalValue::Dead | LocalValue::Uninitialized => None,
+        });
+
+    let mut stack_roots = vec![];
+    for op in operands {
+        let scalars = match op {
+            Operand::Immediate(Immediate::Scalar(a)) => [a.not_undef().ok(), None],
+            Operand::Immediate(Immediate::ScalarPair(a, b))
+                => [a.not_undef().ok(), b.not_undef().ok()],
+
+            Operand::Indirect(MemPlace { ptr, meta, align: _ }) => [Some(ptr), meta],
+        };
+
+        for scalar in scalars.iter() {
+            if let Some(ptr) = scalar.and_then(|s| s.to_ptr().ok()) {
+                stack_roots.push(ptr.alloc_id);
+            }
+        }
     }
+
+    stack_roots
 }
 
-impl<'mir, 'tcx> Eq for InterpSnapshot<'mir, 'tcx> {}
+/// A mapping from `AllocId`s to the order they were encountered in the DFS.
+#[derive(Default)]
+struct DfsIndexForAllocId {
+    map: FxHashMap<AllocId, u64>,
+    next_idx: u64,
+}
 
-impl<'mir, 'tcx> PartialEq for InterpSnapshot<'mir, 'tcx> {
-    fn eq(&self, other: &Self) -> bool {
-        // FIXME: This looks to be a *ridiculously expensive* comparison operation.
-        // Doesn't this make tons of copies? Either `snapshot` is very badly named,
-        // or it does!
-        self.snapshot() == other.snapshot()
+impl DfsIndexForAllocId {
+    /// Returns a mapping for all `AllocId`s present on a given stack.
+    fn from_stack_roots(stack: &[Frame<'_, '_>]) -> Self {
+        let mut ret = Self::default();
+
+        for id in stack_roots(stack) {
+            ret.mark_visited(id);
+        }
+
+        ret
+    }
+
+    fn mark_visited(&mut self, id: AllocId) {
+        if self.map.insert(id, self.next_idx).is_none() {
+            self.next_idx += 1;
+        }
+    }
+
+    fn get(&self, id: AllocId) -> Option<u64> {
+        self.map.get(&id).copied()
+    }
 }
 
From 26b914660fae1f1d25a9f17400447fe3e8fcfc81 Mon Sep 17 00:00:00 2001
From: Dylan MacKenzie
Date: Sun, 8 Dec 2019 11:33:58 -0800
Subject: [PATCH 06/10] Rename `is_empty` to `has_been_invoked`

---
 src/librustc_mir/const_eval.rs         | 2 +-
 src/librustc_mir/interpret/snapshot.rs | 5 ++---
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/src/librustc_mir/const_eval.rs b/src/librustc_mir/const_eval.rs
index 3cacdbfe34dd1..981c442d41bcf 100644
--- a/src/librustc_mir/const_eval.rs
+++ b/src/librustc_mir/const_eval.rs
@@ -517,7 +517,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
         // Emit a warning if this is the first time we have triggered the loop detector. This means
         // we have been executing for quite some time.
         let span = ecx.frame().span;
-        if ecx.machine.loop_detector.is_empty() {
+        if !ecx.machine.loop_detector.has_been_invoked() {
             // FIXME(#49980): make this warning a lint
             ecx.tcx.sess.span_warn(span,
                 "Constant evaluating a complex constant, this might take some time");
diff --git a/src/librustc_mir/interpret/snapshot.rs b/src/librustc_mir/interpret/snapshot.rs
index 94456e722d08c..69fd91501f14d 100644
--- a/src/librustc_mir/interpret/snapshot.rs
+++ b/src/librustc_mir/interpret/snapshot.rs
@@ -111,9 +111,8 @@ impl<'mir, 'tcx> InfiniteLoopDetector<'mir, 'tcx> {
         throw_exhaust!(InfiniteLoop)
     }
 
-    /// Returns `true` if the `InfiniteLoopDetector` has not yet been invoked.
- pub fn is_empty(&self) -> bool { - self.hashes.is_empty() + pub fn has_been_invoked(&self) -> bool { + !self.hashes.is_empty() } } From 345a8d51b789843257c088932ea6c233d60126b2 Mon Sep 17 00:00:00 2001 From: Dylan MacKenzie Date: Sun, 8 Dec 2019 11:37:12 -0800 Subject: [PATCH 07/10] Move `CtfeMemory` alias next to `CompileTimeInterpreter` --- src/librustc_mir/const_eval.rs | 4 +++- src/librustc_mir/interpret/snapshot.rs | 4 +--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/librustc_mir/const_eval.rs b/src/librustc_mir/const_eval.rs index 981c442d41bcf..a72a9288ca1bf 100644 --- a/src/librustc_mir/const_eval.rs +++ b/src/librustc_mir/const_eval.rs @@ -305,6 +305,8 @@ impl interpret::MayLeak for ! { } } +pub type CtfeMemory<'mir, 'tcx> = Memory<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>; + impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, 'tcx> { type MemoryKinds = !; type PointerTag = (); @@ -445,7 +447,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, } fn ptr_to_int( - _mem: &Memory<'mir, 'tcx, Self>, + _mem: &CtfeMemory<'mir, 'tcx>, _ptr: Pointer, ) -> InterpResult<'tcx, u64> { Err( diff --git a/src/librustc_mir/interpret/snapshot.rs b/src/librustc_mir/interpret/snapshot.rs index 69fd91501f14d..d1807adcd4b36 100644 --- a/src/librustc_mir/interpret/snapshot.rs +++ b/src/librustc_mir/interpret/snapshot.rs @@ -63,11 +63,9 @@ use rustc::ty::layout::Size; use rustc_data_structures::fx::{FxHasher, FxHashMap, FxHashSet}; use super::{memory, AllocId, Frame, Immediate, MemPlace, Operand}; -use crate::const_eval::CompileTimeInterpreter; +use crate::const_eval::{CompileTimeInterpreter, CtfeMemory}; use crate::interpret::{LocalState, LocalValue}; -type CtfeMemory<'mir, 'tcx> = Memory<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>; - #[derive(Default)] pub(crate) struct InfiniteLoopDetector<'mir, 'tcx> { /// The set of all `InterpSnapshot` *hashes* observed by this detector. 
From e017f57b7f61c2483c4eb47c875eb6b1ef17c1b9 Mon Sep 17 00:00:00 2001 From: ecstatic-morse Date: Mon, 9 Dec 2019 12:56:48 -0800 Subject: [PATCH 08/10] Fix typo Co-Authored-By: Oliver Scherer --- src/librustc_mir/interpret/snapshot.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/librustc_mir/interpret/snapshot.rs b/src/librustc_mir/interpret/snapshot.rs index d1807adcd4b36..b938703a2964e 100644 --- a/src/librustc_mir/interpret/snapshot.rs +++ b/src/librustc_mir/interpret/snapshot.rs @@ -1,7 +1,7 @@ //! Capture and compare snapshots of the compile-time interpreter state to detect when a program //! will loop infinitely. //! -//! This lives in `interpret` because it needs access to all sots of private state. However, +//! This lives in `interpret` because it needs access to all sorts of private state. However, //! it is not used by the general miri engine, just by CTFE. //! //! # Comparing interpreter state From afb4faaa636bd775994cff7be8db60b826209bd8 Mon Sep 17 00:00:00 2001 From: Dylan MacKenzie Date: Mon, 9 Dec 2019 13:03:51 -0800 Subject: [PATCH 09/10] Hash discriminant of `LocalValue` --- src/librustc_mir/interpret/snapshot.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/librustc_mir/interpret/snapshot.rs b/src/librustc_mir/interpret/snapshot.rs index b938703a2964e..90e3f901bbf59 100644 --- a/src/librustc_mir/interpret/snapshot.rs +++ b/src/librustc_mir/interpret/snapshot.rs @@ -344,6 +344,7 @@ impl Hash for IgnoreAllocId<'_, LocalState<'tcx>> { layout: _, } = self.0; + std::mem::discriminant(value).hash(state); match value { LocalValue::Live(op) => op.erase_alloc_id().hash(state), LocalValue::Dead | LocalValue::Uninitialized => value.hash(state), From ba22627cb0db6fb5ba7ec505c8c3054e814b94ec Mon Sep 17 00:00:00 2001 From: Dylan MacKenzie Date: Mon, 9 Dec 2019 13:31:17 -0800 Subject: [PATCH 10/10] Ensure promotion does not prevent generation of new `AllocId`s --- src/librustc_mir/interpret/snapshot.rs | 10 +++------- 
src/test/ui/consts/const-eval/issue-52475.rs | 12 ++++++++---- src/test/ui/consts/const-eval/issue-52475.stderr | 14 ++++++++------ 3 files changed, 19 insertions(+), 17 deletions(-) diff --git a/src/librustc_mir/interpret/snapshot.rs b/src/librustc_mir/interpret/snapshot.rs index 90e3f901bbf59..56011c8c7b9f8 100644 --- a/src/librustc_mir/interpret/snapshot.rs +++ b/src/librustc_mir/interpret/snapshot.rs @@ -18,14 +18,10 @@ //! //! ```rust,ignore(const-loop) //! const fn inf_loop() { -//! // Function call to prevent promotion. -//! const fn zeros() -> [isize; 4] { -//! [0; 4] -//! } -//! //! loop { -//! let arr = &zeros(); -//! if false { +//! let arr = [0isize; 4]; +//! let parr = &arr; +//! if parr[0] != 0 { //! break; //! } //! } diff --git a/src/test/ui/consts/const-eval/issue-52475.rs b/src/test/ui/consts/const-eval/issue-52475.rs index 3788167f44902..2f5c61f57b0c8 100644 --- a/src/test/ui/consts/const-eval/issue-52475.rs +++ b/src/test/ui/consts/const-eval/issue-52475.rs @@ -1,12 +1,16 @@ fn main() { let _ = [(); { //~^ WARNING Constant evaluating a complex constant, this might take some time - let mut x = &0; let mut n = 0; while n < 5 { - //~^ ERROR `while` is not allowed in a `const` - n = (n + 1) % 5; //~ ERROR evaluation of constant value failed - x = &0; // Materialize a new AllocId + //~^ ERROR `while` is not allowed in a `const` + //~| ERROR evaluation of constant value failed + n = (n + 1) % 5; + + // Materialize a new AllocId. We need to make sure that `px` doesn't get promoted out. 
+ let x = [0isize; 4]; + let px = &x; + n += px[0]; } 0 }]; diff --git a/src/test/ui/consts/const-eval/issue-52475.stderr b/src/test/ui/consts/const-eval/issue-52475.stderr index b8267f495de94..9a602a3fbaa16 100644 --- a/src/test/ui/consts/const-eval/issue-52475.stderr +++ b/src/test/ui/consts/const-eval/issue-52475.stderr @@ -1,10 +1,12 @@ error[E0744]: `while` is not allowed in a `const` - --> $DIR/issue-52475.rs:6:9 + --> $DIR/issue-52475.rs:5:9 | LL | / while n < 5 { LL | | +LL | | LL | | n = (n + 1) % 5; -LL | | x = &0; // Materialize a new AllocId +... | +LL | | n += px[0]; LL | | } | |_________^ @@ -14,18 +16,18 @@ warning: Constant evaluating a complex constant, this might take some time LL | let _ = [(); { | __________________^ LL | | -LL | | let mut x = &0; LL | | let mut n = 0; +LL | | while n < 5 { ... | LL | | 0 LL | | }]; | |_____^ error[E0080]: evaluation of constant value failed - --> $DIR/issue-52475.rs:8:17 + --> $DIR/issue-52475.rs:5:19 | -LL | n = (n + 1) % 5; - | ^^^^^^^^^^^ duplicate interpreter state observed here, const evaluation will never terminate +LL | while n < 5 { + | ^ duplicate interpreter state observed here, const evaluation will never terminate error: aborting due to 2 previous errors