diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index a165fa23f30ac..9ae50d0df80d7 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -892,8 +892,11 @@ impl<'tcx, 'a, Tag: Provenance, Extra> AllocRefMut<'a, 'tcx, Tag, Extra> {
     }
 
     /// Mark the entire referenced range as uninitalized
-    pub fn write_uninit(&mut self) {
-        self.alloc.mark_init(self.range, false);
+    pub fn write_uninit(&mut self) -> InterpResult<'tcx> {
+        Ok(self
+            .alloc
+            .write_uninit(&self.tcx, self.range)
+            .map_err(|e| e.to_interp_error(self.alloc_id))?)
     }
 }
 
@@ -1053,8 +1056,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             // This also avoids writing to the target bytes so that the backing allocation is never
             // touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
             // operating system this can avoid physically allocating the page.
-            dest_alloc.mark_init(dest_range, false); // `Size` multiplication
-            dest_alloc.mark_relocation_range(relocations);
+            dest_alloc
+                .write_uninit(&tcx, dest_range)
+                .map_err(|e| e.to_interp_error(dest_alloc_id))?;
+            // We can forget about the relocations, this is all not initialized anyway.
             return Ok(());
         }
 
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index 31da4522a1fda..e4660fe090ce5 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -823,7 +823,7 @@ where
             // Zero-sized access
             return Ok(());
         };
-        alloc.write_uninit();
+        alloc.write_uninit()?;
         Ok(())
     }
 
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs
index 438f356f072c6..7723f7a64f769 100644
--- a/compiler/rustc_middle/src/mir/interpret/allocation.rs
+++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs
@@ -269,7 +269,7 @@ impl Allocation {
     /// `get_bytes_with_uninit_and_ptr` instead,
     ///
    /// This function also guarantees that the resulting pointer will remain stable
-    /// even when new allocations are pushed to the `HashMap`. `copy_repeatedly` relies
+    /// even when new allocations are pushed to the `HashMap`. `mem_copy_repeatedly` relies
     /// on that.
     ///
     /// It is the caller's responsibility to check bounds and alignment beforehand.
@@ -429,8 +429,7 @@ impl Allocation {
         let val = match val {
             ScalarMaybeUninit::Scalar(scalar) => scalar,
             ScalarMaybeUninit::Uninit => {
-                self.mark_init(range, false);
-                return Ok(());
+                return self.write_uninit(cx, range);
             }
         };
 
@@ -455,6 +454,13 @@ impl Allocation {
 
         Ok(())
     }
+
+    /// Write "uninit" to the given memory range.
+    pub fn write_uninit(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
+        self.mark_init(range, false);
+        self.clear_relocations(cx, range)?;
+        return Ok(());
+    }
 }
 
 /// Relocations.
@@ -561,8 +567,10 @@ impl Deref for Relocations {
 }
 
 /// A partial, owned list of relocations to transfer into another allocation.
+///
+/// Offsets are already adjusted to the destination allocation.
 pub struct AllocationRelocations {
-    relative_relocations: Vec<(Size, Tag)>,
+    dest_relocations: Vec<(Size, Tag)>,
 }
 
 impl Allocation {
@@ -575,12 +583,17 @@ impl Allocation {
     ) -> AllocationRelocations {
         let relocations = self.get_relocations(cx, src);
         if relocations.is_empty() {
-            return AllocationRelocations { relative_relocations: Vec::new() };
+            return AllocationRelocations { dest_relocations: Vec::new() };
         }
 
         let size = src.size;
         let mut new_relocations = Vec::with_capacity(relocations.len() * (count as usize));
 
+        // If `count` is large, this is rather wasteful -- we are allocating a big array here, which
+        // is mostly filled with redundant information since it's just N copies of the same `Tag`s
+        // at slightly adjusted offsets. The reason we do this is so that in `mark_relocation_range`
+        // we can use `insert_presorted`. That wouldn't work with an `Iterator` that just produces
+        // the right sequence of relocations for all N copies.
         for i in 0..count {
             new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
                 // compute offset for current repetition
@@ -593,14 +606,17 @@ impl Allocation {
             }));
         }
 
-        AllocationRelocations { relative_relocations: new_relocations }
+        AllocationRelocations { dest_relocations: new_relocations }
     }
 
     /// Applies a relocation copy.
     /// The affected range, as defined in the parameters to `prepare_relocation_copy` is expected
     /// to be clear of relocations.
+    ///
+    /// This is dangerous to use as it can violate internal `Allocation` invariants!
+    /// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
     pub fn mark_relocation_range(&mut self, relocations: AllocationRelocations) {
-        self.relocations.0.insert_presorted(relocations.relative_relocations);
+        self.relocations.0.insert_presorted(relocations.dest_relocations);
     }
 }
 
@@ -1056,7 +1072,7 @@ impl Allocation {
         })
     }
 
-    pub fn mark_init(&mut self, range: AllocRange, is_init: bool) {
+    fn mark_init(&mut self, range: AllocRange, is_init: bool) {
         if range.size.bytes() == 0 {
             return;
         }
@@ -1118,6 +1134,9 @@ impl Allocation {
     }
 
     /// Applies multiple instances of the run-length encoding to the initialization mask.
+    ///
+    /// This is dangerous to use as it can violate internal `Allocation` invariants!
+    /// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
     pub fn mark_compressed_init_range(
         &mut self,
         defined: &InitMaskCompressed,
diff --git a/compiler/rustc_middle/src/mir/pretty.rs b/compiler/rustc_middle/src/mir/pretty.rs
index 69dac03883940..b7f695da544f1 100644
--- a/compiler/rustc_middle/src/mir/pretty.rs
+++ b/compiler/rustc_middle/src/mir/pretty.rs
@@ -851,6 +851,7 @@ fn write_allocation_bytes<'tcx, Tag: Provenance, Extra>(
         }
         if let Some(&tag) = alloc.relocations().get(&i) {
             // Memory with a relocation must be defined
+            assert!(alloc.init_mask().is_range_initialized(i, i + ptr_size).is_ok());
             let j = i.bytes_usize();
             let offset = alloc
                 .inspect_with_uninit_and_ptr_outside_interpreter(j..j + ptr_size.bytes_usize());
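
The sketch below is not part of the patch; it is a minimal toy model of the invariant the new `Allocation::write_uninit` helper enforces: de-initializing a byte range must also drop any relocation overlapping that range, so uninitialized bytes never carry pointer provenance. `ToyAllocation`, its fields, and `main` are hypothetical stand-ins built only on the standard library, not rustc_middle's actual API.

// Toy model (illustrative only; not the rustc_middle API) of the invariant the
// patch enforces: marking bytes uninitialized must also remove relocations
// (pointer provenance) that overlap the de-initialized range.
use std::collections::BTreeMap;

struct ToyAllocation {
    /// One flag per byte: `true` means the byte is initialized.
    init_mask: Vec<bool>,
    /// Start offset of a pointer-sized value -> its provenance tag.
    relocations: BTreeMap<usize, u64>,
    ptr_size: usize,
}

impl ToyAllocation {
    /// Mark `start..start + len` as uninitialized and drop overlapping relocations.
    fn write_uninit(&mut self, start: usize, len: usize) {
        for flag in &mut self.init_mask[start..start + len] {
            *flag = false;
        }
        // A relocation at `offset` covers `offset..offset + ptr_size`, so any
        // relocation starting within `start - (ptr_size - 1) .. start + len`
        // overlaps the cleared range and must be removed as well.
        let lo = start.saturating_sub(self.ptr_size - 1);
        let doomed: Vec<usize> =
            self.relocations.range(lo..start + len).map(|(&offset, _)| offset).collect();
        for offset in doomed {
            self.relocations.remove(&offset);
        }
    }
}

fn main() {
    let mut alloc = ToyAllocation {
        init_mask: vec![true; 16],
        relocations: BTreeMap::from([(0, 1), (8, 2)]),
        ptr_size: 8,
    };
    // De-initializing bytes 8..16 must also drop the pointer stored at offset 8;
    // the pointer at offset 0 (covering bytes 0..8) is untouched and keeps its tag.
    alloc.write_uninit(8, 8);
    assert!(alloc.init_mask[8..16].iter().all(|&b| !b));
    assert!(!alloc.relocations.contains_key(&8));
    assert!(alloc.relocations.contains_key(&0));
    println!("uninit bytes carry no provenance");
}

In the patch itself the pairing is done inside `Allocation::write_uninit` (`mark_init(range, false)` followed by `clear_relocations(cx, range)`), so callers such as `AllocRefMut::write_uninit` and the all-uninit fast path in `mem_copy_repeatedly` can no longer clear the init mask while leaving stale relocations behind; the new assertion in `write_allocation_bytes` checks that any byte with a relocation is initialized.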