From 15ec8f7c521461bfa64729d859e64f5415ddb4fd Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Sat, 2 Nov 2019 17:46:11 +0100 Subject: [PATCH] rename Memory::get methods to get_raw to indicate their unchecked nature --- .../interpret/intrinsics/caller_location.rs | 3 +- src/librustc_mir/interpret/memory.rs | 32 ++++++++++-------- src/librustc_mir/interpret/operand.rs | 6 ++-- src/librustc_mir/interpret/place.rs | 6 ++-- src/librustc_mir/interpret/snapshot.rs | 2 +- src/librustc_mir/interpret/terminator.rs | 2 +- src/librustc_mir/interpret/traits.rs | 33 ++++++++----------- src/librustc_mir/interpret/validity.rs | 5 ++- 8 files changed, 46 insertions(+), 43 deletions(-) diff --git a/src/librustc_mir/interpret/intrinsics/caller_location.rs b/src/librustc_mir/interpret/intrinsics/caller_location.rs index 249d2f9ff536a..88bfcd63129fa 100644 --- a/src/librustc_mir/interpret/intrinsics/caller_location.rs +++ b/src/librustc_mir/interpret/intrinsics/caller_location.rs @@ -37,7 +37,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let col_out = self.force_ptr(self.mplace_field(location, 2)?.ptr)?; let layout = &self.tcx.data_layout; - let alloc = self.memory.get_mut(file_ptr_out.alloc_id)?; + // We just allocated this, so we can skip the bounds checks. 
+ let alloc = self.memory.get_raw_mut(file_ptr_out.alloc_id)?; alloc.write_scalar(layout, file_ptr_out, file.into(), ptr_size)?; alloc.write_scalar(layout, file_len_out, file_len.into(), ptr_size)?; diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs index 47b918248330a..6d4ef93aafafa 100644 --- a/src/librustc_mir/interpret/memory.rs +++ b/src/librustc_mir/interpret/memory.rs @@ -210,7 +210,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { let new_ptr = self.allocate(new_size, new_align, kind); let old_size = match old_size_and_align { Some((size, _align)) => size, - None => self.get(ptr.alloc_id)?.size, + None => self.get_raw(ptr.alloc_id)?.size, }; self.copy( ptr, @@ -480,7 +480,9 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { ).0) } - pub fn get( + /// Gives raw access to the `Allocation`, without bounds or alignment checks. + /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCtx` instead! + pub fn get_raw( &self, id: AllocId, ) -> InterpResult<'tcx, &Allocation> { @@ -513,7 +515,9 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { } } - pub fn get_mut( + /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks. + /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCtx` instead! 
+ pub fn get_raw_mut( &mut self, id: AllocId, ) -> InterpResult<'tcx, &mut Allocation> { @@ -555,7 +559,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { liveness: AllocCheck, ) -> InterpResult<'static, (Size, Align)> { // # Regular allocations - // Don't use `self.get` here as that will + // Don't use `self.get_raw` here as that will // a) cause cycles in case `id` refers to a static // b) duplicate a static's allocation in miri if let Some((_, alloc)) = self.alloc_map.get(id) { @@ -627,7 +631,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { } pub fn mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> { - self.get_mut(id)?.mutability = Mutability::Immutable; + self.get_raw_mut(id)?.mutability = Mutability::Immutable; Ok(()) } @@ -776,7 +780,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { Some(ptr) => ptr, None => return Ok(&[]), // zero-sized access }; - self.get(ptr.alloc_id)?.get_bytes(self, ptr, size) + self.get_raw(ptr.alloc_id)?.get_bytes(self, ptr, size) } /// Reads a 0-terminated sequence of bytes from memory. Returns them as a slice. @@ -784,7 +788,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { /// Performs appropriate bounds checks. pub fn read_c_str(&self, ptr: Scalar) -> InterpResult<'tcx, &[u8]> { let ptr = self.force_ptr(ptr)?; // We need to read at least 1 byte, so we *need* a ptr. - self.get(ptr.alloc_id)?.read_c_str(self, ptr) + self.get_raw(ptr.alloc_id)?.read_c_str(self, ptr) } /// Writes the given stream of bytes into memory. @@ -804,7 +808,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { None => return Ok(()), // zero-sized access }; let tcx = self.tcx.tcx; - self.get_mut(ptr.alloc_id)?.write_bytes(&tcx, ptr, src) + self.get_raw_mut(ptr.alloc_id)?.write_bytes(&tcx, ptr, src) } /// Expects the caller to have checked bounds and alignment. 
@@ -832,16 +836,16 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { // since we don't want to keep any relocations at the target. // (`get_bytes_with_undef_and_ptr` below checks that there are no // relocations overlapping the edges; those would not be handled correctly). - let relocations = self.get(src.alloc_id)? + let relocations = self.get_raw(src.alloc_id)? .prepare_relocation_copy(self, src, size, dest, length); let tcx = self.tcx.tcx; // This checks relocation edges on the src. - let src_bytes = self.get(src.alloc_id)? + let src_bytes = self.get_raw(src.alloc_id)? .get_bytes_with_undef_and_ptr(&tcx, src, size)? .as_ptr(); - let dest_bytes = self.get_mut(dest.alloc_id)? + let dest_bytes = self.get_raw_mut(dest.alloc_id)? .get_bytes_mut(&tcx, dest, size * length)? .as_mut_ptr(); @@ -880,7 +884,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { // copy definedness to the destination self.copy_undef_mask(src, dest, size, length)?; // copy the relocations to the destination - self.get_mut(dest.alloc_id)?.mark_relocation_range(relocations); + self.get_raw_mut(dest.alloc_id)?.mark_relocation_range(relocations); Ok(()) } @@ -899,11 +903,11 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { // The bits have to be saved locally before writing to dest in case src and dest overlap. 
assert_eq!(size.bytes() as usize as u64, size.bytes()); - let src_alloc = self.get(src.alloc_id)?; + let src_alloc = self.get_raw(src.alloc_id)?; let compressed = src_alloc.compress_undef_range(src, size); // now fill in all the data - let dest_allocation = self.get_mut(dest.alloc_id)?; + let dest_allocation = self.get_raw_mut(dest.alloc_id)?; dest_allocation.mark_compressed_undef_range(&compressed, dest, size, repeat); Ok(()) diff --git a/src/librustc_mir/interpret/operand.rs b/src/librustc_mir/interpret/operand.rs index d80ad3848d20a..79762b87b0a85 100644 --- a/src/librustc_mir/interpret/operand.rs +++ b/src/librustc_mir/interpret/operand.rs @@ -248,7 +248,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { match mplace.layout.abi { layout::Abi::Scalar(..) => { let scalar = self.memory - .get(ptr.alloc_id)? + .get_raw(ptr.alloc_id)? .read_scalar(self, ptr, mplace.layout.size)?; Ok(Some(ImmTy { imm: scalar.into(), @@ -266,10 +266,10 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { assert!(b_offset.bytes() > 0); // we later use the offset to tell apart the fields let b_ptr = ptr.offset(b_offset, self)?; let a_val = self.memory - .get(ptr.alloc_id)? + .get_raw(ptr.alloc_id)? .read_scalar(self, a_ptr, a_size)?; let b_val = self.memory - .get(ptr.alloc_id)? + .get_raw(ptr.alloc_id)? .read_scalar(self, b_ptr, b_size)?; Ok(Some(ImmTy { imm: Immediate::ScalarPair(a_val, b_val), diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs index 36e58d356d100..04effc2ea2de6 100644 --- a/src/librustc_mir/interpret/place.rs +++ b/src/librustc_mir/interpret/place.rs @@ -808,7 +808,7 @@ where _ => bug!("write_immediate_to_mplace: invalid Scalar layout: {:#?}", dest.layout) } - self.memory.get_mut(ptr.alloc_id)?.write_scalar( + self.memory.get_raw_mut(ptr.alloc_id)?.write_scalar( tcx, ptr, scalar, dest.layout.size ) } @@ -830,10 +830,10 @@ where // fields do not match the `ScalarPair` components. 
self.memory - .get_mut(ptr.alloc_id)? + .get_raw_mut(ptr.alloc_id)? .write_scalar(tcx, ptr, a_val, a_size)?; self.memory - .get_mut(b_ptr.alloc_id)? + .get_raw_mut(b_ptr.alloc_id)? .write_scalar(tcx, b_ptr, b_val, b_size) } } diff --git a/src/librustc_mir/interpret/snapshot.rs b/src/librustc_mir/interpret/snapshot.rs index 7ce151e087a6b..1df98f079cc10 100644 --- a/src/librustc_mir/interpret/snapshot.rs +++ b/src/librustc_mir/interpret/snapshot.rs @@ -392,7 +392,7 @@ impl<'b, 'mir, 'tcx> SnapshotContext<'b> for Memory<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>> { fn resolve(&'b self, id: &AllocId) -> Option<&'b Allocation> { - self.get(*id).ok() + self.get_raw(*id).ok() } } diff --git a/src/librustc_mir/interpret/terminator.rs b/src/librustc_mir/interpret/terminator.rs index d90f2058aa74f..e10bb85d52df8 100644 --- a/src/librustc_mir/interpret/terminator.rs +++ b/src/librustc_mir/interpret/terminator.rs @@ -445,7 +445,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { ptr_size, self.tcx.data_layout.pointer_align.abi, )?.expect("cannot be a ZST"); - let fn_ptr = self.memory.get(vtable_slot.alloc_id)? + let fn_ptr = self.memory.get_raw(vtable_slot.alloc_id)? .read_ptr_sized(self, vtable_slot)?.not_undef()?; let drop_fn = self.memory.get_fn(fn_ptr)?; diff --git a/src/librustc_mir/interpret/traits.rs b/src/librustc_mir/interpret/traits.rs index 10b767ebba191..c15425321ec01 100644 --- a/src/librustc_mir/interpret/traits.rs +++ b/src/librustc_mir/interpret/traits.rs @@ -63,35 +63,30 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let drop = Instance::resolve_drop_in_place(*tcx, ty); let drop = self.memory.create_fn_alloc(FnVal::Instance(drop)); - // no need to do any alignment checks on the memory accesses below, because we know the + // No need to do any alignment checks on the memory accesses below, because we know the // allocation is correctly aligned as we created it above. 
Also we're only offsetting by multiples of `ptr_align`, which means that it will stay aligned to `ptr_align`. - self.memory - .get_mut(vtable.alloc_id)? - .write_ptr_sized(tcx, vtable, Scalar::Ptr(drop).into())?; - - let size_ptr = vtable.offset(ptr_size, self)?; - self.memory - .get_mut(size_ptr.alloc_id)? - .write_ptr_sized(tcx, size_ptr, Scalar::from_uint(size, ptr_size).into())?; - let align_ptr = vtable.offset(ptr_size * 2, self)?; - self.memory - .get_mut(align_ptr.alloc_id)? - .write_ptr_sized(tcx, align_ptr, Scalar::from_uint(align, ptr_size).into())?; + let vtable_alloc = self.memory.get_raw_mut(vtable.alloc_id)?; + vtable_alloc.write_ptr_sized(tcx, vtable, Scalar::Ptr(drop).into())?; + + let size_ptr = vtable.offset(ptr_size, tcx)?; + vtable_alloc.write_ptr_sized(tcx, size_ptr, Scalar::from_uint(size, ptr_size).into())?; + let align_ptr = vtable.offset(ptr_size * 2, tcx)?; + vtable_alloc.write_ptr_sized(tcx, align_ptr, Scalar::from_uint(align, ptr_size).into())?; for (i, method) in methods.iter().enumerate() { if let Some((def_id, substs)) = *method { // resolve for vtable: insert shims where needed let instance = ty::Instance::resolve_for_vtable( - *self.tcx, + *tcx, self.param_env, def_id, substs, ).ok_or_else(|| err_inval!(TooGeneric))?; let fn_ptr = self.memory.create_fn_alloc(FnVal::Instance(instance)); + // We cannot use `vtable_alloc` as we are creating fn ptrs in this loop. + let method_ptr = vtable.offset(ptr_size * (3 + i as u64), tcx)?; + self.memory.get_raw_mut(vtable.alloc_id)? .write_ptr_sized(tcx, method_ptr, Scalar::Ptr(fn_ptr).into())?; } } @@ -114,7 +109,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { self.tcx.data_layout.pointer_align.abi, )?.expect("cannot be a ZST"); let drop_fn = self.memory - .get(vtable.alloc_id)? + .get_raw(vtable.alloc_id)? .read_ptr_sized(self, vtable)?
.not_undef()?; // We *need* an instance here, no other kind of function value, to be able @@ -140,7 +135,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { 3*pointer_size, self.tcx.data_layout.pointer_align.abi, )?.expect("cannot be a ZST"); - let alloc = self.memory.get(vtable.alloc_id)?; + let alloc = self.memory.get_raw(vtable.alloc_id)?; let size = alloc.read_ptr_sized( self, vtable.offset(pointer_size, self)? diff --git a/src/librustc_mir/interpret/validity.rs b/src/librustc_mir/interpret/validity.rs index 8cb2f6c3462cc..dedccbaa3d7f4 100644 --- a/src/librustc_mir/interpret/validity.rs +++ b/src/librustc_mir/interpret/validity.rs @@ -586,6 +586,8 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M> _ => false, } } => { + // Optimized handling for arrays of integer/float type. + // bailing out for zsts is ok, since the array element type can only be int/float if op.layout.is_zst() { return Ok(()); @@ -605,6 +607,7 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M> // Size is not 0, get a pointer. let ptr = self.ecx.force_ptr(mplace.ptr)?; + // This is the optimization: we just check the entire range at once. // NOTE: Keep this in sync with the handling of integer and float // types above, in `visit_primitive`. // In run-time mode, we accept pointers in here. This is actually more @@ -614,7 +617,7 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M> // to reject those pointers, we just do not have the machinery to // talk about parts of a pointer. // We also accept undef, for consistency with the slow path. - match self.ecx.memory.get(ptr.alloc_id)?.check_bytes( + match self.ecx.memory.get_raw(ptr.alloc_id)?.check_bytes( self.ecx, ptr, size,