Add check_mplace_ptr convenience method; provide ptr-normalization methods for mplace and op to avoid repeated int-to-ptr casting during validation.

Also change Memory::copy to work on `Pointer` instead of `Scalar`.
Also rename some methods from `to_*` to `assert_*`, making explicit that they panic if their precondition is not met.
RalfJung committed Jul 6, 2019
1 parent eed52de commit 9007296
Showing 8 changed files with 108 additions and 66 deletions.
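The `to_*` → `assert_*` rename encodes a naming convention: methods that panic when their precondition is violated are spelled `assert_*`, so call sites read as explicit caller promises. Below is a minimal, self-contained sketch of the convention; the `Operand` type here is an invented stand-in, not the interpreter's real `Operand`.

```rust
#[derive(Debug)]
enum Operand {
    Immediate(u64),
    Indirect(&'static str), // stand-in for a real `MemPlace`
}

impl Operand {
    /// `assert_*` naming: the caller guarantees this operand is indirect;
    /// anything else is an interpreter bug, so we panic loudly.
    fn assert_mem_place(self) -> &'static str {
        match self {
            Operand::Indirect(mplace) => mplace,
            _ => panic!("assert_mem_place: expected Operand::Indirect, got {:?}", self),
        }
    }
}

fn main() {
    let op = Operand::Indirect("place");
    assert_eq!(op.assert_mem_place(), "place");
}
```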
2 changes: 1 addition & 1 deletion src/librustc_mir/const_eval.rs
@@ -109,7 +109,7 @@ fn op_to_const<'tcx>(
// `Immediate` is when we are called from `const_field`, and that `Immediate`
// comes from a constant, so it can have `Undef`, because the indirect
// memory that was read had undefined bytes.
let mplace = op.to_mem_place();
let mplace = op.assert_mem_place();
let ptr = mplace.ptr.to_ptr().unwrap();
let alloc = ecx.tcx.alloc_map.lock().unwrap_memory(ptr.alloc_id);
ConstValue::ByRef { offset: ptr.offset, align: mplace.align, alloc }
36 changes: 12 additions & 24 deletions src/librustc_mir/interpret/memory.rs
@@ -214,10 +214,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
None => Size::from_bytes(self.get(ptr.alloc_id)?.bytes.len() as u64),
};
self.copy(
ptr.into(),
Align::from_bytes(1).unwrap(), // old_align anyway gets checked below by `deallocate`
new_ptr.into(),
new_align,
ptr,
new_ptr,
old_size.min(new_size),
/*nonoverlapping*/ true,
)?;
@@ -310,6 +308,9 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
/// `Pointer` they need. And even if you already have a `Pointer`, call this method
/// to make sure it is sufficiently aligned and not dangling. Not doing that may
/// cause ICEs.
///
/// Most of the time you should use `check_mplace_access`, but when you just have a pointer,
/// this method is still appropriate.
pub fn check_ptr_access(
&self,
sptr: Scalar<M::PointerTag>,
@@ -751,39 +752,26 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
self.get(ptr.alloc_id)?.read_c_str(self, ptr)
}

/// Performs appropriate bounds checks.
/// Expects the caller to have checked bounds and alignment.
pub fn copy(
&mut self,
src: Scalar<M::PointerTag>,
src_align: Align,
dest: Scalar<M::PointerTag>,
dest_align: Align,
src: Pointer<M::PointerTag>,
dest: Pointer<M::PointerTag>,
size: Size,
nonoverlapping: bool,
) -> InterpResult<'tcx> {
self.copy_repeatedly(src, src_align, dest, dest_align, size, 1, nonoverlapping)
self.copy_repeatedly(src, dest, size, 1, nonoverlapping)
}

/// Performs appropriate bounds checks.
/// Expects the caller to have checked bounds and alignment.
pub fn copy_repeatedly(
&mut self,
src: Scalar<M::PointerTag>,
src_align: Align,
dest: Scalar<M::PointerTag>,
dest_align: Align,
src: Pointer<M::PointerTag>,
dest: Pointer<M::PointerTag>,
size: Size,
length: u64,
nonoverlapping: bool,
) -> InterpResult<'tcx> {
// We need to check *both* before early-aborting due to the size being 0.
let (src, dest) = match (self.check_ptr_access(src, size, src_align)?,
self.check_ptr_access(dest, size * length, dest_align)?)
{
(Some(src), Some(dest)) => (src, dest),
// One of the two sizes is 0.
_ => return Ok(()),
};

// first copy the relocations to a temporary buffer, because
// `get_bytes_mut` will clear the relocations, which is correct,
// since we don't want to keep any relocations at the target.
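The effect of the `Memory::copy` signature change is a cleaner division of labor: callers validate bounds and alignment (and short-circuit zero-sized accesses), and `copy` receives already-checked `Pointer`s. A hedged sketch of that flow, using invented stand-in types rather than the rustc API:

```rust
#[derive(Clone, Copy, Debug)]
struct Pointer { alloc_id: usize, offset: usize }

struct Memory { allocs: Vec<Vec<u8>> }

impl Memory {
    /// Stand-in for `check_ptr_access`: verify bounds up front and
    /// return `None` for zero-sized accesses (alignment elided here).
    fn check_ptr_access(&self, ptr: Pointer, size: usize) -> Result<Option<Pointer>, String> {
        if size == 0 { return Ok(None); }
        let alloc = self.allocs.get(ptr.alloc_id).ok_or("dangling pointer")?;
        if ptr.offset + size > alloc.len() { return Err("out of bounds".into()); }
        Ok(Some(ptr))
    }

    /// Like the post-commit `copy`: expects the caller to have checked
    /// bounds and alignment already, so it takes `Pointer`, not `Scalar`.
    fn copy(&mut self, src: Pointer, dest: Pointer, size: usize) {
        let bytes = self.allocs[src.alloc_id][src.offset..src.offset + size].to_vec();
        self.allocs[dest.alloc_id][dest.offset..dest.offset + size].copy_from_slice(&bytes);
    }
}

fn main() -> Result<(), String> {
    let mut mem = Memory { allocs: vec![vec![1, 2, 3, 4], vec![0; 4]] };
    let (src, dest) = (Pointer { alloc_id: 0, offset: 0 }, Pointer { alloc_id: 1, offset: 0 });
    // Check *both* pointers before copying, as the interpreter's callers now do.
    if let (Some(src), Some(dest)) = (mem.check_ptr_access(src, 4)?, mem.check_ptr_access(dest, 4)?) {
        mem.copy(src, dest, 4);
    }
    assert_eq!(mem.allocs[1], vec![1, 2, 3, 4]);
    Ok(())
}
```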
26 changes: 19 additions & 7 deletions src/librustc_mir/interpret/operand.rs
@@ -123,23 +123,23 @@ pub enum Operand<Tag=(), Id=AllocId> {

impl<Tag> Operand<Tag> {
#[inline]
pub fn to_mem_place(self) -> MemPlace<Tag>
pub fn assert_mem_place(self) -> MemPlace<Tag>
where Tag: ::std::fmt::Debug
{
match self {
Operand::Indirect(mplace) => mplace,
_ => bug!("to_mem_place: expected Operand::Indirect, got {:?}", self),
_ => bug!("assert_mem_place: expected Operand::Indirect, got {:?}", self),

}
}

#[inline]
pub fn to_immediate(self) -> Immediate<Tag>
pub fn assert_immediate(self) -> Immediate<Tag>
where Tag: ::std::fmt::Debug
{
match self {
Operand::Immediate(imm) => imm,
_ => bug!("to_immediate: expected Operand::Immediate, got {:?}", self),
_ => bug!("assert_immediate: expected Operand::Immediate, got {:?}", self),

}
}
@@ -214,6 +214,19 @@ pub(super) fn from_known_layout<'tcx>(
}

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Normalize `place.ptr` to a `Pointer` if this is a place and not a ZST.
/// Can be helpful to avoid lots of `force_ptr` calls later, if this place is used a lot.
#[inline]
pub fn normalize_op_ptr(
&self,
op: OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
match op.try_as_mplace() {
Ok(mplace) => Ok(self.normalize_mplace_ptr(mplace)?.into()),
Err(imm) => Ok(imm.into()), // Nothing to normalize
}
}

/// Try reading an immediate in memory; this is interesting particularly for `ScalarPair`.
/// Returns `None` if the layout does not permit loading this as a value.
fn try_read_immediate_from_mplace(
@@ -224,9 +237,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Don't touch unsized
return Ok(None);
}
let (ptr, ptr_align) = mplace.to_scalar_ptr_align();

let ptr = match self.memory.check_ptr_access(ptr, mplace.layout.size, ptr_align)? {
let ptr = match self.check_mplace_access(mplace, None)? {
Some(ptr) => ptr,
None => return Ok(Some(ImmTy { // zero-sized type
imm: Immediate::Scalar(Scalar::zst().into()),
@@ -396,7 +408,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
} else {
// The rest should only occur as mplace, we do not use Immediates for types
// allowing such operations. This matches place_projection forcing an allocation.
let mplace = base.to_mem_place();
let mplace = base.assert_mem_place();
self.mplace_projection(mplace, proj_elem)?.into()
}
})
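The point of `normalize_op_ptr` (and of `normalize_mplace_ptr` below in `place.rs`) is to pay the int-to-ptr cost of `force_ptr` once, after which every access can use a cheap `assert_ptr`. A minimal sketch of the idea, with invented stand-in types:

```rust
#[derive(Clone, Copy, Debug)]
enum Scalar {
    Bits(u64),                            // raw machine integer
    Ptr { alloc_id: usize, offset: u64 }, // resolved pointer
}

impl Scalar {
    /// Cheap once normalized: panics if the scalar is still an integer.
    fn assert_ptr(self) -> (usize, u64) {
        match self {
            Scalar::Ptr { alloc_id, offset } => (alloc_id, offset),
            _ => panic!("assert_ptr: expected Scalar::Ptr, got {:?}", self),
        }
    }
}

/// Stand-in for `force_ptr`; a real implementation would consult the
/// machine's int-to-ptr mapping instead of inventing allocation 0.
fn force_ptr(s: Scalar) -> Scalar {
    match s {
        Scalar::Bits(addr) => Scalar::Ptr { alloc_id: 0, offset: addr },
        ptr => ptr,
    }
}

fn main() {
    let mut place_ptr = Scalar::Bits(16);
    place_ptr = force_ptr(place_ptr);   // normalize once...
    for _ in 0..3 {
        let _ = place_ptr.assert_ptr(); // ...then every later access is cheap
    }
}
```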
72 changes: 55 additions & 17 deletions src/librustc_mir/interpret/place.rs
@@ -230,6 +230,7 @@ impl<'tcx, Tag> MPlaceTy<'tcx, Tag> {
}
}

// These are defined here because they produce a place.
impl<'tcx, Tag: ::std::fmt::Debug + Copy> OpTy<'tcx, Tag> {
#[inline(always)]
pub fn try_as_mplace(self) -> Result<MPlaceTy<'tcx, Tag>, ImmTy<'tcx, Tag>> {
@@ -240,7 +241,7 @@ impl<'tcx, Tag: ::std::fmt::Debug + Copy> OpTy<'tcx, Tag> {
}

#[inline(always)]
pub fn to_mem_place(self) -> MPlaceTy<'tcx, Tag> {
pub fn assert_mem_place(self) -> MPlaceTy<'tcx, Tag> {
self.try_as_mplace().unwrap()
}
}
@@ -263,29 +264,29 @@ impl<'tcx, Tag: ::std::fmt::Debug> Place<Tag> {
}

#[inline]
pub fn to_mem_place(self) -> MemPlace<Tag> {
pub fn assert_mem_place(self) -> MemPlace<Tag> {
match self {
Place::Ptr(mplace) => mplace,
_ => bug!("to_mem_place: expected Place::Ptr, got {:?}", self),
_ => bug!("assert_mem_place: expected Place::Ptr, got {:?}", self),

}
}

#[inline]
pub fn to_scalar_ptr_align(self) -> (Scalar<Tag>, Align) {
self.to_mem_place().to_scalar_ptr_align()
self.assert_mem_place().to_scalar_ptr_align()
}

#[inline]
pub fn to_ptr(self) -> InterpResult<'tcx, Pointer<Tag>> {
self.to_mem_place().to_ptr()
self.assert_mem_place().to_ptr()
}
}

impl<'tcx, Tag: ::std::fmt::Debug> PlaceTy<'tcx, Tag> {
#[inline]
pub fn to_mem_place(self) -> MPlaceTy<'tcx, Tag> {
MPlaceTy { mplace: self.place.to_mem_place(), layout: self.layout }
pub fn assert_mem_place(self) -> MPlaceTy<'tcx, Tag> {
MPlaceTy { mplace: self.place.assert_mem_place(), layout: self.layout }
}
}

@@ -322,8 +323,8 @@ where
Ok(MPlaceTy { mplace, layout })
}

// Take an operand, representing a pointer, and dereference it to a place -- that
// will always be a MemPlace. Lives in `place.rs` because it creates a place.
/// Take an operand, representing a pointer, and dereference it to a place -- that
/// will always be a MemPlace. Lives in `place.rs` because it creates a place.
pub fn deref_operand(
&self,
src: OpTy<'tcx, M::PointerTag>,
@@ -333,6 +334,38 @@ where
self.ref_to_mplace(val)
}

/// Check if the given place is good for memory access with the given
/// size, falling back to the layout's size if `None` (in the latter case,
/// this must be a statically sized type).
///
/// On success, returns `None` for zero-sized accesses (where nothing else is
/// left to do) and a `Pointer` to use for the actual access otherwise.
#[inline]
pub fn check_mplace_access(
&self,
place: MPlaceTy<'tcx, M::PointerTag>,
size: Option<Size>,
) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
let size = size.unwrap_or_else(|| {
assert!(!place.layout.is_unsized());
assert!(place.meta.is_none());
place.layout.size
});
self.memory.check_ptr_access(place.ptr, size, place.align)
}

/// Normalize `place.ptr` to a `Pointer` if this is not a ZST.
/// Can be helpful to avoid lots of `force_ptr` calls later, if this place is used a lot.
pub fn normalize_mplace_ptr(
&self,
mut place: MPlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
if !place.layout.is_zst() {
place.mplace.ptr = self.force_ptr(place.mplace.ptr)?.into();
}
Ok(place)
}

/// Offset a pointer to project to a field. Unlike `place_field`, this is always
/// possible without allocating, so it can take `&self`. Also return the field's layout.
/// This supports both struct and array fields.
@@ -741,14 +774,12 @@ where
value: Immediate<M::PointerTag>,
dest: MPlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx> {
let (ptr, ptr_align) = dest.to_scalar_ptr_align();
// Note that it is really important that the type here is the right one, and matches the
// type things are read at. In case `src_val` is a `ScalarPair`, we don't do any magic here
// to handle padding properly, which is only correct if we never look at this data with the
// wrong type.
assert!(!dest.layout.is_unsized());

let ptr = match self.memory.check_ptr_access(ptr, dest.layout.size, ptr_align)? {
let ptr = match self.check_mplace_access(dest, None)? {
Some(ptr) => ptr,
None => return Ok(()), // zero-sized access
};
@@ -850,14 +881,21 @@ where
dest.layout.size
});
assert_eq!(src.meta, dest.meta, "Can only copy between equally-sized instances");

let src = self.check_mplace_access(src, Some(size))?;
let dest = self.check_mplace_access(dest, Some(size))?;
let (src_ptr, dest_ptr) = match (src, dest) {
(Some(src_ptr), Some(dest_ptr)) => (src_ptr, dest_ptr),
(None, None) => return Ok(()), // zero-sized copy
_ => bug!("The pointers should both be Some or both None"),
};

self.memory.copy(
src.ptr, src.align,
dest.ptr, dest.align,
src_ptr,
dest_ptr,
size,
/*nonoverlapping*/ true,
)?;

Ok(())
)
}

/// Copies the data from an operand to a place. The layouts may disagree, but they must
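`check_mplace_access` folds three steps that used to be repeated at call sites into one helper: pick the access size (from the layout if none is given, asserting the type is statically sized), short-circuit zero-sized accesses, and hand back a usable pointer. A simplified sketch with invented stand-in types; the real method also checks alignment and bounds via `check_ptr_access`:

```rust
#[derive(Clone, Copy)]
struct Layout { size: usize, is_unsized: bool }

#[derive(Clone, Copy)]
struct MPlace { ptr: usize, layout: Layout }

fn check_mplace_access(place: MPlace, size: Option<usize>) -> Result<Option<usize>, String> {
    // Fall back to the layout's size, which only works for sized types.
    let size = size.unwrap_or_else(|| {
        assert!(!place.layout.is_unsized, "need an explicit size for unsized places");
        place.layout.size
    });
    if size == 0 {
        return Ok(None); // zero-sized access: no memory work needed at all
    }
    // The real method would verify alignment and in-bounds-ness here.
    Ok(Some(place.ptr))
}

fn main() -> Result<(), String> {
    let zst = MPlace { ptr: 8, layout: Layout { size: 0, is_unsized: false } };
    assert_eq!(check_mplace_access(zst, None)?, None);
    let word = MPlace { ptr: 8, layout: Layout { size: 8, is_unsized: false } };
    assert_eq!(check_mplace_access(word, None)?, Some(8));
    Ok(())
}
```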
13 changes: 7 additions & 6 deletions src/librustc_mir/interpret/step.rs
@@ -209,17 +209,18 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let dest = self.force_allocation(dest)?;
let length = dest.len(self)?;

if length > 0 {
// write the first
if let Some(first_ptr) = self.check_mplace_access(dest, None)? {
// Write the first.
let first = self.mplace_field(dest, 0)?;
self.copy_op(op, first.into())?;

if length > 1 {
// copy the rest
let (dest, dest_align) = first.to_scalar_ptr_align();
let rest = dest.ptr_offset(first.layout.size, self)?;
let elem_size = first.layout.size;
// Copy the rest. This is performance-sensitive code
// for big static/const arrays!
let rest_ptr = first_ptr.offset(elem_size, self)?;
self.memory.copy_repeatedly(
dest, dest_align, rest, dest_align, first.layout.size, length - 1, true
first_ptr, rest_ptr, elem_size, length - 1, /*nonoverlapping:*/true
)?;
}
}
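The `step.rs` hunk keeps the repeat-initialization strategy: write the first element through the normal `copy_op` path, then replicate it into the remaining slots with a single bulk `copy_repeatedly` call rather than a per-element loop, which matters for big static/const arrays. A simplified model over a flat byte buffer (the real primitive works on tagged allocations and also copies relocations):

```rust
/// Copy `size` bytes starting at `src` into `length` consecutive
/// destination slots starting at `dest`.
fn copy_repeatedly(buf: &mut [u8], src: usize, dest: usize, size: usize, length: usize) {
    for i in 0..length {
        buf.copy_within(src..src + size, dest + i * size);
    }
}

fn main() {
    // Conceptually a `[u16; 4]` being initialized by `[elem; 4]`.
    let mut arr = vec![0u8; 8];
    arr[..2].copy_from_slice(&[0xAB, 0xCD]); // write the first element
    copy_repeatedly(&mut arr, 0, 2, 2, 3);   // copy the rest
    assert_eq!(arr, [0xAB, 0xCD, 0xAB, 0xCD, 0xAB, 0xCD, 0xAB, 0xCD]);
}
```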
2 changes: 1 addition & 1 deletion src/librustc_mir/interpret/terminator.rs
@@ -426,7 +426,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
None => {
// Unsized self.
args[0].to_mem_place()
args[0].assert_mem_place()
}
};
// Find and consult vtable
Expand Down
19 changes: 11 additions & 8 deletions src/librustc_mir/interpret/validity.rs
@@ -440,9 +440,11 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
}
}
}
// Check if we have encountered this pointer+layout combination
// before. Proceed recursively even for ZST, no
// reason to skip them! E.g., `!` is a ZST and we want to validate it.
// Proceed recursively even for ZST, no reason to skip them!
// `!` is a ZST and we want to validate it.
// Normalize before handing `place` to tracking because that will
// check for duplicates.
let place = self.ecx.normalize_mplace_ptr(place)?;
let path = &self.path;
ref_tracking.track(place, || {
// We need to clone the path anyway, make sure it gets created
@@ -548,7 +550,7 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
) -> InterpResult<'tcx> {
match op.layout.ty.sty {
ty::Str => {
let mplace = op.to_mem_place(); // strings are never immediate
let mplace = op.assert_mem_place(); // strings are never immediate
try_validation!(self.ecx.read_str(mplace),
"uninitialized or non-UTF-8 data in str", self.path);
}
Expand All @@ -565,7 +567,7 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
return Ok(());
}
// non-ZST array cannot be immediate, slices are never immediate
let mplace = op.to_mem_place();
let mplace = op.assert_mem_place();
// This is the length of the array/slice.
let len = mplace.len(self.ecx)?;
// zero length slices have nothing to be checked
@@ -576,8 +578,8 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
let ty_size = self.ecx.layout_of(tys)?.size;
// This is the size in bytes of the whole array.
let size = ty_size * len;

let ptr = self.ecx.force_ptr(mplace.ptr)?;
// Size is not 0, get a pointer (no cast because we normalized in validate_operand).
let ptr = mplace.ptr.assert_ptr();

// NOTE: Keep this in sync with the handling of integer and float
// types above, in `visit_primitive`.
@@ -633,7 +635,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// `ref_tracking_for_consts` can be `None` to avoid recursive checking below references.
/// This also toggles between "run-time" (no recursion) and "compile-time" (with recursion)
/// validation (e.g., pointer values are fine in integers at runtime) and various other const
/// specific validation checks
/// specific validation checks.
pub fn validate_operand(
&self,
op: OpTy<'tcx, M::PointerTag>,
Expand All @@ -653,6 +655,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
};

// Run it
let op = self.normalize_op_ptr(op)?; // avoid doing ptr-to-int all the time
visitor.visit_value(op)
}
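Why normalize before handing the place to reference tracking in validation? Tracking deduplicates places by value, and the same address can be represented either as a raw integer or as a resolved pointer; without normalization the two representations would be tracked (and validated) separately. A hedged sketch of that failure mode, with invented stand-in types:

```rust
use std::collections::HashSet;

#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
enum Scalar {
    Bits(u64),
    Ptr { alloc_id: usize, offset: u64 },
}

/// Stand-in for `force_ptr`-based normalization; the real mapping comes
/// from the machine, not a hardcoded allocation 0.
fn normalize(s: Scalar) -> Scalar {
    match s {
        Scalar::Bits(addr) => Scalar::Ptr { alloc_id: 0, offset: addr },
        ptr => ptr,
    }
}

fn main() {
    let mut seen = HashSet::new();
    // Unnormalized: the same address sneaks in twice.
    assert!(seen.insert(Scalar::Bits(16)));
    assert!(seen.insert(Scalar::Ptr { alloc_id: 0, offset: 16 }));

    seen.clear();
    // Normalized first: the duplicate is caught.
    assert!(seen.insert(normalize(Scalar::Bits(16))));
    assert!(!seen.insert(normalize(Scalar::Ptr { alloc_id: 0, offset: 16 })));
}
```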
}
