Auto merge of #70807 - Dylan-DPC:rollup-qd1kgl2, r=Dylan-DPC
Rollup of 5 pull requests

Successful merges:

 - #70558 (Fix some aliasing issues in Vec)
 - #70760 (docs: make the description of Result::map_or more clear)
 - #70769 (Miri: remove an outdated FIXME)
 - #70776 (clarify comment in RawVec::into_box)
 - #70806 (fix Miri assignment sanity check)

Failed merges:

r? @ghost
bors committed Apr 5, 2020
2 parents 607b858 + 31b8d65 commit e6cef04
Showing 9 changed files with 138 additions and 39 deletions.
9 changes: 6 additions & 3 deletions src/liballoc/raw_vec.rs
@@ -570,16 +570,19 @@ impl<T> RawVec<T, Global> {
///
/// # Safety
///
-/// `shrink_to_fit(len)` must be called immediately prior to calling this function. This
-/// implies, that `len` must be smaller than or equal to `self.capacity()`.
+/// * `len` must be greater than or equal to the most recently requested capacity, and
+/// * `len` must be less than or equal to `self.capacity()`.
+///
+/// Note, that the requested capacity and `self.capacity()` could differ, as
+/// an allocator could overallocate and return a greater memory block than requested.
pub unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit<T>]> {
// Sanity-check one half of the safety requirement (we cannot check the other half).
debug_assert!(
len <= self.capacity(),
"`len` must be smaller than or equal to `self.capacity()`"
);

let me = ManuallyDrop::new(self);
// NOTE: not calling `capacity()` here; actually using the real `cap` field!
let slice = slice::from_raw_parts_mut(me.ptr() as *mut MaybeUninit<T>, len);
Box::from_raw(slice)
}
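For context: the stable caller that establishes this contract is `Vec::into_boxed_slice`, which shrinks the buffer before handing it to `into_box`. A minimal user-level sketch of that behavior (values are illustrative, not from this commit):

```rust
fn main() {
    let mut v: Vec<i32> = Vec::with_capacity(10);
    v.extend_from_slice(&[1, 2, 3]);
    // `into_boxed_slice` re-requests capacity == len (via `shrink_to_fit`)
    // before the internal `into_box(len)` call, so both safety bullets above
    // hold by construction.
    let b: Box<[i32]> = v.into_boxed_slice();
    assert_eq!(&*b, &[1, 2, 3]);
}
```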
71 changes: 66 additions & 5 deletions src/liballoc/tests/vec.rs
@@ -1351,24 +1351,85 @@ fn test_try_reserve_exact() {
}

#[test]
-fn test_stable_push_pop() {
+fn test_stable_pointers() {
/// Pull an element from the iterator, then drop it.
/// Useful to cover both the `next` and `drop` paths of an iterator.
fn next_then_drop<I: Iterator>(mut i: I) {
i.next().unwrap();
drop(i);
}

// Test that, if we reserved enough space, adding and removing elements does not
// invalidate references into the vector (such as `v0`). This test also
// runs in Miri, which would detect such problems.
-let mut v = Vec::with_capacity(10);
+let mut v = Vec::with_capacity(128);
v.push(13);

-// laundering the lifetime -- we take care that `v` does not reallocate, so that's okay.
-let v0 = unsafe { &*(&v[0] as *const _) };

+// Laundering the lifetime -- we take care that `v` does not reallocate, so that's okay.
+let v0 = &mut v[0];
+let v0 = unsafe { &mut *(v0 as *mut _) };
// Now do a bunch of things and occasionally use `v0` again to assert it is still valid.

// Pushing/inserting and popping/removing
v.push(1);
v.push(2);
v.insert(1, 1);
assert_eq!(*v0, 13);
v.remove(1);
v.pop().unwrap();
assert_eq!(*v0, 13);
v.push(1);
v.swap_remove(1);
assert_eq!(v.len(), 2);
v.swap_remove(1); // swap_remove the last element
assert_eq!(*v0, 13);

// Appending
v.append(&mut vec![27, 19]);
assert_eq!(*v0, 13);

// Extending
v.extend_from_slice(&[1, 2]);
v.extend(&[1, 2]); // `slice::Iter` (with `T: Copy`) specialization
v.extend(vec![2, 3]); // `vec::IntoIter` specialization
v.extend(std::iter::once(3)); // `TrustedLen` specialization
v.extend(std::iter::empty::<i32>()); // `TrustedLen` specialization with empty iterator
v.extend(std::iter::once(3).filter(|_| true)); // base case
v.extend(std::iter::once(&3)); // `cloned` specialization
assert_eq!(*v0, 13);

// Truncation
v.truncate(2);
assert_eq!(*v0, 13);

// Resizing
v.resize_with(v.len() + 10, || 42);
assert_eq!(*v0, 13);
v.resize_with(2, || panic!());
assert_eq!(*v0, 13);

// No-op reservation
v.reserve(32);
v.reserve_exact(32);
assert_eq!(*v0, 13);

// Partial draining
v.resize_with(10, || 42);
next_then_drop(v.drain(5..));
assert_eq!(*v0, 13);

// Splicing
v.resize_with(10, || 42);
next_then_drop(v.splice(5.., vec![1, 2, 3, 4, 5])); // empty tail after range
assert_eq!(*v0, 13);
next_then_drop(v.splice(5..8, vec![1])); // replacement is smaller than original range
assert_eq!(*v0, 13);
next_then_drop(v.splice(5..6, vec![1; 10].into_iter().filter(|_| true))); // lower bound not exact
assert_eq!(*v0, 13);

// Smoke test that would fire even outside Miri if an actual relocation happened.
*v0 -= 13;
assert_eq!(v[0], 0);
}

// https://github.com/rust-lang/rust/pull/49496 introduced specialization based on:
22 changes: 13 additions & 9 deletions src/liballoc/vec.rs
@@ -740,7 +740,8 @@ impl<T> Vec<T> {
if len > self.len {
return;
}
-let s = self.get_unchecked_mut(len..) as *mut _;
+let remaining_len = self.len - len;
+let s = slice::from_raw_parts_mut(self.as_mut_ptr().add(len), remaining_len);
self.len = len;
ptr::drop_in_place(s);
}
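The pattern here, repeated throughout #70558, is to build the tail slice from the raw buffer pointer (`as_mut_ptr().add(len)`) rather than through `get_unchecked_mut(len..)`, which materializes an intermediate `&mut` reference that Miri's Stacked Borrows model treats as invalidating other outstanding pointers into the vector. A free-standing sketch of the same idiom (`drop_tail` is a hypothetical helper, not part of this commit):

```rust
use std::{ptr, slice};

/// Drop the elements of `v[len..]` in place, going through raw pointers only.
unsafe fn drop_tail<T>(v: &mut Vec<T>, len: usize) {
    assert!(len <= v.len());
    let remaining_len = v.len() - len;
    // Derive the tail slice straight from the buffer pointer; no `&mut [T]`
    // borrow of the whole vector is created along the way.
    let s = slice::from_raw_parts_mut(v.as_mut_ptr().add(len), remaining_len);
    // Shrink the length first so the vector never claims to own elements
    // that are in the middle of being dropped.
    v.set_len(len);
    ptr::drop_in_place(s);
}

fn main() {
    let mut v = vec!["a".to_string(), "b".to_string(), "c".to_string()];
    unsafe { drop_tail(&mut v, 1) };
    assert_eq!(v, ["a".to_string()]);
}
```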
@@ -963,13 +964,15 @@ impl<T> Vec<T> {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn swap_remove(&mut self, index: usize) -> T {
+let len = self.len();
+assert!(index < len);
unsafe {
// We replace self[index] with the last element. Note that if the
-// bounds check on hole succeeds there must be a last element (which
+// bounds check above succeeds there must be a last element (which
// can be self[index] itself).
-let hole: *mut T = &mut self[index];
-let last = ptr::read(self.get_unchecked(self.len - 1));
-self.len -= 1;
+let last = ptr::read(self.as_ptr().add(len - 1));
+let hole: *mut T = self.as_mut_ptr().add(index);
+self.set_len(len - 1);
ptr::replace(hole, last)
}
}
@@ -1200,7 +1203,7 @@ impl<T> Vec<T> {
} else {
unsafe {
self.len -= 1;
-Some(ptr::read(self.get_unchecked(self.len())))
+Some(ptr::read(self.as_ptr().add(self.len())))
}
}
}
@@ -2020,7 +2023,7 @@ where
let (lower, _) = iterator.size_hint();
let mut vector = Vec::with_capacity(lower.saturating_add(1));
unsafe {
-ptr::write(vector.get_unchecked_mut(0), element);
+ptr::write(vector.as_mut_ptr(), element);
vector.set_len(1);
}
vector
@@ -2122,8 +2125,9 @@ where
self.reserve(slice.len());
unsafe {
let len = self.len();
+let dst_slice = slice::from_raw_parts_mut(self.as_mut_ptr().add(len), slice.len());
+dst_slice.copy_from_slice(slice);
self.set_len(len + slice.len());
-self.get_unchecked_mut(len..).copy_from_slice(slice);
}
}
}
@@ -2144,7 +2148,7 @@ impl<T> Vec<T> {
self.reserve(lower.saturating_add(1));
}
unsafe {
-ptr::write(self.get_unchecked_mut(len), element);
+ptr::write(self.as_mut_ptr().add(len), element);
// NB can't overflow since we would have had to alloc the address space
self.set_len(len + 1);
}
6 changes: 4 additions & 2 deletions src/libcore/result.rs
@@ -521,14 +521,16 @@ impl<T, E> Result<T, E> {
}
}

-/// Applies a function to the contained value (if any),
-/// or returns the provided default (if not).
+/// Applies a function to the contained value (if [`Ok`]),
+/// or returns the provided default (if [`Err`]).
///
/// Arguments passed to `map_or` are eagerly evaluated; if you are passing
/// the result of a function call, it is recommended to use [`map_or_else`],
/// which is lazily evaluated.
///
/// [`map_or_else`]: #method.map_or_else
+/// [`Ok`]: enum.Result.html#variant.Ok
+/// [`Err`]: enum.Result.html#variant.Err
///
/// # Examples
///
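(The doc examples themselves are folded out of this view; the following is a sketch of the documented behavior, not the commit's own example code:)

```rust
fn main() {
    let x: Result<&str, &str> = Ok("foo");
    assert_eq!(x.map_or(42, |v| v.len()), 3); // function applied to the Ok value

    let x: Result<&str, &str> = Err("bar");
    assert_eq!(x.map_or(42, |v| v.len()), 42); // default returned on Err
}
```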
52 changes: 36 additions & 16 deletions src/librustc_mir/interpret/eval_context.rs
@@ -14,11 +14,11 @@ use rustc_middle::mir::interpret::{
sign_extend, truncate, AllocId, FrameInfo, GlobalId, InterpResult, Pointer, Scalar,
};
use rustc_middle::ty::layout::{self, TyAndLayout};
-use rustc_middle::ty::query::TyCtxtAt;
-use rustc_middle::ty::subst::SubstsRef;
-use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable};
+use rustc_middle::ty::{
+self, fold::BottomUpFolder, query::TyCtxtAt, subst::SubstsRef, Ty, TyCtxt, TypeFoldable,
+};
use rustc_span::source_map::DUMMY_SP;
-use rustc_target::abi::{Abi, Align, HasDataLayout, LayoutOf, Size, TargetDataLayout};
+use rustc_target::abi::{Align, HasDataLayout, LayoutOf, Size, TargetDataLayout};

use super::{
Immediate, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Memory, OpTy, Operand, Place, PlaceTy,
@@ -213,30 +213,50 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> LayoutOf for InterpCx<'mir, 'tcx, M> {
/// Test if it is valid for a MIR assignment to assign `src`-typed place to `dest`-typed value.
/// This test should be symmetric, as it is primarily about layout compatibility.
pub(super) fn mir_assign_valid_types<'tcx>(
+tcx: TyCtxt<'tcx>,
src: TyAndLayout<'tcx>,
dest: TyAndLayout<'tcx>,
) -> bool {
if src.ty == dest.ty {
// Equal types, all is good.
return true;
}
-// Type-changing assignments can happen for (at least) two reasons:
-// - `&mut T` -> `&T` gets optimized from a reborrow to a mere assignment.
-// - Subtyping is used. While all normal lifetimes are erased, higher-ranked lifetime
-// bounds are still around and can lead to type differences.
-// There is no good way to check the latter, so we compare layouts instead -- but only
-// for values with `Scalar`/`ScalarPair` abi.
-// FIXME: Do something more accurate, type-based.
-match &src.abi {
-Abi::Scalar(..) | Abi::ScalarPair(..) => src.layout == dest.layout,
-_ => false,
-}
+if src.layout != dest.layout {
+// Layout differs, definitely not equal.
+// We do this here because Miri would *do the wrong thing* if we allowed layout-changing
+// assignments.
+return false;
+}

+// Type-changing assignments can happen for (at least) two reasons:
+// 1. `&mut T` -> `&T` gets optimized from a reborrow to a mere assignment.
+// 2. Subtyping is used. While all normal lifetimes are erased, higher-ranked types
+// with their late-bound lifetimes are still around and can lead to type differences.
+// Normalize both of them away.
+let normalize = |ty: Ty<'tcx>| {
+ty.fold_with(&mut BottomUpFolder {
+tcx,
+// Normalize all references to immutable.
+ty_op: |ty| match ty.kind {
+ty::Ref(_, pointee, _) => tcx.mk_imm_ref(tcx.lifetimes.re_erased, pointee),
+_ => ty,
+},
+// We just erase all late-bound lifetimes, but this is not fully correct (FIXME):
+// lifetimes in invariant positions could matter (e.g. through associated types).
+// We rely on the fact that layout was confirmed to be equal above.
+lt_op: |_| tcx.lifetimes.re_erased,
+// Leave consts unchanged.
+ct_op: |ct| ct,
+})
+};
+normalize(src.ty) == normalize(dest.ty)
}
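Both normalization cases correspond to assignments that are legal in surface Rust; a small illustration (function names are illustrative, not from the compiler source):

```rust
// Case 1: `&mut T` -> `&T`, a reborrow that MIR optimizations can turn into a
// plain assignment; normalizing all references to immutable equates the types.
fn mut_to_shared(r: &mut i32) -> &i32 {
    r
}

// Case 2: higher-ranked subtyping, the same shape as the regression test
// `issue-70804-fn-subtyping.rs` added at the end of this rollup.
fn hr_subtype(f: for<'a> fn(&'a ())) -> fn(&'static ()) {
    f
}

fn main() {
    let mut x = 1;
    assert_eq!(*mut_to_shared(&mut x), 1);
    hr_subtype(|_| ())(&());
}
```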

/// Use the already known layout if given (but sanity check in debug mode),
/// or compute the layout.
#[cfg_attr(not(debug_assertions), inline(always))]
pub(super) fn from_known_layout<'tcx>(
+tcx: TyCtxt<'tcx>,
known_layout: Option<TyAndLayout<'tcx>>,
compute: impl FnOnce() -> InterpResult<'tcx, TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
@@ -246,7 +266,7 @@ pub(super) fn from_known_layout<'tcx>(
if cfg!(debug_assertions) {
let check_layout = compute()?;
assert!(
-mir_assign_valid_types(check_layout, known_layout),
+mir_assign_valid_types(tcx, check_layout, known_layout),
"expected type differs from actual type.\nexpected: {:?}\nactual: {:?}",
known_layout.ty,
check_layout.ty,
@@ -424,7 +444,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// have to support that case (mostly by skipping all caching).
match frame.locals.get(local).and_then(|state| state.layout.get()) {
None => {
-let layout = from_known_layout(layout, || {
+let layout = from_known_layout(self.tcx.tcx, layout, || {
let local_ty = frame.body.local_decls[local].ty;
let local_ty =
self.subst_from_frame_and_normalize_erasing_regions(frame, local_ty);
2 changes: 1 addition & 1 deletion src/librustc_mir/interpret/operand.rs
@@ -529,7 +529,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
ty::ConstKind::Value(val_val) => val_val,
};
// Other cases need layout.
-let layout = from_known_layout(layout, || self.layout_of(val.ty))?;
+let layout = from_known_layout(self.tcx.tcx, layout, || self.layout_of(val.ty))?;
let op = match val_val {
ConstValue::ByRef { alloc, offset } => {
let id = self.tcx.alloc_map.lock().create_memory_alloc(alloc);
4 changes: 2 additions & 2 deletions src/librustc_mir/interpret/place.rs
@@ -868,7 +868,7 @@ where
// We do NOT compare the types for equality, because well-typed code can
// actually "transmute" `&mut T` to `&T` in an assignment without a cast.
assert!(
-mir_assign_valid_types(src.layout, dest.layout),
+mir_assign_valid_types(self.tcx.tcx, src.layout, dest.layout),
"type mismatch when copying!\nsrc: {:?},\ndest: {:?}",
src.layout.ty,
dest.layout.ty,
@@ -922,7 +922,7 @@ where
src: OpTy<'tcx, M::PointerTag>,
dest: PlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx> {
-if mir_assign_valid_types(src.layout, dest.layout) {
+if mir_assign_valid_types(self.tcx.tcx, src.layout, dest.layout) {
// Fast path: Just use normal `copy_op`
return self.copy_op(src, dest);
}
1 change: 0 additions & 1 deletion src/librustc_mir/interpret/terminator.rs
@@ -75,7 +75,6 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}

Drop { location, target, unwind } => {
-// FIXME(CTFE): forbid drop in const eval
let place = self.eval_place(location)?;
let ty = place.layout.ty;
trace!("TerminatorKind::drop: {:?}, type {}", location, ty);
10 changes: 10 additions & 0 deletions src/test/ui/consts/const-eval/issue-70804-fn-subtyping.rs
@@ -0,0 +1,10 @@
// check-pass
#![feature(const_fn)]

const fn nested(x: (for<'a> fn(&'a ()), String)) -> (fn(&'static ()), String) {
x
}

pub const TEST: (fn(&'static ()), String) = nested((|_x| (), String::new()));

fn main() {}
