Auto merge of #123676 - GuillaumeGomez:rollup-1hurixy, r=GuillaumeGomez
Rollup of 8 pull requests

Successful merges:

 - #123254 (Do not allocate for ZST ThinBox (attempt 2 using const_allocate))
 - #123626 (Add MC/DC support to coverage test tools)
 - #123638 (rustdoc: synthetic auto: filter out clauses from the implementor's ParamEnv)
 - #123653 (Split `non_local_definitions` lint tests in separate test files)
 - #123658 (Stop making any assumption about the projections applied to the upvars in the `ByMoveBody` pass)
 - #123662 (Don't rely on upvars being assigned just because coroutine-closure kind is assigned)
 - #123665 (Fix typo in `Future::poll()` docs)
 - #123672 (compiletest: unset `RUSTC_LOG_COLOR`)

r? `@ghost`
`@rustbot` modify labels: rollup
bors committed Apr 9, 2024
2 parents 2805aed + ed43ac6 commit ff24ef9
Showing 39 changed files with 1,691 additions and 1,321 deletions.
2 changes: 1 addition & 1 deletion compiler/rustc_middle/src/ty/sty.rs
@@ -2231,7 +2231,7 @@ impl<'tcx> Ty<'tcx> {
pub fn tuple_fields(self) -> &'tcx List<Ty<'tcx>> {
match self.kind() {
Tuple(args) => args,
_ => bug!("tuple_fields called on non-tuple"),
_ => bug!("tuple_fields called on non-tuple: {self:?}"),
}
}

36 changes: 17 additions & 19 deletions compiler/rustc_mir_transform/src/coroutine/by_move_body.rs
@@ -281,14 +281,14 @@ impl<'tcx> MutVisitor<'tcx> for MakeByMoveBody<'tcx> {
if place.local == ty::CAPTURE_STRUCT_LOCAL
&& let Some((&mir::ProjectionElem::Field(idx, _), projection)) =
place.projection.split_first()
- && let Some(&(remapped_idx, remapped_ty, needs_deref, additional_projections)) =
+ && let Some(&(remapped_idx, remapped_ty, needs_deref, bridging_projections)) =
self.field_remapping.get(&idx)
{
// As noted before, if the parent closure captures a field by value, and
// the child captures a field by ref, then for the by-move body we're
// generating, we also are taking that field by value. Peel off a deref,
- // since a layer of reffing has now become redundant.
- let final_deref = if needs_deref {
+ // since a layer of ref'ing has now become redundant.
+ let final_projections = if needs_deref {
let Some((mir::ProjectionElem::Deref, projection)) = projection.split_first()
else {
bug!(
@@ -302,20 +302,18 @@ impl<'tcx> MutVisitor<'tcx> for MakeByMoveBody<'tcx> {
projection
};

- // The only thing that should be left is a deref, if the parent captured
- // an upvar by-ref.
- std::assert_matches::assert_matches!(final_deref, [] | [mir::ProjectionElem::Deref]);
-
- // For all of the additional projections that come out of precise capturing,
- // re-apply these projections.
- let additional_projections =
- additional_projections.iter().map(|elem| match elem.kind {
- ProjectionKind::Deref => mir::ProjectionElem::Deref,
- ProjectionKind::Field(idx, VariantIdx::ZERO) => {
- mir::ProjectionElem::Field(idx, elem.ty)
- }
- _ => unreachable!("precise captures only through fields and derefs"),
- });
+ // These projections are applied in order to "bridge" the local that we are
+ // currently transforming *from* the old upvar that the by-ref coroutine used
+ // to capture *to* the upvar of the parent coroutine-closure. For example, if
+ // the parent captures `&s` but the child captures `&(s.field)`, then we will
+ // apply a field projection.
+ let bridging_projections = bridging_projections.iter().map(|elem| match elem.kind {
+ ProjectionKind::Deref => mir::ProjectionElem::Deref,
+ ProjectionKind::Field(idx, VariantIdx::ZERO) => {
+ mir::ProjectionElem::Field(idx, elem.ty)
+ }
+ _ => unreachable!("precise captures only through fields and derefs"),
+ });

// We start out with an adjusted field index (and ty), representing the
// upvar that we get from our parent closure. We apply any of the additional
@@ -326,8 +324,8 @@ impl<'tcx> MutVisitor<'tcx> for MakeByMoveBody<'tcx> {
projection: self.tcx.mk_place_elems_from_iter(
[mir::ProjectionElem::Field(remapped_idx, remapped_ty)]
.into_iter()
- .chain(additional_projections)
- .chain(final_deref.iter().copied()),
+ .chain(bridging_projections)
+ .chain(final_projections.iter().copied()),
),
};
}
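
To picture the capture shape the new comment describes, here is a minimal sketch (it assumes a nightly toolchain with the `async_closure` feature and only illustrates the parent/child capture relationship, not a reproducer for this change): the outer coroutine-closure captures from `s`, while the coroutine it returns only needs `s.field`, which is the gap the bridging projections close.

#![feature(async_closure)]

struct S {
    field: String,
}

fn main() {
    let s = S { field: String::from("upvar") };
    // The async closure (the "parent") captures from `s`; the coroutine it
    // returns (the "child") only touches `s.field`. Reconciling those two
    // views is what the field/deref "bridging" projections above are for.
    let closure = async move || {
        println!("{}", s.field);
    };
    drop(closure);
}
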
@@ -292,7 +292,9 @@ pub(in crate::solve) fn extract_tupled_inputs_and_output_from_callable<'tcx>(
let kind_ty = args.kind_ty();
let sig = args.coroutine_closure_sig().skip_binder();

- let coroutine_ty = if let Some(closure_kind) = kind_ty.to_opt_closure_kind() {
+ let coroutine_ty = if let Some(closure_kind) = kind_ty.to_opt_closure_kind()
+ && !args.tupled_upvars_ty().is_ty_var()
+ {
if !closure_kind.extends(goal_kind) {
return Err(NoSolution);
}
@@ -401,7 +403,9 @@ pub(in crate::solve) fn extract_tupled_inputs_and_output_from_async_callable<'tcx>(
let kind_ty = args.kind_ty();
let sig = args.coroutine_closure_sig().skip_binder();
let mut nested = vec![];
- let coroutine_ty = if let Some(closure_kind) = kind_ty.to_opt_closure_kind() {
+ let coroutine_ty = if let Some(closure_kind) = kind_ty.to_opt_closure_kind()
+ && !args.tupled_upvars_ty().is_ty_var()
+ {
if !closure_kind.extends(goal_kind) {
return Err(NoSolution);
}
5 changes: 5 additions & 0 deletions compiler/rustc_trait_selection/src/solve/normalizes_to/mod.rs
@@ -487,6 +487,11 @@ impl<'tcx> assembly::GoalKind<'tcx> for NormalizesTo<'tcx> {
bug!();
};

// Bail if the upvars haven't been constrained.
if tupled_upvars_ty.expect_ty().is_ty_var() {
return ecx.evaluate_added_goals_and_make_canonical_response(Certainty::AMBIGUOUS);
}

let Some(closure_kind) = closure_fn_kind_ty.expect_ty().to_opt_closure_kind() else {
// We don't need to worry about the self type being an infer var.
return Err(NoSolution);
10 changes: 8 additions & 2 deletions compiler/rustc_trait_selection/src/traits/project.rs
@@ -1601,7 +1601,10 @@ fn confirm_closure_candidate<'cx, 'tcx>(
// If we know the kind and upvars, use that directly.
// Otherwise, defer to `AsyncFnKindHelper::Upvars` to delay
// the projection, like the `AsyncFn*` traits do.
- let output_ty = if let Some(_) = kind_ty.to_opt_closure_kind() {
+ let output_ty = if let Some(_) = kind_ty.to_opt_closure_kind()
+ // Fall back to projection if upvars aren't constrained
+ && !args.tupled_upvars_ty().is_ty_var()
+ {
sig.to_coroutine_given_kind_and_upvars(
tcx,
args.parent_args(),
@@ -1731,7 +1734,10 @@ fn confirm_async_closure_candidate<'cx, 'tcx>(

let term = match item_name {
sym::CallOnceFuture | sym::CallRefFuture => {
- if let Some(closure_kind) = kind_ty.to_opt_closure_kind() {
+ if let Some(closure_kind) = kind_ty.to_opt_closure_kind()
+ // Fall back to projection if upvars aren't constrained
+ && !args.tupled_upvars_ty().is_ty_var()
+ {
if !closure_kind.extends(goal_kind) {
bug!("we should not be confirming if the closure kind is not met");
}
@@ -400,39 +400,36 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
}
}
ty::CoroutineClosure(def_id, args) => {
+ let args = args.as_coroutine_closure();
let is_const = self.tcx().is_const_fn_raw(def_id);
- match self.infcx.closure_kind(self_ty) {
- Some(closure_kind) => {
- let no_borrows = match self
- .infcx
- .shallow_resolve(args.as_coroutine_closure().tupled_upvars_ty())
- .kind()
- {
- ty::Tuple(tys) => tys.is_empty(),
- ty::Error(_) => false,
- _ => bug!("tuple_fields called on non-tuple"),
- };
- // A coroutine-closure implements `FnOnce` *always*, since it may
- // always be called once. It additionally implements `Fn`/`FnMut`
- // only if it has no upvars (therefore no borrows from the closure
- // that would need to be represented with a lifetime) and if the
- // closure kind permits it.
- // FIXME(async_closures): Actually, it could also implement `Fn`/`FnMut`
- // if it takes all of its upvars by copy, and none by ref. This would
- // require us to record a bit more information during upvar analysis.
- if no_borrows && closure_kind.extends(kind) {
- candidates.vec.push(ClosureCandidate { is_const });
- } else if kind == ty::ClosureKind::FnOnce {
- candidates.vec.push(ClosureCandidate { is_const });
- }
+ if let Some(closure_kind) = self.infcx.closure_kind(self_ty)
+ // Ambiguity if upvars haven't been constrained yet
+ && !args.tupled_upvars_ty().is_ty_var()
+ {
+ let no_borrows = match args.tupled_upvars_ty().kind() {
+ ty::Tuple(tys) => tys.is_empty(),
+ ty::Error(_) => false,
+ _ => bug!("tuple_fields called on non-tuple"),
+ };
+ // A coroutine-closure implements `FnOnce` *always*, since it may
+ // always be called once. It additionally implements `Fn`/`FnMut`
+ // only if it has no upvars (therefore no borrows from the closure
+ // that would need to be represented with a lifetime) and if the
+ // closure kind permits it.
+ // FIXME(async_closures): Actually, it could also implement `Fn`/`FnMut`
+ // if it takes all of its upvars by copy, and none by ref. This would
+ // require us to record a bit more information during upvar analysis.
+ if no_borrows && closure_kind.extends(kind) {
+ candidates.vec.push(ClosureCandidate { is_const });
+ } else if kind == ty::ClosureKind::FnOnce {
+ candidates.vec.push(ClosureCandidate { is_const });
+ }
- None => {
- if kind == ty::ClosureKind::FnOnce {
- candidates.vec.push(ClosureCandidate { is_const });
- } else {
- // This stays ambiguous until kind+upvars are determined.
- candidates.ambiguous = true;
- }
+ } else {
+ if kind == ty::ClosureKind::FnOnce {
+ candidates.vec.push(ClosureCandidate { is_const });
+ } else {
+ // This stays ambiguous until kind+upvars are determined.
+ candidates.ambiguous = true;
+ }
}
}
96 changes: 83 additions & 13 deletions library/alloc/src/boxed/thin.rs
@@ -4,10 +4,14 @@
use crate::alloc::{self, Layout, LayoutError};
use core::error::Error;
use core::fmt::{self, Debug, Display, Formatter};
+ #[cfg(not(no_global_oom_handling))]
+ use core::intrinsics::const_allocate;
use core::marker::PhantomData;
#[cfg(not(no_global_oom_handling))]
use core::marker::Unsize;
- use core::mem::{self, SizedTypeProperties};
+ use core::mem;
+ #[cfg(not(no_global_oom_handling))]
+ use core::mem::SizedTypeProperties;
use core::ops::{Deref, DerefMut};
use core::ptr::Pointee;
use core::ptr::{self, NonNull};
@@ -109,9 +113,14 @@ impl<Dyn: ?Sized> ThinBox<Dyn> {
where
T: Unsize<Dyn>,
{
- let meta = ptr::metadata(&value as &Dyn);
- let ptr = WithOpaqueHeader::new(meta, value);
- ThinBox { ptr, _marker: PhantomData }
+ if mem::size_of::<T>() == 0 {
+ let ptr = WithOpaqueHeader::new_unsize_zst::<Dyn, T>(value);
+ ThinBox { ptr, _marker: PhantomData }
+ } else {
+ let meta = ptr::metadata(&value as &Dyn);
+ let ptr = WithOpaqueHeader::new(meta, value);
+ ThinBox { ptr, _marker: PhantomData }
+ }
}
}

@@ -200,6 +209,16 @@ impl WithOpaqueHeader {
Self(ptr.0)
}

#[cfg(not(no_global_oom_handling))]
fn new_unsize_zst<Dyn, T>(value: T) -> Self
where
Dyn: ?Sized,
T: Unsize<Dyn>,
{
let ptr = WithHeader::<<Dyn as Pointee>::Metadata>::new_unsize_zst::<Dyn, T>(value);
Self(ptr.0)
}

fn try_new<H, T>(header: H, value: T) -> Result<Self, core::alloc::AllocError> {
WithHeader::try_new(header, value).map(|ptr| Self(ptr.0))
}
@@ -288,6 +307,58 @@ impl<H> WithHeader<H> {
}
}

// `Dyn` is `?Sized` type like `[u32]`, and `T` is ZST type like `[u32; 0]`.
#[cfg(not(no_global_oom_handling))]
fn new_unsize_zst<Dyn, T>(value: T) -> WithHeader<H>
where
Dyn: Pointee<Metadata = H> + ?Sized,
T: Unsize<Dyn>,
{
assert!(mem::size_of::<T>() == 0);

const fn max(a: usize, b: usize) -> usize {
if a > b { a } else { b }
}

// Compute a pointer to the right metadata. This will point to the beginning
// of the header, past the padding, so the assigned type makes sense.
// It also ensures that the address at the end of the header is sufficiently
// aligned for T.
let alloc: &<Dyn as Pointee>::Metadata = const {
// FIXME: just call `WithHeader::alloc_layout` with size reset to 0.
// Currently that's blocked on `Layout::extend` not being `const fn`.

let alloc_align =
max(mem::align_of::<T>(), mem::align_of::<<Dyn as Pointee>::Metadata>());

let alloc_size =
max(mem::align_of::<T>(), mem::size_of::<<Dyn as Pointee>::Metadata>());

unsafe {
// SAFETY: align is power of two because it is the maximum of two alignments.
let alloc: *mut u8 = const_allocate(alloc_size, alloc_align);

let metadata_offset =
alloc_size.checked_sub(mem::size_of::<<Dyn as Pointee>::Metadata>()).unwrap();
// SAFETY: adding offset within the allocation.
let metadata_ptr: *mut <Dyn as Pointee>::Metadata =
alloc.add(metadata_offset).cast();
// SAFETY: `*metadata_ptr` is within the allocation.
metadata_ptr.write(ptr::metadata::<Dyn>(ptr::dangling::<T>() as *const Dyn));

// SAFETY: we have just written the metadata.
&*(metadata_ptr)
}
};

// SAFETY: `alloc` points to `<Dyn as Pointee>::Metadata`, so addition stays in-bounds.
let value_ptr =
unsafe { (alloc as *const <Dyn as Pointee>::Metadata).add(1) }.cast::<T>().cast_mut();
debug_assert!(value_ptr.is_aligned());
mem::forget(value);
WithHeader(NonNull::new(value_ptr.cast()).unwrap(), PhantomData)
}

// Safety:
// - Assumes that either `value` can be dereferenced, or is the
// `NonNull::dangling()` we use when both `T` and `H` are ZSTs.
@@ -300,20 +371,19 @@ impl<H> WithHeader<H> {

impl<H> Drop for DropGuard<H> {
fn drop(&mut self) {
+ // All ZST are allocated statically.
+ if self.value_layout.size() == 0 {
+ return;
+ }

unsafe {
// SAFETY: Layout must have been computable if we're in drop
let (layout, value_offset) =
WithHeader::<H>::alloc_layout(self.value_layout).unwrap_unchecked();

- // Note: Don't deallocate if the layout size is zero, because the pointer
- // didn't come from the allocator.
- if layout.size() != 0 {
- alloc::dealloc(self.ptr.as_ptr().sub(value_offset), layout);
- } else {
- debug_assert!(
- value_offset == 0 && H::IS_ZST && self.value_layout.size() == 0
- );
- }
+ // Since we only allocate for non-ZSTs, the layout size cannot be zero.
+ debug_assert!(layout.size() != 0);
+ alloc::dealloc(self.ptr.as_ptr().sub(value_offset), layout);
}
}
}
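
For readers who want to see what the new `new_unsize_zst` path buys, here is a hedged usage sketch (`ThinBox` is unstable, so this needs a nightly toolchain with the `thin_box` feature): after this change, boxing a zero-sized value behind a `ThinBox` should no longer touch the global allocator, because the header now lives in a const-allocated block.

#![feature(thin_box)]

use std::boxed::ThinBox;

fn main() {
    // A zero-sized slice behind a ThinBox: the metadata (the length) comes
    // from the const-allocated header, not from the global allocator.
    let empty: ThinBox<[u32]> = ThinBox::new_unsize([0u32; 0]);
    assert_eq!(empty.len(), 0);

    // The same applies to a zero-sized value behind a trait object.
    let unit: ThinBox<dyn core::fmt::Debug> = ThinBox::new_unsize(());
    println!("{unit:?}");
}
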
2 changes: 2 additions & 0 deletions library/alloc/src/lib.rs
@@ -108,8 +108,10 @@
#![feature(const_box)]
#![feature(const_cow_is_borrowed)]
#![feature(const_eval_select)]
#![feature(const_heap)]
#![feature(const_maybe_uninit_as_mut_ptr)]
#![feature(const_maybe_uninit_write)]
#![feature(const_option)]
#![feature(const_pin)]
#![feature(const_refs_to_cell)]
#![feature(const_size_of_val)]
2 changes: 1 addition & 1 deletion library/core/src/future/future.rs
@@ -81,7 +81,7 @@ pub trait Future {
/// An implementation of `poll` should strive to return quickly, and should
/// not block. Returning quickly prevents unnecessarily clogging up
/// threads or event loops. If it is known ahead of time that a call to
- /// `poll` may end up taking awhile, the work should be offloaded to a
+ /// `poll` may end up taking a while, the work should be offloaded to a
/// thread pool (or something similar) to ensure that `poll` can return
/// quickly.
///
2 changes: 1 addition & 1 deletion src/librustdoc/clean/auto_trait.rs
@@ -168,7 +168,7 @@ fn clean_param_env<'tcx>(

// FIXME(#111101): Incorporate the explicit predicates of the item here...
let item_predicates: FxIndexSet<_> =
- tcx.predicates_of(item_def_id).predicates.iter().map(|(pred, _)| pred).collect();
+ tcx.param_env(item_def_id).caller_bounds().iter().collect();
let where_predicates = param_env
.caller_bounds()
.iter()
2 changes: 1 addition & 1 deletion src/tools/compiletest/src/header.rs
@@ -266,7 +266,7 @@ impl TestProps {
aux_crates: vec![],
revisions: vec![],
rustc_env: vec![("RUSTC_ICE".to_string(), "0".to_string())],
- unset_rustc_env: vec![],
+ unset_rustc_env: vec![("RUSTC_LOG_COLOR".to_string())],
exec_env: vec![],
unset_exec_env: vec![],
build_aux_docs: false,
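
The effect of the new `unset_rustc_env` entry is roughly the following (a sketch using plain `std::process::Command`, not compiletest's actual spawning code): the variable is removed from the child environment so a developer's `RUSTC_LOG_COLOR` setting cannot leak colored log output into test stderr and perturb output comparisons.

use std::process::Command;

fn main() {
    // Hypothetical standalone illustration: spawn rustc with RUSTC_LOG_COLOR
    // scrubbed from the environment, analogous to what compiletest now does
    // for every test invocation.
    let status = Command::new("rustc")
        .arg("--version")
        .env_remove("RUSTC_LOG_COLOR")
        .status()
        .expect("failed to spawn rustc");
    assert!(status.success());
}
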
13 changes: 13 additions & 0 deletions src/tools/compiletest/src/runtest.rs
@@ -752,6 +752,19 @@ impl<'test> TestCx<'test> {
Lazy::new(|| Regex::new(r"(?m:^)(?<prefix>(?: \|)+ Branch \()[0-9]+:").unwrap());
let coverage = BRANCH_LINE_NUMBER_RE.replace_all(&coverage, "${prefix}LL:");

// ` |---> MC/DC Decision Region (1:30) to (2:` => ` |---> MC/DC Decision Region (LL:30) to (LL:`
static MCDC_DECISION_LINE_NUMBER_RE: Lazy<Regex> = Lazy::new(|| {
Regex::new(r"(?m:^)(?<prefix>(?: \|)+---> MC/DC Decision Region \()[0-9]+:(?<middle>[0-9]+\) to \()[0-9]+:").unwrap()
});
let coverage =
MCDC_DECISION_LINE_NUMBER_RE.replace_all(&coverage, "${prefix}LL:${middle}LL:");

// ` | Condition C1 --> (1:` => ` | Condition C1 --> (LL:`
static MCDC_CONDITION_LINE_NUMBER_RE: Lazy<Regex> = Lazy::new(|| {
Regex::new(r"(?m:^)(?<prefix>(?: \|)+ Condition C[0-9]+ --> \()[0-9]+:").unwrap()
});
let coverage = MCDC_CONDITION_LINE_NUMBER_RE.replace_all(&coverage, "${prefix}LL:");

coverage.into_owned()
}

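
As a standalone sketch of what the new regexes do (assuming only a reasonably recent `regex` crate; compiletest wraps the same pattern in a `Lazy` static as shown above), the line part of every `line:column` pair in an MC/DC decision region is rewritten to `LL`, so test expectations stay stable when line numbers shift:

use regex::Regex;

fn main() {
    // Same pattern as MCDC_DECISION_LINE_NUMBER_RE above.
    let re = Regex::new(
        r"(?m:^)(?<prefix>(?: \|)+---> MC/DC Decision Region \()[0-9]+:(?<middle>[0-9]+\) to \()[0-9]+:",
    )
    .unwrap();

    let raw = " |---> MC/DC Decision Region (1:30) to (2:4)";
    let normalized = re.replace_all(raw, "${prefix}LL:${middle}LL:");
    assert_eq!(normalized, " |---> MC/DC Decision Region (LL:30) to (LL:4)");
}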