Auto merge of rust-lang#121407 - matthiaskrgr:rollup-gf2ohp5, r=matthiaskrgr

Rollup of 6 pull requests

Successful merges:

 - rust-lang#119636 (os::net: expanding TcpStreamExt for Linux with `tcp_deferaccept`.) See the socket-option sketch after this description.
 - rust-lang#121261 (coverage: Remove `pending_dups` from the span refiner)
 - rust-lang#121336 (triagebot: add queue notifications)
 - rust-lang#121391 (never patterns: Fix liveness analysis in the presence of never patterns)
 - rust-lang#121399 (Solaris linker does not support --strip-debug)
 - rust-lang#121406 (Add a couple tests)

Failed merges:

 - rust-lang#121206 (Top level error handling)

r? `@ghost`
`@rustbot` modify labels: rollup
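
For context on the `tcp_deferaccept` item above, here is a minimal sketch of the underlying Linux socket option. It goes through the `libc` crate rather than the new `TcpStreamExt` accessors added by rust-lang#119636, since those methods are not shown in this rollup; it assumes a Linux target and a `libc` dependency, and sets the option on a listening socket, where it is most commonly used.

```rust
// Sketch only: enable TCP_DEFER_ACCEPT on a listening socket via libc.
// This is the socket option that the new TcpStreamExt accessors wrap on Linux.
use std::net::TcpListener;
use std::os::fd::AsRawFd;

fn set_defer_accept(listener: &TcpListener, seconds: libc::c_int) -> std::io::Result<()> {
    // TCP_DEFER_ACCEPT: don't wake the accepting process until data arrives
    // on the connection, or until `seconds` have passed.
    let ret = unsafe {
        libc::setsockopt(
            listener.as_raw_fd(),
            libc::IPPROTO_TCP,
            libc::TCP_DEFER_ACCEPT,
            &seconds as *const libc::c_int as *const libc::c_void,
            std::mem::size_of::<libc::c_int>() as libc::socklen_t,
        )
    };
    if ret == 0 { Ok(()) } else { Err(std::io::Error::last_os_error()) }
}

fn main() -> std::io::Result<()> {
    let listener = TcpListener::bind("127.0.0.1:0")?;
    set_defer_accept(&listener, 1)?;
    Ok(())
}
```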
bors committed Feb 21, 2024
2 parents f8131a4 + d3d0ae7 commit a4d2b17
Showing 15 changed files with 225 additions and 184 deletions.
2 changes: 1 addition & 1 deletion compiler/rustc_codegen_ssa/src/back/linker.rs
@@ -626,7 +626,7 @@ impl<'a> Linker for GccLinker<'a> {
// it does support --strip-all as a compatibility alias for -s.
// The --strip-debug case is handled by running an external
// `strip` utility as a separate step after linking.
if self.sess.target.os != "illumos" {
if !self.sess.target.is_like_solaris {
self.linker_arg("--strip-debug");
}
}
11 changes: 9 additions & 2 deletions compiler/rustc_hir/src/pat_util.rs
@@ -71,14 +71,21 @@ impl hir::Pat<'_> {
/// Call `f` on every "binding" in a pattern, e.g., on `a` in
/// `match foo() { Some(a) => (), None => () }`.
///
/// When encountering an or-pattern `p_0 | ... | p_n` only `p_0` will be visited.
/// When encountering an or-pattern `p_0 | ... | p_n` only the first non-never pattern will be
/// visited. If they're all never patterns we visit nothing, which is ok since a never pattern
/// cannot have bindings.
pub fn each_binding_or_first(
&self,
f: &mut impl FnMut(hir::BindingAnnotation, HirId, Span, Ident),
) {
self.walk(|p| match &p.kind {
PatKind::Or(ps) => {
ps[0].each_binding_or_first(f);
for p in *ps {
if !p.is_never_pattern() {
p.each_binding_or_first(f);
break;
}
}
false
}
PatKind::Binding(bm, _, ident, _) => {
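
A standalone sketch of the visiting rule described in the doc comment above, using a toy pattern enum rather than the real `rustc_hir` types; it only illustrates the "first non-never alternative" behaviour and is not the compiler's implementation.

```rust
// Toy pattern type; `Never` stands for the `!` pattern.
#[derive(Debug)]
enum Pat {
    Never,
    Binding(String),
    Or(Vec<Pat>),
}

fn is_never(p: &Pat) -> bool {
    matches!(p, Pat::Never)
}

// Visit bindings, taking only the first non-never alternative of an or-pattern.
fn each_binding_or_first(pat: &Pat, f: &mut impl FnMut(&str)) {
    match pat {
        Pat::Never => {}
        Pat::Binding(name) => f(name),
        Pat::Or(alts) => {
            // If every alternative is a never pattern there is nothing to
            // visit: never patterns cannot contain bindings.
            if let Some(first) = alts.iter().find(|p| !is_never(p)) {
                each_binding_or_first(first, f);
            }
        }
    }
}

fn main() {
    let pat = Pat::Or(vec![Pat::Never, Pat::Binding("a".into()), Pat::Binding("b".into())]);
    let mut names = Vec::new();
    each_binding_or_first(&pat, &mut |n: &str| names.push(n.to_string()));
    assert_eq!(names, ["a"]); // only the first non-never alternative is visited
}
```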
180 changes: 16 additions & 164 deletions compiler/rustc_mir_transform/src/coverage/spans.rs
@@ -61,7 +61,7 @@ pub(super) fn generate_coverage_spans(
hir_info,
basic_coverage_blocks,
);
let coverage_spans = SpansRefiner::refine_sorted_spans(basic_coverage_blocks, sorted_spans);
let coverage_spans = SpansRefiner::refine_sorted_spans(sorted_spans);
mappings.extend(coverage_spans.into_iter().map(|RefinedCovspan { bcb, span, .. }| {
// Each span produced by the generator represents an ordinary code region.
BcbMapping { kind: BcbMappingKind::Code(bcb), span }
@@ -88,8 +88,6 @@ pub(super) fn generate_coverage_spans(

#[derive(Debug)]
struct CurrCovspan {
/// This is used as the basis for [`PrevCovspan::original_span`], so it must
/// not be modified.
span: Span,
bcb: BasicCoverageBlock,
is_closure: bool,
@@ -102,7 +100,7 @@ impl CurrCovspan {

fn into_prev(self) -> PrevCovspan {
let Self { span, bcb, is_closure } = self;
PrevCovspan { original_span: span, span, bcb, merged_spans: vec![span], is_closure }
PrevCovspan { span, bcb, merged_spans: vec![span], is_closure }
}

fn into_refined(self) -> RefinedCovspan {
@@ -115,7 +113,6 @@

#[derive(Debug)]
struct PrevCovspan {
original_span: Span,
span: Span,
bcb: BasicCoverageBlock,
/// List of all the original spans from MIR that have been merged into this
@@ -135,42 +132,17 @@ impl PrevCovspan {
self.merged_spans.push(other.span);
}

fn cutoff_statements_at(&mut self, cutoff_pos: BytePos) {
fn cutoff_statements_at(mut self, cutoff_pos: BytePos) -> Option<RefinedCovspan> {
self.merged_spans.retain(|span| span.hi() <= cutoff_pos);
if let Some(max_hi) = self.merged_spans.iter().map(|span| span.hi()).max() {
self.span = self.span.with_hi(max_hi);
}
}

fn into_dup(self) -> DuplicateCovspan {
let Self { original_span, span, bcb, merged_spans: _, is_closure } = self;
// Only unmodified spans end up in `pending_dups`.
debug_assert_eq!(original_span, span);
DuplicateCovspan { span, bcb, is_closure }
}

fn refined_copy(&self) -> RefinedCovspan {
let &Self { original_span: _, span, bcb, merged_spans: _, is_closure } = self;
RefinedCovspan { span, bcb, is_closure }
}

fn into_refined(self) -> RefinedCovspan {
self.refined_copy()
if self.merged_spans.is_empty() { None } else { Some(self.into_refined()) }
}
}

#[derive(Debug)]
struct DuplicateCovspan {
span: Span,
bcb: BasicCoverageBlock,
is_closure: bool,
}

impl DuplicateCovspan {
/// Returns a copy of this covspan, as a [`RefinedCovspan`].
/// Should only be called in places that would otherwise clone this covspan.
fn refined_copy(&self) -> RefinedCovspan {
let &Self { span, bcb, is_closure } = self;
let &Self { span, bcb, merged_spans: _, is_closure } = self;
RefinedCovspan { span, bcb, is_closure }
}

@@ -205,10 +177,7 @@ impl RefinedCovspan {
/// * Merge spans that represent continuous (both in source code and control flow), non-branching
/// execution
/// * Carve out (leave uncovered) any span that will be counted by another MIR (notably, closures)
struct SpansRefiner<'a> {
/// The BasicCoverageBlock Control Flow Graph (BCB CFG).
basic_coverage_blocks: &'a CoverageGraph,

struct SpansRefiner {
/// The initial set of coverage spans, sorted by `Span` (`lo` and `hi`) and by relative
/// dominance between the `BasicCoverageBlock`s of equal `Span`s.
sorted_spans_iter: std::vec::IntoIter<SpanFromMir>,
@@ -223,36 +192,22 @@ struct SpansRefiner<'a> {
/// If that `curr` was discarded, `prev` retains its value from the previous iteration.
some_prev: Option<PrevCovspan>,

/// One or more coverage spans with the same `Span` but different `BasicCoverageBlock`s, and
/// no `BasicCoverageBlock` in this list dominates another `BasicCoverageBlock` in the list.
/// If a new `curr` span also fits this criteria (compared to an existing list of
/// `pending_dups`), that `curr` moves to `prev` before possibly being added to
/// the `pending_dups` list, on the next iteration. As a result, if `prev` and `pending_dups`
/// have the same `Span`, the criteria for `pending_dups` holds for `prev` as well: a `prev`
/// with a matching `Span` does not dominate any `pending_dup` and no `pending_dup` dominates a
/// `prev` with a matching `Span`)
pending_dups: Vec<DuplicateCovspan>,

/// The final coverage spans to add to the coverage map. A `Counter` or `Expression`
/// will also be injected into the MIR for each BCB that has associated spans.
refined_spans: Vec<RefinedCovspan>,
}

impl<'a> SpansRefiner<'a> {
impl SpansRefiner {
/// Takes the initial list of (sorted) spans extracted from MIR, and "refines"
/// them by merging compatible adjacent spans, removing redundant spans,
/// and carving holes in spans when they overlap in unwanted ways.
fn refine_sorted_spans(
basic_coverage_blocks: &'a CoverageGraph,
sorted_spans: Vec<SpanFromMir>,
) -> Vec<RefinedCovspan> {
fn refine_sorted_spans(sorted_spans: Vec<SpanFromMir>) -> Vec<RefinedCovspan> {
let sorted_spans_len = sorted_spans.len();
let this = Self {
basic_coverage_blocks,
sorted_spans_iter: sorted_spans.into_iter(),
some_curr: None,
some_prev: None,
pending_dups: Vec::new(),
refined_spans: Vec::with_capacity(basic_coverage_blocks.num_nodes() * 2),
refined_spans: Vec::with_capacity(sorted_spans_len),
};

this.to_refined_spans()
@@ -292,21 +247,11 @@ impl<'a> SpansRefiner<'a> {
self.take_curr(); // Discards curr.
} else if curr.is_closure {
self.carve_out_span_for_closure();
} else if prev.original_span == prev.span && prev.span == curr.span {
// Prev and curr have the same span, and prev's span hasn't
// been modified by other spans.
self.update_pending_dups();
} else {
self.cutoff_prev_at_overlapping_curr();
}
}

// Drain any remaining dups into the output.
for dup in self.pending_dups.drain(..) {
debug!(" ...adding at least one pending dup={:?}", dup);
self.refined_spans.push(dup.into_refined());
}

// There is usually a final span remaining in `prev` after the loop ends,
// so add it to the output as well.
if let Some(prev) = self.some_prev.take() {
@@ -359,36 +304,6 @@ impl<'a> SpansRefiner<'a> {
self.some_prev.take().unwrap_or_else(|| bug!("some_prev is None (take_prev)"))
}

/// If there are `pending_dups` but `prev` is not a matching dup (`prev.span` doesn't match the
/// `pending_dups` spans), then one of the following two things happened during the previous
/// iteration:
/// * the previous `curr` span (which is now `prev`) was not a duplicate of the pending_dups
/// (in which case there should be at least two spans in `pending_dups`); or
/// * the `span` of `prev` was modified by `curr_mut().merge_from(prev)` (in which case
/// `pending_dups` could have as few as one span)
/// In either case, no more spans will match the span of `pending_dups`, so
/// add the `pending_dups` if they don't overlap `curr`, and clear the list.
fn maybe_flush_pending_dups(&mut self) {
let Some(last_dup) = self.pending_dups.last() else { return };
if last_dup.span == self.prev().span {
return;
}

debug!(
" SAME spans, but pending_dups are NOT THE SAME, so BCBs matched on \
previous iteration, or prev started a new disjoint span"
);
if last_dup.span.hi() <= self.curr().span.lo() {
for dup in self.pending_dups.drain(..) {
debug!(" ...adding at least one pending={:?}", dup);
self.refined_spans.push(dup.into_refined());
}
} else {
self.pending_dups.clear();
}
assert!(self.pending_dups.is_empty());
}

/// Advance `prev` to `curr` (if any), and `curr` to the next coverage span in sorted order.
fn next_coverage_span(&mut self) -> bool {
if let Some(curr) = self.some_curr.take() {
@@ -408,7 +323,6 @@
);
} else {
self.some_curr = Some(CurrCovspan::new(curr.span, curr.bcb, curr.is_closure));
self.maybe_flush_pending_dups();
return true;
}
}
@@ -433,13 +347,6 @@
let mut pre_closure = self.prev().refined_copy();
pre_closure.span = pre_closure.span.with_hi(left_cutoff);
debug!(" prev overlaps a closure. Adding span for pre_closure={:?}", pre_closure);

for mut dup in self.pending_dups.iter().map(DuplicateCovspan::refined_copy) {
dup.span = dup.span.with_hi(left_cutoff);
debug!(" ...and at least one pre_closure dup={:?}", dup);
self.refined_spans.push(dup);
}

self.refined_spans.push(pre_closure);
}

@@ -448,58 +355,9 @@
self.prev_mut().span = self.prev().span.with_lo(right_cutoff);
debug!(" Mutated prev.span to start after the closure. prev={:?}", self.prev());

for dup in &mut self.pending_dups {
debug!(" ...and at least one overlapping dup={:?}", dup);
dup.span = dup.span.with_lo(right_cutoff);
}

// Prevent this curr from becoming prev.
let closure_covspan = self.take_curr().into_refined();
self.refined_spans.push(closure_covspan); // since self.prev() was already updated
} else {
self.pending_dups.clear();
}
}

/// Called if `curr.span` equals `prev.original_span` (and potentially equal to all
/// `pending_dups` spans, if any). Keep in mind, `prev.span()` may have been changed.
/// If prev.span() was merged into other spans (with matching BCB, for instance),
/// `prev.span.hi()` will be greater than (further right of) `prev.original_span.hi()`.
/// If prev.span() was split off to the right of a closure, prev.span().lo() will be
/// greater than prev.original_span.lo(). The actual span of `prev.original_span` is
/// not as important as knowing that `prev()` **used to have the same span** as `curr()`,
/// which means their sort order is still meaningful for determining the dominator
/// relationship.
///
/// When two coverage spans have the same `Span`, dominated spans can be discarded; but if
/// neither coverage span dominates the other, both (or possibly more than two) are held,
/// until their disposition is determined. In this latter case, the `prev` dup is moved into
/// `pending_dups` so the new `curr` dup can be moved to `prev` for the next iteration.
fn update_pending_dups(&mut self) {
let prev_bcb = self.prev().bcb;
let curr_bcb = self.curr().bcb;

// Equal coverage spans are ordered by dominators before dominated (if any), so it should be
// impossible for `curr` to dominate any previous coverage span.
debug_assert!(!self.basic_coverage_blocks.dominates(curr_bcb, prev_bcb));

// `prev` is a duplicate of `curr`, so add it to the list of pending dups.
// If it dominates `curr`, it will be removed by the subsequent discard step.
let prev = self.take_prev().into_dup();
debug!(?prev, "adding prev to pending dups");
self.pending_dups.push(prev);

let initial_pending_count = self.pending_dups.len();
if initial_pending_count > 0 {
self.pending_dups
.retain(|dup| !self.basic_coverage_blocks.dominates(dup.bcb, curr_bcb));

let n_discarded = initial_pending_count - self.pending_dups.len();
if n_discarded > 0 {
debug!(
" discarded {n_discarded} of {initial_pending_count} pending_dups that dominated curr",
);
}
}
}
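
A reduced sketch of the closure carve-out shown in the hunks above, again on a plain `(lo, hi)` span type. The real `carve_out_span_for_closure` mutates `prev` in place and works through `refined_copy`, so treat this only as an illustration of the splitting logic.

```rust
// Toy illustration: when `curr` is a closure span overlapping `prev`, emit
// the part of `prev` left of the closure, keep the part right of it for
// further refinement, and emit the closure span itself.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Span { lo: u32, hi: u32 }

// Returns (emitted spans, remaining `prev`, if any).
fn carve_out_closure(prev: Span, closure: Span) -> (Vec<Span>, Option<Span>) {
    let mut emitted = Vec::new();
    if prev.lo < closure.lo {
        // Left of the closure: still covered by this MIR body, so keep it.
        emitted.push(Span { lo: prev.lo, hi: closure.lo });
    }
    // The closure body is counted by its own MIR, so its span is emitted
    // separately and left "carved out" of `prev`.
    emitted.push(closure);
    let rest = (closure.hi < prev.hi).then_some(Span { lo: closure.hi, hi: prev.hi });
    (emitted, rest)
}

fn main() {
    let (emitted, rest) = carve_out_closure(Span { lo: 0, hi: 10 }, Span { lo: 3, hi: 6 });
    assert_eq!(emitted, vec![Span { lo: 0, hi: 3 }, Span { lo: 3, hi: 6 }]);
    assert_eq!(rest, Some(Span { lo: 6, hi: 10 }));
}
```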

@@ -516,19 +374,13 @@ impl<'a> SpansRefiner<'a> {
if it has statements that end before curr; prev={:?}",
self.prev()
);
if self.pending_dups.is_empty() {
let curr_span = self.curr().span;
self.prev_mut().cutoff_statements_at(curr_span.lo());
if self.prev().merged_spans.is_empty() {
debug!(" ... no non-overlapping statements to add");
} else {
debug!(" ... adding modified prev={:?}", self.prev());
let prev = self.take_prev().into_refined();
self.refined_spans.push(prev);
}

let curr_span = self.curr().span;
if let Some(prev) = self.take_prev().cutoff_statements_at(curr_span.lo()) {
debug!("after cutoff, adding {prev:?}");
self.refined_spans.push(prev);
} else {
// with `pending_dups`, `prev` cannot have any statements that don't overlap
self.pending_dups.clear();
debug!("prev was eliminated by cutoff");
}
}
}
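
To summarise the control flow of `to_refined_spans` after this change, here is a heavily reduced model of the curr/prev loop: spans from the same block merge, and an overlapped `prev` is truncated where the next span begins. Closures, dominator checks and the removed `pending_dups` handling are all left out, so this is a sketch of the shape of the loop, not the refiner itself.

```rust
// Very reduced model of the curr/prev refinement loop; `bcb` is just a number.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Covspan { lo: u32, hi: u32, bcb: u32 }

fn refine(sorted: Vec<Covspan>) -> Vec<Covspan> {
    let mut refined = Vec::new();
    let mut iter = sorted.into_iter();
    let Some(mut prev) = iter.next() else { return refined };
    for curr in iter {
        if curr.bcb == prev.bcb && curr.lo <= prev.hi {
            // Same block and contiguous in source: merge into `prev`.
            prev.hi = prev.hi.max(curr.hi);
        } else {
            // Different block: emit `prev`, truncated where `curr` begins.
            prev.hi = prev.hi.min(curr.lo);
            if prev.lo < prev.hi {
                refined.push(prev);
            }
            prev = curr;
        }
    }
    // The final `prev` is flushed after the loop, as in the real refiner.
    refined.push(prev);
    refined
}

fn main() {
    let sorted = vec![
        Covspan { lo: 0, hi: 3, bcb: 0 },
        Covspan { lo: 3, hi: 6, bcb: 0 },
        Covspan { lo: 5, hi: 9, bcb: 1 },
    ];
    assert_eq!(refine(sorted), vec![
        Covspan { lo: 0, hi: 5, bcb: 0 },
        Covspan { lo: 5, hi: 9, bcb: 1 },
    ]);
}
```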
15 changes: 10 additions & 5 deletions compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
@@ -52,14 +52,19 @@ pub(super) fn mir_to_initial_sorted_coverage_spans(
// - Span A extends further left, or
// - Both have the same start and span A extends further right
.then_with(|| Ord::cmp(&a.span.hi(), &b.span.hi()).reverse())
// If both spans are equal, sort the BCBs in dominator order,
// so that dominating BCBs come before other BCBs they dominate.
.then_with(|| basic_coverage_blocks.cmp_in_dominator_order(a.bcb, b.bcb))
// If two spans are otherwise identical, put closure spans first,
// as this seems to be what the refinement step expects.
// If two spans have the same lo & hi, put closure spans first,
// as they take precedence over non-closure spans.
.then_with(|| Ord::cmp(&a.is_closure, &b.is_closure).reverse())
// After deduplication, we want to keep only the most-dominated BCB.
.then_with(|| basic_coverage_blocks.cmp_in_dominator_order(a.bcb, b.bcb).reverse())
});

// Among covspans with the same span, keep only one. Closure spans take
// precedence, otherwise keep the one with the most-dominated BCB.
// (Ideally we should try to preserve _all_ non-dominating BCBs, but that
// requires a lot more complexity in the span refiner, for little benefit.)
initial_spans.dedup_by(|b, a| a.span.source_equal(b.span));

initial_spans
}

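
A toy version of the sort-plus-dedup above, with numeric fields standing in for `Span` and `BasicCoverageBlock`; `dom_rank` is an assumed stand-in for `cmp_in_dominator_order`, where a higher rank means more dominated.

```rust
// Sort covspans, then keep only one covspan per identical source range.
#[derive(Clone, Debug, PartialEq)]
struct Covspan { lo: u32, hi: u32, is_closure: bool, dom_rank: u32 }

fn sort_and_dedup(mut spans: Vec<Covspan>) -> Vec<Covspan> {
    spans.sort_by(|a, b| {
        // Earlier start first; for equal starts, the longer span first.
        Ord::cmp(&a.lo, &b.lo)
            .then_with(|| Ord::cmp(&a.hi, &b.hi).reverse())
            // For identical ranges, closure spans take precedence...
            .then_with(|| Ord::cmp(&a.is_closure, &b.is_closure).reverse())
            // ...otherwise prefer the most-dominated block.
            .then_with(|| Ord::cmp(&a.dom_rank, &b.dom_rank).reverse())
    });
    // `dedup_by` keeps the first element of each run of equal spans, i.e. the
    // one the sort ranked highest.
    spans.dedup_by(|b, a| a.lo == b.lo && a.hi == b.hi);
    spans
}

fn main() {
    let spans = vec![
        Covspan { lo: 0, hi: 5, is_closure: false, dom_rank: 1 },
        Covspan { lo: 0, hi: 5, is_closure: true, dom_rank: 0 },
    ];
    let kept = sort_and_dedup(spans);
    // The closure span wins over the ordinary span with the same range.
    assert!(kept.len() == 1 && kept[0].is_closure);
}
```

Because `Vec::dedup_by` keeps the earlier element of each equal run, the sort order alone decides which covspan survives deduplication.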
4 changes: 2 additions & 2 deletions compiler/rustc_passes/src/liveness.rs
@@ -526,8 +526,8 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
}

fn define_bindings_in_pat(&mut self, pat: &hir::Pat<'_>, mut succ: LiveNode) -> LiveNode {
// In an or-pattern, only consider the first pattern; any later patterns
// must have the same bindings, and we also consider the first pattern
// In an or-pattern, only consider the first non-never pattern; any later patterns
// must have the same bindings, and we also consider that pattern
// to be the "authoritative" set of ids.
pat.each_binding_or_first(&mut |_, hir_id, pat_sp, ident| {
let ln = self.live_node(hir_id, pat_sp);
