Use a QueryContext for try_mark_green.
cjgillot committed Feb 19, 2021
1 parent 3bd14c7 commit b27266f
Showing 10 changed files with 133 additions and 143 deletions.
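In outline: the green-marking entry points move from the broad `DepContext` trait (implemented directly by `TyCtxt`) to the query-aware `QueryContext` trait (implemented by `QueryCtxt`), since forcing dep nodes and replaying diagnostics need the query machinery. The sketch below is a toy model of that split, not rustc's actual definitions; every type here is a simplified stand-in.

```rust
// Toy model of the trait split; names mirror rustc's, definitions do not.
struct DepNode;
struct DepGraph;

// Broad context: anything that can reach the dep graph.
trait DepContext: Copy {
    fn dep_graph(&self) -> &DepGraph;
}

// Narrow context: operations that need the query engine. After this
// commit, forcing nodes and handling diagnostics live here.
trait QueryContext: DepContext {
    fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool;
}

impl DepGraph {
    // The marking routines now demand the query-aware context.
    fn try_mark_green<Ctxt: QueryContext>(&self, tcx: Ctxt, dep_node: &DepNode) -> Option<usize> {
        // Real algorithm elided: walk the node's dependencies, forcing
        // where necessary, and return its index when everything is green.
        if tcx.try_force_from_dep_node(dep_node) { Some(0) } else { None }
    }
}

#[derive(Clone, Copy)]
struct Qcx<'a> {
    graph: &'a DepGraph,
}

impl DepContext for Qcx<'_> {
    fn dep_graph(&self) -> &DepGraph {
        self.graph
    }
}

impl QueryContext for Qcx<'_> {
    fn try_force_from_dep_node(&self, _dep_node: &DepNode) -> bool {
        true // pretend the query re-ran and came out green
    }
}

fn main() {
    let graph = DepGraph;
    let qcx = Qcx { graph: &graph };
    assert!(graph.try_mark_green(qcx, &DepNode).is_some());
}
```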
6 changes: 1 addition & 5 deletions compiler/rustc_codegen_cranelift/src/driver/aot.rs
@@ -465,9 +465,5 @@ fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguReuse {
         cgu.name()
     );
 
-    if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() {
-        CguReuse::PreLto
-    } else {
-        CguReuse::No
-    }
+    if tcx.try_mark_green(&dep_node) { CguReuse::PreLto } else { CguReuse::No }
 }
2 changes: 1 addition & 1 deletion compiler/rustc_codegen_ssa/src/base.rs
@@ -867,7 +867,7 @@ fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguReuse {
         cgu.name()
     );
 
-    if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() {
+    if tcx.try_mark_green(&dep_node) {
         // We can re-use either the pre- or the post-thinlto state. If no LTO is
         // being performed then we can use post-LTO artifacts, otherwise we must
         // reuse pre-LTO artifacts
2 changes: 1 addition & 1 deletion compiler/rustc_middle/src/dep_graph/dep_node.rs
@@ -135,7 +135,7 @@ pub struct DepKindStruct {
     /// then `force_from_dep_node()` should not fail for it. Otherwise, you can just
     /// add it to the "We don't have enough information to reconstruct..." group in
     /// the match below.
-    pub(super) force_from_dep_node: fn(tcx: TyCtxt<'_>, dep_node: &DepNode) -> bool,
+    pub(crate) force_from_dep_node: fn(tcx: TyCtxt<'_>, dep_node: &DepNode) -> bool,
 
     /// Invoke a query to put the on-disk cached value in memory.
     pub(crate) try_load_from_on_disk_cache: fn(QueryCtxt<'_>, &DepNode),
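Only the visibility changes here: the `force_from_dep_node` pointer is about to be called from `QueryCtxt` code in `ty::query::plumbing`, a different module subtree of the same crate, which `pub(super)` cannot reach. A minimal illustration of the difference, with hypothetical module names:

```rust
mod dep_graph {
    pub mod dep_node {
        pub(crate) struct DepKindStruct {
            // pub(super) would be visible only within `dep_graph` and its
            // children; pub(crate) is visible anywhere in this crate.
            pub(crate) force_from_dep_node: fn() -> bool,
        }
    }
}

mod ty {
    pub mod plumbing {
        // A sibling module tree can only name the field via pub(crate).
        pub fn force(k: &crate::dep_graph::dep_node::DepKindStruct) -> bool {
            (k.force_from_dep_node)()
        }
    }
}
```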
95 changes: 0 additions & 95 deletions compiler/rustc_middle/src/dep_graph/mod.rs
@@ -2,9 +2,6 @@ use crate::ich::StableHashingContext;
 use crate::ty::{self, TyCtxt};
 use rustc_data_structures::profiling::SelfProfilerRef;
 use rustc_data_structures::sync::Lock;
-use rustc_data_structures::thin_vec::ThinVec;
-use rustc_errors::Diagnostic;
-use rustc_hir::def_id::LocalDefId;
 
 mod dep_node;
 
@@ -116,99 +113,7 @@ impl<'tcx> DepContext for TyCtxt<'tcx> {
         &self.dep_graph
     }
 
-    fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool {
-        // FIXME: This match is just a workaround for incremental bugs and should
-        // be removed. https://github.com/rust-lang/rust/issues/62649 is one such
-        // bug that must be fixed before removing this.
-        match dep_node.kind {
-            DepKind::hir_owner | DepKind::hir_owner_nodes => {
-                if let Some(def_id) = dep_node.extract_def_id(*self) {
-                    if !def_id_corresponds_to_hir_dep_node(*self, def_id.expect_local()) {
-                        // This `DefPath` does not have a
-                        // corresponding `DepNode` (e.g. a
-                        // struct field), and the `DefPath`
-                        // collided with the `DefPath` of a
-                        // proper item that existed in the
-                        // previous compilation session.
-                        //
-                        // Since the given `DefPath` does not
-                        // denote the item that previously
-                        // existed, we just fail to mark green.
-                        return false;
-                    }
-                } else {
-                    // If the node does not exist anymore, we
-                    // just fail to mark green.
-                    return false;
-                }
-            }
-            _ => {
-                // For other kinds of nodes it's OK to be
-                // forced.
-            }
-        }
-
-        debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);
-
-        // We must avoid ever having to call `force_from_dep_node()` for a
-        // `DepNode::codegen_unit`:
-        // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
-        // would always end up having to evaluate the first caller of the
-        // `codegen_unit` query that *is* reconstructible. This might very well be
-        // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
-        // to re-trigger calling the `codegen_unit` query with the right key. At
-        // that point we would already have re-done all the work we are trying to
-        // avoid doing in the first place.
-        // The solution is simple: Just explicitly call the `codegen_unit` query for
-        // each CGU, right after partitioning. This way `try_mark_green` will always
-        // hit the cache instead of having to go through `force_from_dep_node`.
-        // This assertion makes sure we actually keep applying the solution above.
-        debug_assert!(
-            dep_node.kind != DepKind::codegen_unit,
-            "calling force_from_dep_node() on DepKind::codegen_unit"
-        );
-
-        (dep_node.kind.force_from_dep_node)(*self, dep_node)
-    }
-
-    fn has_errors_or_delayed_span_bugs(&self) -> bool {
-        self.sess.has_errors_or_delayed_span_bugs()
-    }
-
-    fn diagnostic(&self) -> &rustc_errors::Handler {
-        self.sess.diagnostic()
-    }
-
-    // Interactions with on_disk_cache
-    fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic> {
-        self.on_disk_cache
-            .as_ref()
-            .map(|c| c.load_diagnostics(*self, prev_dep_node_index))
-            .unwrap_or_default()
-    }
-
-    fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>) {
-        if let Some(c) = self.on_disk_cache.as_ref() {
-            c.store_diagnostics(dep_node_index, diagnostics)
-        }
-    }
-
-    fn store_diagnostics_for_anon_node(
-        &self,
-        dep_node_index: DepNodeIndex,
-        diagnostics: ThinVec<Diagnostic>,
-    ) {
-        if let Some(c) = self.on_disk_cache.as_ref() {
-            c.store_diagnostics_for_anon_node(dep_node_index, diagnostics)
-        }
-    }
-
     fn profiler(&self) -> &SelfProfilerRef {
         &self.prof
     }
 }
-
-fn def_id_corresponds_to_hir_dep_node(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
-    let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
-    def_id == hir_id.owner
-}
5 changes: 5 additions & 0 deletions compiler/rustc_middle/src/ty/query/mod.rs
@@ -119,6 +119,11 @@ impl TyCtxt<'tcx> {
     pub fn at(self, span: Span) -> TyCtxtAt<'tcx> {
         TyCtxtAt { tcx: self, span }
     }
+
+    pub fn try_mark_green(self, dep_node: &dep_graph::DepNode) -> bool {
+        let qcx = QueryCtxt { tcx: self, queries: self.queries };
+        self.dep_graph.try_mark_green(qcx, dep_node).is_some()
+    }
 }
 
 macro_rules! define_callbacks {
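This wrapper is what lets the codegen call sites above shrink to `tcx.try_mark_green(&dep_node)`: constructing the `QueryCtxt` and collapsing the returned `Option` to a `bool` both move behind one method. A self-contained sketch of the same facade shape, with toy types and assumed field names:

```rust
struct DepNode;
struct DepGraph;

impl DepGraph {
    // Stand-in for the real marking entry point, which returns node
    // indices on success; many callers only care about success itself.
    fn try_mark_green<Q>(&self, _qcx: Q, _dep_node: &DepNode) -> Option<(u32, u32)> {
        Some((0, 0))
    }
}

#[derive(Clone, Copy)]
struct Tcx<'a> {
    dep_graph: &'a DepGraph,
}

#[derive(Clone, Copy)]
struct QueryCtxt<'a> {
    tcx: Tcx<'a>,
}

impl<'a> Tcx<'a> {
    // Facade mirroring the new TyCtxt::try_mark_green: build the query
    // context internally and expose only the boolean answer.
    fn try_mark_green(self, dep_node: &DepNode) -> bool {
        let qcx = QueryCtxt { tcx: self };
        self.dep_graph.try_mark_green(qcx, dep_node).is_some()
    }
}

fn main() {
    let graph = DepGraph;
    let tcx = Tcx { dep_graph: &graph };
    // Exactly the shape of the new call sites in aot.rs and base.rs.
    if tcx.try_mark_green(&DepNode) { /* reuse artifacts */ }
}
```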
91 changes: 90 additions & 1 deletion compiler/rustc_middle/src/ty/query/plumbing.rs
@@ -2,7 +2,7 @@
 //! generate the actual methods on tcx which find and execute the provider,
 //! manage the caches, and so forth.
 
-use crate::dep_graph;
+use crate::dep_graph::{self, DepKind, DepNode, DepNodeExt, DepNodeIndex, SerializedDepNodeIndex};
 use crate::ty::query::{on_disk_cache, Queries, Query};
 use crate::ty::tls::{self, ImplicitCtxt};
 use crate::ty::{self, TyCtxt};
@@ -72,6 +72,95 @@ impl QueryContext for QueryCtxt<'tcx> {
         (dep_node.kind.try_load_from_on_disk_cache)(*self, dep_node)
     }
 
+    fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool {
+        // FIXME: This match is just a workaround for incremental bugs and should
+        // be removed. https://github.com/rust-lang/rust/issues/62649 is one such
+        // bug that must be fixed before removing this.
+        match dep_node.kind {
+            DepKind::hir_owner | DepKind::hir_owner_nodes => {
+                if let Some(def_id) = dep_node.extract_def_id(**self) {
+                    let def_id = def_id.expect_local();
+                    let hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id);
+                    if def_id != hir_id.owner {
+                        // This `DefPath` does not have a
+                        // corresponding `DepNode` (e.g. a
+                        // struct field), and the `DefPath`
+                        // collided with the `DefPath` of a
+                        // proper item that existed in the
+                        // previous compilation session.
+                        //
+                        // Since the given `DefPath` does not
+                        // denote the item that previously
+                        // existed, we just fail to mark green.
+                        return false;
+                    }
+                } else {
+                    // If the node does not exist anymore, we
+                    // just fail to mark green.
+                    return false;
+                }
+            }
+            _ => {
+                // For other kinds of nodes it's OK to be
+                // forced.
+            }
+        }
+
+        debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);
+
+        // We must avoid ever having to call `force_from_dep_node()` for a
+        // `DepNode::codegen_unit`:
+        // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
+        // would always end up having to evaluate the first caller of the
+        // `codegen_unit` query that *is* reconstructible. This might very well be
+        // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
+        // to re-trigger calling the `codegen_unit` query with the right key. At
+        // that point we would already have re-done all the work we are trying to
+        // avoid doing in the first place.
+        // The solution is simple: Just explicitly call the `codegen_unit` query for
+        // each CGU, right after partitioning. This way `try_mark_green` will always
+        // hit the cache instead of having to go through `force_from_dep_node`.
+        // This assertion makes sure we actually keep applying the solution above.
+        debug_assert!(
+            dep_node.kind != DepKind::codegen_unit,
+            "calling force_from_dep_node() on DepKind::codegen_unit"
+        );
+
+        (dep_node.kind.force_from_dep_node)(**self, dep_node)
+    }
+
+    fn has_errors_or_delayed_span_bugs(&self) -> bool {
+        self.sess.has_errors_or_delayed_span_bugs()
+    }
+
+    fn diagnostic(&self) -> &rustc_errors::Handler {
+        self.sess.diagnostic()
+    }
+
+    // Interactions with on_disk_cache
+    fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic> {
+        self.on_disk_cache
+            .as_ref()
+            .map(|c| c.load_diagnostics(**self, prev_dep_node_index))
+            .unwrap_or_default()
+    }
+
+    fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>) {
+        if let Some(c) = self.on_disk_cache.as_ref() {
+            c.store_diagnostics(dep_node_index, diagnostics)
+        }
+    }
+
+    fn store_diagnostics_for_anon_node(
+        &self,
+        dep_node_index: DepNodeIndex,
+        diagnostics: ThinVec<Diagnostic>,
+    ) {
+        if let Some(c) = self.on_disk_cache.as_ref() {
+            c.store_diagnostics_for_anon_node(dep_node_index, diagnostics)
+        }
+    }
+
     /// Executes a job by changing the `ImplicitCtxt` to point to the
     /// new query job while it executes. It returns the diagnostics
     /// captured during execution and the actual result.
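The long `codegen_unit` comment above encodes a general workaround: when a memoized computation's key cannot be rebuilt from its dep node, run the computation eagerly once so every later lookup is a cache hit and forcing is never needed. A minimal illustration of that priming pattern with a plain HashMap; all names here are illustrative, not rustc APIs:

```rust
use std::collections::HashMap;

/// Illustrative memoization cache keyed by CGU name.
struct CguCache {
    cache: HashMap<String, u64>,
}

impl CguCache {
    fn new() -> Self {
        CguCache { cache: HashMap::new() }
    }

    /// Stand-in for the `codegen_unit` query: compute once, then reuse.
    fn codegen_unit(&mut self, name: &str) -> u64 {
        *self.cache.entry(name.to_string()).or_insert_with(|| {
            // Imagine an expensive partitioning lookup here.
            name.len() as u64
        })
    }

    /// The pattern the comment prescribes: run the query for every CGU
    /// right after partitioning, so later green-marking always finds a
    /// cached result and never has to reconstruct the key (the analogue
    /// of `force_from_dep_node`).
    fn prime(&mut self, cgu_names: &[&str]) {
        for name in cgu_names {
            self.codegen_unit(name);
        }
    }
}

fn main() {
    let mut cache = CguCache::new();
    cache.prime(&["cgu.0", "cgu.1"]);
    // A later consumer hits the cache; nothing is recomputed.
    assert_eq!(cache.codegen_unit("cgu.0"), 5);
}
```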
8 changes: 4 additions & 4 deletions compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -587,7 +587,7 @@ impl<K: DepKind> DepGraph<K> {
     /// A node will have an index when it's already been marked green, or when we can mark it
     /// green. This function will mark the current task as a reader of the specified node, when
     /// a node index can be found for that node.
-    pub fn try_mark_green_and_read<Ctxt: DepContext<DepKind = K>>(
+    pub fn try_mark_green_and_read<Ctxt: QueryContext<DepKind = K>>(
         &self,
         tcx: Ctxt,
         dep_node: &DepNode<K>,
@@ -599,7 +599,7 @@ impl<K: DepKind> DepGraph<K> {
         })
     }
 
-    pub fn try_mark_green<Ctxt: DepContext<DepKind = K>>(
+    pub fn try_mark_green<Ctxt: QueryContext<DepKind = K>>(
         &self,
         tcx: Ctxt,
         dep_node: &DepNode<K>,
@@ -627,7 +627,7 @@ impl<K: DepKind> DepGraph<K> {
     }
 
     /// Try to mark a dep-node which existed in the previous compilation session as green.
-    fn try_mark_previous_green<Ctxt: DepContext<DepKind = K>>(
+    fn try_mark_previous_green<Ctxt: QueryContext<DepKind = K>>(
         &self,
         tcx: Ctxt,
         data: &DepGraphData<K>,
@@ -811,7 +811,7 @@ impl<K: DepKind> DepGraph<K> {
     /// This may be called concurrently on multiple threads for the same dep node.
     #[cold]
     #[inline(never)]
-    fn emit_diagnostics<Ctxt: DepContext<DepKind = K>>(
+    fn emit_diagnostics<Ctxt: QueryContext<DepKind = K>>(
         &self,
         tcx: Ctxt,
         data: &DepGraphData<K>,
24 changes: 0 additions & 24 deletions compiler/rustc_query_system/src/dep_graph/mod.rs
@@ -13,8 +13,6 @@ pub use serialized::{SerializedDepGraph, SerializedDepNodeIndex};
 
 use rustc_data_structures::profiling::SelfProfilerRef;
 use rustc_data_structures::sync::Lock;
-use rustc_data_structures::thin_vec::ThinVec;
-use rustc_errors::Diagnostic;
 
 use std::fmt;
 use std::hash::Hash;
@@ -32,30 +30,8 @@ pub trait DepContext: Copy {
     /// Access the DepGraph.
     fn dep_graph(&self) -> &DepGraph<Self::DepKind>;
 
-    /// Try to force a dep node to execute and see if it's green.
-    fn try_force_from_dep_node(&self, dep_node: &DepNode<Self::DepKind>) -> bool;
-
     fn register_reused_dep_node(&self, dep_node: &DepNode<Self::DepKind>);
-
-    /// Return whether the current session is tainted by errors.
-    fn has_errors_or_delayed_span_bugs(&self) -> bool;
-
-    /// Return the diagnostic handler.
-    fn diagnostic(&self) -> &rustc_errors::Handler;
-
-    /// Load diagnostics associated with the node in the previous session.
-    fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic>;
-
-    /// Register diagnostics for the given node, for use in the next session.
-    fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>);
-
-    /// Register diagnostics for the given node, for use in the next session.
-    fn store_diagnostics_for_anon_node(
-        &self,
-        dep_node_index: DepNodeIndex,
-        diagnostics: ThinVec<Diagnostic>,
-    );
 
     /// Access the profiler.
     fn profiler(&self) -> &SelfProfilerRef;
 }
24 changes: 23 additions & 1 deletion compiler/rustc_query_system/src/query/mod.rs
@@ -14,7 +14,7 @@ pub use self::caches::{
 mod config;
 pub use self::config::{QueryAccessors, QueryConfig, QueryDescription};
 
-use crate::dep_graph::{DepNode, HasDepContext};
+use crate::dep_graph::{DepNode, DepNodeIndex, HasDepContext, SerializedDepNodeIndex};
 use crate::query::job::QueryMap;
 
 use rustc_data_structures::stable_hasher::HashStable;
@@ -40,6 +40,28 @@ pub trait QueryContext: HasDepContext {
     /// Load data from the on-disk cache.
     fn try_load_from_on_disk_cache(&self, dep_node: &DepNode<Self::DepKind>);
 
+    /// Try to force a dep node to execute and see if it's green.
+    fn try_force_from_dep_node(&self, dep_node: &DepNode<Self::DepKind>) -> bool;
+
+    /// Return whether the current session is tainted by errors.
+    fn has_errors_or_delayed_span_bugs(&self) -> bool;
+
+    /// Return the diagnostic handler.
+    fn diagnostic(&self) -> &rustc_errors::Handler;
+
+    /// Load diagnostics associated with the node in the previous session.
+    fn load_diagnostics(&self, prev_dep_node_index: SerializedDepNodeIndex) -> Vec<Diagnostic>;
+
+    /// Register diagnostics for the given node, for use in the next session.
+    fn store_diagnostics(&self, dep_node_index: DepNodeIndex, diagnostics: ThinVec<Diagnostic>);
+
+    /// Register diagnostics for the given node, for use in the next session.
+    fn store_diagnostics_for_anon_node(
+        &self,
+        dep_node_index: DepNodeIndex,
+        diagnostics: ThinVec<Diagnostic>,
+    );
+
     /// Executes a job by changing the `ImplicitCtxt` to point to the
     /// new query job while it executes. It returns the diagnostics
     /// captured during execution and the actual result.
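Why do these diagnostics hooks belong on `QueryContext`? Marking a node green skips re-running its query, so any warnings the query produced in the previous session must be replayed from the on-disk cache; that is the job of `emit_diagnostics` in graph.rs above. A rough sketch of that replay flow under assumed, simplified types, not rustc's exact control flow:

```rust
// Simplified stand-ins for the indices and handler involved.
struct SerializedDepNodeIndex(u32);
struct DepNodeIndex(u32);
struct Diagnostic(String);

trait QueryContext {
    fn load_diagnostics(&self, prev_index: SerializedDepNodeIndex) -> Vec<Diagnostic>;
    fn store_diagnostics(&self, index: DepNodeIndex, diagnostics: Vec<Diagnostic>);
    fn emit(&self, diagnostic: &Diagnostic);
}

/// Rough shape of the replay: when a previous-session node is marked
/// green, load its old diagnostics, show them again, and re-store them
/// under the node's new index so the next session can find them too.
fn replay_diagnostics<C: QueryContext>(
    cx: &C,
    prev_index: SerializedDepNodeIndex,
    new_index: DepNodeIndex,
) {
    let diagnostics = cx.load_diagnostics(prev_index);
    for diagnostic in &diagnostics {
        cx.emit(diagnostic);
    }
    cx.store_diagnostics(new_index, diagnostics);
}
```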
