Invoke callbacks from rustc_middle.
cjgillot committed Oct 20, 2021
1 parent b09de95 commit 602d3cb
Showing 9 changed files with 42 additions and 47 deletions.
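In short: this commit moves the per-`DepKind` callbacks `try_force_from_dep_node` and `try_load_from_on_disk_cache` off the `QueryContext` trait in `rustc_query_impl` and onto the `DepContext` trait, implemented directly on `TyCtxt` in `rustc_middle` via its `query_kind` callback table. As a result, `DepGraph::exec_cache_promotions` needs only a `DepContext` rather than a full `QueryContext`, and `rustc_query_impl` drops its now-unused `tracing` dependency.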
1 change: 0 additions & 1 deletion Cargo.lock
@@ -4319,7 +4319,6 @@ dependencies = [
  "rustc_serialize",
  "rustc_session",
  "rustc_span",
- "tracing",
 ]

 [[package]]
30 changes: 30 additions & 0 deletions compiler/rustc_middle/src/dep_graph/mod.rs
@@ -99,4 +99,34 @@ impl<'tcx> DepContext for TyCtxt<'tcx> {
     fn is_eval_always(&self, kind: DepKind) -> bool {
         self.query_kind(kind).is_eval_always
     }
+
+    fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool {
+        debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);
+
+        // We must avoid ever having to call `force_from_dep_node()` for a
+        // `DepNode::codegen_unit`:
+        // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
+        // would always end up having to evaluate the first caller of the
+        // `codegen_unit` query that *is* reconstructible. This might very well be
+        // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
+        // to re-trigger calling the `codegen_unit` query with the right key. At
+        // that point we would already have re-done all the work we are trying to
+        // avoid doing in the first place.
+        // The solution is simple: just explicitly call the `codegen_unit` query for
+        // each CGU, right after partitioning. This way `try_mark_green` will always
+        // hit the cache instead of having to go through `force_from_dep_node`.
+        // This assertion makes sure we actually keep applying the solution above.
+        debug_assert!(
+            dep_node.kind != DepKind::codegen_unit,
+            "calling force_from_dep_node() on DepKind::codegen_unit"
+        );
+
+        let cb = self.query_kind(dep_node.kind);
+        (cb.force_from_dep_node)(*self, dep_node)
+    }
+
+    fn try_load_from_on_disk_cache(&self, dep_node: &DepNode) {
+        let cb = self.query_kind(dep_node.kind);
+        (cb.try_load_from_on_disk_cache)(*self, dep_node)
+    }
 }
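The implementation above dispatches through a per-`DepKind` table of function pointers (`self.query_kind(dep_node.kind)`), replacing the direct `QUERY_CALLBACKS[dep_node.kind as usize]` indexing removed from rustc_query_impl below. A minimal, self-contained sketch of that callback-table pattern; every name here (`Tcx`, `QueryCallbacks`, the two-variant `DepKind`) is a simplified stand-in, not rustc's actual definitions:

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum DepKind {
    CodegenUnit,
    TypeOf,
    // ... one variant per query in the real compiler ...
}

#[derive(Debug)]
struct DepNode {
    kind: DepKind,
    // The real DepNode also carries a fingerprint of the query key.
}

struct QueryCallbacks {
    // Function pointers filled in per query by the implementation crate.
    force_from_dep_node: fn(&Tcx, &DepNode) -> bool,
    try_load_from_on_disk_cache: fn(&Tcx, &DepNode),
}

struct Tcx {
    callbacks: Vec<QueryCallbacks>,
}

impl Tcx {
    fn query_kind(&self, kind: DepKind) -> &QueryCallbacks {
        // Index by the kind's discriminant, as QUERY_CALLBACKS did.
        &self.callbacks[kind as usize]
    }

    fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool {
        // As in the hunk above: codegen_unit nodes must never be forced.
        debug_assert!(dep_node.kind != DepKind::CodegenUnit);
        let cb = self.query_kind(dep_node.kind);
        (cb.force_from_dep_node)(self, dep_node)
    }
}

fn main() {
    let tcx = Tcx {
        callbacks: vec![
            // CodegenUnit: unreachable through try_force_from_dep_node.
            QueryCallbacks {
                force_from_dep_node: |_, _| false,
                try_load_from_on_disk_cache: |_, _| {},
            },
            // TypeOf: pretend forcing re-ran the query and it came out green.
            QueryCallbacks {
                force_from_dep_node: |_, node| {
                    println!("re-running query for {:?}", node);
                    true
                },
                try_load_from_on_disk_cache: |_, _| {},
            },
        ],
    };
    assert!(tcx.try_force_from_dep_node(&DepNode { kind: DepKind::TypeOf }));
}

The point of the indirection is layering: rustc_middle cannot depend on rustc_query_impl, so it calls through a table that the query crate populates at startup.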
1 change: 0 additions & 1 deletion compiler/rustc_query_impl/Cargo.toml
@@ -9,7 +9,6 @@ doctest = false
 [dependencies]
 measureme = "10.0.0"
 rustc-rayon-core = "0.3.1"
-tracing = "0.1"
 rustc_ast = { path = "../rustc_ast" }
 rustc_data_structures = { path = "../rustc_data_structures" }
 rustc_errors = { path = "../rustc_errors" }
2 changes: 0 additions & 2 deletions compiler/rustc_query_impl/src/lib.rs
@@ -13,8 +13,6 @@
 extern crate rustc_macros;
 #[macro_use]
 extern crate rustc_middle;
-#[macro_use]
-extern crate tracing;

 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
2 changes: 1 addition & 1 deletion compiler/rustc_query_impl/src/on_disk_cache.rs
@@ -219,7 +219,7 @@ impl<'sess> rustc_middle::ty::OnDiskCache<'sess> for OnDiskCache<'sess> {
         // Do this *before* we clone 'latest_foreign_def_path_hashes', since
         // loading existing queries may cause us to create new DepNodes, which
         // may in turn end up invoking `store_foreign_def_id_hash`
-        tcx.dep_graph.exec_cache_promotions(QueryCtxt::from_tcx(tcx));
+        tcx.dep_graph.exec_cache_promotions(tcx);

         *self.serialized_data.write() = None;
     }
32 changes: 1 addition & 31 deletions compiler/rustc_query_impl/src/plumbing.rs
@@ -3,7 +3,7 @@
 //! manage the caches, and so forth.

 use crate::{on_disk_cache, queries, Queries};
-use rustc_middle::dep_graph::{DepKind, DepNode, DepNodeIndex, SerializedDepNodeIndex};
+use rustc_middle::dep_graph::{DepKind, DepNodeIndex, SerializedDepNodeIndex};
 use rustc_middle::ty::tls::{self, ImplicitCtxt};
 use rustc_middle::ty::{self, TyCtxt};
 use rustc_query_system::dep_graph::HasDepContext;
@@ -53,36 +53,6 @@ impl QueryContext for QueryCtxt<'tcx> {
         self.queries.try_collect_active_jobs(**self)
     }

-    fn try_load_from_on_disk_cache(&self, dep_node: &DepNode) {
-        let cb = &super::QUERY_CALLBACKS[dep_node.kind as usize];
-        (cb.try_load_from_on_disk_cache)(**self, dep_node)
-    }
-
-    fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool {
-        debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);
-
-        // We must avoid ever having to call `force_from_dep_node()` for a
-        // `DepNode::codegen_unit`:
-        // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
-        // would always end up having to evaluate the first caller of the
-        // `codegen_unit` query that *is* reconstructible. This might very well be
-        // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
-        // to re-trigger calling the `codegen_unit` query with the right key. At
-        // that point we would already have re-done all the work we are trying to
-        // avoid doing in the first place.
-        // The solution is simple: Just explicitly call the `codegen_unit` query for
-        // each CGU, right after partitioning. This way `try_mark_green` will always
-        // hit the cache instead of having to go through `force_from_dep_node`.
-        // This assertion makes sure, we actually keep applying the solution above.
-        debug_assert!(
-            dep_node.kind != DepKind::codegen_unit,
-            "calling force_from_dep_node() on DepKind::codegen_unit"
-        );
-
-        let cb = &super::QUERY_CALLBACKS[dep_node.kind as usize];
-        (cb.force_from_dep_node)(**self, dep_node)
-    }
-
     // Interactions with on_disk_cache
     fn load_side_effects(&self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects {
         self.queries
7 changes: 3 additions & 4 deletions compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -576,7 +576,7 @@ impl<K: DepKind> DepGraph<K> {
                     "try_mark_previous_green({:?}) --- trying to force dependency {:?}",
                     dep_node, dep_dep_node
                 );
-                if !tcx.try_force_from_dep_node(dep_dep_node) {
+                if !tcx.dep_context().try_force_from_dep_node(dep_dep_node) {
                     // The DepNode could not be forced.
                     debug!(
                         "try_mark_previous_green({:?}) - END - dependency {:?} could not be forced",
@@ -741,16 +741,15 @@ impl<K: DepKind> DepGraph<K> {
     //
     // This method will only load queries that will end up in the disk cache.
     // Other queries will not be executed.
-    pub fn exec_cache_promotions<Ctxt: QueryContext<DepKind = K>>(&self, qcx: Ctxt) {
-        let tcx = qcx.dep_context();
+    pub fn exec_cache_promotions<Ctxt: DepContext<DepKind = K>>(&self, tcx: Ctxt) {
         let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");

         let data = self.data.as_ref().unwrap();
         for prev_index in data.colors.values.indices() {
             match data.colors.get(prev_index) {
                 Some(DepNodeColor::Green(_)) => {
                     let dep_node = data.previous.index_to_node(prev_index);
-                    qcx.try_load_from_on_disk_cache(&dep_node);
+                    tcx.try_load_from_on_disk_cache(&dep_node);
                 }
                 None | Some(DepNodeColor::Red) => {
                     // We can skip red nodes because a node can only be marked
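With the bound relaxed from `QueryContext` to `DepContext`, the on_disk_cache.rs caller above can pass `tcx` directly instead of wrapping it in a `QueryCtxt`. Below is a self-contained sketch of the promotion loop's logic; the slice of colors and the `try_load` closure are simplified stand-ins for the real color map and `DepContext::try_load_from_on_disk_cache`:

#[derive(Clone, Copy)]
enum DepNodeColor {
    Red,
    Green, // the real compiler also stores the new DepNodeIndex here
}

// Walk every node recorded in the previous session; green nodes still have
// valid results, so their cached values are re-loaded ("promoted") into the
// next session's cache. Red or unmarked nodes are skipped.
fn exec_cache_promotions(colors: &[Option<DepNodeColor>], try_load: impl Fn(usize)) {
    for (prev_index, color) in colors.iter().enumerate() {
        match color {
            Some(DepNodeColor::Green) => try_load(prev_index),
            // A red node's result changed, and an unmarked node was never
            // re-checked; neither has a result worth carrying forward.
            None | Some(DepNodeColor::Red) => {}
        }
    }
}

fn main() {
    let colors = [Some(DepNodeColor::Green), Some(DepNodeColor::Red), None];
    exec_cache_promotions(&colors, |i| println!("promoting node {}", i));
}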
6 changes: 6 additions & 0 deletions compiler/rustc_query_system/src/dep_graph/mod.rs
@@ -37,6 +37,12 @@ pub trait DepContext: Copy {
     fn is_eval_always(&self, kind: Self::DepKind) -> bool;

     fn fingerprint_style(&self, kind: Self::DepKind) -> FingerprintStyle;
+
+    /// Try to force a dep node to execute and see if it's green.
+    fn try_force_from_dep_node(&self, dep_node: &DepNode<Self::DepKind>) -> bool;
+
+    /// Load data from the on-disk cache.
+    fn try_load_from_on_disk_cache(&self, dep_node: &DepNode<Self::DepKind>);
 }

 pub trait HasDepContext: Copy {
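With these methods on `DepContext` rather than `QueryContext` (see the query/mod.rs hunk below), dep-graph code reaches them through `HasDepContext::dep_context()`, as in the graph.rs change above. A minimal sketch of that trait layering; `Tcx`, `QueryCtxt`, and the `u32` node ids are hypothetical simplifications:

// The dep-graph-facing trait now owns the two callbacks.
trait DepContext: Copy {
    fn try_force_from_dep_node(&self, node: u32) -> bool;
    fn try_load_from_on_disk_cache(&self, node: u32);
}

// Query-layer contexts only promise access to some DepContext.
trait HasDepContext: Copy {
    type DepContext: DepContext;
    fn dep_context(&self) -> &Self::DepContext;
}

#[derive(Clone, Copy)]
struct Tcx; // stands in for rustc_middle's TyCtxt

impl DepContext for Tcx {
    fn try_force_from_dep_node(&self, node: u32) -> bool {
        println!("forcing dep node {}", node);
        true
    }
    fn try_load_from_on_disk_cache(&self, node: u32) {
        println!("loading dep node {} from the on-disk cache", node);
    }
}

#[derive(Clone, Copy)]
struct QueryCtxt(Tcx); // stands in for rustc_query_impl's wrapper

impl HasDepContext for QueryCtxt {
    type DepContext = Tcx;
    fn dep_context(&self) -> &Tcx {
        &self.0
    }
}

fn main() {
    let qcx = QueryCtxt(Tcx);
    // What graph.rs now writes instead of calling a QueryContext method:
    assert!(qcx.dep_context().try_force_from_dep_node(7));
}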
8 changes: 1 addition & 7 deletions compiler/rustc_query_system/src/query/mod.rs
@@ -14,7 +14,7 @@ pub use self::caches::{
 mod config;
 pub use self::config::{QueryAccessors, QueryConfig, QueryDescription};

-use crate::dep_graph::{DepNode, DepNodeIndex, HasDepContext, SerializedDepNodeIndex};
+use crate::dep_graph::{DepNodeIndex, HasDepContext, SerializedDepNodeIndex};

 use rustc_data_structures::sync::Lock;
 use rustc_data_structures::thin_vec::ThinVec;

@@ -122,12 +122,6 @@ pub trait QueryContext: HasDepContext {

     fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind>>;

-    /// Load data from the on-disk cache.
-    fn try_load_from_on_disk_cache(&self, dep_node: &DepNode<Self::DepKind>);
-
-    /// Try to force a dep node to execute and see if it's green.
-    fn try_force_from_dep_node(&self, dep_node: &DepNode<Self::DepKind>) -> bool;
-
     /// Load side effects associated to the node in the previous session.
     fn load_side_effects(&self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects;
