From 602d3cbce3e6227ad7cd5c009c868c9e405a9f32 Mon Sep 17 00:00:00 2001
From: Camille GILLOT
Date: Sat, 16 Oct 2021 20:24:08 +0200
Subject: [PATCH] Invoke callbacks from rustc_middle.

---
 Cargo.lock                                    |  1 -
 compiler/rustc_middle/src/dep_graph/mod.rs    | 30 +++++++++++++++++
 compiler/rustc_query_impl/Cargo.toml          |  1 -
 compiler/rustc_query_impl/src/lib.rs          |  2 --
 .../rustc_query_impl/src/on_disk_cache.rs     |  2 +-
 compiler/rustc_query_impl/src/plumbing.rs     | 32 +------------------
 .../rustc_query_system/src/dep_graph/graph.rs |  7 ++--
 .../rustc_query_system/src/dep_graph/mod.rs   |  6 ++++
 compiler/rustc_query_system/src/query/mod.rs  |  8 +----
 9 files changed, 42 insertions(+), 47 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index aaa709e50f60d..8385f1a18e5cd 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4319,7 +4319,6 @@ dependencies = [
  "rustc_serialize",
  "rustc_session",
  "rustc_span",
- "tracing",
 ]
 
 [[package]]
diff --git a/compiler/rustc_middle/src/dep_graph/mod.rs b/compiler/rustc_middle/src/dep_graph/mod.rs
index 7ace3a10158a5..8ac3cef9040f7 100644
--- a/compiler/rustc_middle/src/dep_graph/mod.rs
+++ b/compiler/rustc_middle/src/dep_graph/mod.rs
@@ -99,4 +99,34 @@ impl<'tcx> DepContext for TyCtxt<'tcx> {
     fn is_eval_always(&self, kind: DepKind) -> bool {
         self.query_kind(kind).is_eval_always
     }
+
+    fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool {
+        debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);
+
+        // We must avoid ever having to call `force_from_dep_node()` for a
+        // `DepNode::codegen_unit`:
+        // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
+        // would always end up having to evaluate the first caller of the
+        // `codegen_unit` query that *is* reconstructible. This might very well be
+        // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
+        // to re-trigger calling the `codegen_unit` query with the right key. At
+        // that point we would already have re-done all the work we are trying to
+        // avoid doing in the first place.
+        // The solution is simple: Just explicitly call the `codegen_unit` query for
+        // each CGU, right after partitioning. This way `try_mark_green` will always
+        // hit the cache instead of having to go through `force_from_dep_node`.
+        // This assertion makes sure, we actually keep applying the solution above.
+        debug_assert!(
+            dep_node.kind != DepKind::codegen_unit,
+            "calling force_from_dep_node() on DepKind::codegen_unit"
+        );
+
+        let cb = self.query_kind(dep_node.kind);
+        (cb.force_from_dep_node)(*self, dep_node)
+    }
+
+    fn try_load_from_on_disk_cache(&self, dep_node: &DepNode) {
+        let cb = self.query_kind(dep_node.kind);
+        (cb.try_load_from_on_disk_cache)(*self, dep_node)
+    }
 }
diff --git a/compiler/rustc_query_impl/Cargo.toml b/compiler/rustc_query_impl/Cargo.toml
index 814581563896e..f984bb1872b28 100644
--- a/compiler/rustc_query_impl/Cargo.toml
+++ b/compiler/rustc_query_impl/Cargo.toml
@@ -9,7 +9,6 @@ doctest = false
 [dependencies]
 measureme = "10.0.0"
 rustc-rayon-core = "0.3.1"
-tracing = "0.1"
 rustc_ast = { path = "../rustc_ast" }
 rustc_data_structures = { path = "../rustc_data_structures" }
 rustc_errors = { path = "../rustc_errors" }
diff --git a/compiler/rustc_query_impl/src/lib.rs b/compiler/rustc_query_impl/src/lib.rs
index aa9c5bc5aba5c..d86d1bb8af427 100644
--- a/compiler/rustc_query_impl/src/lib.rs
+++ b/compiler/rustc_query_impl/src/lib.rs
@@ -13,8 +13,6 @@
 extern crate rustc_macros;
 #[macro_use]
 extern crate rustc_middle;
-#[macro_use]
-extern crate tracing;
 
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
diff --git a/compiler/rustc_query_impl/src/on_disk_cache.rs b/compiler/rustc_query_impl/src/on_disk_cache.rs
index 499dae650b3a6..86b12b3586a9b 100644
--- a/compiler/rustc_query_impl/src/on_disk_cache.rs
+++ b/compiler/rustc_query_impl/src/on_disk_cache.rs
@@ -219,7 +219,7 @@ impl<'sess> rustc_middle::ty::OnDiskCache<'sess> for OnDiskCache<'sess> {
         // Do this *before* we clone 'latest_foreign_def_path_hashes', since
         // loading existing queries may cause us to create new DepNodes, which
         // may in turn end up invoking `store_foreign_def_id_hash`
-        tcx.dep_graph.exec_cache_promotions(QueryCtxt::from_tcx(tcx));
+        tcx.dep_graph.exec_cache_promotions(tcx);
 
         *self.serialized_data.write() = None;
     }
diff --git a/compiler/rustc_query_impl/src/plumbing.rs b/compiler/rustc_query_impl/src/plumbing.rs
index 043e6b1c15139..be704b75b2b06 100644
--- a/compiler/rustc_query_impl/src/plumbing.rs
+++ b/compiler/rustc_query_impl/src/plumbing.rs
@@ -3,7 +3,7 @@
 //! manage the caches, and so forth.
 
 use crate::{on_disk_cache, queries, Queries};
-use rustc_middle::dep_graph::{DepKind, DepNode, DepNodeIndex, SerializedDepNodeIndex};
+use rustc_middle::dep_graph::{DepKind, DepNodeIndex, SerializedDepNodeIndex};
 use rustc_middle::ty::tls::{self, ImplicitCtxt};
 use rustc_middle::ty::{self, TyCtxt};
 use rustc_query_system::dep_graph::HasDepContext;
@@ -53,36 +53,6 @@ impl QueryContext for QueryCtxt<'tcx> {
         self.queries.try_collect_active_jobs(**self)
     }
 
-    fn try_load_from_on_disk_cache(&self, dep_node: &DepNode) {
-        let cb = &super::QUERY_CALLBACKS[dep_node.kind as usize];
-        (cb.try_load_from_on_disk_cache)(**self, dep_node)
-    }
-
-    fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool {
-        debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);
-
-        // We must avoid ever having to call `force_from_dep_node()` for a
-        // `DepNode::codegen_unit`:
-        // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
-        // would always end up having to evaluate the first caller of the
-        // `codegen_unit` query that *is* reconstructible. This might very well be
-        // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
-        // to re-trigger calling the `codegen_unit` query with the right key. At
-        // that point we would already have re-done all the work we are trying to
-        // avoid doing in the first place.
-        // The solution is simple: Just explicitly call the `codegen_unit` query for
-        // each CGU, right after partitioning. This way `try_mark_green` will always
-        // hit the cache instead of having to go through `force_from_dep_node`.
-        // This assertion makes sure, we actually keep applying the solution above.
-        debug_assert!(
-            dep_node.kind != DepKind::codegen_unit,
-            "calling force_from_dep_node() on DepKind::codegen_unit"
-        );
-
-        let cb = &super::QUERY_CALLBACKS[dep_node.kind as usize];
-        (cb.force_from_dep_node)(**self, dep_node)
-    }
-
     // Interactions with on_disk_cache
     fn load_side_effects(&self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects {
         self.queries
diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs
index 5822370388b38..228b405496719 100644
--- a/compiler/rustc_query_system/src/dep_graph/graph.rs
+++ b/compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -576,7 +576,7 @@ impl<K: DepKind> DepGraph<K> {
                         "try_mark_previous_green({:?}) --- trying to force dependency {:?}",
                         dep_node, dep_dep_node
                     );
-                    if !tcx.try_force_from_dep_node(dep_dep_node) {
+                    if !tcx.dep_context().try_force_from_dep_node(dep_dep_node) {
                         // The DepNode could not be forced.
                         debug!(
                             "try_mark_previous_green({:?}) - END - dependency {:?} could not be forced",
@@ -741,8 +741,7 @@ impl<K: DepKind> DepGraph<K> {
     //
     // This method will only load queries that will end up in the disk cache.
    // Other queries will not be executed.
-    pub fn exec_cache_promotions<Ctxt: QueryContext<DepKind = K>>(&self, qcx: Ctxt) {
-        let tcx = qcx.dep_context();
+    pub fn exec_cache_promotions<Ctxt: DepContext<DepKind = K>>(&self, tcx: Ctxt) {
         let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");
 
         let data = self.data.as_ref().unwrap();
@@ -750,7 +749,7 @@ impl<K: DepKind> DepGraph<K> {
             match data.colors.get(prev_index) {
                 Some(DepNodeColor::Green(_)) => {
                     let dep_node = data.previous.index_to_node(prev_index);
-                    qcx.try_load_from_on_disk_cache(&dep_node);
+                    tcx.try_load_from_on_disk_cache(&dep_node);
                 }
                 None | Some(DepNodeColor::Red) => {
                     // We can skip red nodes because a node can only be marked
diff --git a/compiler/rustc_query_system/src/dep_graph/mod.rs b/compiler/rustc_query_system/src/dep_graph/mod.rs
index 6a81c401105b3..d7f7758d02cd2 100644
--- a/compiler/rustc_query_system/src/dep_graph/mod.rs
+++ b/compiler/rustc_query_system/src/dep_graph/mod.rs
@@ -37,6 +37,12 @@ pub trait DepContext: Copy {
     fn is_eval_always(&self, kind: Self::DepKind) -> bool;
 
     fn fingerprint_style(&self, kind: Self::DepKind) -> FingerprintStyle;
+
+    /// Try to force a dep node to execute and see if it's green.
+    fn try_force_from_dep_node(&self, dep_node: &DepNode<Self::DepKind>) -> bool;
+
+    /// Load data from the on-disk cache.
+    fn try_load_from_on_disk_cache(&self, dep_node: &DepNode<Self::DepKind>);
 }
 
 pub trait HasDepContext: Copy {
diff --git a/compiler/rustc_query_system/src/query/mod.rs b/compiler/rustc_query_system/src/query/mod.rs
index dffe7f3689ff4..e2b0a65ab77b1 100644
--- a/compiler/rustc_query_system/src/query/mod.rs
+++ b/compiler/rustc_query_system/src/query/mod.rs
@@ -14,7 +14,7 @@ pub use self::caches::{
 mod config;
 pub use self::config::{QueryAccessors, QueryConfig, QueryDescription};
 
-use crate::dep_graph::{DepNode, DepNodeIndex, HasDepContext, SerializedDepNodeIndex};
+use crate::dep_graph::{DepNodeIndex, HasDepContext, SerializedDepNodeIndex};
 
 use rustc_data_structures::sync::Lock;
 use rustc_data_structures::thin_vec::ThinVec;
@@ -122,12 +122,6 @@ pub trait QueryContext: HasDepContext {
     fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind>>;
 
-    /// Load data from the on-disk cache.
-    fn try_load_from_on_disk_cache(&self, dep_node: &DepNode<Self::DepKind>);
-
-    /// Try to force a dep node to execute and see if it's green.
-    fn try_force_from_dep_node(&self, dep_node: &DepNode<Self::DepKind>) -> bool;
-
     /// Load side effects associated to the node in the previous session.
     fn load_side_effects(&self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects;
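
Illustrative sketch (not part of the patch): the change above lets TyCtxt in rustc_middle implement the dep-graph hooks itself by looking up a per-DepKind callback table, so rustc_query_system can force or reload a node through the DepContext trait without going through QueryCtxt. Below is a minimal, self-contained model of that dispatch pattern under simplified assumptions; the names Tcx, DepKindStruct and query_kinds are stand-ins for illustration only, not the actual compiler API.

// Hypothetical, simplified model of callback dispatch through a DepContext-like trait.

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum DepKind {
    TypeOf,
    CodegenUnit,
}

#[derive(Clone, Copy, Debug)]
struct DepNode {
    kind: DepKind,
}

// Per-kind callbacks; in rustc these are registered by the query-impl layer.
struct DepKindStruct {
    force_from_dep_node: fn(&Tcx, &DepNode) -> bool,
    try_load_from_on_disk_cache: fn(&Tcx, &DepNode),
}

// Simplified stand-in for `TyCtxt`: it only needs to own the callback table.
struct Tcx {
    query_kinds: Vec<DepKindStruct>,
}

impl Tcx {
    fn query_kind(&self, kind: DepKind) -> &DepKindStruct {
        &self.query_kinds[kind as usize]
    }
}

// What the dep-graph side sees: a context that can force or reload a node
// without knowing anything about concrete queries.
trait DepContext {
    fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool;
    fn try_load_from_on_disk_cache(&self, dep_node: &DepNode);
}

impl DepContext for Tcx {
    fn try_force_from_dep_node(&self, dep_node: &DepNode) -> bool {
        // Mirrors the patch: codegen-unit nodes are never forced, everything
        // else is dispatched through the per-kind table.
        debug_assert!(dep_node.kind != DepKind::CodegenUnit);
        (self.query_kind(dep_node.kind).force_from_dep_node)(self, dep_node)
    }

    fn try_load_from_on_disk_cache(&self, dep_node: &DepNode) {
        (self.query_kind(dep_node.kind).try_load_from_on_disk_cache)(self, dep_node)
    }
}

fn main() {
    // The query-impl side fills in the table; callers only ever use the trait.
    let tcx = Tcx {
        query_kinds: vec![
            // DepKind::TypeOf
            DepKindStruct {
                force_from_dep_node: |_, node| {
                    println!("forcing {:?}", node);
                    true
                },
                try_load_from_on_disk_cache: |_, node| println!("reloading {:?}", node),
            },
            // DepKind::CodegenUnit
            DepKindStruct {
                force_from_dep_node: |_, _| unreachable!("codegen_unit is never forced"),
                try_load_from_on_disk_cache: |_, _| {},
            },
        ],
    };

    let node = DepNode { kind: DepKind::TypeOf };
    assert!(tcx.try_force_from_dep_node(&node));
    tcx.try_load_from_on_disk_cache(&node);
}

The point of the indirection is layering: the table is filled in by the query-implementation crate, while the crates that own the dep graph only ever see the trait, which is what allows the two methods to be removed from QueryContext here.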