Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix race condition when emitting stored diagnostics #57066

Merged
merged 1 commit into from Jan 24, 2019
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
84 changes: 63 additions & 21 deletions src/librustc/dep_graph/graph.rs
@@ -1,4 +1,4 @@
use errors::DiagnosticBuilder;
use errors::{Diagnostic, DiagnosticBuilder};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::indexed_vec::{Idx, IndexVec};
Expand All @@ -9,6 +9,7 @@ use std::hash::Hash;
use std::collections::hash_map::Entry;
use ty::{self, TyCtxt};
use util::common::{ProfileQueriesMsg, profq_msg};
use parking_lot::{Mutex, Condvar};

use ich::{StableHashingContext, StableHashingContextProvider, Fingerprint};

Expand Down Expand Up @@ -60,6 +61,12 @@ struct DepGraphData {

colors: DepNodeColorMap,

/// A set of loaded diagnostics which has been emitted.
emitted_diagnostics: Mutex<FxHashSet<DepNodeIndex>>,

/// Used to wait for diagnostics to be emitted.
emitted_diagnostics_cond_var: Condvar,

/// When we load, there may be `.o` files, cached mir, or other such
/// things available to us. If we find that they are not dirty, we
/// load the path to the file storing those work-products here into
Expand All @@ -83,6 +90,8 @@ impl DepGraph {
previous_work_products: prev_work_products,
dep_node_debug: Default::default(),
current: Lock::new(CurrentDepGraph::new(prev_graph_node_count)),
emitted_diagnostics: Default::default(),
emitted_diagnostics_cond_var: Condvar::new(),
previous: prev_graph,
colors: DepNodeColorMap::new(prev_graph_node_count),
loaded_from_cache: Default::default(),
Expand Down Expand Up @@ -718,28 +727,18 @@ impl DepGraph {
};

// ... emitting any stored diagnostic ...
if did_allocation {
// Only the thread which did the allocation emits the error messages

// FIXME: Ensure that these are printed before returning for all threads.
// Currently threads where did_allocation = false can continue on
// and emit other diagnostics before these diagnostics are emitted.
// Such diagnostics should be emitted after these.
// See https://github.com/rust-lang/rust/issues/48685
let diagnostics = tcx.queries.on_disk_cache
.load_diagnostics(tcx, prev_dep_node_index);

if diagnostics.len() > 0 {
let handle = tcx.sess.diagnostic();
let diagnostics = tcx.queries.on_disk_cache
.load_diagnostics(tcx, prev_dep_node_index);

// Promote the previous diagnostics to the current session.
tcx.queries.on_disk_cache
.store_diagnostics(dep_node_index, diagnostics.clone().into());

for diagnostic in diagnostics {
DiagnosticBuilder::new_diagnostic(handle, diagnostic).emit();
}
}
if unlikely!(diagnostics.len() > 0) {
self.emit_diagnostics(
tcx,
data,
dep_node_index,
did_allocation,
diagnostics
);
}

// ... and finally storing a "Green" entry in the color map.
Expand All @@ -755,6 +754,49 @@ impl DepGraph {
Some(dep_node_index)
}

/// Atomically emits some loaded diagnostics, assuming that this only gets called with
/// `did_allocation` set to true on a single thread.
///
/// Exactly one thread — the one that allocated the `DepNodeIndex` (or any thread
/// when parallel queries are compiled out) — promotes the stored diagnostics to the
/// current session and emits them. Under `parallel_queries`, every other thread
/// blocks on `emitted_diagnostics_cond_var` until the emitting thread has inserted
/// `dep_node_index` into the `emitted_diagnostics` set, so those threads cannot
/// proceed (and emit later diagnostics) before these stored ones are printed.
#[cold]
#[inline(never)]
fn emit_diagnostics<'tcx>(
&self,
tcx: TyCtxt<'_, 'tcx, 'tcx>,
data: &DepGraphData,
dep_node_index: DepNodeIndex,
did_allocation: bool,
diagnostics: Vec<Diagnostic>,
) {
if did_allocation || !cfg!(parallel_queries) {
// Only the thread which did the allocation emits the error messages
let handle = tcx.sess.diagnostic();

// Promote the previous diagnostics to the current session.
tcx.queries.on_disk_cache
.store_diagnostics(dep_node_index, diagnostics.clone().into());

for diagnostic in diagnostics {
DiagnosticBuilder::new_diagnostic(handle, diagnostic).emit();
}

#[cfg(parallel_queries)]
{
// Mark the diagnostics as emitted and wake up waiters.
// The insert happens under the lock, so a waiter either observes it
// while holding the lock or is already parked in `wait` and gets
// woken by `notify_all` below.
data.emitted_diagnostics.lock().insert(dep_node_index);
data.emitted_diagnostics_cond_var.notify_all();
}
} else {
// The other threads will wait for the diagnostics to be emitted

let mut emitted_diagnostics = data.emitted_diagnostics.lock();
loop {
if emitted_diagnostics.contains(&dep_node_index) {
break;
}
// Re-check the set on every wakeup: `wait` releases the lock while
// parked and may wake spuriously, so the loop guards the condition.
data.emitted_diagnostics_cond_var.wait(&mut emitted_diagnostics);
}
}
}

// Returns true if the given node has been marked as green during the
// current compilation session. Used in various assertions
pub fn is_green(&self, dep_node: &DepNode) -> bool {
Expand Down