diff --git a/src/Cargo.lock b/src/Cargo.lock index d17faef82b563..ed6f51093a757 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -1786,12 +1786,16 @@ dependencies = [ "jobserver 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", "polonius-engine 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "proc_macro 0.0.0", + "rustc-rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-rayon-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_apfloat 0.0.0", "rustc_data_structures 0.0.0", "rustc_errors 0.0.0", "rustc_target 0.0.0", + "scoped-tls 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "serialize 0.0.0", "syntax 0.0.0", "syntax_pos 0.0.0", @@ -1828,7 +1832,7 @@ dependencies = [ "rustc-ap-rustc_cratesio_shim 147.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-ap-serialize 147.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-rayon 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "stable_deref_trait 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1911,16 +1915,16 @@ dependencies = [ [[package]] name = "rustc-rayon" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "either 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-rayon-core 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-rayon-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rustc-rayon-core" -version = "0.1.0" 
+version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "crossbeam-deque 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2044,8 +2048,8 @@ dependencies = [ "parking_lot 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot_core 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-rayon 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-rayon-core 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-rayon-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_cratesio_shim 0.0.0", "serialize 0.0.0", "stable_deref_trait 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2061,7 +2065,7 @@ dependencies = [ "graphviz 0.0.0", "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", - "rustc-rayon 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_allocator 0.0.0", "rustc_borrowck 0.0.0", "rustc_codegen_utils 0.0.0", @@ -3215,8 +3219,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum rustc-ap-syntax_pos 147.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fb707a229093791dc3fc35aca61d9bf0e3708f23da4536683527857bc624b061" "checksum rustc-demangle 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "11fb43a206a04116ffd7cfcf9bcb941f8eb6cc7ff667272246b0a1c74259a3cb" "checksum rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7540fc8b0c49f096ee9c961cda096467dce8084bec6bdca2fc83895fd9b28cb8" -"checksum rustc-rayon 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"b1aa5cd8c3a706edb19b6ec6aa7b056bdc635b6e99c5cf7014f9af9d92f15e99" -"checksum rustc-rayon-core 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d69983f8613a9c3ba1a3bbf5e8bdf2fd5c42317b1d8dd8623ca8030173bf8a6b" +"checksum rustc-rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c6d5a683c6ba4ed37959097e88d71c9e8e26659a3cb5be8b389078e7ad45306" +"checksum rustc-rayon-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "40f06724db71e18d68b3b946fdf890ca8c921d9edccc1404fdfdb537b0d12649" "checksum rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)" = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda" "checksum rustc_version 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a54aa04a10c68c1c4eacb4337fd883b435997ede17a9385784b990777686b09a" "checksum rustfix 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9da3cf9b79dc889a2c9879643f26d7a53e37e9361c7566b7d2787d5ace0d8396" diff --git a/src/librustc/Cargo.toml b/src/librustc/Cargo.toml index 1d1166ad2c4fd..0ff4dc2eace81 100644 --- a/src/librustc/Cargo.toml +++ b/src/librustc/Cargo.toml @@ -15,9 +15,12 @@ fmt_macros = { path = "../libfmt_macros" } graphviz = { path = "../libgraphviz" } jobserver = "0.1" lazy_static = "1.0.0" +scoped-tls = { version = "0.1.1", features = ["nightly"] } log = { version = "0.4", features = ["release_max_level_info", "std"] } polonius-engine = "0.5.0" proc_macro = { path = "../libproc_macro" } +rustc-rayon = "0.1.1" +rustc-rayon-core = "0.1.1" rustc_apfloat = { path = "../librustc_apfloat" } rustc_target = { path = "../librustc_target" } rustc_data_structures = { path = "../librustc_data_structures" } @@ -26,6 +29,7 @@ serialize = { path = "../libserialize" } syntax = { path = "../libsyntax" } syntax_pos = { path = "../libsyntax_pos" } backtrace = "0.3.3" +parking_lot = "0.5.5" byteorder = { version = "1.1", features = ["i128"]} chalk-engine = { version = "0.6.0", 
default-features=false } diff --git a/src/librustc/lib.rs b/src/librustc/lib.rs index 10e8905054d11..a006856f58b7d 100644 --- a/src/librustc/lib.rs +++ b/src/librustc/lib.rs @@ -67,6 +67,7 @@ #![feature(unboxed_closures)] #![feature(trace_macros)] #![feature(trusted_len)] +#![feature(vec_remove_item)] #![feature(catch_expr)] #![feature(integer_atomics)] #![feature(test)] @@ -83,13 +84,17 @@ extern crate fmt_macros; extern crate getopts; extern crate graphviz; #[macro_use] extern crate lazy_static; +#[macro_use] extern crate scoped_tls; #[cfg(windows)] extern crate libc; extern crate polonius_engine; extern crate rustc_target; #[macro_use] extern crate rustc_data_structures; extern crate serialize; +extern crate parking_lot; extern crate rustc_errors as errors; +extern crate rustc_rayon as rayon; +extern crate rustc_rayon_core as rayon_core; #[macro_use] extern crate log; #[macro_use] extern crate syntax; extern crate syntax_pos; diff --git a/src/librustc/ty/context.rs b/src/librustc/ty/context.rs index 35b2ce50da79d..e66ad24231094 100644 --- a/src/librustc/ty/context.rs +++ b/src/librustc/ty/context.rs @@ -1699,16 +1699,21 @@ impl<'a, 'tcx> Lift<'tcx> for &'a Slice { pub mod tls { use super::{GlobalCtxt, TyCtxt}; - use std::cell::Cell; use std::fmt; use std::mem; use syntax_pos; use ty::maps; use errors::{Diagnostic, TRACK_DIAGNOSTICS}; use rustc_data_structures::OnDrop; - use rustc_data_structures::sync::{self, Lrc}; + use rustc_data_structures::sync::{self, Lrc, Lock}; use dep_graph::OpenTask; + #[cfg(not(parallel_queries))] + use std::cell::Cell; + + #[cfg(parallel_queries)] + use rayon_core; + /// This is the implicit state of rustc. It contains the current /// TyCtxt and query. It is updated when creating a local interner or /// executing a new query. 
Whenever there's a TyCtxt value available @@ -1732,9 +1737,29 @@ pub mod tls { pub task: &'a OpenTask, } - // A thread local value which stores a pointer to the current ImplicitCtxt + /// Sets Rayon's thread local variable which is preserved for Rayon jobs + /// to `value` during the call to `f`. It is restored to its previous value after. + /// This is used to set the pointer to the new ImplicitCtxt. + #[cfg(parallel_queries)] + fn set_tlv R, R>(value: usize, f: F) -> R { + rayon_core::tlv::with(value, f) + } + + /// Gets Rayon's thread local variable which is preserved for Rayon jobs. + /// This is used to get the pointer to the current ImplicitCtxt. + #[cfg(parallel_queries)] + fn get_tlv() -> usize { + rayon_core::tlv::get() + } + + /// A thread local variable which stores a pointer to the current ImplicitCtxt + #[cfg(not(parallel_queries))] thread_local!(static TLV: Cell = Cell::new(0)); + /// Sets TLV to `value` during the call to `f`. + /// It is restored to its previous value after. + /// This is used to set the pointer to the new ImplicitCtxt. + #[cfg(not(parallel_queries))] fn set_tlv R, R>(value: usize, f: F) -> R { let old = get_tlv(); let _reset = OnDrop(move || TLV.with(|tlv| tlv.set(old))); @@ -1742,6 +1767,8 @@ pub mod tls { f() } + /// This is used to get the pointer to the current ImplicitCtxt. + #[cfg(not(parallel_queries))] fn get_tlv() -> usize { TLV.with(|tlv| tlv.get()) } @@ -1810,6 +1837,15 @@ pub mod tls { where F: for<'a> FnOnce(TyCtxt<'a, 'gcx, 'gcx>) -> R { with_thread_locals(|| { + // Update GCX_PTR to indicate there's a GlobalCtxt available + GCX_PTR.with(|lock| { + *lock.lock() = gcx as *const _ as usize; + }); + // Set GCX_PTR back to 0 when we exit + let _on_drop = OnDrop(move || { + GCX_PTR.with(|lock| *lock.lock() = 0); + }); + let tcx = TyCtxt { gcx, interners: &gcx.global_interners, @@ -1826,6 +1862,32 @@ pub mod tls { }) } + /// Stores a pointer to the GlobalCtxt if one is available. 
+ /// This is used to access the GlobalCtxt in the deadlock handler + /// given to Rayon. + scoped_thread_local!(pub static GCX_PTR: Lock); + + /// Creates a TyCtxt and ImplicitCtxt based on the GCX_PTR thread local. + /// This is used in the deadlock handler. + pub unsafe fn with_global(f: F) -> R + where F: for<'a, 'gcx, 'tcx> FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> R + { + let gcx = GCX_PTR.with(|lock| *lock.lock()); + assert!(gcx != 0); + let gcx = &*(gcx as *const GlobalCtxt<'_>); + let tcx = TyCtxt { + gcx, + interners: &gcx.global_interners, + }; + let icx = ImplicitCtxt { + query: None, + tcx, + layout_depth: 0, + task: &OpenTask::Ignore, + }; + enter_context(&icx, |_| f(tcx)) + } + /// Allows access to the current ImplicitCtxt in a closure if one is available pub fn with_context_opt(f: F) -> R where F: for<'a, 'gcx, 'tcx> FnOnce(Option<&ImplicitCtxt<'a, 'gcx, 'tcx>>) -> R diff --git a/src/librustc/ty/maps/job.rs b/src/librustc/ty/maps/job.rs index 3b6af018d6b78..3da73d47a0bf5 100644 --- a/src/librustc/ty/maps/job.rs +++ b/src/librustc/ty/maps/job.rs @@ -8,13 +8,31 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use rustc_data_structures::sync::{Lock, Lrc}; +#![allow(warnings)] + +use std::mem; +use rustc_data_structures::sync::{Lock, LockGuard, Lrc, Weak}; +use rustc_data_structures::OnDrop; use syntax_pos::Span; use ty::tls; use ty::maps::Query; use ty::maps::plumbing::CycleError; use ty::context::TyCtxt; use errors::Diagnostic; +use std::process; +use std::fmt; +use std::collections::HashSet; +#[cfg(parallel_queries)] +use { + rayon_core, + parking_lot::{Mutex, Condvar}, + std::sync::atomic::Ordering, + std::thread, + std::iter, + std::iter::FromIterator, + syntax_pos::DUMMY_SP, + rustc_data_structures::stable_hasher::{StableHasherResult, StableHasher, HashStable}, +}; /// Indicates the state of a query for a given key in a query map pub(super) enum QueryResult<'tcx> { @@ -42,6 +60,10 @@ pub struct QueryJob<'tcx> { /// Diagnostic messages which are emitted while the query executes pub diagnostics: Lock>, + + /// The latch which is used to wait on this job + #[cfg(parallel_queries)] + latch: QueryLatch<'tcx>, } impl<'tcx> QueryJob<'tcx> { @@ -51,6 +73,8 @@ impl<'tcx> QueryJob<'tcx> { diagnostics: Lock::new(Vec::new()), info, parent, + #[cfg(parallel_queries)] + latch: QueryLatch::new(), } } @@ -62,6 +86,36 @@ impl<'tcx> QueryJob<'tcx> { &self, tcx: TyCtxt<'_, 'tcx, 'lcx>, span: Span, + ) -> Result<(), CycleError<'tcx>> { + #[cfg(not(parallel_queries))] + { + self.find_cycle_in_stack(tcx, span) + } + + #[cfg(parallel_queries)] + { + tls::with_related_context(tcx, move |icx| { + let mut waiter = Lrc::new(QueryWaiter { + query: icx.query.clone(), + span, + cycle: Lock::new(None), + condvar: Condvar::new(), + }); + self.latch.await(&waiter); + + match Lrc::get_mut(&mut waiter).unwrap().cycle.get_mut().take() { + None => Ok(()), + Some(cycle) => Err(cycle) + } + }) + } + } + + #[cfg(not(parallel_queries))] + fn find_cycle_in_stack<'lcx>( + &self, + tcx: TyCtxt<'_, 'tcx, 'lcx>, + span: Span, ) -> Result<(), CycleError<'tcx>> { // Get the current executing query (waiter) and 
find the waitee amongst its parents let mut current_job = tls::with_related_context(tcx, |icx| icx.query.clone()); @@ -93,5 +147,379 @@ impl<'tcx> QueryJob<'tcx> { /// /// This does nothing for single threaded rustc, /// as there are no concurrent jobs which could be waiting on us - pub fn signal_complete(&self) {} + pub fn signal_complete(&self) { + #[cfg(parallel_queries)] + self.latch.set(); + } + + fn as_ptr(&self) -> *const QueryJob<'tcx> { + self as *const _ + } +} + +#[cfg(parallel_queries)] +struct QueryWaiter<'tcx> { + query: Option>>, + condvar: Condvar, + span: Span, + cycle: Lock>>, +} + +#[cfg(parallel_queries)] +impl<'tcx> QueryWaiter<'tcx> { + fn notify(&self, registry: &rayon_core::Registry) { + rayon_core::mark_unblocked(registry); + self.condvar.notify_one(); + } +} + +#[cfg(parallel_queries)] +struct QueryLatchInfo<'tcx> { + complete: bool, + waiters: Vec>>, +} + +#[cfg(parallel_queries)] +struct QueryLatch<'tcx> { + info: Mutex>, +} + +#[cfg(parallel_queries)] +impl<'tcx> QueryLatch<'tcx> { + fn new() -> Self { + QueryLatch { + info: Mutex::new(QueryLatchInfo { + complete: false, + waiters: Vec::new(), + }), + } + } + + /// Awaits the caller on this latch by blocking the current thread. + fn await(&self, waiter: &Lrc>) { + let mut info = self.info.lock(); + if !info.complete { + // We push the waiter on to the `waiters` list. It can be accessed inside + // the `wait` call below, by 1) the `set` method or 2) by deadlock detection. + // Both of these will remove it from the `waiters` list before resuming + // this thread. + info.waiters.push(waiter.clone()); + + // If this detects a deadlock and the deadlock handler wants to resume this thread + // we have to be in the `wait` call. This is ensured by the deadlock handler + // getting the self.info lock. 
+            rayon_core::mark_blocked();
+            waiter.condvar.wait(&mut info);
+        }
+    }
+
+    /// Sets the latch and resumes all waiters on it
+    fn set(&self) {
+        let mut info = self.info.lock();
+        debug_assert!(!info.complete);
+        info.complete = true;
+        let registry = rayon_core::Registry::current();
+        for waiter in info.waiters.drain(..) {
+            waiter.notify(&registry);
+        }
+    }
+
+    /// Remove a single waiter from the list of waiters.
+    /// This is used to break query cycles.
+    fn extract_waiter(
+        &self,
+        waiter: usize,
+    ) -> Lrc<QueryWaiter<'tcx>> {
+        let mut info = self.info.lock();
+        debug_assert!(!info.complete);
+        // Remove the waiter from the list of waiters
+        info.waiters.remove(waiter)
+    }
+}
+
+/// A resumable waiter of a query. The usize is the index into waiters in the query's latch
+#[cfg(parallel_queries)]
+type Waiter<'tcx> = (Lrc<QueryJob<'tcx>>, usize);
+
+/// Visits all the non-resumable and resumable waiters of a query.
+/// Only waiters in a query are visited.
+/// `visit` is called for every waiter and is passed a query waiting on `query_ref`
+/// and a span indicating the reason the query waited on `query_ref`.
+/// If `visit` returns Some, this function returns.
+/// For visits of non-resumable waiters it returns the return value of `visit`.
+/// For visits of resumable waiters it returns Some(Some(Waiter)) which has the
+/// required information to resume the waiter.
+/// If all `visit` calls return None, this function also returns None.
+#[cfg(parallel_queries)]
+fn visit_waiters<'tcx, F>(query: Lrc<QueryJob<'tcx>>, mut visit: F) -> Option<Option<Waiter<'tcx>>>
+where
+    F: FnMut(Span, Lrc<QueryJob<'tcx>>) -> Option<Option<Waiter<'tcx>>>
+{
+    // Visit the parent query which is a non-resumable waiter since it's on the same stack
+    if let Some(ref parent) = query.parent {
+        if let Some(cycle) = visit(query.info.span, parent.clone()) {
+            return Some(cycle);
+        }
+    }
+
+    // Visit the explicit waiters which use condvars and are resumable
+    for (i, waiter) in query.latch.info.lock().waiters.iter().enumerate() {
+        if let Some(ref waiter_query) = waiter.query {
+            if visit(waiter.span, waiter_query.clone()).is_some() {
+                // Return a value which indicates that this waiter can be resumed
+                return Some(Some((query.clone(), i)));
+            }
+        }
+    }
+    None
+}
+
+/// Look for query cycles by doing a depth first search starting at `query`.
+/// `span` is the reason for the `query` to execute. This is initially DUMMY_SP.
+/// If a cycle is detected, this initial value is replaced with the span causing
+/// the cycle.
+#[cfg(parallel_queries)]
+fn cycle_check<'tcx>(query: Lrc<QueryJob<'tcx>>,
+                     span: Span,
+                     stack: &mut Vec<(Span, Lrc<QueryJob<'tcx>>)>,
+                     visited: &mut HashSet<*const QueryJob<'tcx>>
+) -> Option<Option<Waiter<'tcx>>> {
+    if visited.contains(&query.as_ptr()) {
+        return if let Some(p) = stack.iter().position(|q| q.1.as_ptr() == query.as_ptr()) {
+            // We detected a query cycle, fix up the initial span and return Some
+
+            // Remove previous stack entries
+            stack.splice(0..p, iter::empty());
+            // Replace the span for the first query with the cycle cause
+            stack[0].0 = span;
+            Some(None)
+        } else {
+            None
+        }
+    }
+
+    // Mark this query as visited and add it to the stack
+    visited.insert(query.as_ptr());
+    stack.push((span, query.clone()));
+
+    // Visit all the waiters
+    let r = visit_waiters(query, |span, successor| {
+        cycle_check(successor, span, stack, visited)
+    });
+
+    // Remove the entry in our stack if we didn't find a cycle
+    if r.is_none() {
+        stack.pop();
+    }
+
+    r
+}
+
+/// Finds out if there's a path to the compiler root (aka. code which isn't in a query)
+/// from `query` without going through any of the queries in `visited`.
+/// This is achieved with a depth first search.
+#[cfg(parallel_queries)]
+fn connected_to_root<'tcx>(
+    query: Lrc<QueryJob<'tcx>>,
+    visited: &mut HashSet<*const QueryJob<'tcx>>
+) -> bool {
+    // We already visited this or we're deliberately ignoring it
+    if visited.contains(&query.as_ptr()) {
+        return false;
+    }
+
+    // This query is connected to the root (it has no query parent), return true
+    if query.parent.is_none() {
+        return true;
+    }
+
+    visited.insert(query.as_ptr());
+
+    visit_waiters(query, |_, successor| {
+        if connected_to_root(successor, visited) {
+            Some(None)
+        } else {
+            None
+        }
+    }).is_some()
+}
+
+/// Looks for query cycles starting from the last query in `jobs`.
+/// If a cycle is found, all queries in the cycle are removed from `jobs` and
+/// the function returns true.
+/// If a cycle was not found, the starting query is removed from `jobs` and
+/// the function returns false.
+#[cfg(parallel_queries)]
+fn remove_cycle<'tcx>(
+    jobs: &mut Vec<Lrc<QueryJob<'tcx>>>,
+    wakelist: &mut Vec<Lrc<QueryWaiter<'tcx>>>,
+    tcx: TyCtxt<'_, 'tcx, '_>
+) -> bool {
+    let mut visited = HashSet::new();
+    let mut stack = Vec::new();
+    // Look for a cycle starting with the last query in `jobs`
+    if let Some(waiter) = cycle_check(jobs.pop().unwrap(),
+                                      DUMMY_SP,
+                                      &mut stack,
+                                      &mut visited) {
+        // Reverse the stack so earlier entries require later entries
+        stack.reverse();
+
+        // Extract the spans and queries into separate arrays
+        let mut spans: Vec<_> = stack.iter().map(|e| e.0).collect();
+        let queries = stack.into_iter().map(|e| e.1);
+
+        // Shift the spans so that queries are matched with the span for their waitee
+        let last = spans.pop().unwrap();
+        spans.insert(0, last);
+
+        // Zip them back together
+        let mut stack: Vec<_> = spans.into_iter().zip(queries).collect();
+
+        // Remove the queries in our cycle from the list of jobs to look at
+        for r in &stack {
+            if let Some(pos) = jobs.iter().position(|j| j.as_ptr() == r.1.as_ptr()) {
+                jobs.remove(pos);
+            }
+        }
+
+        // Find the queries in the cycle which are
+        // connected to queries outside the cycle
+        let entry_points: Vec<Lrc<QueryJob<'tcx>>> = stack.iter().filter_map(|query| {
+            // Mark all the other queries in the cycle as already visited
+            let mut visited = HashSet::from_iter(stack.iter().filter_map(|q| {
+                if q.1.as_ptr() != query.1.as_ptr() {
+                    Some(q.1.as_ptr())
+                } else {
+                    None
+                }
+            }));
+
+            if connected_to_root(query.1.clone(), &mut visited) {
+                Some(query.1.clone())
+            } else {
+                None
+            }
+        }).collect();
+
+        // Deterministically pick an entry point
+        // FIXME: Sort this instead
+        let mut hcx = tcx.create_stable_hashing_context();
+        let entry_point = entry_points.iter().min_by_key(|q| {
+            let mut stable_hasher = StableHasher::<u64>::new();
+            q.info.query.hash_stable(&mut hcx, &mut stable_hasher);
+            stable_hasher.finish()
+        }).unwrap().as_ptr();
+
+        // Shift the stack until our entry point is first
+        while stack[0].1.as_ptr() != entry_point {
+            let last = stack.pop().unwrap();
+            stack.insert(0, last);
+        }
+
+        // Create the cycle error
+        let mut error = CycleError {
+            usage: None,
+            cycle: stack.iter().map(|&(s, ref q)| QueryInfo {
+                span: s,
+                query: q.info.query.clone(),
+            }).collect(),
+        };
+
+        // We unwrap `waiter` here since there must always be one
+        // edge which is resumable / waited using a query latch
+        let (waitee_query, waiter_idx) = waiter.unwrap();
+
+        // Extract the waiter we want to resume
+        let waiter = waitee_query.latch.extract_waiter(waiter_idx);
+
+        // Set the cycle error so it will be picked up when resumed
+        *waiter.cycle.lock() = Some(error);
+
+        // Put the waiter on the list of things to resume
+        wakelist.push(waiter);
+
+        true
+    } else {
+        false
+    }
+}
+
+/// Creates a new thread and forwards information in thread locals to it.
+/// The new thread runs the deadlock handler.
+/// Must only be called when a deadlock is about to happen.
+#[cfg(parallel_queries)]
+pub unsafe fn handle_deadlock() {
+    use syntax;
+    use syntax_pos;
+
+    let registry = rayon_core::Registry::current();
+
+    let gcx_ptr = tls::GCX_PTR.with(|gcx_ptr| {
+        gcx_ptr as *const _
+    });
+    let gcx_ptr = &*gcx_ptr;
+
+    let syntax_globals = syntax::GLOBALS.with(|syntax_globals| {
+        syntax_globals as *const _
+    });
+    let syntax_globals = &*syntax_globals;
+
+    let syntax_pos_globals = syntax_pos::GLOBALS.with(|syntax_pos_globals| {
+        syntax_pos_globals as *const _
+    });
+    let syntax_pos_globals = &*syntax_pos_globals;
+    thread::spawn(move || {
+        tls::GCX_PTR.set(gcx_ptr, || {
+            syntax::GLOBALS.set(syntax_globals, || {
+                syntax_pos::GLOBALS.set(syntax_pos_globals, || {
+                    tls::with_thread_locals(|| {
+                        tls::with_global(|tcx| deadlock(tcx, &registry))
+                    })
+                })
+            })
+        })
+    });
+}
+
+/// Detects query cycles by using depth first search over all active query jobs.
+/// If a query cycle is found it will break the cycle by finding an edge which +/// uses a query latch and then resuming that waiter. +/// There may be multiple cycles involved in a deadlock, so this searches +/// all active queries for cycles before finally resuming all the waiters at once. +#[cfg(parallel_queries)] +fn deadlock(tcx: TyCtxt<'_, '_, '_>, registry: &rayon_core::Registry) { + let on_panic = OnDrop(|| { + eprintln!("deadlock handler panicked, aborting process"); + process::abort(); + }); + + let mut wakelist = Vec::new(); + let mut jobs: Vec<_> = tcx.maps.collect_active_jobs(); + + let mut found_cycle = false; + + while jobs.len() > 0 { + if remove_cycle(&mut jobs, &mut wakelist, tcx) { + found_cycle = true; + } + } + + // Check that a cycle was found. It is possible for a deadlock to occur without + // a query cycle if a query which can be waited on uses Rayon to do multithreading + // internally. Such a query (X) may be executing on 2 threads (A and B) and A may + // wait using Rayon on B. Rayon may then switch to executing another query (Y) + // which in turn will wait on X causing a deadlock. We have a false dependency from + // X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here + // only considers the true dependency and won't detect a cycle. 
+ assert!(found_cycle); + + // FIXME: Ensure this won't cause a deadlock before we return + for waiter in wakelist.into_iter() { + waiter.notify(registry); + } + + on_panic.disable(); } diff --git a/src/librustc/ty/maps/mod.rs b/src/librustc/ty/maps/mod.rs index 6556e47720c62..b50b43aace760 100644 --- a/src/librustc/ty/maps/mod.rs +++ b/src/librustc/ty/maps/mod.rs @@ -63,10 +63,12 @@ use syntax::symbol::Symbol; #[macro_use] mod plumbing; use self::plumbing::*; -pub use self::plumbing::force_from_dep_node; +pub use self::plumbing::{force_from_dep_node, CycleError}; mod job; pub use self::job::{QueryJob, QueryInfo}; +#[cfg(parallel_queries)] +pub use self::job::handle_deadlock; mod keys; pub use self::keys::Key; diff --git a/src/librustc/ty/maps/plumbing.rs b/src/librustc/ty/maps/plumbing.rs index 4a9d44b7403b9..13d2a13e5ddc5 100644 --- a/src/librustc/ty/maps/plumbing.rs +++ b/src/librustc/ty/maps/plumbing.rs @@ -223,7 +223,7 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> Drop for JobOwner<'a, 'tcx, Q> { } #[derive(Clone)] -pub(super) struct CycleError<'tcx> { +pub struct CycleError<'tcx> { /// The query and related span which uses the cycle pub(super) usage: Option<(Span, Query<'tcx>)>, pub(super) cycle: Vec>, @@ -632,7 +632,15 @@ macro_rules! define_maps { $($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)*) => { + use std::mem; + use ty::maps::job::QueryResult; use rustc_data_structures::sync::Lock; + use { + rustc_data_structures::stable_hasher::HashStable, + rustc_data_structures::stable_hasher::StableHasherResult, + rustc_data_structures::stable_hasher::StableHasher, + ich::StableHashingContext + }; define_map_struct! { tcx: $tcx, @@ -647,10 +655,25 @@ macro_rules! 
define_maps { $($name: Lock::new(QueryMap::new())),* } } + + pub fn collect_active_jobs(&self) -> Vec>> { + let mut jobs = Vec::new(); + + // We use try_lock here since we are only called from the + // deadlock handler, and this shouldn't be locked + $(for v in self.$name.try_lock().unwrap().active.values() { + match *v { + QueryResult::Started(ref job) => jobs.push(job.clone()), + _ => (), + } + })* + + return jobs; + } } #[allow(bad_style)] - #[derive(Copy, Clone, Debug, PartialEq, Eq)] + #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] pub enum Query<$tcx> { $($(#[$attr])* $name($K)),* } @@ -692,6 +715,17 @@ macro_rules! define_maps { } } + impl<'a, $tcx> HashStable> for Query<$tcx> { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher) { + mem::discriminant(self).hash_stable(hcx, hasher); + match *self { + $(Query::$name(key) => key.hash_stable(hcx, hasher),)* + } + } + } + pub mod queries { use std::marker::PhantomData; diff --git a/src/librustc_data_structures/Cargo.toml b/src/librustc_data_structures/Cargo.toml index 17ee771e52940..fc5fe91c977d4 100644 --- a/src/librustc_data_structures/Cargo.toml +++ b/src/librustc_data_structures/Cargo.toml @@ -16,8 +16,8 @@ serialize = { path = "../libserialize" } cfg-if = "0.1.2" stable_deref_trait = "1.0.0" parking_lot_core = "0.2.8" -rustc-rayon = "0.1.0" -rustc-rayon-core = "0.1.0" +rustc-rayon = "0.1.1" +rustc-rayon-core = "0.1.1" rustc-hash = "1.0.1" [dependencies.parking_lot] diff --git a/src/librustc_data_structures/lib.rs b/src/librustc_data_structures/lib.rs index 7046a2a2a493d..5844edf000a8b 100644 --- a/src/librustc_data_structures/lib.rs +++ b/src/librustc_data_structures/lib.rs @@ -80,6 +80,14 @@ pub mod sorted_map; pub struct OnDrop(pub F); +impl OnDrop { + /// Forgets the function which prevents it from running. + /// Ensure that the function owns no memory, otherwise it will be leaked. 
+ pub fn disable(self) { + std::mem::forget(self); + } +} + impl Drop for OnDrop { fn drop(&mut self) { (self.0)(); diff --git a/src/librustc_data_structures/sync.rs b/src/librustc_data_structures/sync.rs index 6f7d9e1b54b1e..33f6eda2a8753 100644 --- a/src/librustc_data_structures/sync.rs +++ b/src/librustc_data_structures/sync.rs @@ -519,6 +519,18 @@ impl Lock { self.0.get_mut() } + #[cfg(parallel_queries)] + #[inline(always)] + pub fn try_lock(&self) -> Option> { + self.0.try_lock() + } + + #[cfg(not(parallel_queries))] + #[inline(always)] + pub fn try_lock(&self) -> Option> { + self.0.try_borrow_mut().ok() + } + #[cfg(parallel_queries)] #[inline(always)] pub fn lock(&self) -> LockGuard { diff --git a/src/librustc_driver/Cargo.toml b/src/librustc_driver/Cargo.toml index 24bf07d793f3d..5b75912c18f50 100644 --- a/src/librustc_driver/Cargo.toml +++ b/src/librustc_driver/Cargo.toml @@ -13,7 +13,7 @@ arena = { path = "../libarena" } graphviz = { path = "../libgraphviz" } log = "0.4" env_logger = { version = "0.5", default-features = false } -rustc-rayon = "0.1.0" +rustc-rayon = "0.1.1" scoped-tls = { version = "0.1.1", features = ["nightly"] } rustc = { path = "../librustc" } rustc_allocator = { path = "../librustc_allocator" } diff --git a/src/librustc_driver/driver.rs b/src/librustc_driver/driver.rs index 8dcbda917b243..37f8bff964f5b 100644 --- a/src/librustc_driver/driver.rs +++ b/src/librustc_driver/driver.rs @@ -49,7 +49,7 @@ use std::fs; use std::io::{self, Write}; use std::iter; use std::path::{Path, PathBuf}; -use rustc_data_structures::sync::{self, Lrc}; +use rustc_data_structures::sync::{self, Lrc, Lock}; use std::sync::mpsc; use syntax::{self, ast, attr, diagnostics, visit}; use syntax::ext::base::ExtCtxt; @@ -69,7 +69,9 @@ pub fn spawn_thread_pool R + sync::Send, R: sync:: opts: config::Options, f: F ) -> R { - f(opts) + ty::tls::GCX_PTR.set(&Lock::new(0), || { + f(opts) + }) } #[cfg(parallel_queries)] @@ -81,8 +83,12 @@ pub fn spawn_thread_pool R + 
sync::Send, R: sync:: use syntax_pos; use rayon::{ThreadPoolBuilder, ThreadPool}; - let config = ThreadPoolBuilder::new().num_threads(Session::query_threads_from_opts(&opts)) - .stack_size(16 * 1024 * 1024); + let gcx_ptr = &Lock::new(0); + + let config = ThreadPoolBuilder::new() + .num_threads(Session::query_threads_from_opts(&opts)) + .deadlock_handler(|| unsafe { ty::maps::handle_deadlock() }) + .stack_size(16 * 1024 * 1024); let with_pool = move |pool: &ThreadPool| { pool.install(move || f(opts)) @@ -98,7 +104,9 @@ pub fn spawn_thread_pool R + sync::Send, R: sync:: syntax::GLOBALS.set(syntax_globals, || { syntax_pos::GLOBALS.set(syntax_pos_globals, || { ty::tls::with_thread_locals(|| { - worker() + ty::tls::GCX_PTR.set(gcx_ptr, || { + worker() + }) }) }) }) diff --git a/src/test/run-make-fulldeps/issue-19371/foo.rs b/src/test/run-make-fulldeps/issue-19371/foo.rs index 403f4f79843e8..6f5e2affdbd3c 100644 --- a/src/test/run-make-fulldeps/issue-19371/foo.rs +++ b/src/test/run-make-fulldeps/issue-19371/foo.rs @@ -19,9 +19,9 @@ extern crate rustc_codegen_utils; extern crate syntax; use rustc::session::{build_session, Session}; -use rustc::session::config::{basic_options, Input, +use rustc::session::config::{basic_options, Input, Options, OutputType, OutputTypes}; -use rustc_driver::driver::{compile_input, CompileController}; +use rustc_driver::driver::{self, compile_input, CompileController}; use rustc_metadata::cstore::CStore; use rustc_errors::registry::Registry; use syntax::codemap::FileName; @@ -52,14 +52,7 @@ fn main() { compile(src.to_string(), tmpdir.join("out"), sysroot.clone()); } -fn basic_sess(sysroot: PathBuf) -> (Session, Rc, Box) { - let mut opts = basic_options(); - opts.output_types = OutputTypes::new(&[(OutputType::Exe, None)]); - opts.maybe_sysroot = Some(sysroot); - if let Ok(linker) = std::env::var("RUSTC_LINKER") { - opts.cg.linker = Some(linker.into()); - } - +fn basic_sess(opts: Options) -> (Session, Rc, Box) { let descriptions = 
Registry::new(&rustc::DIAGNOSTICS); let sess = build_session(opts, None, descriptions); let codegen_backend = rustc_driver::get_codegen_backend(&sess); @@ -70,19 +63,27 @@ fn basic_sess(sysroot: PathBuf) -> (Session, Rc, Box) { fn compile(code: String, output: PathBuf, sysroot: PathBuf) { syntax::with_globals(|| { - let (sess, cstore, codegen_backend) = basic_sess(sysroot); - let control = CompileController::basic(); - let input = Input::Str { name: FileName::Anon, input: code }; - let _ = compile_input( - codegen_backend, - &sess, - &cstore, - &None, - &input, - &None, - &Some(output), - None, - &control - ); + let mut opts = basic_options(); + opts.output_types = OutputTypes::new(&[(OutputType::Exe, None)]); + opts.maybe_sysroot = Some(sysroot); + if let Ok(linker) = std::env::var("RUSTC_LINKER") { + opts.cg.linker = Some(linker.into()); + } + driver::spawn_thread_pool(opts, |opts| { + let (sess, cstore, codegen_backend) = basic_sess(opts); + let control = CompileController::basic(); + let input = Input::Str { name: FileName::Anon, input: code }; + let _ = compile_input( + codegen_backend, + &sess, + &cstore, + &None, + &input, + &None, + &Some(output), + None, + &control + ); + }); }); }