From f9e6fbc4f751a5fd4befaf91ad4b1022efb99501 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?John=20K=C3=A5re=20Alsaker?= Date: Fri, 6 Apr 2018 12:56:59 +0200 Subject: [PATCH 01/11] Make queries block and handle query cycles --- src/librustc/Cargo.toml | 4 + src/librustc/lib.rs | 5 + src/librustc/ty/context.rs | 50 ++++- src/librustc/ty/maps/job.rs | 367 ++++++++++++++++++++++++++++++- src/librustc/ty/maps/mod.rs | 4 +- src/librustc/ty/maps/plumbing.rs | 36 ++- src/librustc_driver/driver.rs | 13 +- 7 files changed, 469 insertions(+), 10 deletions(-) diff --git a/src/librustc/Cargo.toml b/src/librustc/Cargo.toml index 1d1166ad2c4fd..4dc818c650e34 100644 --- a/src/librustc/Cargo.toml +++ b/src/librustc/Cargo.toml @@ -15,9 +15,12 @@ fmt_macros = { path = "../libfmt_macros" } graphviz = { path = "../libgraphviz" } jobserver = "0.1" lazy_static = "1.0.0" +scoped-tls = { version = "0.1.1", features = ["nightly"] } log = { version = "0.4", features = ["release_max_level_info", "std"] } polonius-engine = "0.5.0" proc_macro = { path = "../libproc_macro" } +rustc-rayon = "0.1.0" +rustc-rayon-core = "0.1.0" rustc_apfloat = { path = "../librustc_apfloat" } rustc_target = { path = "../librustc_target" } rustc_data_structures = { path = "../librustc_data_structures" } @@ -26,6 +29,7 @@ serialize = { path = "../libserialize" } syntax = { path = "../libsyntax" } syntax_pos = { path = "../libsyntax_pos" } backtrace = "0.3.3" +parking_lot = "0.5.5" byteorder = { version = "1.1", features = ["i128"]} chalk-engine = { version = "0.6.0", default-features=false } diff --git a/src/librustc/lib.rs b/src/librustc/lib.rs index 10e8905054d11..a006856f58b7d 100644 --- a/src/librustc/lib.rs +++ b/src/librustc/lib.rs @@ -67,6 +67,7 @@ #![feature(unboxed_closures)] #![feature(trace_macros)] #![feature(trusted_len)] +#![feature(vec_remove_item)] #![feature(catch_expr)] #![feature(integer_atomics)] #![feature(test)] @@ -83,13 +84,17 @@ extern crate fmt_macros; extern crate getopts; extern crate graphviz; #[macro_use] extern crate lazy_static; +#[macro_use] extern crate scoped_tls; #[cfg(windows)] extern crate libc; extern crate polonius_engine; extern crate rustc_target; #[macro_use] extern crate rustc_data_structures; extern crate serialize; +extern crate parking_lot; extern crate rustc_errors as errors; +extern crate rustc_rayon as rayon; +extern crate rustc_rayon_core as rayon_core; #[macro_use] extern crate log; #[macro_use] extern crate syntax; extern crate syntax_pos; diff --git a/src/librustc/ty/context.rs b/src/librustc/ty/context.rs index 35b2ce50da79d..ef584774f694c 100644 --- a/src/librustc/ty/context.rs +++ b/src/librustc/ty/context.rs @@ -1699,16 +1699,21 @@ impl<'a, 'tcx> Lift<'tcx> for &'a Slice { pub mod tls { use super::{GlobalCtxt, TyCtxt}; - use std::cell::Cell; use std::fmt; use std::mem; use syntax_pos; use ty::maps; use errors::{Diagnostic, TRACK_DIAGNOSTICS}; use rustc_data_structures::OnDrop; - use rustc_data_structures::sync::{self, Lrc}; + use rustc_data_structures::sync::{self, Lrc, Lock}; use dep_graph::OpenTask; + #[cfg(not(parallel_queries))] + use std::cell::Cell; + + #[cfg(parallel_queries)] + use rayon_core; + /// This is the implicit state of rustc. It contains the current /// TyCtxt and query. It is updated when creating a local interner or /// executing a new query. 
Whenever there's a TyCtxt value available @@ -1732,9 +1737,21 @@ pub mod tls { pub task: &'a OpenTask, } + #[cfg(parallel_queries)] + fn set_tlv R, R>(value: usize, f: F) -> R { + rayon_core::tlv::with(value, f) + } + + #[cfg(parallel_queries)] + fn get_tlv() -> usize { + rayon_core::tlv::get() + } + // A thread local value which stores a pointer to the current ImplicitCtxt + #[cfg(not(parallel_queries))] thread_local!(static TLV: Cell = Cell::new(0)); + #[cfg(not(parallel_queries))] fn set_tlv R, R>(value: usize, f: F) -> R { let old = get_tlv(); let _reset = OnDrop(move || TLV.with(|tlv| tlv.set(old))); @@ -1742,6 +1759,7 @@ pub mod tls { f() } + #[cfg(not(parallel_queries))] fn get_tlv() -> usize { TLV.with(|tlv| tlv.get()) } @@ -1810,6 +1828,13 @@ pub mod tls { where F: for<'a> FnOnce(TyCtxt<'a, 'gcx, 'gcx>) -> R { with_thread_locals(|| { + GCX_PTR.with(|lock| { + *lock.lock() = gcx as *const _ as usize; + }); + let _on_drop = OnDrop(move || { + GCX_PTR.with(|lock| *lock.lock() = 0); + }); + let tcx = TyCtxt { gcx, interners: &gcx.global_interners, @@ -1826,6 +1851,27 @@ pub mod tls { }) } + scoped_thread_local!(pub static GCX_PTR: Lock); + + pub unsafe fn with_global(f: F) -> R + where F: for<'a, 'gcx, 'tcx> FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> R + { + let gcx = GCX_PTR.with(|lock| *lock.lock()); + assert!(gcx != 0); + let gcx = &*(gcx as *const GlobalCtxt<'_>); + let tcx = TyCtxt { + gcx, + interners: &gcx.global_interners, + }; + let icx = ImplicitCtxt { + query: None, + tcx, + layout_depth: 0, + task: &OpenTask::Ignore, + }; + enter_context(&icx, |_| f(tcx)) + } + /// Allows access to the current ImplicitCtxt in a closure if one is available pub fn with_context_opt(f: F) -> R where F: for<'a, 'gcx, 'tcx> FnOnce(Option<&ImplicitCtxt<'a, 'gcx, 'tcx>>) -> R diff --git a/src/librustc/ty/maps/job.rs b/src/librustc/ty/maps/job.rs index 3b6af018d6b78..3fe22dba6e153 100644 --- a/src/librustc/ty/maps/job.rs +++ b/src/librustc/ty/maps/job.rs @@ -8,13 +8,31 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
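[Note: the context.rs hunks above stash a pointer to the current ImplicitCtxt in a thread-local (Rayon's TLV slot when parallel, a plain `Cell` otherwise) and restore the previous value on exit through an `OnDrop` guard. A self-contained sketch of that save/restore discipline, assuming nothing beyond std — the `OnDrop` shape matches the one in librustc_data_structures:]

```rust
use std::cell::Cell;

thread_local!(static TLV: Cell<usize> = Cell::new(0));

/// Runs the stored closure when dropped, even during unwinding.
struct OnDrop<F: FnMut()>(F);
impl<F: FnMut()> Drop for OnDrop<F> {
    fn drop(&mut self) {
        (self.0)();
    }
}

/// Sets TLV to `value` for the duration of `f`, restoring the old value after.
fn set_tlv<F: FnOnce() -> R, R>(value: usize, f: F) -> R {
    let old = TLV.with(|tlv| tlv.get());
    let _reset = OnDrop(move || TLV.with(|tlv| tlv.set(old)));
    TLV.with(|tlv| tlv.set(value));
    f()
}

fn main() {
    set_tlv(42, || {
        assert_eq!(TLV.with(|tlv| tlv.get()), 42);
        set_tlv(7, || assert_eq!(TLV.with(|tlv| tlv.get()), 7));
        // The inner guard restored the outer value on exit.
        assert_eq!(TLV.with(|tlv| tlv.get()), 42);
    });
    assert_eq!(TLV.with(|tlv| tlv.get()), 0);
}
```

[The guard is created before the store on purpose: if `f` panics, the old pointer is still put back while the stack unwinds.]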
-use rustc_data_structures::sync::{Lock, Lrc}; +#![allow(warnings)] + +use std::mem; +use rustc_data_structures::sync::{Lock, LockGuard, Lrc, Weak}; +use rustc_data_structures::OnDrop; use syntax_pos::Span; use ty::tls; use ty::maps::Query; use ty::maps::plumbing::CycleError; use ty::context::TyCtxt; use errors::Diagnostic; +use std::process; +use std::fmt; +use std::collections::HashSet; +#[cfg(parallel_queries)] +use { + rayon_core, + parking_lot::{Mutex, Condvar}, + std::sync::atomic::Ordering, + std::thread, + std::iter, + std::iter::FromIterator, + syntax_pos::DUMMY_SP, + rustc_data_structures::stable_hasher::{StableHasherResult, StableHasher, HashStable}, +}; /// Indicates the state of a query for a given key in a query map pub(super) enum QueryResult<'tcx> { @@ -42,6 +60,9 @@ pub struct QueryJob<'tcx> { /// Diagnostic messages which are emitted while the query executes pub diagnostics: Lock>, + + #[cfg(parallel_queries)] + latch: QueryLatch, } impl<'tcx> QueryJob<'tcx> { @@ -51,6 +72,8 @@ impl<'tcx> QueryJob<'tcx> { diagnostics: Lock::new(Vec::new()), info, parent, + #[cfg(parallel_queries)] + latch: QueryLatch::new(), } } @@ -62,6 +85,36 @@ impl<'tcx> QueryJob<'tcx> { &self, tcx: TyCtxt<'_, 'tcx, 'lcx>, span: Span, + ) -> Result<(), CycleError<'tcx>> { + #[cfg(not(parallel_queries))] + { + self.find_cycle_in_stack(tcx, span) + } + + #[cfg(parallel_queries)] + { + tls::with_related_context(tcx, move |icx| { + let mut waiter = QueryWaiter { + query: &icx.query, + span, + cycle: None, + condvar: Condvar::new(), + }; + self.latch.await(&mut waiter); + + match waiter.cycle { + None => Ok(()), + Some(cycle) => Err(cycle) + } + }) + } + } + + #[cfg(not(parallel_queries))] + fn find_cycle_in_stack<'lcx>( + &self, + tcx: TyCtxt<'_, 'tcx, 'lcx>, + span: Span, ) -> Result<(), CycleError<'tcx>> { // Get the current executing query (waiter) and find the waitee amongst its parents let mut current_job = tls::with_related_context(tcx, |icx| icx.query.clone()); @@ -93,5 +146,315 @@ impl<'tcx> QueryJob<'tcx> { /// /// This does nothing for single threaded rustc, /// as there are no concurrent jobs which could be waiting on us - pub fn signal_complete(&self) {} + pub fn signal_complete(&self, tcx: TyCtxt<'_, 'tcx, '_>) { + #[cfg(parallel_queries)] + self.latch.set(tcx); + } +} + +#[cfg(parallel_queries)] +struct QueryWaiter<'a, 'tcx: 'a> { + query: &'a Option>>, + condvar: Condvar, + span: Span, + cycle: Option>, +} + +#[cfg(parallel_queries)] +impl<'a, 'tcx> QueryWaiter<'a, 'tcx> { + fn notify(&self, tcx: TyCtxt<'_, '_, '_>, registry: &rayon_core::Registry) { + rayon_core::mark_unblocked(registry); + self.condvar.notify_one(); + } +} + +#[cfg(parallel_queries)] +struct QueryLatchInfo { + complete: bool, + waiters: Vec<&'static mut QueryWaiter<'static, 'static>>, +} + +#[cfg(parallel_queries)] +struct QueryLatch { + info: Mutex, +} + +#[cfg(parallel_queries)] +impl QueryLatch { + fn new() -> Self { + QueryLatch { + info: Mutex::new(QueryLatchInfo { + complete: false, + waiters: Vec::new(), + }), + } + } + + fn await(&self, waiter: &mut QueryWaiter<'_, '_>) { + let mut info = self.info.lock(); + if !info.complete { + let waiter = &*waiter; + unsafe { + #[allow(mutable_transmutes)] + info.waiters.push(mem::transmute(waiter)); + } + // If this detects a deadlock and the deadlock handler want to resume this thread + // we have to be in the `wait` call. This is ensured by the deadlock handler + // getting the self.info lock. 
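[Note: `QueryLatch::await`/`set` amount to a one-shot latch built on parking_lot. A minimal sketch under that assumption, leaving out the waiter list and the rustc-rayon `mark_blocked`/`mark_unblocked` bookkeeping (which exists so the fork's deadlock detector knows the thread is idle rather than stuck):]

```rust
extern crate parking_lot;

use parking_lot::{Condvar, Mutex};
use std::sync::Arc;
use std::thread;

/// One-shot latch: `wait` blocks until some thread calls `set`.
struct Latch {
    completed: Mutex<bool>,
    condvar: Condvar,
}

impl Latch {
    fn new() -> Self {
        Latch {
            completed: Mutex::new(false),
            condvar: Condvar::new(),
        }
    }

    fn wait(&self) {
        let mut completed = self.completed.lock();
        // Holding the lock from the check until `wait` re-blocks the thread
        // means a concurrent `set` cannot fire its notification in between —
        // the same reasoning as the comment above about the deadlock handler
        // taking `self.info` before resuming anyone.
        while !*completed {
            self.condvar.wait(&mut completed);
        }
    }

    fn set(&self) {
        let mut completed = self.completed.lock();
        *completed = true;
        self.condvar.notify_all();
    }
}

fn main() {
    let latch = Arc::new(Latch::new());
    let waiter = Arc::clone(&latch);
    let t = thread::spawn(move || waiter.wait());
    latch.set();
    t.join().unwrap();
}
```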
+ rayon_core::mark_blocked(); + waiter.condvar.wait(&mut info); + } + } + + fn set(&self, tcx: TyCtxt<'_, '_, '_>) { + let mut info = self.info.lock(); + debug_assert!(!info.complete); + info.complete = true; + let registry = rayon_core::Registry::current(); + for waiter in info.waiters.drain(..) { + waiter.notify(tcx, ®istry); + } + } + + fn resume_waiter( + &self, + waiter: usize, + error: CycleError + ) -> &'static mut QueryWaiter<'static, 'static> { + let mut info = self.info.lock(); + debug_assert!(!info.complete); + // Remove the waiter from the list of waiters + let waiter = info.waiters.remove(waiter); + + // Set the cycle error it will be picked it up when resumed + waiter.cycle = unsafe { Some(mem::transmute(error)) }; + + waiter + } +} + +#[cfg(parallel_queries)] +type Ref<'tcx> = *const QueryJob<'tcx>; + +#[cfg(parallel_queries)] +type Waiter<'tcx> = (Ref<'tcx>, usize); + +#[cfg(parallel_queries)] +fn visit_waiters<'tcx, F>(query_ref: Ref<'tcx>, mut visit: F) -> Option>> +where + F: FnMut(Span, Ref<'tcx>) -> Option>> +{ + let query = unsafe { &*query_ref }; + if let Some(ref parent) = query.parent { + if let Some(cycle) = visit(query.info.span, &**parent as Ref) { + return Some(cycle); + } + } + for (i, waiter) in query.latch.info.lock().waiters.iter().enumerate() { + if let Some(ref waiter_query) = waiter.query { + if visit(waiter.span, &**waiter_query as Ref).is_some() { + return Some(Some((query_ref, i))); + } + } + } + None +} + +#[cfg(parallel_queries)] +fn cycle_check<'tcx>(query: Ref<'tcx>, + span: Span, + stack: &mut Vec<(Span, Ref<'tcx>)>, + visited: &mut HashSet>) -> Option>> { + if visited.contains(&query) { + return if let Some(p) = stack.iter().position(|q| q.1 == query) { + // Remove previous stack entries + stack.splice(0..p, iter::empty()); + // Replace the span for the first query with the cycle cause + stack[0].0 = span; + Some(None) + } else { + None + } + } + + visited.insert(query); + stack.push((span, query)); + + let r = visit_waiters(query, |span, successor| { + cycle_check(successor, span, stack, visited) + }); + + if r.is_none() { + stack.pop(); + } + + r +} + +#[cfg(parallel_queries)] +fn connected_to_root<'tcx>(query: Ref<'tcx>, visited: &mut HashSet>) -> bool { + if visited.contains(&query) { + return false; + } + + if unsafe { (*query).parent.is_none() } { + return true; + } + + visited.insert(query); + + let mut connected = false; + + visit_waiters(query, |_, successor| { + if connected_to_root(successor, visited) { + Some(None) + } else { + None + } + }).is_some() +} + +#[cfg(parallel_queries)] +fn query_entry<'tcx>(r: Ref<'tcx>) -> QueryInfo<'tcx> { + unsafe { (*r).info.clone() } +} + +#[cfg(parallel_queries)] +fn remove_cycle<'tcx>( + jobs: &mut Vec>, + wakelist: &mut Vec<&'static mut QueryWaiter<'static, 'static>>, + tcx: TyCtxt<'_, 'tcx, '_> +) { + let mut visited = HashSet::new(); + let mut stack = Vec::new(); + if let Some(waiter) = cycle_check(jobs.pop().unwrap(), + DUMMY_SP, + &mut stack, + &mut visited) { + // Reverse the stack so earlier entries require later entries + stack.reverse(); + + let mut spans: Vec<_> = stack.iter().map(|e| e.0).collect(); + let queries = stack.iter().map(|e| e.1); + + // Shift the spans so that a query is matched the span for its waitee + let last = spans.pop().unwrap(); + spans.insert(0, last); + + let mut stack: Vec<_> = spans.into_iter().zip(queries).collect(); + + // Remove the queries in our cycle from the list of jobs to look at + for r in &stack { + jobs.remove_item(&r.1); + } + + let (waitee_query, 
waiter_idx) = waiter.unwrap(); + let waitee_query = unsafe { &*waitee_query }; + + // Find the queries in the cycle which are + // connected to queries outside the cycle + let entry_points: Vec> = stack.iter().filter_map(|query| { + // Mark all the other queries in the cycle as already visited + let mut visited = HashSet::from_iter(stack.iter().filter_map(|q| { + if q.1 != query.1 { + Some(q.1) + } else { + None + } + })); + + if connected_to_root(query.1, &mut visited) { + Some(query.1) + } else { + None + } + }).collect(); + + // Deterministically pick an entry point + // FIXME: Sort this instead + let mut hcx = tcx.create_stable_hashing_context(); + let entry_point = *entry_points.iter().min_by_key(|&&q| { + let mut stable_hasher = StableHasher::::new(); + unsafe { (*q).info.query.hash_stable(&mut hcx, &mut stable_hasher); } + stable_hasher.finish() + }).unwrap(); + + // Shift the stack until our entry point is first + while stack[0].1 != entry_point { + let last = stack.pop().unwrap(); + stack.insert(0, last); + } + + let mut error = CycleError { + usage: None, + cycle: stack.iter().map(|&(s, q)| QueryInfo { + span: s, + query: unsafe { (*q).info.query.clone() }, + } ).collect(), + }; + + wakelist.push(waitee_query.latch.resume_waiter(waiter_idx, error)); + } +} + +#[cfg(parallel_queries)] +pub fn handle_deadlock() { + use syntax; + use syntax_pos; + + let registry = rayon_core::Registry::current(); + + let gcx_ptr = tls::GCX_PTR.with(|gcx_ptr| { + gcx_ptr as *const _ + }); + let gcx_ptr = unsafe { &*gcx_ptr }; + + let syntax_globals = syntax::GLOBALS.with(|syntax_globals| { + syntax_globals as *const _ + }); + let syntax_globals = unsafe { &*syntax_globals }; + + let syntax_pos_globals = syntax_pos::GLOBALS.with(|syntax_pos_globals| { + syntax_pos_globals as *const _ + }); + let syntax_pos_globals = unsafe { &*syntax_pos_globals }; + thread::spawn(move || { + tls::GCX_PTR.set(gcx_ptr, || { + syntax_pos::GLOBALS.set(syntax_pos_globals, || { + syntax_pos::GLOBALS.set(syntax_pos_globals, || { + tls::with_thread_locals(|| { + unsafe { + tls::with_global(|tcx| deadlock(tcx, ®istry)) + } + }) + }) + }) + }) + }); +} + +#[cfg(parallel_queries)] +fn deadlock(tcx: TyCtxt<'_, '_, '_>, registry: &rayon_core::Registry) { + let on_panic = OnDrop(|| { + eprintln!("deadlock handler panicked, aborting process"); + process::abort(); + }); + + let mut wakelist = Vec::new(); + let mut jobs: Vec<_> = tcx.maps.collect_active_jobs().iter().map(|j| &**j as Ref).collect(); + + while jobs.len() > 0 { + remove_cycle(&mut jobs, &mut wakelist, tcx); + } + + // FIXME: Panic if no cycle is detected + + // FIXME: Write down the conditions when a deadlock happens without a cycle + + // FIXME: Ensure this won't cause a deadlock before we return + for waiter in wakelist.into_iter() { + waiter.notify(tcx, registry); + } + + mem::forget(on_panic); } diff --git a/src/librustc/ty/maps/mod.rs b/src/librustc/ty/maps/mod.rs index 6556e47720c62..b50b43aace760 100644 --- a/src/librustc/ty/maps/mod.rs +++ b/src/librustc/ty/maps/mod.rs @@ -63,10 +63,12 @@ use syntax::symbol::Symbol; #[macro_use] mod plumbing; use self::plumbing::*; -pub use self::plumbing::force_from_dep_node; +pub use self::plumbing::{force_from_dep_node, CycleError}; mod job; pub use self::job::{QueryJob, QueryInfo}; +#[cfg(parallel_queries)] +pub use self::job::handle_deadlock; mod keys; pub use self::keys::Key; diff --git a/src/librustc/ty/maps/plumbing.rs b/src/librustc/ty/maps/plumbing.rs index 4a9d44b7403b9..2ab8d18e3e7dd 100644 --- 
a/src/librustc/ty/maps/plumbing.rs +++ b/src/librustc/ty/maps/plumbing.rs @@ -223,7 +223,7 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> Drop for JobOwner<'a, 'tcx, Q> { } #[derive(Clone)] -pub(super) struct CycleError<'tcx> { +pub struct CycleError<'tcx> { /// The query and related span which uses the cycle pub(super) usage: Option<(Span, Query<'tcx>)>, pub(super) cycle: Vec>, @@ -632,7 +632,15 @@ macro_rules! define_maps { $($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)*) => { + use std::mem; + use ty::maps::job::QueryResult; use rustc_data_structures::sync::Lock; + use { + rustc_data_structures::stable_hasher::HashStable, + rustc_data_structures::stable_hasher::StableHasherResult, + rustc_data_structures::stable_hasher::StableHasher, + ich::StableHashingContext + }; define_map_struct! { tcx: $tcx, @@ -647,10 +655,23 @@ macro_rules! define_maps { $($name: Lock::new(QueryMap::new())),* } } + + pub fn collect_active_jobs(&self) -> Vec>> { + let mut jobs = Vec::new(); + + $(for v in self.$name.lock().active.values() { + match *v { + QueryResult::Started(ref job) => jobs.push(job.clone()), + _ => (), + } + })* + + return jobs; + } } #[allow(bad_style)] - #[derive(Copy, Clone, Debug, PartialEq, Eq)] + #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] pub enum Query<$tcx> { $($(#[$attr])* $name($K)),* } @@ -692,6 +713,17 @@ macro_rules! define_maps { } } + impl<'a, $tcx> HashStable> for Query<$tcx> { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher) { + mem::discriminant(self).hash_stable(hcx, hasher); + match *self { + $(Query::$name(key) => key.hash_stable(hcx, hasher),)* + } + } + } + pub mod queries { use std::marker::PhantomData; diff --git a/src/librustc_driver/driver.rs b/src/librustc_driver/driver.rs index 8dcbda917b243..c2a450a112292 100644 --- a/src/librustc_driver/driver.rs +++ b/src/librustc_driver/driver.rs @@ -49,7 +49,7 @@ use std::fs; use std::io::{self, Write}; use std::iter; use std::path::{Path, PathBuf}; -use rustc_data_structures::sync::{self, Lrc}; +use rustc_data_structures::sync::{self, Lrc, Lock}; use std::sync::mpsc; use syntax::{self, ast, attr, diagnostics, visit}; use syntax::ext::base::ExtCtxt; @@ -69,7 +69,9 @@ pub fn spawn_thread_pool R + sync::Send, R: sync:: opts: config::Options, f: F ) -> R { - f(opts) + ty::tls::GCX_PTR.set(&Lock::new(0), || { + f(opts) + }) } #[cfg(parallel_queries)] @@ -81,7 +83,10 @@ pub fn spawn_thread_pool R + sync::Send, R: sync:: use syntax_pos; use rayon::{ThreadPoolBuilder, ThreadPool}; + let gcx_ptr = &Lock::new(0); + let config = ThreadPoolBuilder::new().num_threads(Session::query_threads_from_opts(&opts)) + .deadlock_handler(ty::maps::handle_deadlock) .stack_size(16 * 1024 * 1024); let with_pool = move |pool: &ThreadPool| { @@ -98,7 +103,9 @@ pub fn spawn_thread_pool R + sync::Send, R: sync:: syntax::GLOBALS.set(syntax_globals, || { syntax_pos::GLOBALS.set(syntax_pos_globals, || { ty::tls::with_thread_locals(|| { - worker() + ty::tls::GCX_PTR.set(gcx_ptr, || { + worker() + }) }) }) }) From d85b5eadeaca624b886838787150adbdfdb1dcbe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?John=20K=C3=A5re=20Alsaker?= Date: Mon, 14 May 2018 03:00:52 +0200 Subject: [PATCH 02/11] Update Rayon version --- src/librustc/Cargo.toml | 4 ++-- src/librustc_data_structures/Cargo.toml | 4 ++-- src/librustc_driver/Cargo.toml | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/librustc/Cargo.toml b/src/librustc/Cargo.toml index 4dc818c650e34..0ff4dc2eace81 100644 
--- a/src/librustc/Cargo.toml +++ b/src/librustc/Cargo.toml @@ -19,8 +19,8 @@ scoped-tls = { version = "0.1.1", features = ["nightly"] } log = { version = "0.4", features = ["release_max_level_info", "std"] } polonius-engine = "0.5.0" proc_macro = { path = "../libproc_macro" } -rustc-rayon = "0.1.0" -rustc-rayon-core = "0.1.0" +rustc-rayon = "0.1.1" +rustc-rayon-core = "0.1.1" rustc_apfloat = { path = "../librustc_apfloat" } rustc_target = { path = "../librustc_target" } rustc_data_structures = { path = "../librustc_data_structures" } diff --git a/src/librustc_data_structures/Cargo.toml b/src/librustc_data_structures/Cargo.toml index 17ee771e52940..fc5fe91c977d4 100644 --- a/src/librustc_data_structures/Cargo.toml +++ b/src/librustc_data_structures/Cargo.toml @@ -16,8 +16,8 @@ serialize = { path = "../libserialize" } cfg-if = "0.1.2" stable_deref_trait = "1.0.0" parking_lot_core = "0.2.8" -rustc-rayon = "0.1.0" -rustc-rayon-core = "0.1.0" +rustc-rayon = "0.1.1" +rustc-rayon-core = "0.1.1" rustc-hash = "1.0.1" [dependencies.parking_lot] diff --git a/src/librustc_driver/Cargo.toml b/src/librustc_driver/Cargo.toml index 24bf07d793f3d..5b75912c18f50 100644 --- a/src/librustc_driver/Cargo.toml +++ b/src/librustc_driver/Cargo.toml @@ -13,7 +13,7 @@ arena = { path = "../libarena" } graphviz = { path = "../libgraphviz" } log = "0.4" env_logger = { version = "0.5", default-features = false } -rustc-rayon = "0.1.0" +rustc-rayon = "0.1.1" scoped-tls = { version = "0.1.1", features = ["nightly"] } rustc = { path = "../librustc" } rustc_allocator = { path = "../librustc_allocator" } From 77259af56ab337d476cafeeb657c9b8953f8b75b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?John=20K=C3=A5re=20Alsaker?= Date: Sun, 27 May 2018 07:46:19 +0200 Subject: [PATCH 03/11] Use try_lock in collect_active_jobs --- src/librustc/ty/maps/plumbing.rs | 4 +++- src/librustc_data_structures/sync.rs | 12 ++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/src/librustc/ty/maps/plumbing.rs b/src/librustc/ty/maps/plumbing.rs index 2ab8d18e3e7dd..13d2a13e5ddc5 100644 --- a/src/librustc/ty/maps/plumbing.rs +++ b/src/librustc/ty/maps/plumbing.rs @@ -659,7 +659,9 @@ macro_rules! 
define_maps { pub fn collect_active_jobs(&self) -> Vec>> { let mut jobs = Vec::new(); - $(for v in self.$name.lock().active.values() { + // We use try_lock here since we are only called from the + // deadlock handler, and this shouldn't be locked + $(for v in self.$name.try_lock().unwrap().active.values() { match *v { QueryResult::Started(ref job) => jobs.push(job.clone()), _ => (), diff --git a/src/librustc_data_structures/sync.rs b/src/librustc_data_structures/sync.rs index 6f7d9e1b54b1e..33f6eda2a8753 100644 --- a/src/librustc_data_structures/sync.rs +++ b/src/librustc_data_structures/sync.rs @@ -519,6 +519,18 @@ impl Lock { self.0.get_mut() } + #[cfg(parallel_queries)] + #[inline(always)] + pub fn try_lock(&self) -> Option> { + self.0.try_lock() + } + + #[cfg(not(parallel_queries))] + #[inline(always)] + pub fn try_lock(&self) -> Option> { + self.0.try_borrow_mut().ok() + } + #[cfg(parallel_queries)] #[inline(always)] pub fn lock(&self) -> LockGuard { From 090b8341bcd1eb7de3d0cbaa71eb2d77924fc4bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?John=20K=C3=A5re=20Alsaker?= Date: Sun, 27 May 2018 07:47:44 +0200 Subject: [PATCH 04/11] Add and use OnDrop::disable --- src/librustc/ty/maps/job.rs | 2 +- src/librustc_data_structures/lib.rs | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/src/librustc/ty/maps/job.rs b/src/librustc/ty/maps/job.rs index 3fe22dba6e153..6b7170f2c476c 100644 --- a/src/librustc/ty/maps/job.rs +++ b/src/librustc/ty/maps/job.rs @@ -456,5 +456,5 @@ fn deadlock(tcx: TyCtxt<'_, '_, '_>, registry: &rayon_core::Registry) { waiter.notify(tcx, registry); } - mem::forget(on_panic); + on_panic.disable(); } diff --git a/src/librustc_data_structures/lib.rs b/src/librustc_data_structures/lib.rs index 7046a2a2a493d..5844edf000a8b 100644 --- a/src/librustc_data_structures/lib.rs +++ b/src/librustc_data_structures/lib.rs @@ -80,6 +80,14 @@ pub mod sorted_map; pub struct OnDrop(pub F); +impl OnDrop { + /// Forgets the function which prevents it from running. + /// Ensure that the function owns no memory, otherwise it will be leaked. 
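[Note: before the method itself (next hunk), here is how the guard ends up being used at the end of the series: `deadlock` arms an abort-on-panic guard and disarms it only on the success path. A sketch of that pattern, reusing the `OnDrop` shape from the earlier sketch; `risky_work` is illustrative:]

```rust
use std::mem;
use std::process;

struct OnDrop<F: FnMut()>(F);
impl<F: FnMut()> OnDrop<F> {
    /// Forgets the guard so the closure never runs.
    fn disable(self) {
        mem::forget(self);
    }
}
impl<F: FnMut()> Drop for OnDrop<F> {
    fn drop(&mut self) {
        (self.0)();
    }
}

fn risky_work() { /* may panic */ }

fn main() {
    // If anything below panics, the guard drops and the process aborts
    // instead of leaving blocked threads waiting forever.
    let on_panic = OnDrop(|| {
        eprintln!("handler panicked, aborting process");
        process::abort();
    });

    risky_work();

    // Success path only: disarm the guard.
    on_panic.disable();
}
```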
+ pub fn disable(self) { + std::mem::forget(self); + } +} + impl Drop for OnDrop { fn drop(&mut self) { (self.0)(); From 302aae58648aed753ecb9752ff01534b42a97f69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?John=20K=C3=A5re=20Alsaker?= Date: Sun, 27 May 2018 09:01:57 +0200 Subject: [PATCH 05/11] Use raw pointers --- src/librustc/ty/maps/job.rs | 69 +++++++++++++++++++++---------------- 1 file changed, 39 insertions(+), 30 deletions(-) diff --git a/src/librustc/ty/maps/job.rs b/src/librustc/ty/maps/job.rs index 6b7170f2c476c..adc06a9e45746 100644 --- a/src/librustc/ty/maps/job.rs +++ b/src/librustc/ty/maps/job.rs @@ -62,7 +62,7 @@ pub struct QueryJob<'tcx> { pub diagnostics: Lock>, #[cfg(parallel_queries)] - latch: QueryLatch, + latch: QueryLatch<'tcx>, } impl<'tcx> QueryJob<'tcx> { @@ -146,41 +146,45 @@ impl<'tcx> QueryJob<'tcx> { /// /// This does nothing for single threaded rustc, /// as there are no concurrent jobs which could be waiting on us - pub fn signal_complete(&self, tcx: TyCtxt<'_, 'tcx, '_>) { + pub fn signal_complete(&self) { #[cfg(parallel_queries)] - self.latch.set(tcx); + self.latch.set(); } } #[cfg(parallel_queries)] -struct QueryWaiter<'a, 'tcx: 'a> { - query: &'a Option>>, +struct QueryWaiter<'tcx> { + query: *const Option>>, condvar: Condvar, span: Span, cycle: Option>, } #[cfg(parallel_queries)] -impl<'a, 'tcx> QueryWaiter<'a, 'tcx> { - fn notify(&self, tcx: TyCtxt<'_, '_, '_>, registry: &rayon_core::Registry) { +impl<'tcx> QueryWaiter<'tcx> { + fn notify(&self, registry: &rayon_core::Registry) { rayon_core::mark_unblocked(registry); self.condvar.notify_one(); } } #[cfg(parallel_queries)] -struct QueryLatchInfo { +struct QueryLatchInfo<'tcx> { complete: bool, - waiters: Vec<&'static mut QueryWaiter<'static, 'static>>, + waiters: Vec<*mut QueryWaiter<'tcx>>, } +// Required because of raw pointers #[cfg(parallel_queries)] -struct QueryLatch { - info: Mutex, +unsafe impl<'tcx> Send for QueryLatchInfo<'tcx> {} + +#[cfg(parallel_queries)] +struct QueryLatch<'tcx> { + info: Mutex>, } #[cfg(parallel_queries)] -impl QueryLatch { +impl<'tcx> QueryLatch<'tcx> { fn new() -> Self { QueryLatch { info: Mutex::new(QueryLatchInfo { @@ -190,44 +194,45 @@ impl QueryLatch { } } - fn await(&self, waiter: &mut QueryWaiter<'_, '_>) { + fn await(&self, waiter: &mut QueryWaiter<'tcx>) { let mut info = self.info.lock(); if !info.complete { - let waiter = &*waiter; - unsafe { - #[allow(mutable_transmutes)] - info.waiters.push(mem::transmute(waiter)); - } + info.waiters.push(waiter); + let condvar = &waiter.condvar; // If this detects a deadlock and the deadlock handler want to resume this thread // we have to be in the `wait` call. This is ensured by the deadlock handler // getting the self.info lock. rayon_core::mark_blocked(); - waiter.condvar.wait(&mut info); + condvar.wait(&mut info); } } - fn set(&self, tcx: TyCtxt<'_, '_, '_>) { + fn set(&self) { let mut info = self.info.lock(); debug_assert!(!info.complete); info.complete = true; let registry = rayon_core::Registry::current(); for waiter in info.waiters.drain(..) 
{ - waiter.notify(tcx, ®istry); + unsafe { + (*waiter).notify(®istry); + } } } fn resume_waiter( &self, waiter: usize, - error: CycleError - ) -> &'static mut QueryWaiter<'static, 'static> { + error: CycleError<'tcx> + ) -> *mut QueryWaiter<'tcx> { let mut info = self.info.lock(); debug_assert!(!info.complete); // Remove the waiter from the list of waiters let waiter = info.waiters.remove(waiter); // Set the cycle error it will be picked it up when resumed - waiter.cycle = unsafe { Some(mem::transmute(error)) }; + unsafe { + (*waiter).cycle = Some(error); + } waiter } @@ -250,10 +255,12 @@ where return Some(cycle); } } - for (i, waiter) in query.latch.info.lock().waiters.iter().enumerate() { - if let Some(ref waiter_query) = waiter.query { - if visit(waiter.span, &**waiter_query as Ref).is_some() { - return Some(Some((query_ref, i))); + for (i, &waiter) in query.latch.info.lock().waiters.iter().enumerate() { + unsafe { + if let Some(ref waiter_query) = *(*waiter).query { + if visit((*waiter).span, &**waiter_query as Ref).is_some() { + return Some(Some((query_ref, i))); + } } } } @@ -322,7 +329,7 @@ fn query_entry<'tcx>(r: Ref<'tcx>) -> QueryInfo<'tcx> { #[cfg(parallel_queries)] fn remove_cycle<'tcx>( jobs: &mut Vec>, - wakelist: &mut Vec<&'static mut QueryWaiter<'static, 'static>>, + wakelist: &mut Vec<*mut QueryWaiter<'tcx>>, tcx: TyCtxt<'_, 'tcx, '_> ) { let mut visited = HashSet::new(); @@ -453,7 +460,9 @@ fn deadlock(tcx: TyCtxt<'_, '_, '_>, registry: &rayon_core::Registry) { // FIXME: Ensure this won't cause a deadlock before we return for waiter in wakelist.into_iter() { - waiter.notify(tcx, registry); + unsafe { + (*waiter).notify(registry); + } } on_panic.disable(); From c819ba043aa00ca15c5ab63da89e387c2130f5bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?John=20K=C3=A5re=20Alsaker?= Date: Sun, 27 May 2018 13:05:41 +0200 Subject: [PATCH 06/11] Add comments and misc changes --- src/librustc/ty/maps/job.rs | 119 ++++++++++++++++++++++++++++-------- 1 file changed, 92 insertions(+), 27 deletions(-) diff --git a/src/librustc/ty/maps/job.rs b/src/librustc/ty/maps/job.rs index adc06a9e45746..8b8f84201630e 100644 --- a/src/librustc/ty/maps/job.rs +++ b/src/librustc/ty/maps/job.rs @@ -194,19 +194,25 @@ impl<'tcx> QueryLatch<'tcx> { } } + /// Awaits the caller on this latch by blocking the current thread. fn await(&self, waiter: &mut QueryWaiter<'tcx>) { let mut info = self.info.lock(); if !info.complete { + // We push the waiter on to the `waiters` list. It can be accessed inside + // the `wait` call below, by 1) the `set` method or 2) by deadlock detection. + // Both of these will remove it from the `waiters` list before resuming + // this thread. info.waiters.push(waiter); - let condvar = &waiter.condvar; + // If this detects a deadlock and the deadlock handler want to resume this thread // we have to be in the `wait` call. This is ensured by the deadlock handler // getting the self.info lock. rayon_core::mark_blocked(); - condvar.wait(&mut info); + waiter.condvar.wait(&mut info); } } + /// Sets the latch and resumes all waiters on it fn set(&self) { let mut info = self.info.lock(); debug_assert!(!info.complete); @@ -219,46 +225,56 @@ impl<'tcx> QueryLatch<'tcx> { } } - fn resume_waiter( + /// Remove a single waiter from the list of waiters. + /// This is used to break query cycles. 
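[Note: `extract_waiter` pulls a single waiter out so the cycle it participates in can be broken, but a cycle has to be found first — `cycle_check` (added in patch 01, documented in this one) does a depth-first search over the wait-for graph. A standalone sketch of that search on a toy graph keyed by plain `usize` ids; all names are illustrative, and unlike the real `cycle_check` it returns the cycle's members rather than fixing up spans and resumable-waiter indices:]

```rust
use std::collections::{HashMap, HashSet};

type QueryId = usize;

/// Depth-first search for a cycle: `edges[q]` lists the queries waiting on `q`.
fn find_cycle(
    query: QueryId,
    edges: &HashMap<QueryId, Vec<QueryId>>,
    stack: &mut Vec<QueryId>,
    visited: &mut HashSet<QueryId>,
) -> Option<Vec<QueryId>> {
    if !visited.insert(query) {
        // Already seen: there is a cycle iff the query is on the current path.
        return stack
            .iter()
            .position(|&q| q == query)
            .map(|p| stack[p..].to_vec());
    }
    stack.push(query);
    for &next in edges.get(&query).into_iter().flatten() {
        if let Some(cycle) = find_cycle(next, edges, stack, visited) {
            return Some(cycle);
        }
    }
    stack.pop(); // no cycle through this query
    None
}

fn main() {
    let mut edges = HashMap::new();
    edges.insert(0, vec![1]);
    edges.insert(1, vec![2]);
    edges.insert(2, vec![1]); // 1 -> 2 -> 1
    let cycle = find_cycle(0, &edges, &mut Vec::new(), &mut HashSet::new());
    assert_eq!(cycle, Some(vec![1, 2]));
}
```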
+ fn extract_waiter( &self, waiter: usize, - error: CycleError<'tcx> ) -> *mut QueryWaiter<'tcx> { let mut info = self.info.lock(); debug_assert!(!info.complete); // Remove the waiter from the list of waiters - let waiter = info.waiters.remove(waiter); - - // Set the cycle error it will be picked it up when resumed - unsafe { - (*waiter).cycle = Some(error); - } - - waiter + info.waiters.remove(waiter) } } +/// A pointer to an active query job. This is used to give query jobs an identity. #[cfg(parallel_queries)] type Ref<'tcx> = *const QueryJob<'tcx>; +/// A resumable waiter of a query. The usize is the index into waiters in the query's latch #[cfg(parallel_queries)] type Waiter<'tcx> = (Ref<'tcx>, usize); +/// Visits all the non-resumable and resumable waiters of a query. +/// Only waiters in a query are visited. +/// `visit` is called for every waiter and is passed a query waiting on `query_ref` +/// and a span indicating the reason the query waited on `query_ref`. +/// If `visit` returns Some, this function returns. +/// For visits of non-resumable waiters it returns the return value of `visit`. +/// For visits of resumable waiters it returns Some(Some(Waiter)) which has the +/// required information to resume the waiter. +/// If all `visit` calls returns None, this function also returns None. #[cfg(parallel_queries)] fn visit_waiters<'tcx, F>(query_ref: Ref<'tcx>, mut visit: F) -> Option>> where F: FnMut(Span, Ref<'tcx>) -> Option>> { let query = unsafe { &*query_ref }; + + // Visit the parent query which is a non-resumable waiter since it's on the same stack if let Some(ref parent) = query.parent { if let Some(cycle) = visit(query.info.span, &**parent as Ref) { return Some(cycle); } } + + // Visit the explict waiters which use condvars and are resumable for (i, &waiter) in query.latch.info.lock().waiters.iter().enumerate() { unsafe { if let Some(ref waiter_query) = *(*waiter).query { if visit((*waiter).span, &**waiter_query as Ref).is_some() { + // Return a value which indicates that this waiter can be resumed return Some(Some((query_ref, i))); } } @@ -267,6 +283,10 @@ where None } +/// Look for query cycles by doing a depth first search starting at `query`. +/// `span` is the reason for the `query` to execute. This is initially DUMMY_SP. +/// If a cycle is detected, this initial value is replaced with the span causing +/// the cycle. #[cfg(parallel_queries)] fn cycle_check<'tcx>(query: Ref<'tcx>, span: Span, @@ -274,6 +294,8 @@ fn cycle_check<'tcx>(query: Ref<'tcx>, visited: &mut HashSet>) -> Option>> { if visited.contains(&query) { return if let Some(p) = stack.iter().position(|q| q.1 == query) { + // We detected a query cycle, fix up the initial span and return Some + // Remove previous stack entries stack.splice(0..p, iter::empty()); // Replace the span for the first query with the cycle cause @@ -284,13 +306,16 @@ fn cycle_check<'tcx>(query: Ref<'tcx>, } } + // Mark this query is visited and add it to the stack visited.insert(query); stack.push((span, query)); + // Visit all the waiters let r = visit_waiters(query, |span, successor| { cycle_check(successor, span, stack, visited) }); + // Remove the entry in our stack if we didn't find a cycle if r.is_none() { stack.pop(); } @@ -298,12 +323,17 @@ fn cycle_check<'tcx>(query: Ref<'tcx>, r } +/// Finds out if there's a path to the compiler root (aka. code which isn't in a query) +/// from `query` without going through any of the queries in `visited`. +/// This is achieved with a depth first search. 
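[Note: the `remove_cycle` shown in patch 01 shifts the cycle stack with repeated `pop`/`insert(0)` until the chosen entry point comes first. The same shift, sketched standalone with `slice::rotate_left`:]

```rust
/// Rotates `stack` so `entry_point` comes first, preserving cyclic order.
fn rotate_to_front<T: PartialEq>(stack: &mut Vec<T>, entry_point: &T) {
    if let Some(pos) = stack.iter().position(|q| q == entry_point) {
        stack.rotate_left(pos);
    }
}

fn main() {
    let mut cycle = vec!["a", "b", "c", "d"];
    rotate_to_front(&mut cycle, &"c");
    assert_eq!(cycle, ["c", "d", "a", "b"]);
}
```

[Preserving the cyclic order matters because the spans were already shifted to pair each query with its waitee; rotation keeps those pairings intact while only changing which query the reported cycle starts at.]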
#[cfg(parallel_queries)] fn connected_to_root<'tcx>(query: Ref<'tcx>, visited: &mut HashSet>) -> bool { + // We already visited this or we're deliberately ignoring it if visited.contains(&query) { return false; } + // This query is connected to the root (it has no query parent), return true if unsafe { (*query).parent.is_none() } { return true; } @@ -321,19 +351,20 @@ fn connected_to_root<'tcx>(query: Ref<'tcx>, visited: &mut HashSet>) - }).is_some() } -#[cfg(parallel_queries)] -fn query_entry<'tcx>(r: Ref<'tcx>) -> QueryInfo<'tcx> { - unsafe { (*r).info.clone() } -} - +/// Looks for query cycles starting from the last query in `jobs`. +/// If a cycle is found, all queries in the cycle is removed from `jobs` and +/// the function return true. +/// If a cycle was not found, the starting query is removed from `jobs` and +/// the function returns false. #[cfg(parallel_queries)] fn remove_cycle<'tcx>( jobs: &mut Vec>, wakelist: &mut Vec<*mut QueryWaiter<'tcx>>, tcx: TyCtxt<'_, 'tcx, '_> -) { +) -> bool { let mut visited = HashSet::new(); let mut stack = Vec::new(); + // Look for a cycle starting with the last query in `jobs` if let Some(waiter) = cycle_check(jobs.pop().unwrap(), DUMMY_SP, &mut stack, @@ -341,13 +372,15 @@ fn remove_cycle<'tcx>( // Reverse the stack so earlier entries require later entries stack.reverse(); + // Extract the spans and queries into separate arrays let mut spans: Vec<_> = stack.iter().map(|e| e.0).collect(); let queries = stack.iter().map(|e| e.1); - // Shift the spans so that a query is matched the span for its waitee + // Shift the spans so that queries are matched with the span for their waitee let last = spans.pop().unwrap(); spans.insert(0, last); + // Zip them back together let mut stack: Vec<_> = spans.into_iter().zip(queries).collect(); // Remove the queries in our cycle from the list of jobs to look at @@ -355,9 +388,6 @@ fn remove_cycle<'tcx>( jobs.remove_item(&r.1); } - let (waitee_query, waiter_idx) = waiter.unwrap(); - let waitee_query = unsafe { &*waitee_query }; - // Find the queries in the cycle which are // connected to queries outside the cycle let entry_points: Vec> = stack.iter().filter_map(|query| { @@ -392,6 +422,7 @@ fn remove_cycle<'tcx>( stack.insert(0, last); } + // Create the cycle error let mut error = CycleError { usage: None, cycle: stack.iter().map(|&(s, q)| QueryInfo { @@ -400,10 +431,30 @@ fn remove_cycle<'tcx>( } ).collect(), }; - wakelist.push(waitee_query.latch.resume_waiter(waiter_idx, error)); + // We unwrap `waiter` here since there must always be one + // edge which is resumeable / waited using a query latch + let (waitee_query, waiter_idx) = waiter.unwrap(); + let waitee_query = unsafe { &*waitee_query }; + + // Extract the waiter we want to resume + let waiter = waitee_query.latch.extract_waiter(waiter_idx); + + // Set the cycle error it will be picked it up when resumed + unsafe { + (*waiter).cycle = Some(error); + } + + // Put the waiter on the list of things to resume + wakelist.push(waiter); + + true + } else { + false } } +/// Creates a new thread and forwards information in thread locals to it. +/// The new thread runs the deadlock handler. #[cfg(parallel_queries)] pub fn handle_deadlock() { use syntax; @@ -440,6 +491,11 @@ pub fn handle_deadlock() { }); } +/// Detects query cycles by using depth first search over all active query jobs. +/// If a query cycle is found it will break the cycle by finding an edge which +/// uses a query latch and then resuming that waiter. 
+/// There may be multiple cycles involved in a deadlock, so this searches +/// all active queries for cycles before finally resuming all the waiters at once. #[cfg(parallel_queries)] fn deadlock(tcx: TyCtxt<'_, '_, '_>, registry: &rayon_core::Registry) { let on_panic = OnDrop(|| { @@ -450,13 +506,22 @@ fn deadlock(tcx: TyCtxt<'_, '_, '_>, registry: &rayon_core::Registry) { let mut wakelist = Vec::new(); let mut jobs: Vec<_> = tcx.maps.collect_active_jobs().iter().map(|j| &**j as Ref).collect(); + let mut found_cycle = false; + while jobs.len() > 0 { - remove_cycle(&mut jobs, &mut wakelist, tcx); + if remove_cycle(&mut jobs, &mut wakelist, tcx) { + found_cycle = true; + } } - // FIXME: Panic if no cycle is detected - - // FIXME: Write down the conditions when a deadlock happens without a cycle + // Check that a cycle was found. It is possible for a deadlock to occur without + // a query cycle if a query which can be waited on uses Rayon to do multithreading + // internally. Such a query (X) may be executing on 2 threads (A and B) and A may + // wait using Rayon on B. Rayon may then switch to executing another query (Y) + // which in turn will wait on X causing a deadlock. We have a false dependency from + // X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here + // only considers the true dependency and won't detect a cycle. + assert!(found_cycle); // FIXME: Ensure this won't cause a deadlock before we return for waiter in wakelist.into_iter() { From 3571684d2c8eaae3ce9118c702a3d4adf6e56404 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?John=20K=C3=A5re=20Alsaker?= Date: Sun, 27 May 2018 13:10:12 +0200 Subject: [PATCH 07/11] Create thread-pool --- src/test/run-make-fulldeps/issue-19371/foo.rs | 49 ++++++++++--------- 1 file changed, 25 insertions(+), 24 deletions(-) diff --git a/src/test/run-make-fulldeps/issue-19371/foo.rs b/src/test/run-make-fulldeps/issue-19371/foo.rs index 403f4f79843e8..6f5e2affdbd3c 100644 --- a/src/test/run-make-fulldeps/issue-19371/foo.rs +++ b/src/test/run-make-fulldeps/issue-19371/foo.rs @@ -19,9 +19,9 @@ extern crate rustc_codegen_utils; extern crate syntax; use rustc::session::{build_session, Session}; -use rustc::session::config::{basic_options, Input, +use rustc::session::config::{basic_options, Input, Options, OutputType, OutputTypes}; -use rustc_driver::driver::{compile_input, CompileController}; +use rustc_driver::driver::{self, compile_input, CompileController}; use rustc_metadata::cstore::CStore; use rustc_errors::registry::Registry; use syntax::codemap::FileName; @@ -52,14 +52,7 @@ fn main() { compile(src.to_string(), tmpdir.join("out"), sysroot.clone()); } -fn basic_sess(sysroot: PathBuf) -> (Session, Rc, Box) { - let mut opts = basic_options(); - opts.output_types = OutputTypes::new(&[(OutputType::Exe, None)]); - opts.maybe_sysroot = Some(sysroot); - if let Ok(linker) = std::env::var("RUSTC_LINKER") { - opts.cg.linker = Some(linker.into()); - } - +fn basic_sess(opts: Options) -> (Session, Rc, Box) { let descriptions = Registry::new(&rustc::DIAGNOSTICS); let sess = build_session(opts, None, descriptions); let codegen_backend = rustc_driver::get_codegen_backend(&sess); @@ -70,19 +63,27 @@ fn basic_sess(sysroot: PathBuf) -> (Session, Rc, Box) { fn compile(code: String, output: PathBuf, sysroot: PathBuf) { syntax::with_globals(|| { - let (sess, cstore, codegen_backend) = basic_sess(sysroot); - let control = CompileController::basic(); - let input = Input::Str { name: FileName::Anon, input: code }; - let _ = compile_input( - 
codegen_backend, - &sess, - &cstore, - &None, - &input, - &None, - &Some(output), - None, - &control - ); + let mut opts = basic_options(); + opts.output_types = OutputTypes::new(&[(OutputType::Exe, None)]); + opts.maybe_sysroot = Some(sysroot); + if let Ok(linker) = std::env::var("RUSTC_LINKER") { + opts.cg.linker = Some(linker.into()); + } + driver::spawn_thread_pool(opts, |opts| { + let (sess, cstore, codegen_backend) = basic_sess(opts); + let control = CompileController::basic(); + let input = Input::Str { name: FileName::Anon, input: code }; + let _ = compile_input( + codegen_backend, + &sess, + &cstore, + &None, + &input, + &None, + &Some(output), + None, + &control + ); + }); }); } From b2555bd5456ee12b24777a4eb65dd80fdc788923 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?John=20K=C3=A5re=20Alsaker?= Date: Thu, 31 May 2018 20:24:56 +0200 Subject: [PATCH 08/11] Make QueryWaiter use safe code --- src/librustc/ty/maps/job.rs | 54 +++++++++++++++---------------------- 1 file changed, 22 insertions(+), 32 deletions(-) diff --git a/src/librustc/ty/maps/job.rs b/src/librustc/ty/maps/job.rs index 8b8f84201630e..1a0347b86ab66 100644 --- a/src/librustc/ty/maps/job.rs +++ b/src/librustc/ty/maps/job.rs @@ -94,15 +94,15 @@ impl<'tcx> QueryJob<'tcx> { #[cfg(parallel_queries)] { tls::with_related_context(tcx, move |icx| { - let mut waiter = QueryWaiter { - query: &icx.query, + let mut waiter = Lrc::new(QueryWaiter { + query: icx.query.clone(), span, - cycle: None, + cycle: Lock::new(None), condvar: Condvar::new(), - }; - self.latch.await(&mut waiter); + }); + self.latch.await(&waiter); - match waiter.cycle { + match Lrc::get_mut(&mut waiter).unwrap().cycle.get_mut().take() { None => Ok(()), Some(cycle) => Err(cycle) } @@ -154,10 +154,10 @@ impl<'tcx> QueryJob<'tcx> { #[cfg(parallel_queries)] struct QueryWaiter<'tcx> { - query: *const Option>>, + query: Option>>, condvar: Condvar, span: Span, - cycle: Option>, + cycle: Lock>>, } #[cfg(parallel_queries)] @@ -171,13 +171,9 @@ impl<'tcx> QueryWaiter<'tcx> { #[cfg(parallel_queries)] struct QueryLatchInfo<'tcx> { complete: bool, - waiters: Vec<*mut QueryWaiter<'tcx>>, + waiters: Vec>>, } -// Required because of raw pointers -#[cfg(parallel_queries)] -unsafe impl<'tcx> Send for QueryLatchInfo<'tcx> {} - #[cfg(parallel_queries)] struct QueryLatch<'tcx> { info: Mutex>, @@ -195,14 +191,14 @@ impl<'tcx> QueryLatch<'tcx> { } /// Awaits the caller on this latch by blocking the current thread. - fn await(&self, waiter: &mut QueryWaiter<'tcx>) { + fn await(&self, waiter: &Lrc>) { let mut info = self.info.lock(); if !info.complete { // We push the waiter on to the `waiters` list. It can be accessed inside // the `wait` call below, by 1) the `set` method or 2) by deadlock detection. // Both of these will remove it from the `waiters` list before resuming // this thread. - info.waiters.push(waiter); + info.waiters.push(waiter.clone()); // If this detects a deadlock and the deadlock handler want to resume this thread // we have to be in the `wait` call. This is ensured by the deadlock handler @@ -219,9 +215,7 @@ impl<'tcx> QueryLatch<'tcx> { info.complete = true; let registry = rayon_core::Registry::current(); for waiter in info.waiters.drain(..) 
{ - unsafe { - (*waiter).notify(®istry); - } + waiter.notify(®istry); } } @@ -230,7 +224,7 @@ impl<'tcx> QueryLatch<'tcx> { fn extract_waiter( &self, waiter: usize, - ) -> *mut QueryWaiter<'tcx> { + ) -> Lrc> { let mut info = self.info.lock(); debug_assert!(!info.complete); // Remove the waiter from the list of waiters @@ -270,13 +264,11 @@ where } // Visit the explict waiters which use condvars and are resumable - for (i, &waiter) in query.latch.info.lock().waiters.iter().enumerate() { - unsafe { - if let Some(ref waiter_query) = *(*waiter).query { - if visit((*waiter).span, &**waiter_query as Ref).is_some() { - // Return a value which indicates that this waiter can be resumed - return Some(Some((query_ref, i))); - } + for (i, waiter) in query.latch.info.lock().waiters.iter().enumerate() { + if let Some(ref waiter_query) = waiter.query { + if visit(waiter.span, &**waiter_query).is_some() { + // Return a value which indicates that this waiter can be resumed + return Some(Some((query_ref, i))); } } } @@ -359,7 +351,7 @@ fn connected_to_root<'tcx>(query: Ref<'tcx>, visited: &mut HashSet>) - #[cfg(parallel_queries)] fn remove_cycle<'tcx>( jobs: &mut Vec>, - wakelist: &mut Vec<*mut QueryWaiter<'tcx>>, + wakelist: &mut Vec>>, tcx: TyCtxt<'_, 'tcx, '_> ) -> bool { let mut visited = HashSet::new(); @@ -439,9 +431,9 @@ fn remove_cycle<'tcx>( // Extract the waiter we want to resume let waiter = waitee_query.latch.extract_waiter(waiter_idx); - // Set the cycle error it will be picked it up when resumed + // Set the cycle error so it will be picked up when resumed unsafe { - (*waiter).cycle = Some(error); + *waiter.cycle.lock() = Some(error); } // Put the waiter on the list of things to resume @@ -525,9 +517,7 @@ fn deadlock(tcx: TyCtxt<'_, '_, '_>, registry: &rayon_core::Registry) { // FIXME: Ensure this won't cause a deadlock before we return for waiter in wakelist.into_iter() { - unsafe { - (*waiter).notify(registry); - } + waiter.notify(registry); } on_panic.disable(); From 3e832484414e4b0b28682f9ae6c3607fd5db53f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?John=20K=C3=A5re=20Alsaker?= Date: Thu, 31 May 2018 23:04:21 +0200 Subject: [PATCH 09/11] Add comments --- src/librustc/ty/context.rs | 18 +++++++++++++++++- src/librustc/ty/maps/job.rs | 3 ++- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/src/librustc/ty/context.rs b/src/librustc/ty/context.rs index ef584774f694c..e66ad24231094 100644 --- a/src/librustc/ty/context.rs +++ b/src/librustc/ty/context.rs @@ -1737,20 +1737,28 @@ pub mod tls { pub task: &'a OpenTask, } + /// Sets Rayon's thread local variable which is preserved for Rayon jobs + /// to `value` during the call to `f`. It is restored to its previous value after. + /// This is used to set the pointer to the new ImplicitCtxt. #[cfg(parallel_queries)] fn set_tlv R, R>(value: usize, f: F) -> R { rayon_core::tlv::with(value, f) } + /// Gets Rayon's thread local variable which is preserved for Rayon jobs. + /// This is used to get the pointer to the current ImplicitCtxt. #[cfg(parallel_queries)] fn get_tlv() -> usize { rayon_core::tlv::get() } - // A thread local value which stores a pointer to the current ImplicitCtxt + /// A thread local variable which stores a pointer to the current ImplicitCtxt #[cfg(not(parallel_queries))] thread_local!(static TLV: Cell = Cell::new(0)); + /// Sets TLV to `value` during the call to `f`. + /// It is restored to its previous value after. + /// This is used to set the pointer to the new ImplicitCtxt. 
#[cfg(not(parallel_queries))] fn set_tlv R, R>(value: usize, f: F) -> R { let old = get_tlv(); @@ -1759,6 +1767,7 @@ pub mod tls { f() } + /// This is used to get the pointer to the current ImplicitCtxt. #[cfg(not(parallel_queries))] fn get_tlv() -> usize { TLV.with(|tlv| tlv.get()) @@ -1828,9 +1837,11 @@ pub mod tls { where F: for<'a> FnOnce(TyCtxt<'a, 'gcx, 'gcx>) -> R { with_thread_locals(|| { + // Update GCX_PTR to indicate there's a GlobalCtxt available GCX_PTR.with(|lock| { *lock.lock() = gcx as *const _ as usize; }); + // Set GCX_PTR back to 0 when we exit let _on_drop = OnDrop(move || { GCX_PTR.with(|lock| *lock.lock() = 0); }); @@ -1851,8 +1862,13 @@ pub mod tls { }) } + /// Stores a pointer to the GlobalCtxt if one is available. + /// This is used to access the GlobalCtxt in the deadlock handler + /// given to Rayon. scoped_thread_local!(pub static GCX_PTR: Lock); + /// Creates a TyCtxt and ImplicitCtxt based on the GCX_PTR thread local. + /// This is used in the deadlock handler. pub unsafe fn with_global(f: F) -> R where F: for<'a, 'gcx, 'tcx> FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> R { diff --git a/src/librustc/ty/maps/job.rs b/src/librustc/ty/maps/job.rs index 1a0347b86ab66..e28435489bd20 100644 --- a/src/librustc/ty/maps/job.rs +++ b/src/librustc/ty/maps/job.rs @@ -61,6 +61,7 @@ pub struct QueryJob<'tcx> { /// Diagnostic messages which are emitted while the query executes pub diagnostics: Lock>, + /// The latch which is used to wait on this job #[cfg(parallel_queries)] latch: QueryLatch<'tcx>, } @@ -200,7 +201,7 @@ impl<'tcx> QueryLatch<'tcx> { // this thread. info.waiters.push(waiter.clone()); - // If this detects a deadlock and the deadlock handler want to resume this thread + // If this detects a deadlock and the deadlock handler wants to resume this thread // we have to be in the `wait` call. This is ensured by the deadlock handler // getting the self.info lock. rayon_core::mark_blocked(); From 131ef97c4b160017c61f0b3bef6e01ace6c04ecf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?John=20K=C3=A5re=20Alsaker?= Date: Tue, 5 Jun 2018 23:12:19 +0200 Subject: [PATCH 10/11] Reduce the amount of unsafe code and mark handle_deadlock as unsafe --- src/librustc/ty/maps/job.rs | 98 +++++++++++++++++------------------ src/librustc_driver/driver.rs | 7 +-- 2 files changed, 53 insertions(+), 52 deletions(-) diff --git a/src/librustc/ty/maps/job.rs b/src/librustc/ty/maps/job.rs index e28435489bd20..3da73d47a0bf5 100644 --- a/src/librustc/ty/maps/job.rs +++ b/src/librustc/ty/maps/job.rs @@ -151,6 +151,10 @@ impl<'tcx> QueryJob<'tcx> { #[cfg(parallel_queries)] self.latch.set(); } + + fn as_ptr(&self) -> *const QueryJob<'tcx> { + self as *const _ + } } #[cfg(parallel_queries)] @@ -233,13 +237,9 @@ impl<'tcx> QueryLatch<'tcx> { } } -/// A pointer to an active query job. This is used to give query jobs an identity. -#[cfg(parallel_queries)] -type Ref<'tcx> = *const QueryJob<'tcx>; - /// A resumable waiter of a query. The usize is the index into waiters in the query's latch #[cfg(parallel_queries)] -type Waiter<'tcx> = (Ref<'tcx>, usize); +type Waiter<'tcx> = (Lrc>, usize); /// Visits all the non-resumable and resumable waiters of a query. /// Only waiters in a query are visited. @@ -251,15 +251,13 @@ type Waiter<'tcx> = (Ref<'tcx>, usize); /// required information to resume the waiter. /// If all `visit` calls returns None, this function also returns None. 
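[Note: `GCX_PTR` above is declared with `scoped_thread_local!` from the scoped-tls crate, so a value is only ever observable inside a `set` scope. A minimal sketch of that discipline, assuming the scoped-tls 0.1 API; `CURRENT` is an illustrative name:]

```rust
#[macro_use]
extern crate scoped_tls;

scoped_thread_local!(static CURRENT: String);

fn print_current() {
    // `with` panics when nothing is set, so guard with `is_set` when unsure
    // (with_global instead asserts the stored pointer is non-null).
    if CURRENT.is_set() {
        CURRENT.with(|s| println!("current context: {}", s));
    }
}

fn main() {
    let ctx = String::from("global context");
    // The borrow of `ctx` is only available for the closure's duration.
    CURRENT.set(&ctx, || print_current());
    print_current(); // prints nothing: the scope has ended
}
```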
#[cfg(parallel_queries)] -fn visit_waiters<'tcx, F>(query_ref: Ref<'tcx>, mut visit: F) -> Option>> +fn visit_waiters<'tcx, F>(query: Lrc>, mut visit: F) -> Option>> where - F: FnMut(Span, Ref<'tcx>) -> Option>> + F: FnMut(Span, Lrc>) -> Option>> { - let query = unsafe { &*query_ref }; - // Visit the parent query which is a non-resumable waiter since it's on the same stack if let Some(ref parent) = query.parent { - if let Some(cycle) = visit(query.info.span, &**parent as Ref) { + if let Some(cycle) = visit(query.info.span, parent.clone()) { return Some(cycle); } } @@ -267,9 +265,9 @@ where // Visit the explict waiters which use condvars and are resumable for (i, waiter) in query.latch.info.lock().waiters.iter().enumerate() { if let Some(ref waiter_query) = waiter.query { - if visit(waiter.span, &**waiter_query).is_some() { + if visit(waiter.span, waiter_query.clone()).is_some() { // Return a value which indicates that this waiter can be resumed - return Some(Some((query_ref, i))); + return Some(Some((query.clone(), i))); } } } @@ -281,12 +279,13 @@ where /// If a cycle is detected, this initial value is replaced with the span causing /// the cycle. #[cfg(parallel_queries)] -fn cycle_check<'tcx>(query: Ref<'tcx>, +fn cycle_check<'tcx>(query: Lrc>, span: Span, - stack: &mut Vec<(Span, Ref<'tcx>)>, - visited: &mut HashSet>) -> Option>> { - if visited.contains(&query) { - return if let Some(p) = stack.iter().position(|q| q.1 == query) { + stack: &mut Vec<(Span, Lrc>)>, + visited: &mut HashSet<*const QueryJob<'tcx>> +) -> Option>> { + if visited.contains(&query.as_ptr()) { + return if let Some(p) = stack.iter().position(|q| q.1.as_ptr() == query.as_ptr()) { // We detected a query cycle, fix up the initial span and return Some // Remove previous stack entries @@ -300,8 +299,8 @@ fn cycle_check<'tcx>(query: Ref<'tcx>, } // Mark this query is visited and add it to the stack - visited.insert(query); - stack.push((span, query)); + visited.insert(query.as_ptr()); + stack.push((span, query.clone())); // Visit all the waiters let r = visit_waiters(query, |span, successor| { @@ -320,18 +319,21 @@ fn cycle_check<'tcx>(query: Ref<'tcx>, /// from `query` without going through any of the queries in `visited`. /// This is achieved with a depth first search. #[cfg(parallel_queries)] -fn connected_to_root<'tcx>(query: Ref<'tcx>, visited: &mut HashSet>) -> bool { +fn connected_to_root<'tcx>( + query: Lrc>, + visited: &mut HashSet<*const QueryJob<'tcx>> +) -> bool { // We already visited this or we're deliberately ignoring it - if visited.contains(&query) { + if visited.contains(&query.as_ptr()) { return false; } // This query is connected to the root (it has no query parent), return true - if unsafe { (*query).parent.is_none() } { + if query.parent.is_none() { return true; } - visited.insert(query); + visited.insert(query.as_ptr()); let mut connected = false; @@ -351,7 +353,7 @@ fn connected_to_root<'tcx>(query: Ref<'tcx>, visited: &mut HashSet>) - /// the function returns false. 
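[Note: this patch trades the raw `Ref` pointers for `Lrc` handles but keeps identity comparisons by hashing `as_ptr()` into the visited sets. A standalone sketch of pointer identity versus value equality, with `Rc` standing in for `Lrc` and a free `as_ptr` mirroring the `QueryJob::as_ptr` added above:]

```rust
use std::collections::HashSet;
use std::rc::Rc;

fn as_ptr(job: &Rc<String>) -> *const String {
    &**job as *const String
}

fn main() {
    let a = Rc::new(String::from("query"));
    let b = a.clone();                      // same job, same allocation
    let c = Rc::new(String::from("query")); // equal contents, distinct job

    // Identity, not equality: clones share an allocation, `c` does not.
    assert_eq!(as_ptr(&a), as_ptr(&b));
    assert_ne!(as_ptr(&a), as_ptr(&c));

    // The raw pointer gives each job a hashable identity for visited sets.
    let mut visited: HashSet<*const String> = HashSet::new();
    assert!(visited.insert(as_ptr(&a)));
    assert!(!visited.insert(as_ptr(&b))); // already visited via `a`
    assert!(visited.insert(as_ptr(&c)));
}
```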
#[cfg(parallel_queries)] fn remove_cycle<'tcx>( - jobs: &mut Vec>, + jobs: &mut Vec>>, wakelist: &mut Vec>>, tcx: TyCtxt<'_, 'tcx, '_> ) -> bool { @@ -367,7 +369,7 @@ fn remove_cycle<'tcx>( // Extract the spans and queries into separate arrays let mut spans: Vec<_> = stack.iter().map(|e| e.0).collect(); - let queries = stack.iter().map(|e| e.1); + let queries = stack.into_iter().map(|e| e.1); // Shift the spans so that queries are matched with the span for their waitee let last = spans.pop().unwrap(); @@ -378,23 +380,25 @@ fn remove_cycle<'tcx>( // Remove the queries in our cycle from the list of jobs to look at for r in &stack { - jobs.remove_item(&r.1); + if let Some(pos) = jobs.iter().position(|j| j.as_ptr() == r.1.as_ptr()) { + jobs.remove(pos); + } } // Find the queries in the cycle which are // connected to queries outside the cycle - let entry_points: Vec> = stack.iter().filter_map(|query| { + let entry_points: Vec>> = stack.iter().filter_map(|query| { // Mark all the other queries in the cycle as already visited let mut visited = HashSet::from_iter(stack.iter().filter_map(|q| { - if q.1 != query.1 { - Some(q.1) + if q.1.as_ptr() != query.1.as_ptr() { + Some(q.1.as_ptr()) } else { None } })); - if connected_to_root(query.1, &mut visited) { - Some(query.1) + if connected_to_root(query.1.clone(), &mut visited) { + Some(query.1.clone()) } else { None } @@ -403,14 +407,14 @@ fn remove_cycle<'tcx>( // Deterministically pick an entry point // FIXME: Sort this instead let mut hcx = tcx.create_stable_hashing_context(); - let entry_point = *entry_points.iter().min_by_key(|&&q| { + let entry_point = entry_points.iter().min_by_key(|q| { let mut stable_hasher = StableHasher::::new(); - unsafe { (*q).info.query.hash_stable(&mut hcx, &mut stable_hasher); } + q.info.query.hash_stable(&mut hcx, &mut stable_hasher); stable_hasher.finish() - }).unwrap(); + }).unwrap().as_ptr(); // Shift the stack until our entry point is first - while stack[0].1 != entry_point { + while stack[0].1.as_ptr() != entry_point { let last = stack.pop().unwrap(); stack.insert(0, last); } @@ -418,24 +422,21 @@ fn remove_cycle<'tcx>( // Create the cycle error let mut error = CycleError { usage: None, - cycle: stack.iter().map(|&(s, q)| QueryInfo { + cycle: stack.iter().map(|&(s, ref q)| QueryInfo { span: s, - query: unsafe { (*q).info.query.clone() }, + query: q.info.query.clone(), } ).collect(), }; // We unwrap `waiter` here since there must always be one // edge which is resumeable / waited using a query latch let (waitee_query, waiter_idx) = waiter.unwrap(); - let waitee_query = unsafe { &*waitee_query }; // Extract the waiter we want to resume let waiter = waitee_query.latch.extract_waiter(waiter_idx); // Set the cycle error so it will be picked up when resumed - unsafe { - *waiter.cycle.lock() = Some(error); - } + *waiter.cycle.lock() = Some(error); // Put the waiter on the list of things to resume wakelist.push(waiter); @@ -448,8 +449,9 @@ fn remove_cycle<'tcx>( /// Creates a new thread and forwards information in thread locals to it. /// The new thread runs the deadlock handler. +/// Must only be called when a deadlock is about to happen. 
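[Note: `handle_deadlock` (next hunk) must hand the worker threads' scoped thread-locals to a brand-new thread, which it does by laundering the borrows through raw pointers. A sketch of that forwarding pattern under the same assumption the real code makes — the deadlocked workers keep their `set` scopes, and therefore the borrows, alive until the handler finishes; `spawn_with_context` and `CONTEXT` are illustrative:]

```rust
#[macro_use]
extern crate scoped_tls;

use std::thread;

scoped_thread_local!(static CONTEXT: usize);

fn spawn_with_context<F: FnOnce() + Send + 'static>(f: F) -> thread::JoinHandle<()> {
    // Launder the non-'static borrow into a usize so it can move into the
    // new thread; sound only while the original `set` scope is still alive.
    let context = CONTEXT.with(|c| c as *const usize as usize);
    thread::spawn(move || {
        let context = unsafe { &*(context as *const usize) };
        CONTEXT.set(context, f);
    })
}

fn main() {
    CONTEXT.set(&42, || {
        // Blocking here stands in for the deadlocked workers keeping their
        // scopes alive while the handler thread runs.
        spawn_with_context(|| CONTEXT.with(|c| assert_eq!(*c, 42)))
            .join()
            .unwrap();
    });
}
```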
 #[cfg(parallel_queries)]
-pub fn handle_deadlock() {
+pub unsafe fn handle_deadlock() {
     use syntax;
     use syntax_pos;

@@ -458,25 +460,23 @@ pub fn handle_deadlock() {
     let gcx_ptr = tls::GCX_PTR.with(|gcx_ptr| {
         gcx_ptr as *const _
     });
-    let gcx_ptr = unsafe { &*gcx_ptr };
+    let gcx_ptr = &*gcx_ptr;

     let syntax_globals = syntax::GLOBALS.with(|syntax_globals| {
         syntax_globals as *const _
     });
-    let syntax_globals = unsafe { &*syntax_globals };
+    let syntax_globals = &*syntax_globals;

     let syntax_pos_globals = syntax_pos::GLOBALS.with(|syntax_pos_globals| {
         syntax_pos_globals as *const _
     });
-    let syntax_pos_globals = unsafe { &*syntax_pos_globals };
+    let syntax_pos_globals = &*syntax_pos_globals;
     thread::spawn(move || {
         tls::GCX_PTR.set(gcx_ptr, || {
             syntax::GLOBALS.set(syntax_globals, || {
                 syntax_pos::GLOBALS.set(syntax_pos_globals, || {
                     tls::with_thread_locals(|| {
-                        unsafe {
-                            tls::with_global(|tcx| deadlock(tcx, &registry))
-                        }
+                        tls::with_global(|tcx| deadlock(tcx, &registry))
                     })
                 })
             })
         })
@@ -497,7 +497,7 @@ fn deadlock(tcx: TyCtxt<'_, '_, '_>, registry: &rayon_core::Registry) {
     });

     let mut wakelist = Vec::new();
-    let mut jobs: Vec<_> = tcx.maps.collect_active_jobs().iter().map(|j| &**j as Ref).collect();
+    let mut jobs: Vec<_> = tcx.maps.collect_active_jobs();

     let mut found_cycle = false;

diff --git a/src/librustc_driver/driver.rs b/src/librustc_driver/driver.rs
index c2a450a112292..37f8bff964f5b 100644
--- a/src/librustc_driver/driver.rs
+++ b/src/librustc_driver/driver.rs
@@ -85,9 +85,10 @@ pub fn spawn_thread_pool<F: FnOnce(config::Options) -> R + sync::Send, R: sync::Send>(

     let gcx_ptr = &Lock::new(0);

-    let config = ThreadPoolBuilder::new().num_threads(Session::query_threads_from_opts(&opts))
-                                         .deadlock_handler(ty::maps::handle_deadlock)
-                                         .stack_size(16 * 1024 * 1024);
+    let config = ThreadPoolBuilder::new()
+        .num_threads(Session::query_threads_from_opts(&opts))
+        .deadlock_handler(|| unsafe { ty::maps::handle_deadlock() })
+        .stack_size(16 * 1024 * 1024);

     let with_pool = move |pool: &ThreadPool| {
         pool.install(move || f(opts))

From f273f285b887e84471eaae5d841e125eec197186 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?John=20K=C3=A5re=20Alsaker?=
Date: Wed, 6 Jun 2018 15:25:47 +0200
Subject: [PATCH 11/11] Update Cargo

---
 src/Cargo.lock | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

diff --git a/src/Cargo.lock b/src/Cargo.lock
index d17faef82b563..ed6f51093a757 100644
--- a/src/Cargo.lock
+++ b/src/Cargo.lock
@@ -1786,12 +1786,16 @@ dependencies = [
  "jobserver 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parking_lot 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "polonius-engine 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "proc_macro 0.0.0",
+ "rustc-rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-rayon-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc_apfloat 0.0.0",
  "rustc_data_structures 0.0.0",
  "rustc_errors 0.0.0",
  "rustc_target 0.0.0",
+ "scoped-tls 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "serialize 0.0.0",
  "syntax 0.0.0",
  "syntax_pos 0.0.0",
@@ -1828,7 +1832,7 @@ dependencies = [
  "rustc-ap-rustc_cratesio_shim 147.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-ap-serialize 147.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc-rayon 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "stable_deref_trait 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

@@ -1911,16 +1915,16 @@ dependencies = [

 [[package]]
 name = "rustc-rayon"
-version = "0.1.0"
+version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "either 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc-rayon-core 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-rayon-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
 name = "rustc-rayon-core"
-version = "0.1.0"
+version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "crossbeam-deque 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2044,8 +2048,8 @@ dependencies = [
  "parking_lot 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "parking_lot_core 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc-rayon 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc-rayon-core 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-rayon-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc_cratesio_shim 0.0.0",
  "serialize 0.0.0",
  "stable_deref_trait 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2061,7 +2065,7 @@ dependencies = [
  "graphviz 0.0.0",
  "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc 0.0.0",
- "rustc-rayon 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc_allocator 0.0.0",
  "rustc_borrowck 0.0.0",
  "rustc_codegen_utils 0.0.0",
@@ -3215,8 +3219,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 "checksum rustc-ap-syntax_pos 147.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fb707a229093791dc3fc35aca61d9bf0e3708f23da4536683527857bc624b061"
 "checksum rustc-demangle 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "11fb43a206a04116ffd7cfcf9bcb941f8eb6cc7ff667272246b0a1c74259a3cb"
 "checksum rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7540fc8b0c49f096ee9c961cda096467dce8084bec6bdca2fc83895fd9b28cb8"
-"checksum rustc-rayon 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b1aa5cd8c3a706edb19b6ec6aa7b056bdc635b6e99c5cf7014f9af9d92f15e99"
-"checksum rustc-rayon-core 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d69983f8613a9c3ba1a3bbf5e8bdf2fd5c42317b1d8dd8623ca8030173bf8a6b"
+"checksum rustc-rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c6d5a683c6ba4ed37959097e88d71c9e8e26659a3cb5be8b389078e7ad45306"
+"checksum rustc-rayon-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "40f06724db71e18d68b3b946fdf890ca8c921d9edccc1404fdfdb537b0d12649"
 "checksum rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)" = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda"
 "checksum rustc_version 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a54aa04a10c68c1c4eacb4337fd883b435997ede17a9385784b990777686b09a"
 "checksum rustfix 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9da3cf9b79dc889a2c9879643f26d7a53e37e9361c7566b7d2787d5ace0d8396"