diff --git a/src/librustc/dep_graph/graph.rs b/src/librustc/dep_graph/graph.rs
index 32f40367faba4..96d6b0f79cfff 100644
--- a/src/librustc/dep_graph/graph.rs
+++ b/src/librustc/dep_graph/graph.rs
@@ -461,10 +461,10 @@ impl DepGraph {
         self.data.as_ref().and_then(|data| data.colors.borrow().get(dep_node).cloned())
     }

-    pub fn try_mark_green(&self,
-                          tcx: TyCtxt,
-                          dep_node: &DepNode)
-                          -> Option<DepNodeIndex> {
+    pub fn try_mark_green<'tcx>(&self,
+                                tcx: TyCtxt<'_, 'tcx, 'tcx>,
+                                dep_node: &DepNode)
+                                -> Option<DepNodeIndex> {
         debug!("try_mark_green({:?}) - BEGIN", dep_node);
         let data = self.data.as_ref().unwrap();

@@ -621,7 +621,7 @@ impl DepGraph {
         // ... emitting any stored diagnostic ...
         {
             let diagnostics = tcx.on_disk_query_result_cache
-                                 .load_diagnostics(prev_dep_node_index);
+                                 .load_diagnostics(tcx, prev_dep_node_index);

             if diagnostics.len() > 0 {
                 let handle = tcx.sess.diagnostic();
diff --git a/src/librustc/ich/hcx.rs b/src/librustc/ich/hcx.rs
index f204d352842bf..d95b825b9e562 100644
--- a/src/librustc/ich/hcx.rs
+++ b/src/librustc/ich/hcx.rs
@@ -28,7 +28,7 @@ use syntax::attr;
 use syntax::codemap::CodeMap;
 use syntax::ext::hygiene::SyntaxContext;
 use syntax::symbol::Symbol;
-use syntax_pos::Span;
+use syntax_pos::{Span, DUMMY_SP};

 use rustc_data_structures::stable_hasher::{HashStable, StableHashingContextProvider,
                                            StableHasher, StableHasherResult,
@@ -362,64 +362,53 @@ impl<'gcx> HashStable<StableHashingContext<'gcx>> for Span {
     fn hash_stable<W: StableHasherResult>(&self,
                                           hcx: &mut StableHashingContext<'gcx>,
                                           hasher: &mut StableHasher<W>) {
-        use syntax_pos::Pos;
+        const TAG_VALID_SPAN: u8 = 0;
+        const TAG_INVALID_SPAN: u8 = 1;
+        const TAG_EXPANSION: u8 = 0;
+        const TAG_NO_EXPANSION: u8 = 1;

         if !hcx.hash_spans {
             return
         }

+        if *self == DUMMY_SP {
+            return std_hash::Hash::hash(&TAG_INVALID_SPAN, hasher);
+        }
+
         // If this is not an empty or invalid span, we want to hash the last
         // position that belongs to it, as opposed to hashing the first
         // position past it.
         let span = self.data();
-        let span_hi = if span.hi > span.lo {
-            // We might end up in the middle of a multibyte character here,
-            // but that's OK, since we are not trying to decode anything at
-            // this position.
-            span.hi - ::syntax_pos::BytePos(1)
-        } else {
-            span.hi
-        };
-
-        {
-            let loc1 = hcx.codemap().byte_pos_to_line_and_col(span.lo);
-            let loc1 = loc1.as_ref()
-                           .map(|&(ref fm, line, col)| (&fm.name[..], line, col.to_usize()))
-                           .unwrap_or(("???", 0, 0));
-
-            let loc2 = hcx.codemap().byte_pos_to_line_and_col(span_hi);
-            let loc2 = loc2.as_ref()
-                           .map(|&(ref fm, line, col)| (&fm.name[..], line, col.to_usize()))
-                           .unwrap_or(("???", 0, 0));
-
-            if loc1.0 == loc2.0 {
-                std_hash::Hash::hash(&0u8, hasher);
-
-                std_hash::Hash::hash(loc1.0, hasher);
-                std_hash::Hash::hash(&loc1.1, hasher);
-                std_hash::Hash::hash(&loc1.2, hasher);
-
-                // Do not hash the file name twice
-                std_hash::Hash::hash(&loc2.1, hasher);
-                std_hash::Hash::hash(&loc2.2, hasher);
-            } else {
-                std_hash::Hash::hash(&1u8, hasher);
-
-                std_hash::Hash::hash(loc1.0, hasher);
-                std_hash::Hash::hash(&loc1.1, hasher);
-                std_hash::Hash::hash(&loc1.2, hasher);
-
-                std_hash::Hash::hash(loc2.0, hasher);
-                std_hash::Hash::hash(&loc2.1, hasher);
-                std_hash::Hash::hash(&loc2.2, hasher);
+        if span.hi < span.lo {
+            return std_hash::Hash::hash(&TAG_INVALID_SPAN, hasher);
+        }
+
+        let (file_lo, line_lo, col_lo) = match hcx.codemap()
+                                                  .byte_pos_to_line_and_col(span.lo) {
+            Some(pos) => pos,
+            None => {
+                return std_hash::Hash::hash(&TAG_INVALID_SPAN, hasher);
             }
+        };
+
+        if !file_lo.contains(span.hi) {
+            return std_hash::Hash::hash(&TAG_INVALID_SPAN, hasher);
         }

+        let len = span.hi - span.lo;
+
+        std_hash::Hash::hash(&TAG_VALID_SPAN, hasher);
+        std_hash::Hash::hash(&file_lo.name, hasher);
+        std_hash::Hash::hash(&line_lo, hasher);
+        std_hash::Hash::hash(&col_lo, hasher);
+        std_hash::Hash::hash(&len, hasher);
+
         if span.ctxt == SyntaxContext::empty() {
-            0u8.hash_stable(hcx, hasher);
+            TAG_NO_EXPANSION.hash_stable(hcx, hasher);
         } else {
-            1u8.hash_stable(hcx, hasher);
-            self.source_callsite().hash_stable(hcx, hasher);
+            TAG_EXPANSION.hash_stable(hcx, hasher);
+            span.ctxt.outer().expn_info().hash_stable(hcx, hasher);
         }
     }
 }
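Note: the new hashing scheme above reduces a span to (file name, line, column, length) plus a validity tag, instead of hashing absolute byte offsets, so the hash survives unrelated edits earlier in the file. A minimal standalone sketch of the same idea, using std's hasher and made-up `File`/`Span` types rather than rustc's real ones:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

const TAG_VALID_SPAN: u8 = 0;
const TAG_INVALID_SPAN: u8 = 1;

// Toy stand-ins for rustc's FileMap and Span, for illustration only.
struct File { name: String, end: u32 }
struct Span { lo: u32, hi: u32 }

// Hash a span as (file, line, col, length) instead of absolute byte
// offsets; anything that cannot be mapped to a position hashes as "invalid".
fn hash_span(span: &Span, file: &File, line: u32, col: u32, hasher: &mut DefaultHasher) {
    if span.hi < span.lo || span.hi > file.end {
        TAG_INVALID_SPAN.hash(hasher);
        return;
    }
    TAG_VALID_SPAN.hash(hasher);
    file.name.hash(hasher);
    line.hash(hasher);
    col.hash(hasher);
    (span.hi - span.lo).hash(hasher); // length, not span.hi itself
}

fn main() {
    let file = File { name: "lib.rs".into(), end: 100 };
    let mut hasher = DefaultHasher::new();
    hash_span(&Span { lo: 10, hi: 20 }, &file, 2, 4, &mut hasher);
    println!("stable span hash: {:x}", hasher.finish());
}
```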
diff --git a/src/librustc/ich/impls_syntax.rs b/src/librustc/ich/impls_syntax.rs
index fea4e283db13f..c414349c8ffd6 100644
--- a/src/librustc/ich/impls_syntax.rs
+++ b/src/librustc/ich/impls_syntax.rs
@@ -347,6 +347,30 @@ impl_stable_hash_for!(enum ::syntax::ast::MetaItemKind {
     NameValue(lit)
 });

+impl_stable_hash_for!(struct ::syntax_pos::hygiene::ExpnInfo {
+    call_site,
+    callee
+});
+
+impl_stable_hash_for!(struct ::syntax_pos::hygiene::NameAndSpan {
+    format,
+    allow_internal_unstable,
+    allow_internal_unsafe,
+    span
+});
+
+impl_stable_hash_for!(enum ::syntax_pos::hygiene::ExpnFormat {
+    MacroAttribute(sym),
+    MacroBang(sym),
+    CompilerDesugaring(kind)
+});
+
+impl_stable_hash_for!(enum ::syntax_pos::hygiene::CompilerDesugaringKind {
+    BackArrow,
+    DotFill,
+    QuestionMark
+});
+
 impl<'gcx> HashStable<StableHashingContext<'gcx>> for FileMap {
     fn hash_stable<W: StableHasherResult>(&self,
                                           hcx: &mut StableHashingContext<'gcx>,
diff --git a/src/librustc/ty/context.rs b/src/librustc/ty/context.rs
index 385cf0ecd8512..0ab769d4fe307 100644
--- a/src/librustc/ty/context.rs
+++ b/src/librustc/ty/context.rs
@@ -1235,7 +1235,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
                                                   -> Result<(), E::Error>
         where E: ty::codec::TyEncoder
     {
-        self.on_disk_query_result_cache.serialize(self.global_tcx(), self.cstore, encoder)
+        self.on_disk_query_result_cache.serialize(self.global_tcx(), encoder)
     }
 }
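Note: `impl_stable_hash_for!` is rustc's helper macro for deriving `HashStable` by hashing every listed field or variant in order. Roughly what an expansion amounts to, as a hand-written sketch against a simplified trait (not the macro's literal output):

```rust
use std::hash::Hasher;

// Simplified stand-in for rustc's HashStable trait; the real one is
// parameterized over a StableHashingContext and a StableHasher.
trait HashStable<CTX> {
    fn hash_stable<H: Hasher>(&self, ctx: &mut CTX, hasher: &mut H);
}

impl<CTX> HashStable<CTX> for u32 {
    fn hash_stable<H: Hasher>(&self, _: &mut CTX, hasher: &mut H) {
        hasher.write_u32(*self);
    }
}

struct ExpnInfo { call_site: u32, callee: u32 }

// Roughly what `impl_stable_hash_for!(struct ExpnInfo { call_site, callee })`
// expands to: hash each listed field, in declaration order.
impl<CTX> HashStable<CTX> for ExpnInfo {
    fn hash_stable<H: Hasher>(&self, ctx: &mut CTX, hasher: &mut H) {
        self.call_site.hash_stable(ctx, hasher);
        self.callee.hash_stable(ctx, hasher);
    }
}

fn main() {
    let mut hasher = std::collections::hash_map::DefaultHasher::new();
    ExpnInfo { call_site: 42, callee: 7 }.hash_stable(&mut (), &mut hasher);
}
```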
diff --git a/src/librustc/ty/maps/config.rs b/src/librustc/ty/maps/config.rs
index 066b80cefa4b5..496284ad9c9f4 100644
--- a/src/librustc/ty/maps/config.rs
+++ b/src/librustc/ty/maps/config.rs
@@ -31,9 +31,9 @@ pub(super) trait QueryDescription<'tcx>: QueryConfig {
         false
     }

-    fn load_from_disk<'a>(_: TyCtxt<'a, 'tcx, 'tcx>,
+    fn try_load_from_disk(_: TyCtxt<'_, 'tcx, 'tcx>,
                           _: SerializedDepNodeIndex)
-                          -> Self::Value {
+                          -> Option<Self::Value> {
         bug!("QueryDescription::load_from_disk() called for an unsupported query.")
     }
 }
@@ -556,12 +556,14 @@ impl<'tcx> QueryDescription<'tcx> for queries::typeck_tables_of<'tcx> {
         def_id.is_local()
     }

-    fn load_from_disk<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+    fn try_load_from_disk(tcx: TyCtxt<'_, 'tcx, 'tcx>,
                           id: SerializedDepNodeIndex)
-                          -> Self::Value {
-        let typeck_tables: ty::TypeckTables<'tcx> = tcx.on_disk_query_result_cache
-                                                       .load_query_result(tcx, id);
-        tcx.alloc_tables(typeck_tables)
+                          -> Option<Self::Value> {
+        let typeck_tables: Option<ty::TypeckTables<'tcx>> = tcx
+            .on_disk_query_result_cache
+            .try_load_query_result(tcx, id);
+
+        typeck_tables.map(|tables| tcx.alloc_tables(tables))
     }
 }
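Note: the shape of this API change is that `try_load_from_disk` returns `Option<Self::Value>`, so a missing cache entry becomes an ordinary outcome rather than a bug, and only queries that opt in override the default. A toy sketch of the pattern (hypothetical names, not rustc's types):

```rust
use std::collections::HashMap;

struct Cache { entries: HashMap<u64, String> }

trait QueryDescription {
    type Value;

    // Most queries never cache on disk, so the default refuses outright.
    fn cache_on_disk() -> bool { false }

    fn try_load_from_disk(_cache: &Cache, _index: u64) -> Option<Self::Value> {
        panic!("try_load_from_disk() called for an unsupported query")
    }
}

struct TypeckTables;

impl QueryDescription for TypeckTables {
    type Value = String;

    fn cache_on_disk() -> bool { true }

    // Returns None when the previous session produced no entry for `index`.
    fn try_load_from_disk(cache: &Cache, index: u64) -> Option<String> {
        cache.entries.get(&index).cloned()
    }
}

fn main() {
    let cache = Cache { entries: HashMap::new() };
    assert!(<TypeckTables as QueryDescription>::try_load_from_disk(&cache, 0).is_none());
}
```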
diff --git a/src/librustc/ty/maps/on_disk_cache.rs b/src/librustc/ty/maps/on_disk_cache.rs
index 01f2374033da8..8dc9b0877a01c 100644
--- a/src/librustc/ty/maps/on_disk_cache.rs
+++ b/src/librustc/ty/maps/on_disk_cache.rs
@@ -14,7 +14,7 @@ use hir;
 use hir::def_id::{CrateNum, DefIndex, DefId, LocalDefId,
                   RESERVED_FOR_INCR_COMP_CACHE, LOCAL_CRATE};
 use hir::map::definitions::DefPathHash;
-use middle::cstore::CrateStore;
+use ich::CachingCodemapView;
 use mir;
 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::indexed_vec::{IndexVec, Idx};
@@ -23,23 +23,28 @@ use rustc_serialize::{Decodable, Decoder, Encodable, Encoder, opaque,
                       UseSpecializedDecodable, UseSpecializedEncodable};
 use session::{CrateDisambiguator, Session};
 use std::cell::RefCell;
-use std::collections::BTreeMap;
 use std::mem;
+use std::rc::Rc;
 use syntax::ast::NodeId;
 use syntax::codemap::{CodeMap, StableFilemapId};
-use syntax_pos::{BytePos, Span, NO_EXPANSION, DUMMY_SP};
+use syntax_pos::{BytePos, Span, DUMMY_SP, FileMap};
+use syntax_pos::hygiene::{Mark, SyntaxContext, ExpnInfo};
 use ty;
 use ty::codec::{self as ty_codec, TyDecoder, TyEncoder};
 use ty::context::TyCtxt;

-// Some magic values used for verifying that encoding and decoding. These are
-// basically random numbers.
-const PREV_DIAGNOSTICS_TAG: u64 = 0x1234_5678_A1A1_A1A1;
-const QUERY_RESULT_INDEX_TAG: u64 = 0x1234_5678_C3C3_C3C3;
+const TAG_FILE_FOOTER: u128 = 0xC0FFEE_C0FFEE_C0FFEE_C0FFEE_C0FFEE;

 const TAG_CLEAR_CROSS_CRATE_CLEAR: u8 = 0;
 const TAG_CLEAR_CROSS_CRATE_SET: u8 = 1;

+const TAG_NO_EXPANSION_INFO: u8 = 0;
+const TAG_EXPANSION_INFO_SHORTHAND: u8 = 1;
+const TAG_EXPANSION_INFO_INLINE: u8 = 2;
+
+const TAG_VALID_SPAN: u8 = 0;
+const TAG_INVALID_SPAN: u8 = 1;
+
 /// `OnDiskCache` provides an interface to incr. comp. data cached from the
 /// previous compilation session. This data will eventually include the results
 /// of a few selected queries (like `typeck_tables_of` and `mir_optimized`) and
@@ -49,9 +54,6 @@ pub struct OnDiskCache<'sess> {
     // The complete cache data in serialized form.
     serialized_data: Vec<u8>,

-    // The diagnostics emitted during the previous compilation session.
-    prev_diagnostics: FxHashMap<SerializedDepNodeIndex, Vec<Diagnostic>>,
-
     // This field collects all Diagnostics emitted during the current
     // compilation session.
     current_diagnostics: RefCell<FxHashMap<DepNodeIndex, Vec<Diagnostic>>>,
@@ -59,101 +61,105 @@ pub struct OnDiskCache<'sess> {
     prev_cnums: Vec<(u32, String, CrateDisambiguator)>,
     cnum_map: RefCell<Option<IndexVec<CrateNum, Option<CrateNum>>>>,

-    prev_filemap_starts: BTreeMap<BytePos, StableFilemapId>,
     codemap: &'sess CodeMap,
+    file_index_to_stable_id: FxHashMap<FileMapIndex, StableFilemapId>,
+
+    // These two fields are caches that are populated lazily during decoding.
+    file_index_to_file: RefCell<FxHashMap<FileMapIndex, Rc<FileMap>>>,
+    synthetic_expansion_infos: RefCell<FxHashMap<AbsoluteBytePos, SyntaxContext>>,

     // A map from dep-node to the position of the cached query result in
     // `serialized_data`.
-    query_result_index: FxHashMap<SerializedDepNodeIndex, usize>,
+    query_result_index: FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>,
+
+    // A map from dep-node to the position of any associated diagnostics in
+    // `serialized_data`.
+    prev_diagnostics_index: FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>,
 }

 // This type is used only for (de-)serialization.
 #[derive(RustcEncodable, RustcDecodable)]
-struct Header {
-    prev_filemap_starts: BTreeMap<BytePos, StableFilemapId>,
+struct Footer {
+    file_index_to_stable_id: FxHashMap<FileMapIndex, StableFilemapId>,
     prev_cnums: Vec<(u32, String, CrateDisambiguator)>,
+    query_result_index: EncodedQueryResultIndex,
+    diagnostics_index: EncodedQueryResultIndex,
 }

-type EncodedPrevDiagnostics = Vec<(SerializedDepNodeIndex, Vec<Diagnostic>)>;
-type EncodedQueryResultIndex = Vec<(SerializedDepNodeIndex, usize)>;
+type EncodedQueryResultIndex = Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>;
+type EncodedDiagnosticsIndex = Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>;
+type EncodedDiagnostics = Vec<Diagnostic>;
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)]
+struct FileMapIndex(u32);
+
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, RustcEncodable, RustcDecodable)]
+struct AbsoluteBytePos(u32);
+
+impl AbsoluteBytePos {
+    fn new(pos: usize) -> AbsoluteBytePos {
+        debug_assert!(pos <= ::std::u32::MAX as usize);
+        AbsoluteBytePos(pos as u32)
+    }
+
+    fn to_usize(self) -> usize {
+        self.0 as usize
+    }
+}

 impl<'sess> OnDiskCache<'sess> {
     /// Create a new OnDiskCache instance from the serialized data in `data`.
     pub fn new(sess: &'sess Session, data: Vec<u8>, start_pos: usize) -> OnDiskCache<'sess> {
         debug_assert!(sess.opts.incremental.is_some());

-        // Decode the header
-        let (header, post_header_pos) = {
+        // Wrapping in a scope so we can borrow `data`.
+        let footer: Footer = {
             let mut decoder = opaque::Decoder::new(&data[..], start_pos);
-            let header = Header::decode(&mut decoder)
-                .expect("Error while trying to decode incr. comp. cache header.");
-            (header, decoder.position())
-        };
-
-        let (prev_diagnostics, query_result_index) = {
-            let mut decoder = CacheDecoder {
-                tcx: None,
-                opaque: opaque::Decoder::new(&data[..], post_header_pos),
-                codemap: sess.codemap(),
-                prev_filemap_starts: &header.prev_filemap_starts,
-                cnum_map: &IndexVec::new(),
-            };
-
-            // Decode Diagnostics
-            let prev_diagnostics: FxHashMap<_, _> = {
-                let diagnostics: EncodedPrevDiagnostics =
-                    decode_tagged(&mut decoder, PREV_DIAGNOSTICS_TAG)
-                        .expect("Error while trying to decode previous session \
                                 diagnostics from incr. comp. cache.");
-                diagnostics.into_iter().collect()
-            };
-
-            // Decode the *position* of the query result index
-            let query_result_index_pos = {
-                let pos_pos = data.len() - IntEncodedWithFixedSize::ENCODED_SIZE;
-                decoder.with_position(pos_pos, |decoder| {
-                    IntEncodedWithFixedSize::decode(decoder)
-                }).expect("Error while trying to decode query result index position.")
-                  .0 as usize
-            };
-
-            // Decode the query result index itself
-            let query_result_index: EncodedQueryResultIndex =
-                decoder.with_position(query_result_index_pos, |decoder| {
-                    decode_tagged(decoder, QUERY_RESULT_INDEX_TAG)
-                }).expect("Error while trying to decode query result index.");
-
-            (prev_diagnostics, query_result_index)
+            // Decode the *position* of the footer, which can be found in the
+            // last 8 bytes of the file.
+            decoder.set_position(data.len() - IntEncodedWithFixedSize::ENCODED_SIZE);
+            let query_result_index_pos = IntEncodedWithFixedSize::decode(&mut decoder)
+                .expect("Error while trying to decode query result index position.")
+                .0 as usize;
+
+            // Decode the file footer, which contains all the lookup tables, etc.
+            decoder.set_position(query_result_index_pos);
+            decode_tagged(&mut decoder, TAG_FILE_FOOTER)
+                .expect("Error while trying to decode the file footer.")
         };

         OnDiskCache {
             serialized_data: data,
-            prev_diagnostics,
-            prev_filemap_starts: header.prev_filemap_starts,
-            prev_cnums: header.prev_cnums,
+            file_index_to_stable_id: footer.file_index_to_stable_id,
+            file_index_to_file: RefCell::new(FxHashMap()),
+            prev_cnums: footer.prev_cnums,
             cnum_map: RefCell::new(None),
             codemap: sess.codemap(),
             current_diagnostics: RefCell::new(FxHashMap()),
-            query_result_index: query_result_index.into_iter().collect(),
+            query_result_index: footer.query_result_index.into_iter().collect(),
+            prev_diagnostics_index: footer.diagnostics_index.into_iter().collect(),
+            synthetic_expansion_infos: RefCell::new(FxHashMap()),
         }
     }

     pub fn new_empty(codemap: &'sess CodeMap) -> OnDiskCache<'sess> {
         OnDiskCache {
             serialized_data: Vec::new(),
-            prev_diagnostics: FxHashMap(),
-            prev_filemap_starts: BTreeMap::new(),
+            file_index_to_stable_id: FxHashMap(),
+            file_index_to_file: RefCell::new(FxHashMap()),
             prev_cnums: vec![],
             cnum_map: RefCell::new(None),
             codemap,
             current_diagnostics: RefCell::new(FxHashMap()),
             query_result_index: FxHashMap(),
+            prev_diagnostics_index: FxHashMap(),
+            synthetic_expansion_infos: RefCell::new(FxHashMap()),
         }
     }

     pub fn serialize<'a, 'tcx, E>(&self,
                                   tcx: TyCtxt<'a, 'tcx, 'tcx>,
-                                  cstore: &CrateStore,
                                   encoder: &mut E)
                                   -> Result<(), E::Error>
         where E: ty_codec::TyEncoder
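Note: the loading path above relies on a classic trailer layout: the variable-sized footer is written wherever it lands, and its absolute position goes into the final 8 bytes of the file, so a reader can seek backwards from the end. A self-contained sketch of that layout over a plain `Vec<u8>` (not the opaque encoder):

```rust
// Writer layout: payload ... | footer ... | footer position as fixed-size u64
fn write_file(payload: &[u8], footer: &[u8]) -> Vec<u8> {
    let mut data = Vec::new();
    data.extend_from_slice(payload);
    let footer_pos = data.len() as u64;
    data.extend_from_slice(footer);
    // Do not write anything after this: readers expect the position
    // to occupy exactly the last 8 bytes.
    data.extend_from_slice(&footer_pos.to_le_bytes());
    data
}

// Reader: fetch the last 8 bytes first, then jump to the footer.
fn read_footer(data: &[u8]) -> &[u8] {
    let (rest, tail) = data.split_at(data.len() - 8);
    let mut pos_bytes = [0u8; 8];
    pos_bytes.copy_from_slice(tail);
    let footer_pos = u64::from_le_bytes(pos_bytes) as usize;
    &rest[footer_pos..]
}

fn main() {
    let data = write_file(b"query results...", b"{footer}");
    assert_eq!(read_footer(&data), b"{footer}");
}
```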
@@ -161,46 +167,31 @@ impl<'sess> OnDiskCache<'sess> {
     {
         // Serializing the DepGraph should not modify it:
         let _in_ignore = tcx.dep_graph.in_ignore();

+        // Allocate FileMapIndices
+        let (file_to_file_index, file_index_to_stable_id) = {
+            let mut file_to_file_index = FxHashMap();
+            let mut file_index_to_stable_id = FxHashMap();
+
+            for (index, file) in tcx.sess.codemap().files().iter().enumerate() {
+                let index = FileMapIndex(index as u32);
+                let file_ptr: *const FileMap = &**file as *const _;
+                file_to_file_index.insert(file_ptr, index);
+                file_index_to_stable_id.insert(index, StableFilemapId::new(&file));
+            }
+
+            (file_to_file_index, file_index_to_stable_id)
+        };
+
         let mut encoder = CacheEncoder {
             tcx,
             encoder,
             type_shorthands: FxHashMap(),
             predicate_shorthands: FxHashMap(),
+            expn_info_shorthands: FxHashMap(),
+            codemap: CachingCodemapView::new(tcx.sess.codemap()),
+            file_to_file_index,
         };
-
-        // Encode the file header
-        let prev_filemap_starts: BTreeMap<_, _> = self
-            .codemap
-            .files()
-            .iter()
-            .map(|fm| (fm.start_pos, StableFilemapId::new(fm)))
-            .collect();
-
-        let sorted_cnums = sorted_cnums_including_local_crate(cstore);
-
-        let prev_cnums: Vec<_> = sorted_cnums.iter().map(|&cnum| {
-            let crate_name = tcx.original_crate_name(cnum).as_str().to_string();
-            let crate_disambiguator = tcx.crate_disambiguator(cnum);
-            (cnum.as_u32(), crate_name, crate_disambiguator)
-        }).collect();
-
-        Header {
-            prev_filemap_starts,
-            prev_cnums,
-        }.encode(&mut encoder)?;
-
-
-        // Encode Diagnostics
-        let diagnostics: EncodedPrevDiagnostics =
-            self.current_diagnostics
-                .borrow()
-                .iter()
-                .map(|(k, v)| (SerializedDepNodeIndex::new(k.index()), v.clone()))
-                .collect();
-
-        encoder.encode_tagged(PREV_DIAGNOSTICS_TAG, &diagnostics)?;
-
         // Load everything into memory so we can write it out to the on-disk
         // cache. The vast majority of cacheable query results should already
         // be in memory, so this should be a cheap operation.
@@ -218,19 +209,53 @@ impl<'sess> OnDiskCache<'sess> {
             encode_query_results::<typeck_tables_of, _>(tcx, enc, qri)?;
         }

-        // Encode query result index
-        let query_result_index_pos = encoder.position() as u64;
-        encoder.encode_tagged(QUERY_RESULT_INDEX_TAG, &query_result_index)?;
+        // Encode diagnostics
+        let diagnostics_index = {
+            let mut diagnostics_index = EncodedDiagnosticsIndex::new();
+
+            for (dep_node_index, diagnostics) in self.current_diagnostics
+                                                     .borrow()
+                                                     .iter() {
+                let pos = AbsoluteBytePos::new(encoder.position());
+                // Let's make sure we get the expected type here:
+                let diagnostics: &EncodedDiagnostics = diagnostics;
+                let dep_node_index =
+                    SerializedDepNodeIndex::new(dep_node_index.index());
+                encoder.encode_tagged(dep_node_index, diagnostics)?;
+                diagnostics_index.push((dep_node_index, pos));
+            }

-        // Encode the position of the query result index as the last 8 bytes of
+            diagnostics_index
+        };
+
+        let sorted_cnums = sorted_cnums_including_local_crate(tcx);
+        let prev_cnums: Vec<_> = sorted_cnums.iter().map(|&cnum| {
+            let crate_name = tcx.original_crate_name(cnum).as_str().to_string();
+            let crate_disambiguator = tcx.crate_disambiguator(cnum);
+            (cnum.as_u32(), crate_name, crate_disambiguator)
+        }).collect();
+
+        // Encode the file footer
+        let footer_pos = encoder.position() as u64;
+        encoder.encode_tagged(TAG_FILE_FOOTER, &Footer {
+            file_index_to_stable_id,
+            prev_cnums,
+            query_result_index,
+            diagnostics_index,
+        })?;
+
+        // Encode the position of the footer as the last 8 bytes of the
         // file so we know where to look for it.
-        IntEncodedWithFixedSize(query_result_index_pos).encode(&mut encoder)?;
+        IntEncodedWithFixedSize(footer_pos).encode(encoder.encoder)?;
+
+        // DO NOT WRITE ANYTHING TO THE ENCODER AFTER THIS POINT! The address
+        // of the footer must be the last thing in the data stream.

         return Ok(());

-        fn sorted_cnums_including_local_crate(cstore: &CrateStore) -> Vec<CrateNum> {
+        fn sorted_cnums_including_local_crate(tcx: TyCtxt) -> Vec<CrateNum> {
             let mut cnums = vec![LOCAL_CRATE];
-            cnums.extend_from_slice(&cstore.crates_untracked()[..]);
+            cnums.extend_from_slice(&tcx.crates()[..]);
             cnums.sort_unstable();
             // Just to be sure...
             cnums.dedup();
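Note: each cache entry is written with `encode_tagged`, which (per its doc comment further down) stores a tag and the value plus enough redundancy to sanity-check decoding, while the entry's starting position goes into the index. A simplified, hypothetical variant of that tag-then-verify pattern (length-prefixed rather than rustc's trailing byte count):

```rust
// Encode: tag, 4-byte length, payload. The tag lets the decoder verify it
// is reading the entry it expected; the length lets it verify the end.
fn encode_tagged(buf: &mut Vec<u8>, tag: u32, value: &[u8]) -> usize {
    let pos = buf.len(); // the entry's AbsoluteBytePos, recorded in the index
    buf.extend_from_slice(&tag.to_le_bytes());
    buf.extend_from_slice(&(value.len() as u32).to_le_bytes());
    buf.extend_from_slice(value);
    pos
}

fn read_u32(buf: &[u8], pos: usize) -> u32 {
    let mut b = [0u8; 4];
    b.copy_from_slice(&buf[pos..pos + 4]);
    u32::from_le_bytes(b)
}

fn decode_tagged(buf: &[u8], pos: usize, expected_tag: u32) -> &[u8] {
    assert_eq!(read_u32(buf, pos), expected_tag, "cache entry has wrong tag");
    let len = read_u32(buf, pos + 4) as usize;
    &buf[pos + 8..pos + 8 + len]
}

fn main() {
    let mut buf = Vec::new();
    let pos = encode_tagged(&mut buf, 42, b"diagnostics for node 42");
    assert_eq!(decode_tagged(&buf, pos, 42), b"diagnostics for node 42");
}
```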
@@ -239,10 +264,17 @@ impl<'sess> OnDiskCache<'sess> {
     }

     /// Load a diagnostic emitted during the previous compilation session.
-    pub fn load_diagnostics(&self,
-                            dep_node_index: SerializedDepNodeIndex)
-                            -> Vec<Diagnostic> {
-        self.prev_diagnostics.get(&dep_node_index).cloned().unwrap_or(vec![])
+    pub fn load_diagnostics<'a, 'tcx>(&self,
+                                      tcx: TyCtxt<'a, 'tcx, 'tcx>,
+                                      dep_node_index: SerializedDepNodeIndex)
+                                      -> Vec<Diagnostic> {
+        let diagnostics: Option<EncodedDiagnostics> = self.load_indexed(
+            tcx,
+            dep_node_index,
+            &self.prev_diagnostics_index,
+            "diagnostics");
+
+        diagnostics.unwrap_or(Vec::new())
     }

     /// Store a diagnostic emitted during the current compilation session.
@@ -256,53 +288,79 @@ impl<'sess> OnDiskCache<'sess> {
         debug_assert!(prev.is_none());
     }

-    pub fn load_query_result<'a, 'tcx, T>(&self,
-                                          tcx: TyCtxt<'a, 'tcx, 'tcx>,
+    /// Returns the cached query result if there is something in the cache for
+    /// the given SerializedDepNodeIndex. Otherwise returns None.
+    pub fn try_load_query_result<'tcx, T>(&self,
+                                          tcx: TyCtxt<'_, 'tcx, 'tcx>,
                                           dep_node_index: SerializedDepNodeIndex)
-                                          -> T
+                                          -> Option<T>
         where T: Decodable
+    {
+        self.load_indexed(tcx,
+                          dep_node_index,
+                          &self.query_result_index,
+                          "query result")
+    }
+
+    /// Store a diagnostic emitted during computation of an anonymous query.
+    /// Since many anonymous queries can share the same `DepNode`, we aggregate
+    /// them -- as opposed to regular queries where we assume that there is a
+    /// 1:1 relationship between query-key and `DepNode`.
+    pub fn store_diagnostics_for_anon_node(&self,
+                                           dep_node_index: DepNodeIndex,
+                                           mut diagnostics: Vec<Diagnostic>) {
+        let mut current_diagnostics = self.current_diagnostics.borrow_mut();
+
+        let x = current_diagnostics.entry(dep_node_index).or_insert_with(|| {
+            mem::replace(&mut diagnostics, Vec::new())
+        });
+
+        x.extend(diagnostics.into_iter());
+    }
+
+    fn load_indexed<'tcx, T>(&self,
+                             tcx: TyCtxt<'_, 'tcx, 'tcx>,
+                             dep_node_index: SerializedDepNodeIndex,
+                             index: &FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>,
+                             debug_tag: &'static str)
+                             -> Option<T>
         where T: Decodable
     {
-        let pos = self.query_result_index[&dep_node_index];
+        let pos = if let Some(&pos) = index.get(&dep_node_index) {
+            pos
+        } else {
+            return None
+        };

         let mut cnum_map = self.cnum_map.borrow_mut();
         if cnum_map.is_none() {
             *cnum_map = Some(Self::compute_cnum_map(tcx, &self.prev_cnums[..]));
         }

+        let mut synthetic_expansion_infos = self.synthetic_expansion_infos.borrow_mut();
+        let mut file_index_to_file = self.file_index_to_file.borrow_mut();
+
         let mut decoder = CacheDecoder {
-            tcx: Some(tcx),
-            opaque: opaque::Decoder::new(&self.serialized_data[..], pos),
+            tcx,
+            opaque: opaque::Decoder::new(&self.serialized_data[..], pos.to_usize()),
             codemap: self.codemap,
-            prev_filemap_starts: &self.prev_filemap_starts,
             cnum_map: cnum_map.as_ref().unwrap(),
+            file_index_to_file: &mut file_index_to_file,
+            file_index_to_stable_id: &self.file_index_to_stable_id,
+            synthetic_expansion_infos: &mut synthetic_expansion_infos,
         };

         match decode_tagged(&mut decoder, dep_node_index) {
             Ok(value) => {
-                value
+                Some(value)
             }
             Err(e) => {
-                bug!("Could not decode cached query result: {}", e)
+                bug!("Could not decode cached {}: {}", debug_tag, e)
             }
         }
     }

-    /// Store a diagnostic emitted during computation of an anonymous query.
-    /// Since many anonymous queries can share the same `DepNode`, we aggregate
-    /// them -- as opposed to regular queries where we assume that there is a
-    /// 1:1 relationship between query-key and `DepNode`.
-    pub fn store_diagnostics_for_anon_node(&self,
-                                           dep_node_index: DepNodeIndex,
-                                           mut diagnostics: Vec<Diagnostic>) {
-        let mut current_diagnostics = self.current_diagnostics.borrow_mut();
-
-        let x = current_diagnostics.entry(dep_node_index).or_insert_with(|| {
-            mem::replace(&mut diagnostics, Vec::new())
-        });
-
-        x.extend(diagnostics.into_iter());
-    }
-
     // This function builds a mapping from previous-session-CrateNum to
     // current-session-CrateNum. There might be CrateNums from the previous
     // Session that don't occur in the current one. For these, the mapping
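Note: `store_diagnostics_for_anon_node` appends rather than asserting uniqueness because several anonymous queries may map to one `DepNode`, and the `entry().or_insert_with(|| mem::replace(...))` dance moves the incoming vector in without cloning when the entry is fresh. The same pattern in isolation, over plain std types:

```rust
use std::collections::HashMap;
use std::mem;

fn store_for_anon_node(
    map: &mut HashMap<u32, Vec<String>>,
    dep_node: u32,
    mut diagnostics: Vec<String>,
) {
    // First batch for `dep_node`: move the whole vector in (leaving
    // `diagnostics` empty); otherwise extend the existing entry.
    let entry = map.entry(dep_node).or_insert_with(|| {
        mem::replace(&mut diagnostics, Vec::new())
    });
    entry.extend(diagnostics);
}

fn main() {
    let mut map = HashMap::new();
    store_for_anon_node(&mut map, 7, vec!["warning A".to_string()]);
    store_for_anon_node(&mut map, 7, vec!["warning B".to_string()]);
    assert_eq!(map[&7].len(), 2);
}
```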
@@ -345,22 +403,45 @@
 /// we use for crate metadata decoding in that it can rebase spans and
 /// eventually will also handle things that contain `Ty` instances.
 struct CacheDecoder<'a, 'tcx: 'a, 'x> {
-    tcx: Option<TyCtxt<'a, 'tcx, 'tcx>>,
+    tcx: TyCtxt<'a, 'tcx, 'tcx>,
     opaque: opaque::Decoder<'x>,
     codemap: &'x CodeMap,
-    prev_filemap_starts: &'x BTreeMap<BytePos, StableFilemapId>,
     cnum_map: &'x IndexVec<CrateNum, Option<CrateNum>>,
+    synthetic_expansion_infos: &'x mut FxHashMap<AbsoluteBytePos, SyntaxContext>,
+    file_index_to_file: &'x mut FxHashMap<FileMapIndex, Rc<FileMap>>,
+    file_index_to_stable_id: &'x FxHashMap<FileMapIndex, StableFilemapId>,
 }

 impl<'a, 'tcx, 'x> CacheDecoder<'a, 'tcx, 'x> {
-    fn find_filemap_prev_bytepos(&self,
-                                 prev_bytepos: BytePos)
-                                 -> Option<(BytePos, StableFilemapId)> {
-        for (start, id) in self.prev_filemap_starts.range(BytePos(0) ..= prev_bytepos).rev() {
-            return Some((*start, *id))
-        }
+    fn file_index_to_file(&mut self, index: FileMapIndex) -> Rc<FileMap> {
+        let CacheDecoder {
+            ref mut file_index_to_file,
+            ref file_index_to_stable_id,
+            ref codemap,
+            ..
+        } = *self;

-        None
+        file_index_to_file.entry(index).or_insert_with(|| {
+            let stable_id = file_index_to_stable_id[&index];
+            codemap.filemap_by_stable_id(stable_id)
+                   .expect("Failed to lookup FileMap in new context.")
+        }).clone()
+    }
+}
+
+trait DecoderWithPosition: Decoder {
+    fn position(&self) -> usize;
+}
+
+impl<'enc> DecoderWithPosition for opaque::Decoder<'enc> {
+    fn position(&self) -> usize {
+        self.position()
+    }
+}
+
+impl<'a, 'tcx, 'x> DecoderWithPosition for CacheDecoder<'a, 'tcx, 'x> {
+    fn position(&self) -> usize {
+        self.opaque.position()
     }
 }

@@ -371,7 +452,7 @@ fn decode_tagged<'a, 'tcx, D, T, V>(decoder: &mut D,
                                     -> Result<V, D::Error>
     where T: Decodable + Eq + ::std::fmt::Debug,
           V: Decodable,
-          D: Decoder + ty_codec::TyDecoder<'a, 'tcx>,
+          D: DecoderWithPosition,
           'tcx: 'a,
 {
     let start_pos = decoder.position();
@@ -392,7 +473,7 @@ impl<'a, 'tcx: 'a, 'x> ty_codec::TyDecoder<'a, 'tcx> for CacheDecoder<'a, 'tcx, 'x> {

     #[inline]
     fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> {
-        self.tcx.expect("missing TyCtxt in CacheDecoder")
+        self.tcx
     }

     #[inline]
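Note: `file_index_to_file` is a lazily populated memo table: the first lookup of an index performs the (possibly failing) `StableFilemapId` resolution, later lookups just bump an `Rc` refcount. The destructured-`self` borrow split plus `entry().or_insert_with().clone()` is the standard shape for this; in isolation:

```rust
use std::collections::HashMap;
use std::rc::Rc;

struct FileTable {
    // Maps index -> expensive-to-compute value, filled in on first use.
    cache: HashMap<u32, Rc<String>>,
}

impl FileTable {
    fn lookup(&mut self, index: u32) -> Rc<String> {
        self.cache
            .entry(index)
            .or_insert_with(|| {
                // Stand-in for codemap.filemap_by_stable_id(...).expect(...)
                Rc::new(format!("file #{}", index))
            })
            .clone() // cloning an Rc is just a refcount bump
    }
}

fn main() {
    let mut table = FileTable { cache: HashMap::new() };
    let a = table.lookup(3);
    let b = table.lookup(3);
    assert!(Rc::ptr_eq(&a, &b)); // the second lookup hit the cache
}
```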
@@ -450,18 +531,55 @@ implement_ty_decoder!( CacheDecoder<'a, 'tcx, 'x> );

 impl<'a, 'tcx, 'x> SpecializedDecoder<Span> for CacheDecoder<'a, 'tcx, 'x> {
     fn specialized_decode(&mut self) -> Result<Span, Self::Error> {
-        let lo = BytePos::decode(self)?;
-        let hi = BytePos::decode(self)?;
-
-        if let Some((prev_filemap_start, filemap_id)) = self.find_filemap_prev_bytepos(lo) {
-            if let Some(current_filemap) = self.codemap.filemap_by_stable_id(filemap_id) {
-                let lo = (lo + current_filemap.start_pos) - prev_filemap_start;
-                let hi = (hi + current_filemap.start_pos) - prev_filemap_start;
-                return Ok(Span::new(lo, hi, NO_EXPANSION));
-            }
+        let tag: u8 = Decodable::decode(self)?;
+
+        if tag == TAG_INVALID_SPAN {
+            return Ok(DUMMY_SP);
+        } else {
+            debug_assert_eq!(tag, TAG_VALID_SPAN);
         }

-        Ok(DUMMY_SP)
+        let file_lo_index = FileMapIndex::decode(self)?;
+        let line_lo = usize::decode(self)?;
+        let col_lo = BytePos::decode(self)?;
+        let len = BytePos::decode(self)?;
+
+        let file_lo = self.file_index_to_file(file_lo_index);
+        let lo = file_lo.lines.borrow()[line_lo - 1] + col_lo;
+        let hi = lo + len;
+
+        let expn_info_tag = u8::decode(self)?;
+
+        let ctxt = match expn_info_tag {
+            TAG_NO_EXPANSION_INFO => {
+                SyntaxContext::empty()
+            }
+            TAG_EXPANSION_INFO_INLINE => {
+                let pos = AbsoluteBytePos::new(self.opaque.position());
+                let expn_info: ExpnInfo = Decodable::decode(self)?;
+                let ctxt = SyntaxContext::allocate_directly(expn_info);
+                self.synthetic_expansion_infos.insert(pos, ctxt);
+                ctxt
+            }
+            TAG_EXPANSION_INFO_SHORTHAND => {
+                let pos = AbsoluteBytePos::decode(self)?;
+                if let Some(ctxt) = self.synthetic_expansion_infos.get(&pos).cloned() {
+                    ctxt
+                } else {
+                    let expn_info = self.with_position(pos.to_usize(), |this| {
+                        ExpnInfo::decode(this)
+                    })?;
+                    let ctxt = SyntaxContext::allocate_directly(expn_info);
+                    self.synthetic_expansion_infos.insert(pos, ctxt);
+                    ctxt
+                }
+            }
+            _ => {
+                unreachable!()
+            }
+        };
+
+        Ok(Span::new(lo, hi, ctxt))
     }
 }

@@ -479,6 +597,7 @@ impl<'a, 'tcx, 'x> SpecializedDecoder<Span> for CacheDecoder<'a, 'tcx, 'x> {
 // compilation sessions. We use the DefPathHash, which is stable across
 // sessions, to map the old DefId to the new one.
 impl<'a, 'tcx, 'x> SpecializedDecoder<DefId> for CacheDecoder<'a, 'tcx, 'x> {
+    #[inline]
     fn specialized_decode(&mut self) -> Result<DefId, Self::Error> {
         // Load the DefPathHash, which is what we encoded the DefId as.
         let def_path_hash = DefPathHash::decode(self)?;
@@ -489,6 +608,7 @@ impl<'a, 'tcx, 'x> SpecializedDecoder<DefId> for CacheDecoder<'a, 'tcx, 'x> {
 }

 impl<'a, 'tcx, 'x> SpecializedDecoder<LocalDefId> for CacheDecoder<'a, 'tcx, 'x> {
+    #[inline]
     fn specialized_decode(&mut self) -> Result<LocalDefId, Self::Error> {
         Ok(LocalDefId::from_def_id(DefId::decode(self)?))
     }
@@ -558,11 +678,18 @@ struct CacheEncoder<'enc, 'a, 'tcx, E>
     encoder: &'enc mut E,
     type_shorthands: FxHashMap<ty::Ty<'tcx>, usize>,
     predicate_shorthands: FxHashMap<ty::Predicate<'tcx>, usize>,
+    expn_info_shorthands: FxHashMap<Mark, AbsoluteBytePos>,
+    codemap: CachingCodemapView<'tcx>,
+    file_to_file_index: FxHashMap<*const FileMap, FileMapIndex>,
 }

 impl<'enc, 'a, 'tcx, E> CacheEncoder<'enc, 'a, 'tcx, E>
     where E: 'enc + ty_codec::TyEncoder
 {
+    fn filemap_index(&mut self, filemap: Rc<FileMap>) -> FileMapIndex {
+        self.file_to_file_index[&(&*filemap as *const FileMap)]
+    }
+
     /// Encode something with additional information that allows to do some
     /// sanity checks when decoding the data again. This method will first
     /// encode the specified tag, then the given value, then the number of
@@ -584,6 +711,65 @@ impl<'enc, 'a, 'tcx, E> CacheEncoder<'enc, 'a, 'tcx, E>
     }
 }

+impl<'enc, 'a, 'tcx, E> SpecializedEncoder<Span> for CacheEncoder<'enc, 'a, 'tcx, E>
+    where E: 'enc + ty_codec::TyEncoder
+{
+    fn specialized_encode(&mut self, span: &Span) -> Result<(), Self::Error> {
+
+        if *span == DUMMY_SP {
+            return TAG_INVALID_SPAN.encode(self);
+        }
+
+        let span_data = span.data();
+
+        if span_data.hi < span_data.lo {
+            return TAG_INVALID_SPAN.encode(self);
+        }
+
+        let (file_lo, line_lo, col_lo) = match self.codemap
+                                                   .byte_pos_to_line_and_col(span_data.lo) {
+            Some(pos) => pos,
+            None => {
+                return TAG_INVALID_SPAN.encode(self);
+            }
+        };
+
+        if !file_lo.contains(span_data.hi) {
+            return TAG_INVALID_SPAN.encode(self);
+        }
+
+        let len = span_data.hi - span_data.lo;
+
+        let filemap_index = self.filemap_index(file_lo);
+
+        TAG_VALID_SPAN.encode(self)?;
+        filemap_index.encode(self)?;
+        line_lo.encode(self)?;
+        col_lo.encode(self)?;
+        len.encode(self)?;
+
+        if span_data.ctxt == SyntaxContext::empty() {
+            TAG_NO_EXPANSION_INFO.encode(self)
+        } else {
+            let mark = span_data.ctxt.outer();
+
+            if let Some(expn_info) = mark.expn_info() {
+                if let Some(pos) = self.expn_info_shorthands.get(&mark).cloned() {
+                    TAG_EXPANSION_INFO_SHORTHAND.encode(self)?;
+                    pos.encode(self)
+                } else {
+                    TAG_EXPANSION_INFO_INLINE.encode(self)?;
+                    let pos = AbsoluteBytePos::new(self.position());
+                    self.expn_info_shorthands.insert(mark, pos);
+                    expn_info.encode(self)
+                }
+            } else {
+                TAG_NO_EXPANSION_INFO.encode(self)
+            }
+        }
+    }
+}
+
 impl<'enc, 'a, 'tcx, E> ty_codec::TyEncoder for CacheEncoder<'enc, 'a, 'tcx, E>
     where E: 'enc + ty_codec::TyEncoder
 {
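Note: expansion info is deduplicated with the shorthand scheme visible in the encoder above and the decoder earlier: the first time a `Mark`'s `ExpnInfo` is serialized it goes inline and its byte position is remembered; every later reference encodes only that position, and the decoder keeps a mirror table so each position is materialized once. A compact sketch of the encode side, with toy types and a single byte buffer:

```rust
use std::collections::HashMap;

const TAG_SHORTHAND: u8 = 1;
const TAG_INLINE: u8 = 2;

struct Encoder {
    buf: Vec<u8>,
    // Mark -> absolute byte position of its inline ExpnInfo encoding.
    shorthands: HashMap<u32, u32>,
}

impl Encoder {
    fn encode_expn_info(&mut self, mark: u32, expn_info: &[u8]) {
        if let Some(&pos) = self.shorthands.get(&mark) {
            // Seen before: emit only a backreference to the first encoding.
            self.buf.push(TAG_SHORTHAND);
            self.buf.extend_from_slice(&pos.to_le_bytes());
        } else {
            // First occurrence: emit inline and remember where it starts.
            self.buf.push(TAG_INLINE);
            let pos = self.buf.len() as u32;
            self.shorthands.insert(mark, pos);
            self.buf.extend_from_slice(expn_info);
        }
    }
}

fn main() {
    let mut enc = Encoder { buf: Vec::new(), shorthands: HashMap::new() };
    enc.encode_expn_info(1, b"#[derive(Debug)] at lib.rs:3"); // 1 + 28 bytes
    enc.encode_expn_info(1, b"#[derive(Debug)] at lib.rs:3"); // 1 + 4 bytes
    assert_eq!(enc.buf.len(), 29 + 5);
}
```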
@@ -753,10 +939,7 @@ impl IntEncodedWithFixedSize {
 impl UseSpecializedEncodable for IntEncodedWithFixedSize {}
 impl UseSpecializedDecodable for IntEncodedWithFixedSize {}

-impl<'enc, 'a, 'tcx, E> SpecializedEncoder<IntEncodedWithFixedSize>
-for CacheEncoder<'enc, 'a, 'tcx, E>
-    where E: 'enc + ty_codec::TyEncoder
-{
+impl<'enc> SpecializedEncoder<IntEncodedWithFixedSize> for opaque::Encoder<'enc> {
     fn specialized_encode(&mut self, x: &IntEncodedWithFixedSize) -> Result<(), Self::Error> {
         let start_pos = self.position();
         for i in 0 .. IntEncodedWithFixedSize::ENCODED_SIZE {
@@ -768,8 +951,7 @@ for CacheEncoder<'enc, 'a, 'tcx, E>
     }
 }

-impl<'a, 'tcx, 'x> SpecializedDecoder<IntEncodedWithFixedSize>
-for CacheDecoder<'a, 'tcx, 'x> {
+impl<'enc> SpecializedDecoder<IntEncodedWithFixedSize> for opaque::Decoder<'enc> {
     fn specialized_decode(&mut self) -> Result<IntEncodedWithFixedSize, Self::Error> {
         let mut value: u64 = 0;
         let start_pos = self.position();
@@ -799,7 +981,7 @@ fn encode_query_results<'enc, 'a, 'tcx, Q, E>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
         let dep_node = SerializedDepNodeIndex::new(entry.index.index());

         // Record position of the cache entry
-        query_result_index.push((dep_node, encoder.position()));
+        query_result_index.push((dep_node, AbsoluteBytePos::new(encoder.position())));

         // Encode the type check tables with the SerializedDepNodeIndex
         // as tag.
diff --git a/src/librustc/ty/maps/plumbing.rs b/src/librustc/ty/maps/plumbing.rs
index 75df4dc524afa..fdaa13e7fd16f 100644
--- a/src/librustc/ty/maps/plumbing.rs
+++ b/src/librustc/ty/maps/plumbing.rs
@@ -145,7 +145,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
         if !self.dep_graph.is_fully_enabled() {
             return None;
         }
-        match self.dep_graph.try_mark_green(self, &dep_node) {
+        match self.dep_graph.try_mark_green(self.global_tcx(), &dep_node) {
             Some(dep_node_index) => {
                 debug_assert!(self.dep_graph.is_green(dep_node_index));
                 self.dep_graph.read_index(dep_node_index);
@@ -392,12 +392,31 @@ macro_rules! define_maps {
         {
             debug_assert!(tcx.dep_graph.is_green(dep_node_index));

-            let result = if tcx.sess.opts.debugging_opts.incremental_queries &&
-                            Self::cache_on_disk(key) {
+            // First we try to load the result from the on-disk cache
+            let result = if Self::cache_on_disk(key) &&
+                            tcx.sess.opts.debugging_opts.incremental_queries {
                 let prev_dep_node_index =
                     tcx.dep_graph.prev_dep_node_index_of(dep_node);
-                Self::load_from_disk(tcx.global_tcx(), prev_dep_node_index)
+                let result = Self::try_load_from_disk(tcx.global_tcx(),
+                                                      prev_dep_node_index);
+
+                // We always expect to find a cached result for things that
+                // can be forced from DepNode.
+                debug_assert!(!dep_node.kind.can_reconstruct_query_key() ||
+                              result.is_some(),
+                              "Missing on-disk cache entry for {:?}",
+                              dep_node);
+                result
+            } else {
+                // Some things are never cached on disk.
+                None
+            };
+
+            let result = if let Some(result) = result {
+                result
             } else {
+                // We could not load a result from the on-disk cache, so
+                // recompute.
                 let (result, _) = tcx.cycle_check(span, Query::$name(key), || {
                     // The diagnostics for this query have already been
                     // promoted to the current session during
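Note: the control flow added to `define_maps!` is: once a dep-node is marked green, consult the on-disk cache first, fall back to recomputing, and (in debug builds) insist that anything forcible from its `DepNode` really was cached. The same flow as a standalone sketch over stand-in types (hypothetical helpers, not the real query machinery):

```rust
struct DepNode { can_reconstruct_query_key: bool }

fn load_green_query_result(
    dep_node: &DepNode,
    incremental_queries_enabled: bool,
    cache_on_disk: bool,
    try_load_from_disk: impl Fn() -> Option<String>,
    recompute: impl Fn() -> String,
) -> String {
    // First try to load the result from the on-disk cache.
    let result = if cache_on_disk && incremental_queries_enabled {
        let result = try_load_from_disk();
        // We always expect a cached result for dep-nodes whose query key
        // can be reconstructed (i.e. that can be forced).
        debug_assert!(
            !dep_node.can_reconstruct_query_key || result.is_some(),
            "missing on-disk cache entry"
        );
        result
    } else {
        None // some things are never cached on disk
    };

    // Could not load from the cache, so recompute.
    result.unwrap_or_else(|| recompute())
}

fn main() {
    let node = DepNode { can_reconstruct_query_key: false };
    let r = load_green_query_result(&node, true, true, || None, || "recomputed".into());
    assert_eq!(r, "recomputed");
}
```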
diff --git a/src/libserialize/opaque.rs b/src/libserialize/opaque.rs
index f3475bd18ce69..99557659b297b 100644
--- a/src/libserialize/opaque.rs
+++ b/src/libserialize/opaque.rs
@@ -162,6 +162,10 @@ impl<'a> Decoder<'a> {
         self.position
     }

+    pub fn set_position(&mut self, pos: usize) {
+        self.position = pos
+    }
+
     pub fn advance(&mut self, bytes: usize) {
         self.position += bytes;
     }
diff --git a/src/libsyntax/codemap.rs b/src/libsyntax/codemap.rs
index 3464db2a81111..3aac5334a38d6 100644
--- a/src/libsyntax/codemap.rs
+++ b/src/libsyntax/codemap.rs
@@ -105,7 +105,7 @@ impl FileLoader for RealFileLoader {
 // This is a FileMap identifier that is used to correlate FileMaps between
 // subsequent compilation sessions (which is something we need to do during
 // incremental compilation).
-#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
+#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, Debug)]
 pub struct StableFilemapId(u128);

 impl StableFilemapId {
diff --git a/src/libsyntax_pos/hygiene.rs b/src/libsyntax_pos/hygiene.rs
index 4790fa0a7edc2..9358e654a9fc8 100644
--- a/src/libsyntax_pos/hygiene.rs
+++ b/src/libsyntax_pos/hygiene.rs
@@ -140,6 +140,31 @@ impl SyntaxContext {
         SyntaxContext(0)
     }

+    // Allocate a new SyntaxContext with the given ExpnInfo. This is used when
+    // deserializing Spans from the incr. comp. cache.
+    // FIXME(mw): This method does not restore MarkData::parent or
+    // SyntaxContextData::prev_ctxt or SyntaxContextData::modern. These things
+    // don't seem to be used after HIR lowering, so everything should be fine
+    // as long as incremental compilation does not kick in before that.
+    pub fn allocate_directly(expansion_info: ExpnInfo) -> Self {
+        HygieneData::with(|data| {
+            data.marks.push(MarkData {
+                parent: Mark::root(),
+                modern: false,
+                expn_info: Some(expansion_info)
+            });
+
+            let mark = Mark(data.marks.len() as u32 - 1);
+
+            data.syntax_contexts.push(SyntaxContextData {
+                outer_mark: mark,
+                prev_ctxt: SyntaxContext::empty(),
+                modern: SyntaxContext::empty(),
+            });
+            SyntaxContext(data.syntax_contexts.len() as u32 - 1)
+        })
+    }
+
     /// Extend a syntax context with a given mark
     pub fn apply_mark(self, mark: Mark) -> SyntaxContext {
         HygieneData::with(|data| {
@@ -286,7 +311,7 @@ impl fmt::Debug for SyntaxContext {
 }

 /// Extra information for tracking spans of macro and syntax sugar expansion
-#[derive(Clone, Hash, Debug)]
+#[derive(Clone, Hash, Debug, RustcEncodable, RustcDecodable)]
 pub struct ExpnInfo {
     /// The location of the actual macro invocation or syntax sugar, e.g.
     /// `let x = foo!();` or `if let Some(y) = x {}`
@@ -302,7 +327,7 @@ pub struct ExpnInfo {
     pub callee: NameAndSpan
 }

-#[derive(Clone, Hash, Debug)]
+#[derive(Clone, Hash, Debug, RustcEncodable, RustcDecodable)]
 pub struct NameAndSpan {
     /// The format with which the macro was invoked.
     pub format: ExpnFormat,
@@ -330,7 +355,7 @@ impl NameAndSpan {
 }

 /// The source of expansion.
-#[derive(Clone, Hash, Debug, PartialEq, Eq)]
+#[derive(Clone, Hash, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)]
 pub enum ExpnFormat {
     /// e.g. #[derive(...)]
     MacroAttribute(Symbol),
@@ -341,7 +366,7 @@ pub enum ExpnFormat {
 }

 /// The kind of compiler desugaring.
-#[derive(Clone, Hash, Debug, PartialEq, Eq)]
+#[derive(Clone, Hash, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)]
 pub enum CompilerDesugaringKind {
     BackArrow,
     DotFill,
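Note: `allocate_directly` sidesteps the normal hygiene plumbing: it pushes a fresh `MarkData` and `SyntaxContextData` whose table indices become the new `Mark` and `SyntaxContext`, without restoring `parent`/`prev_ctxt` (hence the FIXME). The underlying push-and-take-index pattern, in isolation:

```rust
// Interner-style allocation: the new handle is just the table index.
struct Table<T> { items: Vec<T> }

impl<T> Table<T> {
    fn allocate(&mut self, item: T) -> u32 {
        self.items.push(item);
        (self.items.len() - 1) as u32
    }
}

fn main() {
    let mut marks = Table { items: Vec::new() };
    let mark = marks.allocate("expansion info for span X");
    assert_eq!(mark, 0);
    assert_eq!(marks.items[mark as usize], "expansion info for span X");
}
```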
diff --git a/src/libsyntax_pos/lib.rs b/src/libsyntax_pos/lib.rs
index 47755dc1d5468..bf059cac89152 100644
--- a/src/libsyntax_pos/lib.rs
+++ b/src/libsyntax_pos/lib.rs
@@ -931,6 +931,11 @@ impl FileMap {
             (lines[line_index], lines[line_index + 1])
         }
     }
+
+    #[inline]
+    pub fn contains(&self, byte_pos: BytePos) -> bool {
+        byte_pos >= self.start_pos && byte_pos <= self.end_pos
+    }
 }

 /// Remove utf-8 BOM if any.