diff --git a/src/librustdoc/clean/mod.rs b/src/librustdoc/clean/mod.rs
index c00fa5994bfce..5f5521caf68b9 100644
--- a/src/librustdoc/clean/mod.rs
+++ b/src/librustdoc/clean/mod.rs
@@ -32,6 +32,7 @@ use rustc_span::symbol::{kw, sym, Ident, Symbol};
 use rustc_span::{self, ExpnKind};
 
 use std::assert_matches::assert_matches;
+use std::borrow::Cow;
 use std::collections::hash_map::Entry;
 use std::collections::BTreeMap;
 use std::default::Default;
@@ -39,7 +40,6 @@ use std::hash::Hash;
 use std::mem;
 
 use thin_vec::ThinVec;
-use crate::clean::inline::merge_attrs;
 use crate::core::{self, DocContext, ImplTraitParam};
 use crate::formats::item_type::ItemType;
 use crate::visit_ast::Module as DocModule;
@@ -2168,32 +2168,39 @@ impl<'hir> hir::intravisit::Visitor<'hir> for OneLevelVisitor<'hir> {
 /// documentation. Otherwise, we repeat the same operation until we find the "end item".
 fn get_all_import_attributes<'hir>(
     mut item: &hir::Item<'hir>,
-    tcx: TyCtxt<'hir>,
+    cx: &mut DocContext<'hir>,
     target_def_id: LocalDefId,
-    attributes: &mut Vec<ast::Attribute>,
     is_inline: bool,
-) {
+    mut prev_import: LocalDefId,
+) -> Vec<(Cow<'hir, ast::Attribute>, Option<DefId>)> {
+    let mut attributes: Vec<(Cow<'hir, ast::Attribute>, Option<DefId>)> = Vec::new();
     let mut first = true;
-    let hir_map = tcx.hir();
+    let hir_map = cx.tcx.hir();
     let mut visitor = OneLevelVisitor::new(hir_map, target_def_id);
     let mut visited = FxHashSet::default();
 
     // If the item is an import and has at least a path with two parts, we go into it.
     while let hir::ItemKind::Use(path, _) = item.kind && visited.insert(item.hir_id()) {
+        let import_parent = cx.tcx.opt_local_parent(prev_import).map(|def_id| def_id.to_def_id());
         if first {
             // This is the "original" reexport so we get all its attributes without filtering them.
-            attributes.extend_from_slice(hir_map.attrs(item.hir_id()));
+            attributes = hir_map.attrs(item.hir_id())
+                .iter()
+                .map(|attr| (Cow::Borrowed(attr), import_parent))
+                .collect::<Vec<_>>();
             first = false;
         } else {
-            add_without_unwanted_attributes(attributes, hir_map.attrs(item.hir_id()), is_inline);
+            add_without_unwanted_attributes(&mut attributes, hir_map.attrs(item.hir_id()), is_inline, import_parent);
         }
 
-        if let Some(i) = visitor.find_target(tcx, item.owner_id.def_id.to_def_id(), path) {
+        if let Some(i) = visitor.find_target(cx.tcx, item.owner_id.def_id.to_def_id(), path) {
             item = i;
         } else {
             break;
         }
+        prev_import = item.owner_id.def_id;
     }
+    attributes
 }
 
 fn filter_tokens_from_list(
@@ -2239,17 +2246,24 @@ fn filter_tokens_from_list(
 /// * `doc(inline)`
 /// * `doc(no_inline)`
 /// * `doc(hidden)`
-fn add_without_unwanted_attributes(
-    attrs: &mut Vec<ast::Attribute>,
-    new_attrs: &[ast::Attribute],
+fn add_without_unwanted_attributes<'hir>(
+    attrs: &mut Vec<(Cow<'hir, ast::Attribute>, Option<DefId>)>,
+    new_attrs: &'hir [ast::Attribute],
     is_inline: bool,
+    import_parent: Option<DefId>,
 ) {
     // If it's `#[doc(inline)]`, we don't want all attributes, otherwise we keep everything.
     if !is_inline {
-        attrs.extend_from_slice(new_attrs);
+        for attr in new_attrs {
+            attrs.push((Cow::Borrowed(attr), import_parent));
+        }
         return;
     }
     for attr in new_attrs {
+        if matches!(attr.kind, ast::AttrKind::DocComment(..)) {
+            attrs.push((Cow::Borrowed(attr), import_parent));
+            continue;
+        }
         let mut attr = attr.clone();
         match attr.kind {
             ast::AttrKind::Normal(ref mut normal) => {
@@ -2276,18 +2290,15 @@ fn add_without_unwanted_attributes(
                                 )
                             });
                             args.tokens = TokenStream::new(tokens);
-                            attrs.push(attr);
+                            attrs.push((Cow::Owned(attr), import_parent));
                         }
                         ast::AttrArgs::Empty | ast::AttrArgs::Eq(..) => {
-                            attrs.push(attr);
-                            continue;
+                            attrs.push((Cow::Owned(attr), import_parent));
                         }
                     }
                 }
             }
-            ast::AttrKind::DocComment(..) => {
-                attrs.push(attr);
-            }
+            _ => unreachable!(),
         }
     }
 }
@@ -2374,26 +2385,43 @@ fn clean_maybe_renamed_item<'tcx>(
         _ => unreachable!("not yet converted"),
     };
 
-    let mut import_attrs = Vec::new();
-    let mut target_attrs = Vec::new();
-    if let Some(import_id) = import_id &&
+    let attrs = if let Some(import_id) = import_id &&
         let Some(hir::Node::Item(use_node)) = cx.tcx.hir().find_by_def_id(import_id)
     {
-        let is_inline = inline::load_attrs(cx, import_id.to_def_id()).lists(sym::doc).get_word_attr(sym::inline).is_some();
+        let is_inline = inline::load_attrs(cx, import_id.to_def_id())
+            .lists(sym::doc)
+            .get_word_attr(sym::inline)
+            .is_some();
         // Then we get all the various imports' attributes.
-        get_all_import_attributes(use_node, cx.tcx, item.owner_id.def_id, &mut import_attrs, is_inline);
-        add_without_unwanted_attributes(&mut target_attrs, inline::load_attrs(cx, def_id), is_inline);
+        let mut attrs = get_all_import_attributes(
+            use_node,
+            cx,
+            item.owner_id.def_id,
+            is_inline,
+            import_id,
+        );
+
+        add_without_unwanted_attributes(
+            &mut attrs,
+            inline::load_attrs(cx, def_id),
+            is_inline,
+            None
+        );
+        attrs
     } else {
         // We only keep the item's attributes.
-        target_attrs.extend_from_slice(inline::load_attrs(cx, def_id));
-    }
+        inline::load_attrs(cx, def_id).iter().map(|attr| (Cow::Borrowed(attr), None)).collect::<Vec<_>>()
+    };
 
-    let import_id = import_id.map(|def_id| def_id.to_def_id());
-    let (attrs, cfg) = merge_attrs(cx, &target_attrs, Some((&import_attrs, import_id)));
+    let cfg = attrs.cfg(cx.tcx, &cx.cache.hidden_cfg);
+    let attrs = Attributes::from_ast_iter(attrs.iter().map(|(attr, did)| match attr {
+        Cow::Borrowed(attr) => (*attr, *did),
+        Cow::Owned(attr) => (attr, *did)
+    }), false);
 
     let mut item =
         Item::from_def_id_and_attrs_and_parts(def_id, Some(name), kind, Box::new(attrs), cfg);
-    item.inline_stmt_id = import_id;
+    item.inline_stmt_id = import_id.map(|local| local.to_def_id());
     vec![item]
 })
 }
diff --git a/src/librustdoc/clean/types.rs b/src/librustdoc/clean/types.rs
index 7dbb3f76a0a83..9019a6c49ecc5 100644
--- a/src/librustdoc/clean/types.rs
+++ b/src/librustdoc/clean/types.rs
@@ -1,3 +1,4 @@
+use std::borrow::Cow;
 use std::cell::RefCell;
 use std::default::Default;
 use std::hash::Hash;
@@ -867,30 +868,15 @@ pub(crate) struct Module {
 
 pub(crate) trait AttributesExt {
     type AttributeIterator<'a>: Iterator<Item = ast::NestedMetaItem>
+    where
+        Self: 'a;
+    type Attributes<'a>: Iterator<Item = &'a ast::Attribute>
     where
         Self: 'a;
 
     fn lists<'a>(&'a self, name: Symbol) -> Self::AttributeIterator<'a>;
 
-    fn span(&self) -> Option<rustc_span::Span>;
-
-    fn cfg(&self, tcx: TyCtxt<'_>, hidden_cfg: &FxHashSet<Cfg>) -> Option<Arc<Cfg>>;
-}
-
-impl AttributesExt for [ast::Attribute] {
-    type AttributeIterator<'a> = impl Iterator<Item = ast::NestedMetaItem> + 'a;
-
-    fn lists<'a>(&'a self, name: Symbol) -> Self::AttributeIterator<'a> {
-        self.iter()
-            .filter(move |attr| attr.has_name(name))
-            .filter_map(ast::Attribute::meta_item_list)
-            .flatten()
-    }
-
-    /// Return the span of the first doc-comment, if it exists.
-    fn span(&self) -> Option<rustc_span::Span> {
-        self.iter().find(|attr| attr.doc_str().is_some()).map(|attr| attr.span)
-    }
+    fn iter<'a>(&'a self) -> Self::Attributes<'a>;
 
     fn cfg(&self, tcx: TyCtxt<'_>, hidden_cfg: &FxHashSet<Cfg>) -> Option<Arc<Cfg>> {
         let sess = tcx.sess;
@@ -980,6 +966,43 @@ impl AttributesExt for [ast::Attribute] {
     }
 }
 
+impl AttributesExt for [ast::Attribute] {
+    type AttributeIterator<'a> = impl Iterator<Item = ast::NestedMetaItem> + 'a;
+    type Attributes<'a> = impl Iterator<Item = &'a ast::Attribute> + 'a;
+
+    fn lists<'a>(&'a self, name: Symbol) -> Self::AttributeIterator<'a> {
+        self.iter()
+            .filter(move |attr| attr.has_name(name))
+            .filter_map(ast::Attribute::meta_item_list)
+            .flatten()
+    }
+
+    fn iter<'a>(&'a self) -> Self::Attributes<'a> {
+        self.into_iter()
+    }
+}
+
+impl AttributesExt for [(Cow<'_, ast::Attribute>, Option<DefId>)] {
+    type AttributeIterator<'a> = impl Iterator<Item = ast::NestedMetaItem> + 'a
+        where Self: 'a;
+    type Attributes<'a> = impl Iterator<Item = &'a ast::Attribute> + 'a
+        where Self: 'a;
+
+    fn lists<'a>(&'a self, name: Symbol) -> Self::AttributeIterator<'a> {
+        AttributesExt::iter(self)
+            .filter(move |attr| attr.has_name(name))
+            .filter_map(ast::Attribute::meta_item_list)
+            .flatten()
+    }
+
+    fn iter<'a>(&'a self) -> Self::Attributes<'a> {
+        self.into_iter().map(move |(attr, _)| match attr {
+            Cow::Borrowed(attr) => *attr,
+            Cow::Owned(attr) => attr,
+        })
+    }
+}
+
 pub(crate) trait NestedAttributesExt {
     /// Returns `true` if the attribute list contains a specific `word`
     fn has_word(self, word: Symbol) -> bool
diff --git a/src/librustdoc/doctest.rs b/src/librustdoc/doctest.rs
index aaa83ecce4817..1a896b411ab06 100644
--- a/src/librustdoc/doctest.rs
+++ b/src/librustdoc/doctest.rs
@@ -1239,8 +1239,9 @@ impl<'a, 'hir, 'tcx> HirCollector<'a, 'hir, 'tcx> {
         if let Some(doc) = attrs.collapsed_doc_value() {
             // Use the outermost invocation, so that doctest names come from where the docs were written.
             let span = ast_attrs
-                .span()
-                .map(|span| span.ctxt().outer_expn().expansion_cause().unwrap_or(span))
+                .iter()
+                .find(|attr| attr.doc_str().is_some())
+                .map(|attr| attr.span.ctxt().outer_expn().expansion_cause().unwrap_or(attr.span))
                 .unwrap_or(DUMMY_SP);
             self.collector.set_position(span);
             markdown::find_testable_code(
diff --git a/tests/rustdoc-ui/issue-109282-import-inline-merge.rs b/tests/rustdoc-ui/issue-109282-import-inline-merge.rs
new file mode 100644
index 0000000000000..0ec8523222fbe
--- /dev/null
+++ b/tests/rustdoc-ui/issue-109282-import-inline-merge.rs
@@ -0,0 +1,14 @@
+// Regression test for <https://github.com/rust-lang/rust/issues/109282>.
+// Import for `ValueEnum` is inlined and doc comments on the import and `ValueEnum` itself are
+// merged. After the merge they still have correct parent scopes to resolve both `[ValueEnum]`.
+
+// check-pass
+
+mod m {
+    pub enum ValueEnum {}
+}
+mod m2 {
+    /// [`ValueEnum`]
+    pub use crate::m::ValueEnum;
+}
+pub use m2::ValueEnum;
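
Illustrative sketch (not part of the patch): the `(Cow<'hir, ast::Attribute>, Option<DefId>)` pairs built above attach the parent module of the re-export a doc comment was written in to each merged attribute, which is what lets intra-doc links on inlined imports keep resolving in their original scope. The layout below only mirrors the shape of the regression test; the names `inner`, `nested`, `Widget` and `Helper` are hypothetical.

// Sketch only, assuming hypothetical names; same shape as the regression test.
pub mod inner {
    pub struct Helper;

    mod nested {
        pub struct Widget;
    }

    /// Builds on [`Helper`]. This link is written in `inner`'s scope; the
    /// parent `DefId` recorded next to the merged attribute lets rustdoc
    /// resolve it there once the docs land on the inlined `Widget`.
    pub use self::nested::Widget;
}

// Inlining this re-export merges the doc comment above into `Widget`'s docs.
pub use inner::Widget;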