diff --git a/compiler/rustc_codegen_gcc/src/back/lto.rs b/compiler/rustc_codegen_gcc/src/back/lto.rs
index 404064fb7a060..24be3ee4c34d3 100644
--- a/compiler/rustc_codegen_gcc/src/back/lto.rs
+++ b/compiler/rustc_codegen_gcc/src/back/lto.rs
@@ -26,11 +26,11 @@ use std::sync::atomic::Ordering;
 use gccjit::{Context, OutputKind};
 use object::read::archive::ArchiveFile;
 use rustc_codegen_ssa::back::lto::{SerializedModule, ThinModule, ThinShared};
-use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput};
+use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput, SharedEmitter};
 use rustc_codegen_ssa::traits::*;
 use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, looks_like_rust_object_file};
 use rustc_data_structures::memmap::Mmap;
-use rustc_errors::DiagCtxtHandle;
+use rustc_errors::{DiagCtxt, DiagCtxtHandle};
 use rustc_log::tracing::info;
 use rustc_middle::bug;
 use rustc_middle::dep_graph::WorkProduct;
@@ -112,10 +112,11 @@ fn save_as_file(obj: &[u8], path: &Path) -> Result<(), LtoBitcodeFromRlib> {
 /// for further optimization.
 pub(crate) fn run_fat(
     cgcx: &CodegenContext,
+    shared_emitter: &SharedEmitter,
     each_linked_rlib_for_lto: &[PathBuf],
     modules: Vec>,
 ) -> ModuleCodegen {
-    let dcx = cgcx.create_dcx();
+    let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
     let dcx = dcx.handle();
     let lto_data = prepare_lto(cgcx, each_linked_rlib_for_lto, dcx);
     /*let symbols_below_threshold =
@@ -283,14 +284,13 @@ impl ModuleBufferMethods for ModuleBuffer {
 /// can simply be copied over from the incr. comp. cache.
 pub(crate) fn run_thin(
     cgcx: &CodegenContext,
+    dcx: DiagCtxtHandle<'_>,
     each_linked_rlib_for_lto: &[PathBuf],
     modules: Vec<(String, ThinBuffer)>,
     cached_modules: Vec<(SerializedModule, WorkProduct)>,
 ) -> (Vec>, Vec) {
-    let dcx = cgcx.create_dcx();
-    let dcx = dcx.handle();
     let lto_data = prepare_lto(cgcx, each_linked_rlib_for_lto, dcx);
-    if cgcx.opts.cg.linker_plugin_lto.enabled() {
+    if cgcx.use_linker_plugin_lto {
         unreachable!(
             "We should never reach this case if the LTO step \
             is deferred to the linker"
@@ -522,8 +522,6 @@ pub fn optimize_thin_module(
     thin_module: ThinModule,
     _cgcx: &CodegenContext,
 ) -> ModuleCodegen {
-    //let dcx = cgcx.create_dcx();
-    //let module_name = &thin_module.shared.module_names[thin_module.idx];
     /*let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, module_name.to_str().unwrap());
     let tm = (cgcx.tm_factory)(tm_factory_config).map_err(|e| write::llvm_err(&dcx, e))?;*/
diff --git a/compiler/rustc_codegen_gcc/src/back/write.rs b/compiler/rustc_codegen_gcc/src/back/write.rs
index eae0f2aa00f6a..b6223c5be370a 100644
--- a/compiler/rustc_codegen_gcc/src/back/write.rs
+++ b/compiler/rustc_codegen_gcc/src/back/write.rs
@@ -2,8 +2,11 @@ use std::{env, fs};
 use gccjit::{Context, OutputKind};
 use rustc_codegen_ssa::back::link::ensure_removed;
-use rustc_codegen_ssa::back::write::{BitcodeSection, CodegenContext, EmitObj, ModuleConfig};
+use rustc_codegen_ssa::back::write::{
+    BitcodeSection, CodegenContext, EmitObj, ModuleConfig, SharedEmitter,
+};
 use rustc_codegen_ssa::{CompiledModule, ModuleCodegen};
+use rustc_errors::DiagCtxt;
 use rustc_fs_util::link_or_copy;
 use rustc_log::tracing::debug;
 use rustc_session::config::OutputType;
@@ -15,10 +18,11 @@ use crate::{GccCodegenBackend, GccContext, LtoMode};
 pub(crate) fn codegen(
     cgcx: &CodegenContext,
+    shared_emitter: &SharedEmitter,
     module: ModuleCodegen,
     config: &ModuleConfig,
 ) -> CompiledModule {
-    let dcx = cgcx.create_dcx();
+    let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
     let dcx = dcx.handle();
     let _timer = cgcx.prof.generic_activity_with_arg("GCC_module_codegen", &*module.name);
diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs
index 409b7886740a4..5a2ec0a2fc68c 100644
--- a/compiler/rustc_codegen_gcc/src/lib.rs
+++ b/compiler/rustc_codegen_gcc/src/lib.rs
@@ -82,7 +82,7 @@ use gccjit::{TargetInfo, Version};
 use rustc_ast::expand::allocator::AllocatorMethod;
 use rustc_codegen_ssa::back::lto::{SerializedModule, ThinModule};
 use rustc_codegen_ssa::back::write::{
-    CodegenContext, FatLtoInput, ModuleConfig, TargetMachineFactoryFn,
+    CodegenContext, FatLtoInput, ModuleConfig, SharedEmitter, TargetMachineFactoryFn,
 };
 use rustc_codegen_ssa::base::codegen_crate;
 use rustc_codegen_ssa::target_features::cfg_target_feature;
@@ -371,23 +371,25 @@ impl WriteBackendMethods for GccCodegenBackend {
     fn run_and_optimize_fat_lto(
         cgcx: &CodegenContext,
+        shared_emitter: &SharedEmitter,
         // FIXME(bjorn3): Limit LTO exports to these symbols
         _exported_symbols_for_lto: &[String],
         each_linked_rlib_for_lto: &[PathBuf],
         modules: Vec>,
     ) -> ModuleCodegen {
-        back::lto::run_fat(cgcx, each_linked_rlib_for_lto, modules)
+        back::lto::run_fat(cgcx, shared_emitter, each_linked_rlib_for_lto, modules)
     }
     fn run_thin_lto(
         cgcx: &CodegenContext,
+        dcx: DiagCtxtHandle<'_>,
         // FIXME(bjorn3): Limit LTO exports to these symbols
         _exported_symbols_for_lto: &[String],
         each_linked_rlib_for_lto: &[PathBuf],
         modules: Vec<(String, Self::ThinBuffer)>,
         cached_modules: Vec<(SerializedModule, WorkProduct)>,
     ) -> (Vec>, Vec) {
-        back::lto::run_thin(cgcx, each_linked_rlib_for_lto, modules, cached_modules)
+        back::lto::run_thin(cgcx, dcx, each_linked_rlib_for_lto, modules, cached_modules)
     }
     fn print_pass_timings(&self) {
@@ -400,7 +402,7 @@ impl WriteBackendMethods for GccCodegenBackend {
     fn optimize(
         _cgcx: &CodegenContext,
-        _dcx: DiagCtxtHandle<'_>,
+        _shared_emitter: &SharedEmitter,
         module: &mut ModuleCodegen,
         config: &ModuleConfig,
     ) {
@@ -409,6 +411,7 @@ impl WriteBackendMethods for GccCodegenBackend {
     fn optimize_thin(
         cgcx: &CodegenContext,
+        _shared_emitter: &SharedEmitter,
         thin: ThinModule,
     ) -> ModuleCodegen {
         back::lto::optimize_thin_module(thin, cgcx)
@@ -416,10 +419,11 @@ impl WriteBackendMethods for GccCodegenBackend {
     fn codegen(
         cgcx: &CodegenContext,
+        shared_emitter: &SharedEmitter,
         module: ModuleCodegen,
         config: &ModuleConfig,
     ) -> CompiledModule {
-        back::write::codegen(cgcx, module, config)
+        back::write::codegen(cgcx, shared_emitter, module, config)
     }
     fn prepare_thin(module: ModuleCodegen) -> (String, Self::ThinBuffer) {
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index b820b992105fd..3210afc063a9c 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -9,12 +9,12 @@ use std::{io, iter, slice};
 use object::read::archive::ArchiveFile;
 use object::{Object, ObjectSection};
 use rustc_codegen_ssa::back::lto::{SerializedModule, ThinModule, ThinShared};
-use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput};
+use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput, SharedEmitter};
 use rustc_codegen_ssa::traits::*;
 use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, looks_like_rust_object_file};
 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::memmap::Mmap;
-use rustc_errors::DiagCtxtHandle;
+use rustc_errors::{DiagCtxt, DiagCtxtHandle};
 use rustc_hir::attrs::SanitizerSet;
 use rustc_middle::bug;
 use rustc_middle::dep_graph::WorkProduct;
@@ -150,17 +150,18 @@ fn get_bitcode_slice_from_object_data<'a>(
 /// for further optimization.
 pub(crate) fn run_fat(
     cgcx: &CodegenContext,
+    shared_emitter: &SharedEmitter,
     exported_symbols_for_lto: &[String],
     each_linked_rlib_for_lto: &[PathBuf],
     modules: Vec>,
 ) -> ModuleCodegen {
-    let dcx = cgcx.create_dcx();
+    let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
     let dcx = dcx.handle();
     let (symbols_below_threshold, upstream_modules) =
         prepare_lto(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, dcx);
     let symbols_below_threshold =
         symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::>();
-    fat_lto(cgcx, dcx, modules, upstream_modules, &symbols_below_threshold)
+    fat_lto(cgcx, dcx, shared_emitter, modules, upstream_modules, &symbols_below_threshold)
 }
 /// Performs thin LTO by performing necessary global analysis and returning two
@@ -168,18 +169,17 @@ pub(crate) fn run_fat(
 /// can simply be copied over from the incr. comp. cache.
 pub(crate) fn run_thin(
     cgcx: &CodegenContext,
+    dcx: DiagCtxtHandle<'_>,
     exported_symbols_for_lto: &[String],
     each_linked_rlib_for_lto: &[PathBuf],
     modules: Vec<(String, ThinBuffer)>,
     cached_modules: Vec<(SerializedModule, WorkProduct)>,
 ) -> (Vec>, Vec) {
-    let dcx = cgcx.create_dcx();
-    let dcx = dcx.handle();
     let (symbols_below_threshold, upstream_modules) =
         prepare_lto(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, dcx);
     let symbols_below_threshold =
         symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::>();
-    if cgcx.opts.cg.linker_plugin_lto.enabled() {
+    if cgcx.use_linker_plugin_lto {
         unreachable!(
             "We should never reach this case if the LTO step \
             is deferred to the linker"
@@ -197,6 +197,7 @@ pub(crate) fn prepare_thin(module: ModuleCodegen) -> (String, ThinBu
 fn fat_lto(
     cgcx: &CodegenContext,
     dcx: DiagCtxtHandle<'_>,
+    shared_emitter: &SharedEmitter,
     modules: Vec>,
     mut serialized_modules: Vec<(SerializedModule, CString)>,
     symbols_below_threshold: &[*const libc::c_char],
@@ -265,8 +266,13 @@ fn fat_lto(
     // The linking steps below may produce errors and diagnostics within LLVM
     // which we'd like to handle and print, so set up our diagnostic handlers
     // (which get unregistered when they go out of scope below).
-    let _handler =
-        DiagnosticHandlers::new(cgcx, dcx, llcx, &module, CodegenDiagnosticsStage::LTO);
+    let _handler = DiagnosticHandlers::new(
+        cgcx,
+        shared_emitter,
+        llcx,
+        &module,
+        CodegenDiagnosticsStage::LTO,
+    );
     // For all other modules we codegened we'll need to link them into our own
     // bitcode. All modules were codegened in their own LLVM context, however,
@@ -730,10 +736,11 @@ impl Drop for ThinBuffer {
 }
 pub(crate) fn optimize_thin_module(
-    thin_module: ThinModule,
     cgcx: &CodegenContext,
+    shared_emitter: &SharedEmitter,
+    thin_module: ThinModule,
 ) -> ModuleCodegen {
-    let dcx = cgcx.create_dcx();
+    let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
     let dcx = dcx.handle();
     let module_name = &thin_module.shared.module_names[thin_module.idx];
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index fde7dd6ef7a85..a5b6ea08a66d3 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -9,15 +9,15 @@ use libc::{c_char, c_int, c_void, size_t};
 use rustc_codegen_ssa::back::link::ensure_removed;
 use rustc_codegen_ssa::back::versioned_llvm_target;
 use rustc_codegen_ssa::back::write::{
-    BitcodeSection, CodegenContext, EmitObj, ModuleConfig, TargetMachineFactoryConfig,
-    TargetMachineFactoryFn,
+    BitcodeSection, CodegenContext, EmitObj, InlineAsmError, ModuleConfig, SharedEmitter,
+    TargetMachineFactoryConfig, TargetMachineFactoryFn,
 };
 use rustc_codegen_ssa::base::wants_wasm_eh;
 use rustc_codegen_ssa::traits::*;
 use rustc_codegen_ssa::{CompiledModule, ModuleCodegen, ModuleKind};
 use rustc_data_structures::profiling::SelfProfilerRef;
 use rustc_data_structures::small_c_str::SmallCStr;
-use rustc_errors::{DiagCtxtHandle, Level};
+use rustc_errors::{DiagCtxt, DiagCtxtHandle, Level};
 use rustc_fs_util::{link_or_copy, path_to_c_string};
 use rustc_middle::ty::TyCtxt;
 use rustc_session::Session;
@@ -356,7 +356,7 @@ pub(crate) enum CodegenDiagnosticsStage {
 }
 pub(crate) struct DiagnosticHandlers<'a> {
-    data: *mut (&'a CodegenContext, DiagCtxtHandle<'a>),
+    data: *mut (&'a CodegenContext, &'a SharedEmitter),
     llcx: &'a llvm::Context,
     old_handler: Option<&'a llvm::DiagnosticHandler>,
 }
@@ -364,7 +364,7 @@ pub(crate) struct DiagnosticHandlers<'a> {
 impl<'a> DiagnosticHandlers<'a> {
     pub(crate) fn new(
         cgcx: &'a CodegenContext,
-        dcx: DiagCtxtHandle<'a>,
+        shared_emitter: &'a SharedEmitter,
         llcx: &'a llvm::Context,
         module: &ModuleCodegen,
         stage: CodegenDiagnosticsStage,
@@ -398,8 +398,8 @@ impl<'a> DiagnosticHandlers<'a> {
             })
             .and_then(|dir| dir.to_str().and_then(|p| CString::new(p).ok()));
-        let pgo_available = cgcx.opts.cg.profile_use.is_some();
-        let data = Box::into_raw(Box::new((cgcx, dcx)));
+        let pgo_available = cgcx.module_config.pgo_use.is_some();
+        let data = Box::into_raw(Box::new((cgcx, shared_emitter)));
         unsafe {
             let old_handler = llvm::LLVMRustContextGetDiagnosticHandler(llcx);
             llvm::LLVMRustContextConfigureDiagnosticHandler(
@@ -434,7 +434,7 @@ fn report_inline_asm(
     level: llvm::DiagnosticLevel,
     cookie: u64,
     source: Option<(String, Vec)>,
-) {
+) -> InlineAsmError {
     // In LTO build we may get srcloc values from other crates which are invalid
     // since they use a different source map. To be safe we just suppress these
     // in LTO builds.
@@ -454,19 +454,29 @@ fn report_inline_asm(
         llvm::DiagnosticLevel::Note | llvm::DiagnosticLevel::Remark => Level::Note,
     };
     let msg = msg.trim_prefix("error: ").to_string();
-    cgcx.diag_emitter.inline_asm_error(span, msg, level, source);
+    InlineAsmError { span, msg, level, source }
 }
 unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void) {
     if user.is_null() {
         return;
     }
-    let (cgcx, dcx) = unsafe { *(user as *const (&CodegenContext, DiagCtxtHandle<'_>)) };
+    let (cgcx, shared_emitter) = unsafe { *(user as *const (&CodegenContext, &SharedEmitter)) };
+
+    let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
+    let dcx = dcx.handle();
     match unsafe { llvm::diagnostic::Diagnostic::unpack(info) } {
         llvm::diagnostic::InlineAsm(inline) => {
-            report_inline_asm(cgcx, inline.message, inline.level, inline.cookie, inline.source);
+            // FIXME use dcx
+            shared_emitter.inline_asm_error(report_inline_asm(
+                cgcx,
+                inline.message,
+                inline.level,
+                inline.cookie,
+                inline.source,
+            ));
         }
         llvm::diagnostic::Optimization(opt) => {
@@ -732,7 +742,7 @@ pub(crate) unsafe fn llvm_optimize(
         &*module.module_llvm.tm.raw(),
         to_pass_builder_opt_level(opt_level),
         opt_stage,
-        cgcx.opts.cg.linker_plugin_lto.enabled(),
+        cgcx.use_linker_plugin_lto,
         config.no_prepopulate_passes,
         config.verify_llvm_ir,
         config.lint_llvm_ir,
@@ -771,14 +781,18 @@ pub(crate) unsafe fn llvm_optimize(
 // Unsafe due to LLVM calls.
 pub(crate) fn optimize(
     cgcx: &CodegenContext,
-    dcx: DiagCtxtHandle<'_>,
+    shared_emitter: &SharedEmitter,
     module: &mut ModuleCodegen,
     config: &ModuleConfig,
 ) {
     let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_optimize", &*module.name);
+    let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
+    let dcx = dcx.handle();
+
     let llcx = &*module.module_llvm.llcx;
-    let _handlers = DiagnosticHandlers::new(cgcx, dcx, llcx, module, CodegenDiagnosticsStage::Opt);
+    let _handlers =
+        DiagnosticHandlers::new(cgcx, shared_emitter, llcx, module, CodegenDiagnosticsStage::Opt);
     if config.emit_no_opt_bc {
         let out = cgcx.output_filenames.temp_path_ext_for_cgu(
@@ -795,7 +809,7 @@ pub(crate) fn optimize(
     let opt_stage = match cgcx.lto {
         Lto::Fat => llvm::OptStage::PreLinkFatLTO,
         Lto::Thin | Lto::ThinLocal => llvm::OptStage::PreLinkThinLTO,
-        _ if cgcx.opts.cg.linker_plugin_lto.enabled() => llvm::OptStage::PreLinkThinLTO,
+        _ if cgcx.use_linker_plugin_lto => llvm::OptStage::PreLinkThinLTO,
         _ => llvm::OptStage::PreLinkNoLTO,
     };
@@ -859,19 +873,26 @@ pub(crate) fn optimize(
 pub(crate) fn codegen(
     cgcx: &CodegenContext,
+    shared_emitter: &SharedEmitter,
     module: ModuleCodegen,
     config: &ModuleConfig,
 ) -> CompiledModule {
-    let dcx = cgcx.create_dcx();
+    let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &*module.name);
+
+    let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
     let dcx = dcx.handle();
-    let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &*module.name);
     {
         let llmod = module.module_llvm.llmod();
         let llcx = &*module.module_llvm.llcx;
         let tm = &*module.module_llvm.tm;
-        let _handlers =
-            DiagnosticHandlers::new(cgcx, dcx, llcx, &module, CodegenDiagnosticsStage::Codegen);
+        let _handlers = DiagnosticHandlers::new(
+            cgcx,
+            shared_emitter,
+            llcx,
+            &module,
+            CodegenDiagnosticsStage::Codegen,
+        );
         if cgcx.msvc_imps_needed {
             create_msvc_imps(cgcx, llcx, llmod);
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index 1b65a133d58c1..3901662442e94 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -30,12 +30,13 @@ use llvm_util::target_config;
 use rustc_ast::expand::allocator::AllocatorMethod;
 use rustc_codegen_ssa::back::lto::{SerializedModule, ThinModule};
 use rustc_codegen_ssa::back::write::{
-    CodegenContext, FatLtoInput, ModuleConfig, TargetMachineFactoryConfig, TargetMachineFactoryFn,
+    CodegenContext, FatLtoInput, ModuleConfig, SharedEmitter, TargetMachineFactoryConfig,
+    TargetMachineFactoryFn,
 };
 use rustc_codegen_ssa::traits::*;
 use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen, TargetConfig};
 use rustc_data_structures::fx::FxIndexMap;
-use rustc_errors::DiagCtxtHandle;
+use rustc_errors::{DiagCtxt, DiagCtxtHandle};
 use rustc_metadata::EncodedMetadata;
 use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
 use rustc_middle::ty::TyCtxt;
@@ -166,14 +167,20 @@ impl WriteBackendMethods for LlvmCodegenBackend {
     }
     fn run_and_optimize_fat_lto(
         cgcx: &CodegenContext,
+        shared_emitter: &SharedEmitter,
         exported_symbols_for_lto: &[String],
         each_linked_rlib_for_lto: &[PathBuf],
         modules: Vec>,
     ) -> ModuleCodegen {
-        let mut module =
-            back::lto::run_fat(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, modules);
+        let mut module = back::lto::run_fat(
+            cgcx,
+            shared_emitter,
+            exported_symbols_for_lto,
+            each_linked_rlib_for_lto,
+            modules,
+        );
-        let dcx = cgcx.create_dcx();
+        let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
         let dcx = dcx.handle();
         back::lto::run_pass_manager(cgcx, dcx, &mut module, false);
@@ -181,6 +188,7 @@ impl WriteBackendMethods for LlvmCodegenBackend {
     }
     fn run_thin_lto(
         cgcx: &CodegenContext,
+        dcx: DiagCtxtHandle<'_>,
         exported_symbols_for_lto: &[String],
         each_linked_rlib_for_lto: &[PathBuf],
         modules: Vec<(String, Self::ThinBuffer)>,
@@ -188,6 +196,7 @@ impl WriteBackendMethods for LlvmCodegenBackend {
     ) -> (Vec>, Vec) {
         back::lto::run_thin(
             cgcx,
+            dcx,
             exported_symbols_for_lto,
             each_linked_rlib_for_lto,
             modules,
@@ -196,24 +205,26 @@ impl WriteBackendMethods for LlvmCodegenBackend {
     }
     fn optimize(
         cgcx: &CodegenContext,
-        dcx: DiagCtxtHandle<'_>,
+        shared_emitter: &SharedEmitter,
         module: &mut ModuleCodegen,
         config: &ModuleConfig,
     ) {
-        back::write::optimize(cgcx, dcx, module, config)
+        back::write::optimize(cgcx, shared_emitter, module, config)
     }
     fn optimize_thin(
         cgcx: &CodegenContext,
+        shared_emitter: &SharedEmitter,
         thin: ThinModule,
     ) -> ModuleCodegen {
-        back::lto::optimize_thin_module(thin, cgcx)
+        back::lto::optimize_thin_module(cgcx, shared_emitter, thin)
     }
     fn codegen(
         cgcx: &CodegenContext,
+        shared_emitter: &SharedEmitter,
         module: ModuleCodegen,
         config: &ModuleConfig,
     ) -> CompiledModule {
-        back::write::codegen(cgcx, module, config)
+        back::write::codegen(cgcx, shared_emitter, module, config)
     }
     fn prepare_thin(module: ModuleCodegen) -> (String, Self::ThinBuffer) {
         back::lto::prepare_thin(module)
diff --git a/compiler/rustc_codegen_ssa/src/back/lto.rs b/compiler/rustc_codegen_ssa/src/back/lto.rs
index e6df6a2469f37..ef4c193c4c2a1 100644
--- a/compiler/rustc_codegen_ssa/src/back/lto.rs
+++ b/compiler/rustc_codegen_ssa/src/back/lto.rs
@@ -2,6 +2,7 @@ use std::ffi::CString;
 use std::sync::Arc;
 use rustc_data_structures::memmap::Mmap;
+use rustc_errors::DiagCtxtHandle;
 use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
 use rustc_middle::middle::exported_symbols::{ExportedSymbol, SymbolExportInfo, SymbolExportLevel};
 use rustc_middle::ty::TyCtxt;
@@ -124,28 +125,29 @@ pub(super) fn exported_symbols_for_lto(
     symbols_below_threshold
 }
-pub(super) fn check_lto_allowed(cgcx: &CodegenContext) {
+pub(super) fn check_lto_allowed(
+    cgcx: &CodegenContext,
+    dcx: DiagCtxtHandle<'_>,
+) {
     if cgcx.lto == Lto::ThinLocal {
         // Crate local LTO is always allowed
         return;
     }
-    let dcx = cgcx.create_dcx();
-
     // Make sure we actually can run LTO
     for crate_type in cgcx.crate_types.iter() {
         if !crate_type_allows_lto(*crate_type) {
             dcx.handle().emit_fatal(LtoDisallowed);
         } else if *crate_type == CrateType::Dylib {
-            if !cgcx.opts.unstable_opts.dylib_lto {
+            if !cgcx.dylib_lto {
                 dcx.handle().emit_fatal(LtoDylib);
             }
-        } else if *crate_type == CrateType::ProcMacro && !cgcx.opts.unstable_opts.dylib_lto {
+        } else if *crate_type == CrateType::ProcMacro && !cgcx.dylib_lto {
             dcx.handle().emit_fatal(LtoProcMacro);
         }
     }
-    if cgcx.opts.cg.prefer_dynamic && !cgcx.opts.unstable_opts.dylib_lto {
+    if cgcx.prefer_dynamic && !cgcx.dylib_lto {
         dcx.handle().emit_fatal(DynamicLinkingWithLTO);
     }
 }
diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs
index fc1edec8de843..ddcd8decf3033 100644
--- a/compiler/rustc_codegen_ssa/src/back/write.rs
+++ b/compiler/rustc_codegen_ssa/src/back/write.rs
@@ -15,8 +15,8 @@ use rustc_data_structures::profiling::{SelfProfilerRef, VerboseTimingGuard};
 use rustc_errors::emitter::Emitter;
 use rustc_errors::translation::Translator;
 use rustc_errors::{
-    Diag, DiagArgMap, DiagCtxt, DiagMessage, ErrCode, FatalError, FatalErrorMarker, Level,
-    MultiSpan, Style, Suggestions,
+    Diag, DiagArgMap, DiagCtxt, DiagCtxtHandle, DiagMessage, ErrCode, FatalError, FatalErrorMarker,
+    Level, MultiSpan, Style, Suggestions, catch_fatal_errors,
 };
 use rustc_fs_util::link_or_copy;
 use rustc_incremental::{
@@ -326,15 +326,16 @@ pub struct CodegenContext {
     // Resources needed when running LTO
     pub prof: SelfProfilerRef,
     pub lto: Lto,
+    pub use_linker_plugin_lto: bool,
+    pub dylib_lto: bool,
+    pub prefer_dynamic: bool,
     pub save_temps: bool,
     pub fewer_names: bool,
     pub time_trace: bool,
-    pub opts: Arc,
     pub crate_types: Vec,
     pub output_filenames: Arc,
     pub invocation_temp: Option,
     pub module_config: Arc,
-    pub allocator_config: Arc,
     pub tm_factory: TargetMachineFactoryFn,
     pub msvc_imps_needed: bool,
     pub is_pe_coff: bool,
@@ -347,8 +348,6 @@ pub struct CodegenContext {
     pub split_dwarf_kind: rustc_session::config::SplitDwarfKind,
     pub pointer_size: Size,
-    /// Emitter to use for diagnostics produced during codegen.
-    pub diag_emitter: SharedEmitter,
     /// LLVM optimizations for which we want to print remarks.
     pub remark: Passes,
     /// Directory into which should the LLVM optimization remarks be written.
@@ -363,14 +362,9 @@ pub struct CodegenContext {
     pub parallel: bool,
 }
-impl CodegenContext {
-    pub fn create_dcx(&self) -> DiagCtxt {
-        DiagCtxt::new(Box::new(self.diag_emitter.clone()))
-    }
-}
-
 fn generate_thin_lto_work(
     cgcx: &CodegenContext,
+    dcx: DiagCtxtHandle<'_>,
     exported_symbols_for_lto: &[String],
     each_linked_rlib_for_lto: &[PathBuf],
     needs_thin_lto: Vec<(String, B::ThinBuffer)>,
@@ -380,6 +374,7 @@ fn generate_thin_lto_work(
     let (lto_modules, copy_jobs) = B::run_thin_lto(
         cgcx,
+        dcx,
         exported_symbols_for_lto,
         each_linked_rlib_for_lto,
         needs_thin_lto,
@@ -408,6 +403,29 @@ struct CompiledModules {
     allocator_module: Option,
 }
+enum MaybeLtoModules {
+    NoLto {
+        modules: Vec,
+        allocator_module: Option,
+    },
+    FatLto {
+        cgcx: CodegenContext,
+        exported_symbols_for_lto: Arc>,
+        each_linked_rlib_file_for_lto: Vec,
+        needs_fat_lto: Vec>,
+        lto_import_only_modules:
+            Vec<(SerializedModule<::ModuleBuffer>, WorkProduct)>,
+    },
+    ThinLto {
+        cgcx: CodegenContext,
+        exported_symbols_for_lto: Arc>,
+        each_linked_rlib_file_for_lto: Vec,
+        needs_thin_lto: Vec<(String, ::ThinBuffer)>,
+        lto_import_only_modules:
+            Vec<(SerializedModule<::ModuleBuffer>, WorkProduct)>,
+    },
+}
+
 fn need_bitcode_in_object(tcx: TyCtxt<'_>) -> bool {
     let sess = tcx.sess;
     sess.opts.cg.embed_bitcode
@@ -797,20 +815,12 @@ pub(crate) enum ComputedLtoType {
 pub(crate) fn compute_per_cgu_lto_type(
     sess_lto: &Lto,
-    opts: &config::Options,
+    linker_does_lto: bool,
     sess_crate_types: &[CrateType],
-    module_kind: ModuleKind,
 ) -> ComputedLtoType {
     // If the linker does LTO, we don't have to do it. Note that we
     // keep doing full LTO, if it is requested, as not to break the
     // assumption that the output will be a single module.
-    let linker_does_lto = opts.cg.linker_plugin_lto.enabled();
-
-    // When we're automatically doing ThinLTO for multi-codegen-unit
-    // builds we don't actually want to LTO the allocator module if
-    // it shows up. This is due to various linker shenanigans that
-    // we'll encounter later.
-    let is_allocator = module_kind == ModuleKind::Allocator;
     // We ignore a request for full crate graph LTO if the crate type
     // is only an rlib, as there is no full crate graph to process,
@@ -823,7 +833,7 @@ pub(crate) fn compute_per_cgu_lto_type(
     let is_rlib = matches!(sess_crate_types, [CrateType::Rlib]);
     match sess_lto {
-        Lto::ThinLocal if !linker_does_lto && !is_allocator => ComputedLtoType::Thin,
+        Lto::ThinLocal if !linker_does_lto => ComputedLtoType::Thin,
         Lto::Thin if !linker_does_lto && !is_rlib => ComputedLtoType::Thin,
         Lto::Fat if !is_rlib => ComputedLtoType::Fat,
         _ => ComputedLtoType::No,
@@ -832,30 +842,24 @@ pub(crate) fn compute_per_cgu_lto_type(
 fn execute_optimize_work_item(
     cgcx: &CodegenContext,
+    shared_emitter: SharedEmitter,
     mut module: ModuleCodegen,
 ) -> WorkItemResult {
     let _timer = cgcx.prof.generic_activity_with_arg("codegen_module_optimize", &*module.name);
-    let dcx = cgcx.create_dcx();
-    let dcx = dcx.handle();
-
-    let module_config = match module.kind {
-        ModuleKind::Regular => &cgcx.module_config,
-        ModuleKind::Allocator => &cgcx.allocator_config,
-    };
-
-    B::optimize(cgcx, dcx, &mut module, module_config);
+    B::optimize(cgcx, &shared_emitter, &mut module, &cgcx.module_config);
     // After we've done the initial round of optimizations we need to
     // decide whether to synchronously codegen this module or ship it
     // back to the coordinator thread for further LTO processing (which
     // has to wait for all the initial modules to be optimized).
-    let lto_type = compute_per_cgu_lto_type(&cgcx.lto, &cgcx.opts, &cgcx.crate_types, module.kind);
+    let lto_type =
+        compute_per_cgu_lto_type(&cgcx.lto, cgcx.use_linker_plugin_lto, &cgcx.crate_types);
     // If we're doing some form of incremental LTO then we need to be sure to
     // save our module to disk first.
-    let bitcode = if module_config.emit_pre_lto_bc {
+    let bitcode = if cgcx.module_config.emit_pre_lto_bc {
         let filename = pre_lto_bitcode_filename(&module.name);
         cgcx.incr_comp_session_dir.as_ref().map(|path| path.join(&filename))
     } else {
@@ -864,7 +868,7 @@ fn execute_optimize_work_item(
     match lto_type {
         ComputedLtoType::No => {
-            let module = B::codegen(cgcx, module, module_config);
+            let module = B::codegen(cgcx, &shared_emitter, module, &cgcx.module_config);
             WorkItemResult::Finished(module)
         }
         ComputedLtoType::Thin => {
@@ -894,12 +898,16 @@ fn execute_optimize_work_item(
 fn execute_copy_from_cache_work_item(
     cgcx: &CodegenContext,
+    shared_emitter: SharedEmitter,
     module: CachedModuleCodegen,
 ) -> CompiledModule {
     let _timer = cgcx
         .prof
         .generic_activity_with_arg("codegen_copy_artifacts_from_incr_cache", &*module.name);
+    let dcx = DiagCtxt::new(Box::new(shared_emitter));
+    let dcx = dcx.handle();
+
     let incr_comp_session_dir = cgcx.incr_comp_session_dir.as_ref().unwrap();
     let mut links_from_incr_cache = Vec::new();
@@ -918,11 +926,7 @@ fn execute_copy_from_cache_work_item(
                 Some(output_path)
             }
             Err(error) => {
-                cgcx.create_dcx().handle().emit_err(errors::CopyPathBuf {
-                    source_file,
-                    output_path,
-                    error,
-                });
+                dcx.emit_err(errors::CopyPathBuf { source_file, output_path, error });
                 None
             }
         }
@@ -965,7 +969,7 @@ fn execute_copy_from_cache_work_item(
     let bytecode = load_from_incr_cache(module_config.emit_bc, OutputType::Bitcode);
     let object = load_from_incr_cache(should_emit_obj, OutputType::Object);
     if should_emit_obj && object.is_none() {
-        cgcx.create_dcx().handle().emit_fatal(errors::NoSavedObjectFile { cgu_name: &module.name })
+        dcx.emit_fatal(errors::NoSavedObjectFile { cgu_name: &module.name })
     }
     CompiledModule {
@@ -982,6 +986,7 @@ fn execute_copy_from_cache_work_item(
 fn do_fat_lto(
     cgcx: &CodegenContext,
+    shared_emitter: SharedEmitter,
     exported_symbols_for_lto: &[String],
     each_linked_rlib_for_lto: &[PathBuf],
     mut needs_fat_lto: Vec>,
@@ -989,7 +994,10 @@ fn do_fat_lto(
 ) -> CompiledModule {
     let _timer = cgcx.prof.verbose_generic_activity("LLVM_fatlto");
-    check_lto_allowed(&cgcx);
+    let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
+    let dcx = dcx.handle();
+
+    check_lto_allowed(&cgcx, dcx);
     for (module, wp) in import_only_modules {
         needs_fat_lto.push(FatLtoInput::Serialized { name: wp.cgu_name, buffer: module })
@@ -997,15 +1005,17 @@ fn do_fat_lto(
     let module = B::run_and_optimize_fat_lto(
         cgcx,
+        &shared_emitter,
         exported_symbols_for_lto,
         each_linked_rlib_for_lto,
         needs_fat_lto,
     );
-    B::codegen(cgcx, module, &cgcx.module_config)
+    B::codegen(cgcx, &shared_emitter, module, &cgcx.module_config)
 }
 fn do_thin_lto<'a, B: ExtraBackendMethods>(
     cgcx: &'a CodegenContext,
+    shared_emitter: SharedEmitter,
     exported_symbols_for_lto: Arc>,
     each_linked_rlib_for_lto: Vec,
     needs_thin_lto: Vec<(String, ::ThinBuffer)>,
@@ -1016,7 +1026,10 @@ fn do_thin_lto<'a, B: ExtraBackendMethods>(
 ) -> Vec {
     let _timer = cgcx.prof.verbose_generic_activity("LLVM_thinlto");
-    check_lto_allowed(&cgcx);
+    let dcx = DiagCtxt::new(Box::new(shared_emitter.clone()));
+    let dcx = dcx.handle();
+
+    check_lto_allowed(&cgcx, dcx);
     let (coordinator_send, coordinator_receive) = channel();
@@ -1041,6 +1054,7 @@ fn do_thin_lto<'a, B: ExtraBackendMethods>(
     // we don't worry about tokens.
     for (work, cost) in generate_thin_lto_work(
         cgcx,
+        dcx,
        &exported_symbols_for_lto,
        &each_linked_rlib_for_lto,
        needs_thin_lto,
@@ -1082,7 +1096,7 @@ fn do_thin_lto<'a, B: ExtraBackendMethods>(
         while used_token_count < tokens.len() + 1
             && let Some((item, _)) = work_items.pop()
         {
-            spawn_thin_lto_work(&cgcx, coordinator_send.clone(), item);
+            spawn_thin_lto_work(&cgcx, shared_emitter.clone(), coordinator_send.clone(), item);
             used_token_count += 1;
         }
     } else {
@@ -1106,7 +1120,7 @@ fn do_thin_lto<'a, B: ExtraBackendMethods>(
             }
             Err(e) => {
                 let msg = &format!("failed to acquire jobserver token: {e}");
-                cgcx.diag_emitter.fatal(msg);
+                shared_emitter.fatal(msg);
                 codegen_aborted = Some(FatalError);
             }
         },
@@ -1144,12 +1158,13 @@ fn do_thin_lto<'a, B: ExtraBackendMethods>(
 fn execute_thin_lto_work_item(
     cgcx: &CodegenContext,
+    shared_emitter: SharedEmitter,
     module: lto::ThinModule,
 ) -> CompiledModule {
     let _timer = cgcx.prof.generic_activity_with_arg("codegen_module_perform_lto", module.name());
-    let module = B::optimize_thin(cgcx, module);
-    B::codegen(cgcx, module, &cgcx.module_config)
+    let module = B::optimize_thin(cgcx, &shared_emitter, module);
+    B::codegen(cgcx, &shared_emitter, module, &cgcx.module_config)
 }
 /// Messages sent to the coordinator.
@@ -1208,6 +1223,7 @@ pub struct CguMessage;
 // - `is_lint`: lints aren't relevant during codegen.
 // - `emitted_at`: not used for codegen diagnostics.
 struct Diagnostic {
+    span: Vec,
     level: Level,
     messages: Vec<(DiagMessage, Style)>,
     code: Option,
@@ -1218,7 +1234,7 @@ struct Diagnostic {
 // A cut-down version of `rustc_errors::Subdiag` that impls `Send`. It's
 // missing the following fields from `rustc_errors::Subdiag`.
 // - `span`: it doesn't impl `Send`.
-pub(crate) struct Subdiagnostic {
+struct Subdiagnostic {
     level: Level,
     messages: Vec<(DiagMessage, Style)>,
 }
@@ -1244,9 +1260,9 @@ fn start_executing_work(
     coordinator_receive: Receiver>,
     regular_config: Arc,
     allocator_config: Arc,
-    allocator_module: Option>,
+    mut allocator_module: Option>,
     coordinator_send: Sender>,
-) -> thread::JoinHandle> {
+) -> thread::JoinHandle, ()>> {
     let sess = tcx.sess;
     let mut each_linked_rlib_for_lto = Vec::new();
@@ -1291,18 +1307,18 @@ fn start_executing_work(
     let cgcx = CodegenContext:: {
         crate_types: tcx.crate_types().to_vec(),
         lto: sess.lto(),
+        use_linker_plugin_lto: sess.opts.cg.linker_plugin_lto.enabled(),
+        dylib_lto: sess.opts.unstable_opts.dylib_lto,
+        prefer_dynamic: sess.opts.cg.prefer_dynamic,
         fewer_names: sess.fewer_names(),
         save_temps: sess.opts.cg.save_temps,
         time_trace: sess.opts.unstable_opts.llvm_time_trace,
-        opts: Arc::new(sess.opts.clone()),
         prof: sess.prof.clone(),
         remark: sess.opts.cg.remark.clone(),
         remark_dir,
         incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()),
-        diag_emitter: shared_emitter.clone(),
         output_filenames: Arc::clone(tcx.output_filenames(())),
         module_config: regular_config,
-        allocator_config,
         tm_factory: backend.target_machine_factory(tcx.sess, ol, backend_features),
         msvc_imps_needed: msvc_imps_needed(tcx),
         is_pe_coff: tcx.sess.target.is_like_windows,
@@ -1496,16 +1512,9 @@ fn start_executing_work(
         let mut llvm_start_time: Option> = None;
-        let compiled_allocator_module = allocator_module.and_then(|allocator_module| {
-            match execute_optimize_work_item(&cgcx, allocator_module) {
-                WorkItemResult::Finished(compiled_module) => return Some(compiled_module),
-                WorkItemResult::NeedsFatLto(fat_lto_input) => needs_fat_lto.push(fat_lto_input),
-                WorkItemResult::NeedsThinLto(name, thin_buffer) => {
-                    needs_thin_lto.push((name, thin_buffer))
-                }
-            }
-            None
-        });
+        if let Some(allocator_module) = &mut allocator_module {
+            B::optimize(&cgcx, &shared_emitter, allocator_module, &allocator_config);
+        }
         // Run the message loop while there's still anything that needs message
         // processing. Note that as soon as codegen is aborted we simply want to
@@ -1542,7 +1551,13 @@ fn start_executing_work(
                         let (item, _) =
                            work_items.pop().expect("queue empty - queue_full_enough() broken?");
                         main_thread_state = MainThreadState::Lending;
-                        spawn_work(&cgcx, coordinator_send.clone(), &mut llvm_start_time, item);
+                        spawn_work(
+                            &cgcx,
+                            shared_emitter.clone(),
+                            coordinator_send.clone(),
+                            &mut llvm_start_time,
+                            item,
+                        );
                     }
                 }
             } else if codegen_state == Completed {
@@ -1560,7 +1575,13 @@ fn start_executing_work(
                     MainThreadState::Idle => {
                         if let Some((item, _)) = work_items.pop() {
                             main_thread_state = MainThreadState::Lending;
-                            spawn_work(&cgcx, coordinator_send.clone(), &mut llvm_start_time, item);
+                            spawn_work(
+                                &cgcx,
+                                shared_emitter.clone(),
+                                coordinator_send.clone(),
+                                &mut llvm_start_time,
+                                item,
+                            );
                         } else {
                             // There is no unstarted work, so let the main thread
                             // take over for a running worker. Otherwise the
@@ -1596,7 +1617,13 @@ fn start_executing_work(
                 while running_with_own_token < tokens.len()
                     && let Some((item, _)) = work_items.pop()
                 {
-                    spawn_work(&cgcx, coordinator_send.clone(), &mut llvm_start_time, item);
+                    spawn_work(
+                        &cgcx,
+                        shared_emitter.clone(),
+                        coordinator_send.clone(),
+                        &mut llvm_start_time,
+                        item,
+                    );
                     running_with_own_token += 1;
                 }
             }
@@ -1732,36 +1759,51 @@ fn start_executing_work(
             assert!(compiled_modules.is_empty());
             assert!(needs_thin_lto.is_empty());
-            // This uses the implicit token
-            let module = do_fat_lto(
-                &cgcx,
-                &exported_symbols_for_lto,
-                &each_linked_rlib_file_for_lto,
+            if let Some(allocator_module) = allocator_module.take() {
+                needs_fat_lto.push(FatLtoInput::InMemory(allocator_module));
+            }
+
+            return Ok(MaybeLtoModules::FatLto {
+                cgcx,
+                exported_symbols_for_lto,
+                each_linked_rlib_file_for_lto,
                 needs_fat_lto,
                 lto_import_only_modules,
-            );
-            compiled_modules.push(module);
+            });
         } else if !needs_thin_lto.is_empty() || !lto_import_only_modules.is_empty() {
             assert!(compiled_modules.is_empty());
             assert!(needs_fat_lto.is_empty());
-            compiled_modules.extend(do_thin_lto(
-                &cgcx,
-                exported_symbols_for_lto,
-                each_linked_rlib_file_for_lto,
-                needs_thin_lto,
-                lto_import_only_modules,
-            ));
-        }
+            if cgcx.lto == Lto::ThinLocal {
+                compiled_modules.extend(do_thin_lto(
+                    &cgcx,
+                    shared_emitter.clone(),
+                    exported_symbols_for_lto,
+                    each_linked_rlib_file_for_lto,
+                    needs_thin_lto,
+                    lto_import_only_modules,
+                ));
+            } else {
+                if let Some(allocator_module) = allocator_module.take() {
+                    let (name, thin_buffer) = B::prepare_thin(allocator_module);
+                    needs_thin_lto.push((name, thin_buffer));
+                }
-        // Regardless of what order these modules completed in, report them to
-        // the backend in the same order every time to ensure that we're handing
-        // out deterministic results.
-        compiled_modules.sort_by(|a, b| a.name.cmp(&b.name));
+                return Ok(MaybeLtoModules::ThinLto {
+                    cgcx,
+                    exported_symbols_for_lto,
+                    each_linked_rlib_file_for_lto,
+                    needs_thin_lto,
+                    lto_import_only_modules,
+                });
+            }
+        }
-        Ok(CompiledModules {
+        Ok(MaybeLtoModules::NoLto {
             modules: compiled_modules,
-            allocator_module: compiled_allocator_module,
+            allocator_module: allocator_module.map(|allocator_module| {
+                B::codegen(&cgcx, &shared_emitter, allocator_module, &allocator_config)
+            }),
         })
     })
     .expect("failed to spawn coordinator thread");
@@ -1830,6 +1872,7 @@ pub(crate) struct WorkerFatalError;
 fn spawn_work<'a, B: ExtraBackendMethods>(
     cgcx: &'a CodegenContext,
+    shared_emitter: SharedEmitter,
     coordinator_send: Sender>,
     llvm_start_time: &mut Option>,
     work: WorkItem,
@@ -1842,10 +1885,10 @@ fn spawn_work<'a, B: ExtraBackendMethods>(
     B::spawn_named_thread(cgcx.time_trace, work.short_description(), move || {
         let result = std::panic::catch_unwind(AssertUnwindSafe(|| match work {
-            WorkItem::Optimize(m) => execute_optimize_work_item(&cgcx, m),
-            WorkItem::CopyPostLtoArtifacts(m) => {
-                WorkItemResult::Finished(execute_copy_from_cache_work_item(&cgcx, m))
-            }
+            WorkItem::Optimize(m) => execute_optimize_work_item(&cgcx, shared_emitter, m),
+            WorkItem::CopyPostLtoArtifacts(m) => WorkItemResult::Finished(
+                execute_copy_from_cache_work_item(&cgcx, shared_emitter, m),
+            ),
         }));
         let msg = match result {
@@ -1867,6 +1910,7 @@ fn spawn_work<'a, B: ExtraBackendMethods>(
 fn spawn_thin_lto_work<'a, B: ExtraBackendMethods>(
     cgcx: &'a CodegenContext,
+    shared_emitter: SharedEmitter,
     coordinator_send: Sender,
     work: ThinLtoWorkItem,
 ) {
@@ -1874,8 +1918,10 @@ fn spawn_thin_lto_work<'a, B: ExtraBackendMethods>(
     B::spawn_named_thread(cgcx.time_trace, work.short_description(), move || {
         let result = std::panic::catch_unwind(AssertUnwindSafe(|| match work {
-            ThinLtoWorkItem::CopyPostLtoArtifacts(m) => execute_copy_from_cache_work_item(&cgcx, m),
-            ThinLtoWorkItem::ThinLto(m) => execute_thin_lto_work_item(&cgcx, m),
+            ThinLtoWorkItem::CopyPostLtoArtifacts(m) => {
+                execute_copy_from_cache_work_item(&cgcx, shared_emitter, m)
+            }
+            ThinLtoWorkItem::ThinLto(m) => execute_thin_lto_work_item(&cgcx, shared_emitter, m),
         }));
         let msg = match result {
@@ -1897,10 +1943,17 @@ fn spawn_thin_lto_work<'a, B: ExtraBackendMethods>(
 enum SharedEmitterMessage {
     Diagnostic(Diagnostic),
-    InlineAsmError(SpanData, String, Level, Option<(String, Vec)>),
+    InlineAsmError(InlineAsmError),
     Fatal(String),
 }
+pub struct InlineAsmError {
+    pub span: SpanData,
+    pub msg: String,
+    pub level: Level,
+    pub source: Option<(String, Vec)>,
+}
+
 #[derive(Clone)]
 pub struct SharedEmitter {
     sender: Sender,
@@ -1917,14 +1970,8 @@ impl SharedEmitter {
         (SharedEmitter { sender }, SharedEmitterMain { receiver })
     }
-    pub fn inline_asm_error(
-        &self,
-        span: SpanData,
-        msg: String,
-        level: Level,
-        source: Option<(String, Vec)>,
-    ) {
-        drop(self.sender.send(SharedEmitterMessage::InlineAsmError(span, msg, level, source)));
+    pub fn inline_asm_error(&self, err: InlineAsmError) {
+        drop(self.sender.send(SharedEmitterMessage::InlineAsmError(err)));
     }
     fn fatal(&self, msg: &str) {
@@ -1940,7 +1987,7 @@ impl Emitter for SharedEmitter {
     ) {
         // Check that we aren't missing anything interesting when converting to
         // the cut-down local `DiagInner`.
-        assert_eq!(diag.span, MultiSpan::new());
+        assert!(!diag.span.has_span_labels());
         assert_eq!(diag.suggestions, Suggestions::Enabled(vec![]));
         assert_eq!(diag.sort_span, rustc_span::DUMMY_SP);
         assert_eq!(diag.is_lint, None);
@@ -1949,6 +1996,7 @@ impl Emitter for SharedEmitter {
         let args = mem::replace(&mut diag.args, DiagArgMap::default());
         drop(
             self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic {
+                span: diag.span.primary_spans().iter().map(|span| span.data()).collect::>(),
                 level: diag.level(),
                 messages: diag.messages,
                 code: diag.code,
@@ -1993,6 +2041,9 @@ impl SharedEmitterMain {
                     let dcx = sess.dcx();
                     let mut d =
                         rustc_errors::DiagInner::new_with_messages(diag.level, diag.messages);
+                    d.span = MultiSpan::from_spans(
+                        diag.span.into_iter().map(|span| span.span()).collect(),
+                    );
                     d.code = diag.code; // may be `None`, that's ok
                     d.children = diag
                         .children
@@ -2007,15 +2058,15 @@ impl SharedEmitterMain {
                     dcx.emit_diagnostic(d);
                     sess.dcx().abort_if_errors();
                 }
-                Ok(SharedEmitterMessage::InlineAsmError(span, msg, level, source)) => {
-                    assert_matches!(level, Level::Error | Level::Warning | Level::Note);
-                    let mut err = Diag::<()>::new(sess.dcx(), level, msg);
-                    if !span.is_dummy() {
-                        err.span(span.span());
+                Ok(SharedEmitterMessage::InlineAsmError(inner)) => {
+                    assert_matches!(inner.level, Level::Error | Level::Warning | Level::Note);
+                    let mut err = Diag::<()>::new(sess.dcx(), inner.level, inner.msg);
+                    if !inner.span.is_dummy() {
+                        err.span(inner.span.span());
                     }
                     // Point to the generated assembly if it is available.
-                    if let Some((buffer, spans)) = source {
+                    if let Some((buffer, spans)) = inner.source {
                         let source = sess
                             .source_map()
                             .new_source_file(FileName::inline_asm_source_code(&buffer), buffer);
@@ -2046,13 +2097,13 @@ impl SharedEmitterMain {
 pub struct Coordinator {
     sender: Sender>,
-    future: Option>>,
+    future: Option, ()>>>,
     // Only used for the Message type.
     phantom: PhantomData,
 }
 impl Coordinator {
-    fn join(mut self) -> std::thread::Result> {
+    fn join(mut self) -> std::thread::Result, ()>> {
         self.future.take().unwrap().join()
     }
 }
@@ -2083,8 +2134,9 @@ pub struct OngoingCodegen {
 impl OngoingCodegen {
     pub fn join(self, sess: &Session) -> (CodegenResults, FxIndexMap) {
         self.shared_emitter_main.check(sess, true);
-        let compiled_modules = sess.time("join_worker_thread", || match self.coordinator.join() {
-            Ok(Ok(compiled_modules)) => compiled_modules,
+
+        let maybe_lto_modules = sess.time("join_worker_thread", || match self.coordinator.join() {
+            Ok(Ok(maybe_lto_modules)) => maybe_lto_modules,
             Ok(Err(())) => {
                 sess.dcx().abort_if_errors();
                 panic!("expected abort due to worker thread errors")
@@ -2096,6 +2148,62 @@ impl OngoingCodegen {
         sess.dcx().abort_if_errors();
+        let (shared_emitter, shared_emitter_main) = SharedEmitter::new();
+
+        // Catch fatal errors to ensure shared_emitter_main.check() can emit the actual diagnostics
+        let compiled_modules = catch_fatal_errors(|| match maybe_lto_modules {
+            MaybeLtoModules::NoLto { modules, allocator_module } => {
+                drop(shared_emitter);
+                CompiledModules { modules, allocator_module }
+            }
+            MaybeLtoModules::FatLto {
+                cgcx,
+                exported_symbols_for_lto,
+                each_linked_rlib_file_for_lto,
+                needs_fat_lto,
+                lto_import_only_modules,
+            } => CompiledModules {
+                modules: vec![do_fat_lto(
+                    &cgcx,
+                    shared_emitter,
+                    &exported_symbols_for_lto,
+                    &each_linked_rlib_file_for_lto,
+                    needs_fat_lto,
+                    lto_import_only_modules,
+                )],
+                allocator_module: None,
+            },
+            MaybeLtoModules::ThinLto {
+                cgcx,
+                exported_symbols_for_lto,
+                each_linked_rlib_file_for_lto,
+                needs_thin_lto,
+                lto_import_only_modules,
+            } => CompiledModules {
+                modules: do_thin_lto(
+                    &cgcx,
+                    shared_emitter,
+                    exported_symbols_for_lto,
+                    each_linked_rlib_file_for_lto,
+                    needs_thin_lto,
+                    lto_import_only_modules,
+                ),
+                allocator_module: None,
+            },
+        });
+
+        shared_emitter_main.check(sess, true);
+
+        sess.dcx().abort_if_errors();
+
+        let mut compiled_modules =
+            compiled_modules.expect("fatal error emitted but not sent to SharedEmitter");
+
+        // Regardless of what order these modules completed in, report them to
+        // the backend in the same order every time to ensure that we're handing
+        // out deterministic results.
+        compiled_modules.modules.sort_by(|a, b| a.name.cmp(&b.name));
+
         let work_products =
             copy_all_cgu_workproducts_to_incr_comp_cache_dir(sess, &compiled_modules);
         produce_final_output_artifacts(sess, &compiled_modules, &self.output_filenames);
diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs
index 414e9ce1c821c..2943df8f02aae 100644
--- a/compiler/rustc_codegen_ssa/src/base.rs
+++ b/compiler/rustc_codegen_ssa/src/base.rs
@@ -49,9 +49,7 @@ use crate::meth::load_vtable;
 use crate::mir::operand::OperandValue;
 use crate::mir::place::PlaceRef;
 use crate::traits::*;
-use crate::{
-    CachedModuleCodegen, CodegenLintLevels, CrateInfo, ModuleCodegen, ModuleKind, errors, meth, mir,
-};
+use crate::{CachedModuleCodegen, CodegenLintLevels, CrateInfo, ModuleCodegen, errors, meth, mir};
 pub(crate) fn bin_op_to_icmp_predicate(op: BinOp, signed: bool) -> IntPredicate {
     match (op, signed) {
@@ -1134,9 +1132,8 @@ pub fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) ->
     // reuse pre-LTO artifacts
     match compute_per_cgu_lto_type(
         &tcx.sess.lto(),
-        &tcx.sess.opts,
+        tcx.sess.opts.cg.linker_plugin_lto.enabled(),
         tcx.crate_types(),
-        ModuleKind::Regular,
     ) {
         ComputedLtoType::No => CguReuse::PostLto,
         _ => CguReuse::PreLto,
diff --git a/compiler/rustc_codegen_ssa/src/traits/write.rs b/compiler/rustc_codegen_ssa/src/traits/write.rs
index 1ac1d7ef2e2eb..e1d23841118cb 100644
--- a/compiler/rustc_codegen_ssa/src/traits/write.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/write.rs
@@ -4,7 +4,7 @@ use rustc_errors::DiagCtxtHandle;
 use rustc_middle::dep_graph::WorkProduct;
 use crate::back::lto::{SerializedModule, ThinModule};
-use crate::back::write::{CodegenContext, FatLtoInput, ModuleConfig};
+use crate::back::write::{CodegenContext, FatLtoInput, ModuleConfig, SharedEmitter};
 use crate::{CompiledModule, ModuleCodegen};
 pub trait WriteBackendMethods: Clone + 'static {
@@ -19,6 +19,7 @@ pub trait WriteBackendMethods: Clone + 'static {
     /// if necessary and running any further optimizations
     fn run_and_optimize_fat_lto(
         cgcx: &CodegenContext,
+        shared_emitter: &SharedEmitter,
         exported_symbols_for_lto: &[String],
         each_linked_rlib_for_lto: &[PathBuf],
         modules: Vec>,
@@ -28,6 +29,7 @@ pub trait WriteBackendMethods: Clone + 'static {
     /// can simply be copied over from the incr. comp. cache.
     fn run_thin_lto(
         cgcx: &CodegenContext,
+        dcx: DiagCtxtHandle<'_>,
         exported_symbols_for_lto: &[String],
         each_linked_rlib_for_lto: &[PathBuf],
         modules: Vec<(String, Self::ThinBuffer)>,
@@ -37,16 +39,18 @@ pub trait WriteBackendMethods: Clone + 'static {
     fn print_statistics(&self);
     fn optimize(
         cgcx: &CodegenContext,
-        dcx: DiagCtxtHandle<'_>,
+        shared_emitter: &SharedEmitter,
         module: &mut ModuleCodegen,
         config: &ModuleConfig,
     );
     fn optimize_thin(
         cgcx: &CodegenContext,
+        shared_emitter: &SharedEmitter,
         thin: ThinModule,
     ) -> ModuleCodegen;
     fn codegen(
         cgcx: &CodegenContext,
+        shared_emitter: &SharedEmitter,
         module: ModuleCodegen,
         config: &ModuleConfig,
     ) -> CompiledModule;
diff --git a/compiler/rustc_driver_impl/src/lib.rs b/compiler/rustc_driver_impl/src/lib.rs
index 9a3d7cc506cf7..cf5a5fdd234c0 100644
--- a/compiler/rustc_driver_impl/src/lib.rs
+++ b/compiler/rustc_driver_impl/src/lib.rs
@@ -19,7 +19,7 @@ use std::ffi::OsString;
 use std::fmt::Write as _;
 use std::fs::{self, File};
 use std::io::{self, IsTerminal, Read, Write};
-use std::panic::{self, PanicHookInfo, catch_unwind};
+use std::panic::{self, PanicHookInfo};
 use std::path::{Path, PathBuf};
 use std::process::{self, Command, Stdio};
 use std::sync::OnceLock;
@@ -33,10 +33,11 @@ use rustc_codegen_ssa::{CodegenErrors, CodegenResults};
 use rustc_data_structures::profiling::{
     TimePassesFormat, get_resident_set_size, print_time_passes_entry,
 };
+pub use rustc_errors::catch_fatal_errors;
 use rustc_errors::emitter::stderr_destination;
 use rustc_errors::registry::Registry;
 use rustc_errors::translation::Translator;
-use rustc_errors::{ColorConfig, DiagCtxt, ErrCode, FatalError, PResult, markdown};
+use rustc_errors::{ColorConfig, DiagCtxt, ErrCode, PResult, markdown};
 use rustc_feature::find_gated_cfg;
 // This avoids a false positive with `-Wunused_crate_dependencies`.
 // `rust_index` isn't used in this crate's code, but it must be named in the
@@ -1306,21 +1307,6 @@ fn parse_crate_attrs<'a>(sess: &'a Session) -> PResult<'a, ast::AttrVec> {
     parser.parse_inner_attributes()
 }
-/// Runs a closure and catches unwinds triggered by fatal errors.
-///
-/// The compiler currently unwinds with a special sentinel value to abort
-/// compilation on fatal errors. This function catches that sentinel and turns
-/// the panic into a `Result` instead.
-pub fn catch_fatal_errors R, R>(f: F) -> Result {
-    catch_unwind(panic::AssertUnwindSafe(f)).map_err(|value| {
-        if value.is::() {
-            FatalError
-        } else {
-            panic::resume_unwind(value);
-        }
-    })
-}
-
 /// Variant of `catch_fatal_errors` for the `interface::Result` return type
 /// that also computes the exit code.
 pub fn catch_with_exit_code(f: impl FnOnce()) -> i32 {
diff --git a/compiler/rustc_error_messages/src/lib.rs b/compiler/rustc_error_messages/src/lib.rs
index 64dcf3c1f72d8..085403c8ef36b 100644
--- a/compiler/rustc_error_messages/src/lib.rs
+++ b/compiler/rustc_error_messages/src/lib.rs
@@ -455,10 +455,6 @@ impl MultiSpan {
         replacements_occurred
     }
-    pub fn pop_span_label(&mut self) -> Option<(Span, DiagMessage)> {
-        self.span_labels.pop()
-    }
-
     /// Returns the strings to highlight. We always ensure that there
     /// is an entry for each of the primary spans -- for each primary
     /// span `P`, if there is at least one label with span `P`, we return
diff --git a/compiler/rustc_errors/src/lib.rs b/compiler/rustc_errors/src/lib.rs
index bbdda155496fa..ada32bf0c2839 100644
--- a/compiler/rustc_errors/src/lib.rs
+++ b/compiler/rustc_errors/src/lib.rs
@@ -68,7 +68,7 @@ pub use rustc_lint_defs::{Applicability, listify, pluralize};
 use rustc_lint_defs::{Lint, LintExpectationId};
 use rustc_macros::{Decodable, Encodable};
 pub use rustc_span::ErrorGuaranteed;
-pub use rustc_span::fatal_error::{FatalError, FatalErrorMarker};
+pub use rustc_span::fatal_error::{FatalError, FatalErrorMarker, catch_fatal_errors};
 use rustc_span::source_map::SourceMap;
 use rustc_span::{BytePos, DUMMY_SP, Loc, Span};
 pub use snippet::Style;
diff --git a/compiler/rustc_span/src/fatal_error.rs b/compiler/rustc_span/src/fatal_error.rs
index 26c5711099c6d..5e2d82681a11a 100644
--- a/compiler/rustc_span/src/fatal_error.rs
+++ b/compiler/rustc_span/src/fatal_error.rs
@@ -3,6 +3,8 @@
 #[must_use]
 pub struct FatalError;
+use std::panic;
+
 pub use rustc_data_structures::FatalErrorMarker;
 // Don't implement Send on FatalError. This makes it impossible to `panic_any!(FatalError)`.
@@ -22,3 +24,18 @@ impl std::fmt::Display for FatalError {
     }
 }
 impl std::error::Error for FatalError {}
+
+/// Runs a closure and catches unwinds triggered by fatal errors.
+///
+/// The compiler currently unwinds with a special sentinel value to abort
+/// compilation on fatal errors. This function catches that sentinel and turns
+/// the panic into a `Result` instead.
+pub fn catch_fatal_errors R, R>(f: F) -> Result {
+    panic::catch_unwind(panic::AssertUnwindSafe(f)).map_err(|value| {
+        if value.is::() {
+            FatalError
+        } else {
+            panic::resume_unwind(value);
+        }
+    })
+}