diff --git a/compiler/rustc_const_eval/messages.ftl b/compiler/rustc_const_eval/messages.ftl index 3a2b3f8843191..38ab46a7bb5b1 100644 --- a/compiler/rustc_const_eval/messages.ftl +++ b/compiler/rustc_const_eval/messages.ftl @@ -473,7 +473,6 @@ const_eval_validation_invalid_ref_meta = {$front_matter}: encountered invalid re const_eval_validation_invalid_ref_slice_meta = {$front_matter}: encountered invalid reference metadata: slice is bigger than largest supported object const_eval_validation_invalid_vtable_ptr = {$front_matter}: encountered {$value}, but expected a vtable pointer const_eval_validation_invalid_vtable_trait = {$front_matter}: wrong trait in wide pointer vtable: expected `{$expected_dyn_type}`, but encountered `{$vtable_dyn_type}` -const_eval_validation_mutable_ref_in_const = {$front_matter}: encountered mutable reference in `const` value const_eval_validation_mutable_ref_to_immutable = {$front_matter}: encountered mutable reference or box pointing to read-only memory const_eval_validation_never_val = {$front_matter}: encountered a value of the never type `!` const_eval_validation_nonnull_ptr_out_of_range = {$front_matter}: encountered a maybe-null pointer, but expected something that is definitely non-zero diff --git a/compiler/rustc_const_eval/src/errors.rs b/compiler/rustc_const_eval/src/errors.rs index a0958a2b9ef3a..50f5448ec20ad 100644 --- a/compiler/rustc_const_eval/src/errors.rs +++ b/compiler/rustc_const_eval/src/errors.rs @@ -665,7 +665,6 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> { PointerAsInt { .. } => const_eval_validation_pointer_as_int, PartialPointer => const_eval_validation_partial_pointer, MutableRefToImmutable => const_eval_validation_mutable_ref_to_immutable, - MutableRefInConst => const_eval_validation_mutable_ref_in_const, NullFnPtr { .. } => const_eval_validation_null_fn_ptr, NeverVal => const_eval_validation_never_val, NonnullPtrMaybeNull { .. 
} => const_eval_validation_nonnull_ptr_out_of_range, @@ -824,7 +823,6 @@ impl<'tcx> ReportErrorExt for ValidationErrorInfo<'tcx> { err.arg("maybe", maybe); } MutableRefToImmutable - | MutableRefInConst | NonnullPtrMaybeNull | NeverVal | UnsafeCellInImmutable diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs index 89a3303eb3902..34296b6d8de34 100644 --- a/compiler/rustc_const_eval/src/interpret/validity.rs +++ b/compiler/rustc_const_eval/src/interpret/validity.rs @@ -639,12 +639,6 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> { // This can actually occur with transmutes. throw_validation_failure!(self.path, MutableRefToImmutable); } - // In a const, any kind of mutable reference is not good. - if matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { .. })) { - if ptr_expected_mutbl == Mutability::Mut { - throw_validation_failure!(self.path, MutableRefInConst); - } - } } } // Potentially skip recursive check. diff --git a/compiler/rustc_middle/src/mir/interpret/error.rs b/compiler/rustc_middle/src/mir/interpret/error.rs index 976c209977b06..66c928f518aa3 100644 --- a/compiler/rustc_middle/src/mir/interpret/error.rs +++ b/compiler/rustc_middle/src/mir/interpret/error.rs @@ -496,7 +496,6 @@ pub enum ValidationErrorKind<'tcx> { }, MutableRefToImmutable, UnsafeCellInImmutable, - MutableRefInConst, NullFnPtr { /// Records whether this pointer is definitely null or just may be null. 
maybe: bool, diff --git a/compiler/rustc_resolve/src/diagnostics.rs b/compiler/rustc_resolve/src/diagnostics.rs index 2f4a18f9cfa6b..fe299a6cebca7 100644 --- a/compiler/rustc_resolve/src/diagnostics.rs +++ b/compiler/rustc_resolve/src/diagnostics.rs @@ -1,3 +1,5 @@ +use std::ops::ControlFlow; + use itertools::Itertools as _; use rustc_ast::visit::{self, Visitor}; use rustc_ast::{ @@ -1261,7 +1263,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { } } - None::<()> + ControlFlow::<()>::Continue(()) }); } diff --git a/compiler/rustc_resolve/src/ident.rs b/compiler/rustc_resolve/src/ident.rs index 8ecae07dea67d..f59b5a0aad9a7 100644 --- a/compiler/rustc_resolve/src/ident.rs +++ b/compiler/rustc_resolve/src/ident.rs @@ -1,3 +1,5 @@ +use std::ops::ControlFlow; + use Determinacy::*; use Namespace::*; use rustc_ast::{self as ast, NodeId}; @@ -20,7 +22,7 @@ use crate::{ AmbiguityError, AmbiguityErrorMisc, AmbiguityKind, BindingKey, CmResolver, Determinacy, Finalize, ImportKind, LexicalScopeBinding, Module, ModuleKind, ModuleOrUniformRoot, NameBinding, NameBindingKind, ParentScope, PathResult, PrivacyError, Res, ResolutionError, - Resolver, Scope, ScopeSet, Segment, Stage, Used, Weak, errors, + Resolver, Scope, ScopeSet, Segment, Stage, Used, errors, }; #[derive(Copy, Clone)] @@ -41,6 +43,17 @@ enum Shadowing { Unrestricted, } +bitflags::bitflags! { + #[derive(Clone, Copy)] + struct Flags: u8 { + const MACRO_RULES = 1 << 0; + const MODULE = 1 << 1; + const MISC_SUGGEST_CRATE = 1 << 2; + const MISC_SUGGEST_SELF = 1 << 3; + const MISC_FROM_PRELUDE = 1 << 4; + } +} + impl<'ra, 'tcx> Resolver<'ra, 'tcx> { /// A generic scope visitor. /// Visits scopes in order to resolve some identifier in them or perform other actions. @@ -56,7 +69,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { Scope<'ra>, UsePrelude, SyntaxContext, - ) -> Option, + ) -> ControlFlow, ) -> Option { // General principles: // 1. 
Not controlled (user-defined) names should have higher priority than controlled names @@ -156,8 +169,10 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { if visit { let use_prelude = if use_prelude { UsePrelude::Yes } else { UsePrelude::No }; - if let break_result @ Some(..) = visitor(&mut self, scope, use_prelude, ctxt) { - return break_result; + if let ControlFlow::Break(break_result) = + visitor(&mut self, scope, use_prelude, ctxt) + { + return Some(break_result); } } @@ -333,7 +348,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { ))); } else if let RibKind::Block(Some(module)) = rib.kind && let Ok(binding) = self.cm().resolve_ident_in_module_unadjusted( - ModuleOrUniformRoot::Module(module), + module, ident, ns, parent_scope, @@ -388,17 +403,6 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { ignore_binding: Option>, ignore_import: Option>, ) -> Result, Determinacy> { - bitflags::bitflags! { - #[derive(Clone, Copy)] - struct Flags: u8 { - const MACRO_RULES = 1 << 0; - const MODULE = 1 << 1; - const MISC_SUGGEST_CRATE = 1 << 2; - const MISC_SUGGEST_SELF = 1 << 3; - const MISC_FROM_PRELUDE = 1 << 4; - } - } - assert!(force || finalize.is_none()); // `finalize` implies `force` // Make sure `self`, `super` etc produce an error when passed to here. @@ -411,6 +415,10 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { ScopeSet::ExternPrelude => (TypeNS, None), ScopeSet::Macro(macro_kind) => (MacroNS, Some(macro_kind)), }; + let derive_fallback_lint_id = match finalize { + Some(Finalize { node_id, stage: Stage::Late, .. }) => Some(node_id), + _ => None, + }; // This is *the* result, resolution from the scope closest to the resolved identifier. // However, sometimes this result is "weak" because it comes from a glob import or @@ -427,337 +435,375 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { let mut determinacy = Determinacy::Determined; let mut extern_prelude_item_binding = None; let mut extern_prelude_flag_binding = None; - // Shadowed bindings don't need to be marked as used or non-speculatively loaded. 
- macro finalize_scope() { - if innermost_result.is_none() { finalize } else { None } - } // Go through all the scopes and try to resolve the name. - let derive_fallback_lint_id = match finalize { - Some(Finalize { node_id, stage: Stage::Late, .. }) => Some(node_id), - _ => None, - }; let break_result = self.visit_scopes( scope_set, parent_scope, orig_ident.span.ctxt(), derive_fallback_lint_id, |this, scope, use_prelude, ctxt| { - let ident = Ident::new(orig_ident.name, orig_ident.span.with_ctxt(ctxt)); - let result = match scope { - Scope::DeriveHelpers(expn_id) => { - if let Some(binding) = this.helper_attrs.get(&expn_id).and_then(|attrs| { - attrs.iter().rfind(|(i, _)| ident == *i).map(|(_, binding)| *binding) - }) { - Ok((binding, Flags::empty())) - } else { - Err(Determinacy::Determined) - } - } - Scope::DeriveHelpersCompat => { - let mut result = Err(Determinacy::Determined); - for derive in parent_scope.derives { - let parent_scope = &ParentScope { derives: &[], ..*parent_scope }; - match this.reborrow().resolve_derive_macro_path( - derive, - parent_scope, - force, - ignore_import, - ) { - Ok((Some(ext), _)) => { - if ext.helper_attrs.contains(&ident.name) { - let binding = this.arenas.new_pub_res_binding( - Res::NonMacroAttr(NonMacroAttrKind::DeriveHelperCompat), - derive.span, - LocalExpnId::ROOT, - ); - result = Ok((binding, Flags::empty())); - break; - } - } - Ok(_) | Err(Determinacy::Determined) => {} - Err(Determinacy::Undetermined) => { - result = Err(Determinacy::Undetermined) - } - } - } - result - } - Scope::MacroRules(macro_rules_scope) => match macro_rules_scope.get() { - MacroRulesScope::Binding(macro_rules_binding) - if ident == macro_rules_binding.ident => - { - Ok((macro_rules_binding.binding, Flags::MACRO_RULES)) - } - MacroRulesScope::Invocation(_) => Err(Determinacy::Undetermined), - _ => Err(Determinacy::Determined), - }, - Scope::Module(module, derive_fallback_lint_id) => { - let (adjusted_parent_scope, adjusted_finalize) = - if 
matches!(scope_set, ScopeSet::ModuleAndExternPrelude(..)) { - (parent_scope, finalize_scope!()) - } else { - ( - &ParentScope { module, ..*parent_scope }, - finalize_scope!().map(|f| Finalize { used: Used::Scope, ..f }), - ) - }; - let binding = this.reborrow().resolve_ident_in_module_unadjusted( - ModuleOrUniformRoot::Module(module), - ident, - ns, - adjusted_parent_scope, - Shadowing::Restricted, - adjusted_finalize, - ignore_binding, - ignore_import, - ); - match binding { - Ok(binding) => { - if let Some(lint_id) = derive_fallback_lint_id { - this.get_mut().lint_buffer.buffer_lint( - PROC_MACRO_DERIVE_RESOLUTION_FALLBACK, - lint_id, - orig_ident.span, - errors::ProcMacroDeriveResolutionFallback { - span: orig_ident.span, - ns_descr: ns.descr(), - ident, - }, - ); - } - let misc_flags = if module == this.graph_root { - Flags::MISC_SUGGEST_CRATE - } else if module.is_normal() { - Flags::MISC_SUGGEST_SELF - } else { - Flags::empty() - }; - Ok((binding, Flags::MODULE | misc_flags)) - } - Err((Determinacy::Undetermined, Weak::No)) => { - return Some(Err(Determinacy::determined(force))); - } - Err((Determinacy::Undetermined, Weak::Yes)) => { - Err(Determinacy::Undetermined) - } - Err((Determinacy::Determined, _)) => Err(Determinacy::Determined), - } - } - Scope::MacroUsePrelude => { - match this.macro_use_prelude.get(&ident.name).cloned() { - Some(binding) => Ok((binding, Flags::MISC_FROM_PRELUDE)), - None => Err(Determinacy::determined( - this.graph_root.unexpanded_invocations.borrow().is_empty(), - )), - } - } - Scope::BuiltinAttrs => match this.builtin_attrs_bindings.get(&ident.name) { - Some(binding) => Ok((*binding, Flags::empty())), - None => Err(Determinacy::Determined), - }, - Scope::ExternPreludeItems => { - match this - .reborrow() - .extern_prelude_get_item(ident, finalize_scope!().is_some()) - { - Some(binding) => { - extern_prelude_item_binding = Some(binding); - Ok((binding, Flags::empty())) - } - None => Err(Determinacy::determined( - 
this.graph_root.unexpanded_invocations.borrow().is_empty(), - )), - } - } - Scope::ExternPreludeFlags => { - match this.extern_prelude_get_flag(ident, finalize_scope!().is_some()) { - Some(binding) => { - extern_prelude_flag_binding = Some(binding); - Ok((binding, Flags::empty())) - } - None => Err(Determinacy::Determined), - } - } - Scope::ToolPrelude => match this.registered_tool_bindings.get(&ident) { - Some(binding) => Ok((*binding, Flags::empty())), - None => Err(Determinacy::Determined), - }, - Scope::StdLibPrelude => { - let mut result = Err(Determinacy::Determined); - if let Some(prelude) = this.prelude - && let Ok(binding) = this.reborrow().resolve_ident_in_module_unadjusted( - ModuleOrUniformRoot::Module(prelude), - ident, - ns, - parent_scope, - Shadowing::Unrestricted, - None, - ignore_binding, - ignore_import, - ) - && (matches!(use_prelude, UsePrelude::Yes) - || this.is_builtin_macro(binding.res())) - { - result = Ok((binding, Flags::MISC_FROM_PRELUDE)); - } - - result - } - Scope::BuiltinTypes => match this.builtin_types_bindings.get(&ident.name) { - Some(binding) => { - if matches!(ident.name, sym::f16) - && !this.tcx.features().f16() - && !ident.span.allows_unstable(sym::f16) - && finalize_scope!().is_some() - { - feature_err( - this.tcx.sess, - sym::f16, - ident.span, - "the type `f16` is unstable", - ) - .emit(); - } - if matches!(ident.name, sym::f128) - && !this.tcx.features().f128() - && !ident.span.allows_unstable(sym::f128) - && finalize_scope!().is_some() - { - feature_err( - this.tcx.sess, - sym::f128, - ident.span, - "the type `f128` is unstable", - ) - .emit(); - } - Ok((*binding, Flags::empty())) - } - None => Err(Determinacy::Determined), - }, - }; - - match result { - Ok((binding, flags)) => { - if !sub_namespace_match(binding.macro_kinds(), macro_kind) { - return None; - } - + // We can break with an error at this step, it means we cannot determine the + // resolution right now, but we must block and wait until we can instead of + // 
considering outer scopes. + match this.reborrow().resolve_ident_in_scope( + orig_ident, + ns, + scope, + use_prelude, + ctxt, + scope_set, + parent_scope, + // Shadowed bindings don't need to be marked as used or non-speculatively loaded. + if innermost_result.is_none() { finalize } else { None }, + force, + ignore_binding, + ignore_import, + &mut extern_prelude_item_binding, + &mut extern_prelude_flag_binding, + )? { + Ok((binding, flags)) + if sub_namespace_match(binding.macro_kinds(), macro_kind) => + { // Below we report various ambiguity errors. // We do not need to report them if we are either in speculative resolution, // or in late resolution when everything is already imported and expanded // and no ambiguities exist. if matches!(finalize, None | Some(Finalize { stage: Stage::Late, .. })) { - return Some(Ok(binding)); + return ControlFlow::Break(Ok(binding)); } if let Some((innermost_binding, innermost_flags)) = innermost_result { // Found another solution, if the first one was "weak", report an error. 
- let (res, innermost_res) = (binding.res(), innermost_binding.res()); - if res != innermost_res { - let is_builtin = |res| { - matches!(res, Res::NonMacroAttr(NonMacroAttrKind::Builtin(..))) - }; - let derive_helper = - Res::NonMacroAttr(NonMacroAttrKind::DeriveHelper); - let derive_helper_compat = - Res::NonMacroAttr(NonMacroAttrKind::DeriveHelperCompat); - - let ambiguity_error_kind = if is_builtin(innermost_res) - || is_builtin(res) - { - Some(AmbiguityKind::BuiltinAttr) - } else if innermost_res == derive_helper_compat - || res == derive_helper_compat && innermost_res != derive_helper - { - Some(AmbiguityKind::DeriveHelper) - } else if innermost_flags.contains(Flags::MACRO_RULES) - && flags.contains(Flags::MODULE) - && !this.disambiguate_macro_rules_vs_modularized( - innermost_binding, - binding, - ) - { - Some(AmbiguityKind::MacroRulesVsModularized) - } else if flags.contains(Flags::MACRO_RULES) - && innermost_flags.contains(Flags::MODULE) - { - // should be impossible because of visitation order in - // visit_scopes - // - // we visit all macro_rules scopes (e.g. textual scope macros) - // before we visit any modules (e.g. path-based scope macros) - span_bug!( - orig_ident.span, - "ambiguous scoped macro resolutions with path-based \ - scope resolution as first candidate" - ) - } else if innermost_binding.is_glob_import() { - Some(AmbiguityKind::GlobVsOuter) - } else if innermost_binding - .may_appear_after(parent_scope.expansion, binding) - { - Some(AmbiguityKind::MoreExpandedVsOuter) - } else { - None - }; - // Skip ambiguity errors for extern flag bindings "overridden" - // by extern item bindings. - // FIXME: Remove with lang team approval. 
- let issue_145575_hack = Some(binding) - == extern_prelude_flag_binding - && extern_prelude_item_binding.is_some() - && extern_prelude_item_binding != Some(innermost_binding); - if let Some(kind) = ambiguity_error_kind - && !issue_145575_hack - { - let misc = |f: Flags| { - if f.contains(Flags::MISC_SUGGEST_CRATE) { - AmbiguityErrorMisc::SuggestCrate - } else if f.contains(Flags::MISC_SUGGEST_SELF) { - AmbiguityErrorMisc::SuggestSelf - } else if f.contains(Flags::MISC_FROM_PRELUDE) { - AmbiguityErrorMisc::FromPrelude - } else { - AmbiguityErrorMisc::None - } - }; - this.get_mut().ambiguity_errors.push(AmbiguityError { - kind, - ident: orig_ident, - b1: innermost_binding, - b2: binding, - warning: false, - misc1: misc(innermost_flags), - misc2: misc(flags), - }); - return Some(Ok(innermost_binding)); - } + if this.get_mut().maybe_push_ambiguity( + orig_ident, + parent_scope, + binding, + innermost_binding, + flags, + innermost_flags, + extern_prelude_item_binding, + extern_prelude_flag_binding, + ) { + // No need to search for more potential ambiguities, one is enough. + return ControlFlow::Break(Ok(innermost_binding)); } } else { // Found the first solution. innermost_result = Some((binding, flags)); } } - Err(Determinacy::Determined) => {} + Ok(_) | Err(Determinacy::Determined) => {} Err(Determinacy::Undetermined) => determinacy = Determinacy::Undetermined, } - None + ControlFlow::Continue(()) }, ); + // Scope visiting returned some result early. if let Some(break_result) = break_result { return break_result; } - // The first found solution was the only one, return it. - if let Some((binding, _)) = innermost_result { - return Ok(binding); + // Scope visiting walked all the scopes and maybe found something in one of them. 
+ match innermost_result { + Some((binding, _)) => Ok(binding), + None => Err(Determinacy::determined(determinacy == Determinacy::Determined || force)), } + } + + fn resolve_ident_in_scope<'r>( + mut self: CmResolver<'r, 'ra, 'tcx>, + orig_ident: Ident, + ns: Namespace, + scope: Scope<'ra>, + use_prelude: UsePrelude, + ctxt: SyntaxContext, + scope_set: ScopeSet<'ra>, + parent_scope: &ParentScope<'ra>, + finalize: Option, + force: bool, + ignore_binding: Option>, + ignore_import: Option>, + extern_prelude_item_binding: &mut Option>, + extern_prelude_flag_binding: &mut Option>, + ) -> ControlFlow< + Result, Determinacy>, + Result<(NameBinding<'ra>, Flags), Determinacy>, + > { + let ident = Ident::new(orig_ident.name, orig_ident.span.with_ctxt(ctxt)); + let ret = match scope { + Scope::DeriveHelpers(expn_id) => { + if let Some(binding) = self.helper_attrs.get(&expn_id).and_then(|attrs| { + attrs.iter().rfind(|(i, _)| ident == *i).map(|(_, binding)| *binding) + }) { + Ok((binding, Flags::empty())) + } else { + Err(Determinacy::Determined) + } + } + Scope::DeriveHelpersCompat => { + let mut result = Err(Determinacy::Determined); + for derive in parent_scope.derives { + let parent_scope = &ParentScope { derives: &[], ..*parent_scope }; + match self.reborrow().resolve_derive_macro_path( + derive, + parent_scope, + force, + ignore_import, + ) { + Ok((Some(ext), _)) => { + if ext.helper_attrs.contains(&ident.name) { + let binding = self.arenas.new_pub_res_binding( + Res::NonMacroAttr(NonMacroAttrKind::DeriveHelperCompat), + derive.span, + LocalExpnId::ROOT, + ); + result = Ok((binding, Flags::empty())); + break; + } + } + Ok(_) | Err(Determinacy::Determined) => {} + Err(Determinacy::Undetermined) => result = Err(Determinacy::Undetermined), + } + } + result + } + Scope::MacroRules(macro_rules_scope) => match macro_rules_scope.get() { + MacroRulesScope::Binding(macro_rules_binding) + if ident == macro_rules_binding.ident => + { + Ok((macro_rules_binding.binding, 
Flags::MACRO_RULES)) + } + MacroRulesScope::Invocation(_) => Err(Determinacy::Undetermined), + _ => Err(Determinacy::Determined), + }, + Scope::Module(module, derive_fallback_lint_id) => { + let (adjusted_parent_scope, adjusted_finalize) = + if matches!(scope_set, ScopeSet::ModuleAndExternPrelude(..)) { + (parent_scope, finalize) + } else { + ( + &ParentScope { module, ..*parent_scope }, + finalize.map(|f| Finalize { used: Used::Scope, ..f }), + ) + }; + let binding = self.reborrow().resolve_ident_in_module_unadjusted( + module, + ident, + ns, + adjusted_parent_scope, + Shadowing::Restricted, + adjusted_finalize, + ignore_binding, + ignore_import, + ); + match binding { + Ok(binding) => { + if let Some(lint_id) = derive_fallback_lint_id { + self.get_mut().lint_buffer.buffer_lint( + PROC_MACRO_DERIVE_RESOLUTION_FALLBACK, + lint_id, + orig_ident.span, + errors::ProcMacroDeriveResolutionFallback { + span: orig_ident.span, + ns_descr: ns.descr(), + ident, + }, + ); + } + let misc_flags = if module == self.graph_root { + Flags::MISC_SUGGEST_CRATE + } else if module.is_normal() { + Flags::MISC_SUGGEST_SELF + } else { + Flags::empty() + }; + Ok((binding, Flags::MODULE | misc_flags)) + } + Err(ControlFlow::Continue(determinacy)) => Err(determinacy), + Err(ControlFlow::Break(Determinacy::Undetermined)) => { + return ControlFlow::Break(Err(Determinacy::determined(force))); + } + // Privacy errors, do not happen during in scope resolution. 
+ Err(ControlFlow::Break(Determinacy::Determined)) => unreachable!(), + } + } + Scope::MacroUsePrelude => match self.macro_use_prelude.get(&ident.name).cloned() { + Some(binding) => Ok((binding, Flags::MISC_FROM_PRELUDE)), + None => Err(Determinacy::determined( + self.graph_root.unexpanded_invocations.borrow().is_empty(), + )), + }, + Scope::BuiltinAttrs => match self.builtin_attrs_bindings.get(&ident.name) { + Some(binding) => Ok((*binding, Flags::empty())), + None => Err(Determinacy::Determined), + }, + Scope::ExternPreludeItems => { + match self.reborrow().extern_prelude_get_item(ident, finalize.is_some()) { + Some(binding) => { + *extern_prelude_item_binding = Some(binding); + Ok((binding, Flags::empty())) + } + None => Err(Determinacy::determined( + self.graph_root.unexpanded_invocations.borrow().is_empty(), + )), + } + } + Scope::ExternPreludeFlags => { + match self.extern_prelude_get_flag(ident, finalize.is_some()) { + Some(binding) => { + *extern_prelude_flag_binding = Some(binding); + Ok((binding, Flags::empty())) + } + None => Err(Determinacy::Determined), + } + } + Scope::ToolPrelude => match self.registered_tool_bindings.get(&ident) { + Some(binding) => Ok((*binding, Flags::empty())), + None => Err(Determinacy::Determined), + }, + Scope::StdLibPrelude => { + let mut result = Err(Determinacy::Determined); + if let Some(prelude) = self.prelude + && let Ok(binding) = self.reborrow().resolve_ident_in_module_unadjusted( + prelude, + ident, + ns, + parent_scope, + Shadowing::Unrestricted, + None, + ignore_binding, + ignore_import, + ) + && (matches!(use_prelude, UsePrelude::Yes) + || self.is_builtin_macro(binding.res())) + { + result = Ok((binding, Flags::MISC_FROM_PRELUDE)); + } - Err(Determinacy::determined(determinacy == Determinacy::Determined || force)) + result + } + Scope::BuiltinTypes => match self.builtin_types_bindings.get(&ident.name) { + Some(binding) => { + if matches!(ident.name, sym::f16) + && !self.tcx.features().f16() + && 
!ident.span.allows_unstable(sym::f16) + && finalize.is_some() + { + feature_err( + self.tcx.sess, + sym::f16, + ident.span, + "the type `f16` is unstable", + ) + .emit(); + } + if matches!(ident.name, sym::f128) + && !self.tcx.features().f128() + && !ident.span.allows_unstable(sym::f128) + && finalize.is_some() + { + feature_err( + self.tcx.sess, + sym::f128, + ident.span, + "the type `f128` is unstable", + ) + .emit(); + } + Ok((*binding, Flags::empty())) + } + None => Err(Determinacy::Determined), + }, + }; + + ControlFlow::Continue(ret) + } + + fn maybe_push_ambiguity( + &mut self, + orig_ident: Ident, + parent_scope: &ParentScope<'ra>, + binding: NameBinding<'ra>, + innermost_binding: NameBinding<'ra>, + flags: Flags, + innermost_flags: Flags, + extern_prelude_item_binding: Option>, + extern_prelude_flag_binding: Option>, + ) -> bool { + let (res, innermost_res) = (binding.res(), innermost_binding.res()); + if res == innermost_res { + return false; + } + + let is_builtin = |res| matches!(res, Res::NonMacroAttr(NonMacroAttrKind::Builtin(..))); + let derive_helper = Res::NonMacroAttr(NonMacroAttrKind::DeriveHelper); + let derive_helper_compat = Res::NonMacroAttr(NonMacroAttrKind::DeriveHelperCompat); + + let ambiguity_error_kind = if is_builtin(innermost_res) || is_builtin(res) { + Some(AmbiguityKind::BuiltinAttr) + } else if innermost_res == derive_helper_compat + || res == derive_helper_compat && innermost_res != derive_helper + { + Some(AmbiguityKind::DeriveHelper) + } else if innermost_flags.contains(Flags::MACRO_RULES) + && flags.contains(Flags::MODULE) + && !self.disambiguate_macro_rules_vs_modularized(innermost_binding, binding) + { + Some(AmbiguityKind::MacroRulesVsModularized) + } else if flags.contains(Flags::MACRO_RULES) && innermost_flags.contains(Flags::MODULE) { + // should be impossible because of visitation order in + // visit_scopes + // + // we visit all macro_rules scopes (e.g. textual scope macros) + // before we visit any modules (e.g. 
path-based scope macros) + span_bug!( + orig_ident.span, + "ambiguous scoped macro resolutions with path-based \ + scope resolution as first candidate" + ) + } else if innermost_binding.is_glob_import() { + Some(AmbiguityKind::GlobVsOuter) + } else if innermost_binding.may_appear_after(parent_scope.expansion, binding) { + Some(AmbiguityKind::MoreExpandedVsOuter) + } else { + None + }; + // Skip ambiguity errors for extern flag bindings "overridden" + // by extern item bindings. + // FIXME: Remove with lang team approval. + let issue_145575_hack = Some(binding) == extern_prelude_flag_binding + && extern_prelude_item_binding.is_some() + && extern_prelude_item_binding != Some(innermost_binding); + if let Some(kind) = ambiguity_error_kind + && !issue_145575_hack + { + let misc = |f: Flags| { + if f.contains(Flags::MISC_SUGGEST_CRATE) { + AmbiguityErrorMisc::SuggestCrate + } else if f.contains(Flags::MISC_SUGGEST_SELF) { + AmbiguityErrorMisc::SuggestSelf + } else if f.contains(Flags::MISC_FROM_PRELUDE) { + AmbiguityErrorMisc::FromPrelude + } else { + AmbiguityErrorMisc::None + } + }; + self.ambiguity_errors.push(AmbiguityError { + kind, + ident: orig_ident, + b1: innermost_binding, + b2: binding, + warning: false, + misc1: misc(innermost_flags), + misc2: misc(flags), + }); + return true; + } + + false } #[instrument(level = "debug", skip(self))] @@ -770,7 +816,6 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { ignore_import: Option>, ) -> Result, Determinacy> { self.resolve_ident_in_module(module, ident, ns, parent_scope, None, None, ignore_import) - .map_err(|(determinacy, _)| determinacy) } #[instrument(level = "debug", skip(self))] @@ -783,7 +828,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { finalize: Option, ignore_binding: Option>, ignore_import: Option>, - ) -> Result, (Determinacy, Weak)> { + ) -> Result, Determinacy> { let tmp_parent_scope; let mut adjusted_parent_scope = parent_scope; match module { @@ -801,54 +846,56 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { // No 
adjustments } } - self.resolve_ident_in_module_unadjusted( + self.resolve_ident_in_virt_module_unadjusted( module, ident, ns, adjusted_parent_scope, - Shadowing::Unrestricted, finalize, ignore_binding, ignore_import, ) } - /// Attempts to resolve `ident` in namespaces `ns` of `module`. - /// Invariant: if `finalize` is `Some`, expansion and import resolution must be complete. + + /// Attempts to resolve `ident` in namespace `ns` of `module`. #[instrument(level = "debug", skip(self))] - fn resolve_ident_in_module_unadjusted<'r>( - mut self: CmResolver<'r, 'ra, 'tcx>, + fn resolve_ident_in_virt_module_unadjusted<'r>( + self: CmResolver<'r, 'ra, 'tcx>, module: ModuleOrUniformRoot<'ra>, ident: Ident, ns: Namespace, parent_scope: &ParentScope<'ra>, - shadowing: Shadowing, finalize: Option, - // This binding should be ignored during in-module resolution, so that we don't get - // "self-confirming" import resolutions during import validation and checking. ignore_binding: Option>, ignore_import: Option>, - ) -> Result, (Determinacy, Weak)> { - let module = match module { - ModuleOrUniformRoot::Module(module) => module, - ModuleOrUniformRoot::ModuleAndExternPrelude(module) => { - assert_eq!(shadowing, Shadowing::Unrestricted); - let binding = self.resolve_ident_in_scope_set( + ) -> Result, Determinacy> { + match module { + ModuleOrUniformRoot::Module(module) => self + .resolve_ident_in_module_unadjusted( + module, ident, - ScopeSet::ModuleAndExternPrelude(ns, module), + ns, parent_scope, + Shadowing::Unrestricted, finalize, - finalize.is_some(), ignore_binding, ignore_import, - ); - return binding.map_err(|determinacy| (determinacy, Weak::No)); - } + ) + .map_err(|determinacy| determinacy.into_value()), + ModuleOrUniformRoot::ModuleAndExternPrelude(module) => self.resolve_ident_in_scope_set( + ident, + ScopeSet::ModuleAndExternPrelude(ns, module), + parent_scope, + finalize, + finalize.is_some(), + ignore_binding, + ignore_import, + ), ModuleOrUniformRoot::ExternPrelude => 
{ - assert_eq!(shadowing, Shadowing::Unrestricted); - return if ns != TypeNS { - Err((Determined, Weak::No)) + if ns != TypeNS { + Err(Determined) } else { - let binding = self.resolve_ident_in_scope_set( + self.resolve_ident_in_scope_set( ident, ScopeSet::ExternPrelude, parent_scope, @@ -856,12 +903,10 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { finalize.is_some(), ignore_binding, ignore_import, - ); - return binding.map_err(|determinacy| (determinacy, Weak::No)); - }; + ) + } } ModuleOrUniformRoot::CurrentScope => { - assert_eq!(shadowing, Shadowing::Unrestricted); if ns == TypeNS { if ident.name == kw::Crate || ident.name == kw::DollarCrate { let module = self.resolve_crate_root(ident); @@ -873,7 +918,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { } } - let binding = self.resolve_ident_in_scope_set( + self.resolve_ident_in_scope_set( ident, ScopeSet::All(ns), parent_scope, @@ -881,11 +926,65 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { finalize.is_some(), ignore_binding, ignore_import, - ); - return binding.map_err(|determinacy| (determinacy, Weak::No)); + ) } - }; + } + } + + /// Attempts to resolve `ident` in namespace `ns` of `module`. + fn resolve_ident_in_module_unadjusted<'r>( + mut self: CmResolver<'r, 'ra, 'tcx>, + module: Module<'ra>, + ident: Ident, + ns: Namespace, + parent_scope: &ParentScope<'ra>, + shadowing: Shadowing, + finalize: Option, + // This binding should be ignored during in-module resolution, so that we don't get + // "self-confirming" import resolutions during import validation and checking. 
+ ignore_binding: Option>, + ignore_import: Option>, + ) -> Result, ControlFlow> { + let res = self.reborrow().resolve_ident_in_module_non_globs_unadjusted( + module, + ident, + ns, + parent_scope, + shadowing, + finalize, + ignore_binding, + ignore_import, + ); + + match res { + Ok(_) | Err(ControlFlow::Break(_)) => return res, + Err(ControlFlow::Continue(_)) => {} + } + + self.resolve_ident_in_module_globs_unadjusted( + module, + ident, + ns, + parent_scope, + shadowing, + finalize, + ignore_binding, + ignore_import, + ) + } + /// Attempts to resolve `ident` in namespace `ns` of non-glob bindings in `module`. + fn resolve_ident_in_module_non_globs_unadjusted<'r>( + mut self: CmResolver<'r, 'ra, 'tcx>, + module: Module<'ra>, + ident: Ident, + ns: Namespace, + parent_scope: &ParentScope<'ra>, + shadowing: Shadowing, + finalize: Option, + ignore_binding: Option>, + ignore_import: Option>, + ) -> Result, ControlFlow> { let key = BindingKey::new(ident, ns); // `try_borrow_mut` is required to ensure exclusive access, even if the resulting binding // doesn't need to be mutable. It will fail when there is a cycle of imports, and without @@ -893,15 +992,9 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { let resolution = &*self .resolution_or_default(module, key) .try_borrow_mut_unchecked() - .map_err(|_| (Determined, Weak::No))?; + .map_err(|_| ControlFlow::Continue(Determined))?; - // If the primary binding is unusable, search further and return the shadowed glob - // binding if it exists. What we really want here is having two separate scopes in - // a module - one for non-globs and one for globs, but until that's done use this - // hack to avoid inconsistent resolution ICEs during import validation. 
- let binding = [resolution.non_glob_binding, resolution.glob_binding] - .into_iter() - .find_map(|binding| if binding == ignore_binding { None } else { binding }); + let binding = resolution.non_glob_binding.filter(|b| Some(*b) != ignore_binding); if let Some(finalize) = finalize { return self.get_mut().finalize_module_binding( @@ -915,19 +1008,67 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { ); } - let check_usable = |this: CmResolver<'r, 'ra, 'tcx>, binding: NameBinding<'ra>| { - let usable = this.is_accessible_from(binding.vis, parent_scope.module); - if usable { Ok(binding) } else { Err((Determined, Weak::No)) } - }; - // Items and single imports are not shadowable, if we have one, then it's determined. - if let Some(binding) = binding - && !binding.is_glob_import() - { - return check_usable(self, binding); + if let Some(binding) = binding { + let accessible = self.is_accessible_from(binding.vis, parent_scope.module); + return if accessible { Ok(binding) } else { Err(ControlFlow::Break(Determined)) }; + } + + // Check if one of single imports can still define the name, block if it can. + if self.reborrow().single_import_can_define_name( + &resolution, + None, + ns, + ignore_import, + ignore_binding, + parent_scope, + ) { + return Err(ControlFlow::Break(Undetermined)); } - // --- From now on we either have a glob resolution or no resolution. --- + // Check if one of unexpanded macros can still define the name. + if !module.unexpanded_invocations.borrow().is_empty() { + return Err(ControlFlow::Continue(Undetermined)); + } + + // No resolution and no one else can define the name - determinate error. + Err(ControlFlow::Continue(Determined)) + } + + /// Attempts to resolve `ident` in namespace `ns` of glob bindings in `module`. 
+ fn resolve_ident_in_module_globs_unadjusted<'r>( + mut self: CmResolver<'r, 'ra, 'tcx>, + module: Module<'ra>, + ident: Ident, + ns: Namespace, + parent_scope: &ParentScope<'ra>, + shadowing: Shadowing, + finalize: Option, + ignore_binding: Option>, + ignore_import: Option>, + ) -> Result, ControlFlow> { + let key = BindingKey::new(ident, ns); + // `try_borrow_mut` is required to ensure exclusive access, even if the resulting binding + // doesn't need to be mutable. It will fail when there is a cycle of imports, and without + // the exclusive access infinite recursion will crash the compiler with stack overflow. + let resolution = &*self + .resolution_or_default(module, key) + .try_borrow_mut_unchecked() + .map_err(|_| ControlFlow::Continue(Determined))?; + + let binding = resolution.glob_binding.filter(|b| Some(*b) != ignore_binding); + + if let Some(finalize) = finalize { + return self.get_mut().finalize_module_binding( + ident, + binding, + if resolution.non_glob_binding.is_some() { resolution.glob_binding } else { None }, + parent_scope, + module, + finalize, + shadowing, + ); + } // Check if one of single imports can still define the name, // if it can then our result is not determined and can be invalidated. @@ -939,7 +1080,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { ignore_binding, parent_scope, ) { - return Err((Undetermined, Weak::No)); + return Err(ControlFlow::Break(Undetermined)); } // So we have a resolution that's from a glob import. This resolution is determined @@ -955,25 +1096,23 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { // and prohibit access to macro-expanded `macro_export` macros instead (unless restricted // shadowing is enabled, see `macro_expanded_macro_export_errors`). 
if let Some(binding) = binding { - if binding.determined() || ns == MacroNS || shadowing == Shadowing::Restricted { - return check_usable(self, binding); + return if binding.determined() || ns == MacroNS || shadowing == Shadowing::Restricted { + let accessible = self.is_accessible_from(binding.vis, parent_scope.module); + if accessible { Ok(binding) } else { Err(ControlFlow::Break(Determined)) } } else { - return Err((Undetermined, Weak::No)); - } + Err(ControlFlow::Break(Undetermined)) + }; } - // --- From now on we have no resolution. --- - // Now we are in situation when new item/import can appear only from a glob or a macro // expansion. With restricted shadowing names from globs and macro expansions cannot // shadow names from outer scopes, so we can freely fallback from module search to search // in outer scopes. For `resolve_ident_in_scope_set` to continue search in outer - // scopes we return `Undetermined` with `Weak::Yes`. - + // scopes we return `Undetermined` with `ControlFlow::Continue`. // Check if one of unexpanded macros can still define the name, // if it can then our "no resolution" result is not determined and can be invalidated. 
if !module.unexpanded_invocations.borrow().is_empty() { - return Err((Undetermined, Weak::Yes)); + return Err(ControlFlow::Continue(Undetermined)); } // Check if one of glob imports can still define the name, @@ -988,7 +1127,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { let module = match glob_import.imported_module.get() { Some(ModuleOrUniformRoot::Module(module)) => module, Some(_) => continue, - None => return Err((Undetermined, Weak::Yes)), + None => return Err(ControlFlow::Continue(Undetermined)), }; let tmp_parent_scope; let (mut adjusted_parent_scope, mut ident) = @@ -1003,7 +1142,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { None => continue, }; let result = self.reborrow().resolve_ident_in_module_unadjusted( - ModuleOrUniformRoot::Module(module), + module, ident, ns, adjusted_parent_scope, @@ -1014,18 +1153,21 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { ); match result { - Err((Determined, _)) => continue, + Err(ControlFlow::Break(Determined) | ControlFlow::Continue(Determined)) => continue, Ok(binding) if !self.is_accessible_from(binding.vis, glob_import.parent_scope.module) => { continue; } - Ok(_) | Err((Undetermined, _)) => return Err((Undetermined, Weak::Yes)), + Ok(_) + | Err(ControlFlow::Break(Undetermined) | ControlFlow::Continue(Undetermined)) => { + return Err(ControlFlow::Continue(Undetermined)); + } } } // No resolution and no one else can define the name - determinate error. - Err((Determined, Weak::No)) + Err(ControlFlow::Continue(Determined)) } fn finalize_module_binding( @@ -1037,11 +1179,11 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { module: Module<'ra>, finalize: Finalize, shadowing: Shadowing, - ) -> Result, (Determinacy, Weak)> { + ) -> Result, ControlFlow> { let Finalize { path_span, report_private, used, root_span, .. 
} = finalize; let Some(binding) = binding else { - return Err((Determined, Weak::No)); + return Err(ControlFlow::Continue(Determined)); }; if !self.is_accessible_from(binding.vis, parent_scope.module) { @@ -1056,7 +1198,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { single_nested: path_span != root_span, }); } else { - return Err((Determined, Weak::No)); + return Err(ControlFlow::Break(Determined)); } } @@ -1169,15 +1311,13 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { ignore_binding, ignore_import, ) { - Err((Determined, _)) => continue, + Err(Determined) => continue, Ok(binding) if !self.is_accessible_from(binding.vis, single_import.parent_scope.module) => { continue; } - Ok(_) | Err((Undetermined, _)) => { - return true; - } + Ok(_) | Err(Undetermined) => return true, } } @@ -1694,17 +1834,15 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { } let binding = if let Some(module) = module { - self.reborrow() - .resolve_ident_in_module( - module, - ident, - ns, - parent_scope, - finalize, - ignore_binding, - ignore_import, - ) - .map_err(|(determinacy, _)| determinacy) + self.reborrow().resolve_ident_in_module( + module, + ident, + ns, + parent_scope, + finalize, + ignore_binding, + ignore_import, + ) } else if let Some(ribs) = ribs && let Some(TypeNS | ValueNS) = opt_ns { diff --git a/compiler/rustc_resolve/src/lib.rs b/compiler/rustc_resolve/src/lib.rs index 7ce70ee9af8d4..16eeb9229c977 100644 --- a/compiler/rustc_resolve/src/lib.rs +++ b/compiler/rustc_resolve/src/lib.rs @@ -13,6 +13,7 @@ #![feature(arbitrary_self_types)] #![feature(assert_matches)] #![feature(box_patterns)] +#![feature(control_flow_into_value)] #![feature(decl_macro)] #![feature(default_field_values)] #![feature(if_let_guard)] @@ -26,6 +27,7 @@ use std::cell::Ref; use std::collections::BTreeSet; use std::fmt::{self}; +use std::ops::ControlFlow; use std::sync::Arc; use diagnostics::{ImportSuggestion, LabelSuggestion, Suggestion}; @@ -98,12 +100,6 @@ use crate::ref_mut::{CmCell, CmRefCell}; 
rustc_fluent_macro::fluent_messages! { "../messages.ftl" } -#[derive(Debug)] -enum Weak { - Yes, - No, -} - #[derive(Copy, Clone, PartialEq, Debug)] enum Determinacy { Determined, @@ -1917,7 +1913,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { | Scope::BuiltinTypes => {} _ => unreachable!(), } - None::<()> + ControlFlow::<()>::Continue(()) }); found_traits @@ -2489,6 +2485,7 @@ enum Stage { Late, } +/// Invariant: if `Finalize` is used, expansion and import resolution must be complete. #[derive(Copy, Clone, Debug)] struct Finalize { /// Node ID for linting. diff --git a/library/std/src/thread/builder.rs b/library/std/src/thread/builder.rs new file mode 100644 index 0000000000000..f4abe074ab9d7 --- /dev/null +++ b/library/std/src/thread/builder.rs @@ -0,0 +1,267 @@ +use super::join_handle::JoinHandle; +use super::lifecycle::spawn_unchecked; +use crate::io; + +/// Thread factory, which can be used in order to configure the properties of +/// a new thread. +/// +/// Methods can be chained on it in order to configure it. +/// +/// The two configurations available are: +/// +/// - [`name`]: specifies an [associated name for the thread][naming-threads] +/// - [`stack_size`]: specifies the [desired stack size for the thread][stack-size] +/// +/// The [`spawn`] method will take ownership of the builder and create an +/// [`io::Result`] to the thread handle with the given configuration. +/// +/// The [`thread::spawn`] free function uses a `Builder` with default +/// configuration and [`unwrap`]s its return value. +/// +/// You may want to use [`spawn`] instead of [`thread::spawn`], when you want +/// to recover from a failure to launch a thread, indeed the free function will +/// panic where the `Builder` method will return a [`io::Result`]. 
+/// +/// # Examples +/// +/// ``` +/// use std::thread; +/// +/// let builder = thread::Builder::new(); +/// +/// let handler = builder.spawn(|| { +/// // thread code +/// }).unwrap(); +/// +/// handler.join().unwrap(); +/// ``` +/// +/// [`stack_size`]: Builder::stack_size +/// [`name`]: Builder::name +/// [`spawn`]: Builder::spawn +/// [`thread::spawn`]: super::spawn +/// [`io::Result`]: crate::io::Result +/// [`unwrap`]: crate::result::Result::unwrap +/// [naming-threads]: ./index.html#naming-threads +/// [stack-size]: ./index.html#stack-size +#[must_use = "must eventually spawn the thread"] +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Debug)] +pub struct Builder { + /// A name for the thread-to-be, for identification in panic messages + pub(super) name: Option, + /// The size of the stack for the spawned thread in bytes + pub(super) stack_size: Option, + /// Skip running and inheriting the thread spawn hooks + pub(super) no_hooks: bool, +} + +impl Builder { + /// Generates the base configuration for spawning a thread, from which + /// configuration methods can be chained. + /// + /// # Examples + /// + /// ``` + /// use std::thread; + /// + /// let builder = thread::Builder::new() + /// .name("foo".into()) + /// .stack_size(32 * 1024); + /// + /// let handler = builder.spawn(|| { + /// // thread code + /// }).unwrap(); + /// + /// handler.join().unwrap(); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + pub fn new() -> Builder { + Builder { name: None, stack_size: None, no_hooks: false } + } + + /// Names the thread-to-be. Currently the name is used for identification + /// only in panic messages. + /// + /// The name must not contain null bytes (`\0`). + /// + /// For more information about named threads, see + /// [this module-level documentation][naming-threads]. 
+ /// + /// # Examples + /// + /// ``` + /// use std::thread; + /// + /// let builder = thread::Builder::new() + /// .name("foo".into()); + /// + /// let handler = builder.spawn(|| { + /// assert_eq!(thread::current().name(), Some("foo")) + /// }).unwrap(); + /// + /// handler.join().unwrap(); + /// ``` + /// + /// [naming-threads]: ./index.html#naming-threads + #[stable(feature = "rust1", since = "1.0.0")] + pub fn name(mut self, name: String) -> Builder { + self.name = Some(name); + self + } + + /// Sets the size of the stack (in bytes) for the new thread. + /// + /// The actual stack size may be greater than this value if + /// the platform specifies a minimal stack size. + /// + /// For more information about the stack size for threads, see + /// [this module-level documentation][stack-size]. + /// + /// # Examples + /// + /// ``` + /// use std::thread; + /// + /// let builder = thread::Builder::new().stack_size(32 * 1024); + /// ``` + /// + /// [stack-size]: ./index.html#stack-size + #[stable(feature = "rust1", since = "1.0.0")] + pub fn stack_size(mut self, size: usize) -> Builder { + self.stack_size = Some(size); + self + } + + /// Disables running and inheriting [spawn hooks]. + /// + /// Use this if the parent thread is in no way relevant for the child thread. + /// For example, when lazily spawning threads for a thread pool. + /// + /// [spawn hooks]: super::add_spawn_hook + #[unstable(feature = "thread_spawn_hook", issue = "132951")] + pub fn no_hooks(mut self) -> Builder { + self.no_hooks = true; + self + } + + /// Spawns a new thread by taking ownership of the `Builder`, and returns an + /// [`io::Result`] to its [`JoinHandle`]. + /// + /// The spawned thread may outlive the caller (unless the caller thread + /// is the main thread; the whole process is terminated when the main + /// thread finishes). The join handle can be used to block on + /// termination of the spawned thread, including recovering its panics. 
+ /// + /// For a more complete documentation see [`thread::spawn`]. + /// + /// # Errors + /// + /// Unlike the [`spawn`] free function, this method yields an + /// [`io::Result`] to capture any failure to create the thread at + /// the OS level. + /// + /// [`io::Result`]: crate::io::Result + /// + /// # Panics + /// + /// Panics if a thread name was set and it contained null bytes. + /// + /// # Examples + /// + /// ``` + /// use std::thread; + /// + /// let builder = thread::Builder::new(); + /// + /// let handler = builder.spawn(|| { + /// // thread code + /// }).unwrap(); + /// + /// handler.join().unwrap(); + /// ``` + /// + /// [`thread::spawn`]: super::spawn + /// [`spawn`]: super::spawn + #[stable(feature = "rust1", since = "1.0.0")] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub fn spawn(self, f: F) -> io::Result> + where + F: FnOnce() -> T, + F: Send + 'static, + T: Send + 'static, + { + unsafe { self.spawn_unchecked(f) } + } + + /// Spawns a new thread without any lifetime restrictions by taking ownership + /// of the `Builder`, and returns an [`io::Result`] to its [`JoinHandle`]. + /// + /// The spawned thread may outlive the caller (unless the caller thread + /// is the main thread; the whole process is terminated when the main + /// thread finishes). The join handle can be used to block on + /// termination of the spawned thread, including recovering its panics. + /// + /// This method is identical to [`thread::Builder::spawn`][`Builder::spawn`], + /// except for the relaxed lifetime bounds, which render it unsafe. + /// For a more complete documentation see [`thread::spawn`]. + /// + /// # Errors + /// + /// Unlike the [`spawn`] free function, this method yields an + /// [`io::Result`] to capture any failure to create the thread at + /// the OS level. + /// + /// # Panics + /// + /// Panics if a thread name was set and it contained null bytes. 
+ /// + /// # Safety + /// + /// The caller has to ensure that the spawned thread does not outlive any + /// references in the supplied thread closure and its return type. + /// This can be guaranteed in two ways: + /// + /// - ensure that [`join`][`JoinHandle::join`] is called before any referenced + /// data is dropped + /// - use only types with `'static` lifetime bounds, i.e., those with no or only + /// `'static` references (both [`thread::Builder::spawn`][`Builder::spawn`] + /// and [`thread::spawn`] enforce this property statically) + /// + /// # Examples + /// + /// ``` + /// use std::thread; + /// + /// let builder = thread::Builder::new(); + /// + /// let x = 1; + /// let thread_x = &x; + /// + /// let handler = unsafe { + /// builder.spawn_unchecked(move || { + /// println!("x = {}", *thread_x); + /// }).unwrap() + /// }; + /// + /// // caller has to ensure `join()` is called, otherwise + /// // it is possible to access freed memory if `x` gets + /// // dropped before the thread closure is executed! 
+ /// handler.join().unwrap(); + /// ``` + /// + /// [`io::Result`]: crate::io::Result + /// [`thread::spawn`]: super::spawn + /// [`spawn`]: super::spawn + #[stable(feature = "thread_spawn_unchecked", since = "1.82.0")] + #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces + pub unsafe fn spawn_unchecked(self, f: F) -> io::Result> + where + F: FnOnce() -> T, + F: Send, + T: Send, + { + let Builder { name, stack_size, no_hooks } = self; + Ok(JoinHandle(unsafe { spawn_unchecked(name, stack_size, no_hooks, None, f) }?)) + } +} diff --git a/library/std/src/thread/current.rs b/library/std/src/thread/current.rs index ea0c6c7229fe8..508e35cefe88f 100644 --- a/library/std/src/thread/current.rs +++ b/library/std/src/thread/current.rs @@ -1,6 +1,9 @@ -use super::{Thread, ThreadId, imp}; +use super::id::ThreadId; +use super::main_thread; +use super::thread::Thread; use crate::mem::ManuallyDrop; use crate::ptr; +use crate::sys::thread as imp; use crate::sys::thread_local::local_pointer; const NONE: *mut () = ptr::null_mut(); @@ -184,7 +187,7 @@ pub(crate) fn current_os_id() -> u64 { /// Gets a reference to the handle of the thread that invokes it, if the handle /// has been initialized. -pub(super) fn try_with_current(f: F) -> R +fn try_with_current(f: F) -> R where F: FnOnce(Option<&Thread>) -> R, { @@ -202,6 +205,36 @@ where } } +/// Run a function with the current thread's name. +/// +/// Modulo thread local accesses, this function is safe to call from signal +/// handlers and in similar circumstances where allocations are not possible. +pub(crate) fn with_current_name(f: F) -> R +where + F: FnOnce(Option<&str>) -> R, +{ + try_with_current(|thread| { + let name = if let Some(thread) = thread { + // If there is a current thread handle, try to use the name stored + // there. 
+ thread.name() + } else if let Some(main) = main_thread::get() + && let Some(id) = id::get() + && id == main + { + // The main thread doesn't always have a thread handle, we must + // identify it through its ID instead. The checks are ordered so + // that the current ID is only loaded if it is actually needed, + // since loading it from TLS might need multiple expensive accesses. + Some("main") + } else { + None + }; + + f(name) + }) +} + /// Gets a handle to the thread that invokes it. If the handle stored in thread- /// local storage was already destroyed, this creates a new unnamed temporary /// handle to allow thread parking in nearly all situations. diff --git a/library/std/src/thread/functions.rs b/library/std/src/thread/functions.rs new file mode 100644 index 0000000000000..a25bae1aae31e --- /dev/null +++ b/library/std/src/thread/functions.rs @@ -0,0 +1,692 @@ +//! Free functions. + +use super::builder::Builder; +use super::current::current; +use super::join_handle::JoinHandle; +use crate::mem::forget; +use crate::num::NonZero; +use crate::sys::thread as imp; +use crate::time::{Duration, Instant}; +use crate::{io, panicking}; + +/// Spawns a new thread, returning a [`JoinHandle`] for it. +/// +/// The join handle provides a [`join`] method that can be used to join the spawned +/// thread. If the spawned thread panics, [`join`] will return an [`Err`] containing +/// the argument given to [`panic!`]. +/// +/// If the join handle is dropped, the spawned thread will implicitly be *detached*. +/// In this case, the spawned thread may no longer be joined. +/// (It is the responsibility of the program to either eventually join threads it +/// creates or detach them; otherwise, a resource leak will result.) +/// +/// This function creates a thread with the default parameters of [`Builder`]. +/// To specify the new thread's stack size or the name, use [`Builder::spawn`]. 
+/// +/// As you can see in the signature of `spawn` there are two constraints on +/// both the closure given to `spawn` and its return value, let's explain them: +/// +/// - The `'static` constraint means that the closure and its return value +/// must have a lifetime of the whole program execution. The reason for this +/// is that threads can outlive the lifetime they have been created in. +/// +/// Indeed if the thread, and by extension its return value, can outlive their +/// caller, we need to make sure that they will be valid afterwards, and since +/// we *can't* know when it will return we need to have them valid as long as +/// possible, that is until the end of the program, hence the `'static` +/// lifetime. +/// - The [`Send`] constraint is because the closure will need to be passed +/// *by value* from the thread where it is spawned to the new thread. Its +/// return value will need to be passed from the new thread to the thread +/// where it is `join`ed. +/// As a reminder, the [`Send`] marker trait expresses that it is safe to be +/// passed from thread to thread. [`Sync`] expresses that it is safe to have a +/// reference be passed from thread to thread. +/// +/// # Panics +/// +/// Panics if the OS fails to create a thread; use [`Builder::spawn`] +/// to recover from such errors. +/// +/// # Examples +/// +/// Creating a thread. +/// +/// ``` +/// use std::thread; +/// +/// let handler = thread::spawn(|| { +/// // thread code +/// }); +/// +/// handler.join().unwrap(); +/// ``` +/// +/// As mentioned in the module documentation, threads are usually made to +/// communicate using [`channels`], here is how it usually looks. +/// +/// This example also shows how to use `move`, in order to give ownership +/// of values to a thread. 
+/// +/// ``` +/// use std::thread; +/// use std::sync::mpsc::channel; +/// +/// let (tx, rx) = channel(); +/// +/// let sender = thread::spawn(move || { +/// tx.send("Hello, thread".to_owned()) +/// .expect("Unable to send on channel"); +/// }); +/// +/// let receiver = thread::spawn(move || { +/// let value = rx.recv().expect("Unable to receive from channel"); +/// println!("{value}"); +/// }); +/// +/// sender.join().expect("The sender thread has panicked"); +/// receiver.join().expect("The receiver thread has panicked"); +/// ``` +/// +/// A thread can also return a value through its [`JoinHandle`], you can use +/// this to make asynchronous computations (futures might be more appropriate +/// though). +/// +/// ``` +/// use std::thread; +/// +/// let computation = thread::spawn(|| { +/// // Some expensive computation. +/// 42 +/// }); +/// +/// let result = computation.join().unwrap(); +/// println!("{result}"); +/// ``` +/// +/// # Notes +/// +/// This function has the same minimal guarantee regarding "foreign" unwinding operations (e.g. +/// an exception thrown from C++ code, or a `panic!` in Rust code compiled or linked with a +/// different runtime) as [`catch_unwind`]; namely, if the thread created with `thread::spawn` +/// unwinds all the way to the root with such an exception, one of two behaviors are possible, +/// and it is unspecified which will occur: +/// +/// * The process aborts. +/// * The process does not abort, and [`join`] will return a `Result::Err` +/// containing an opaque type. 
+/// +/// [`catch_unwind`]: ../../std/panic/fn.catch_unwind.html +/// [`channels`]: crate::sync::mpsc +/// [`join`]: JoinHandle::join +/// [`Err`]: crate::result::Result::Err +#[stable(feature = "rust1", since = "1.0.0")] +#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces +pub fn spawn(f: F) -> JoinHandle +where + F: FnOnce() -> T, + F: Send + 'static, + T: Send + 'static, +{ + Builder::new().spawn(f).expect("failed to spawn thread") +} + +/// Cooperatively gives up a timeslice to the OS scheduler. +/// +/// This calls the underlying OS scheduler's yield primitive, signaling +/// that the calling thread is willing to give up its remaining timeslice +/// so that the OS may schedule other threads on the CPU. +/// +/// A drawback of yielding in a loop is that if the OS does not have any +/// other ready threads to run on the current CPU, the thread will effectively +/// busy-wait, which wastes CPU time and energy. +/// +/// Therefore, when waiting for events of interest, a programmer's first +/// choice should be to use synchronization devices such as [`channel`]s, +/// [`Condvar`]s, [`Mutex`]es or [`join`] since these primitives are +/// implemented in a blocking manner, giving up the CPU until the event +/// of interest has occurred which avoids repeated yielding. +/// +/// `yield_now` should thus be used only rarely, mostly in situations where +/// repeated polling is required because there is no other suitable way to +/// learn when an event of interest has occurred. +/// +/// # Examples +/// +/// ``` +/// use std::thread; +/// +/// thread::yield_now(); +/// ``` +/// +/// [`channel`]: crate::sync::mpsc +/// [`join`]: JoinHandle::join +/// [`Condvar`]: crate::sync::Condvar +/// [`Mutex`]: crate::sync::Mutex +#[stable(feature = "rust1", since = "1.0.0")] +pub fn yield_now() { + imp::yield_now() +} + +/// Determines whether the current thread is unwinding because of panic. 
+/// +/// A common use of this feature is to poison shared resources when writing +/// unsafe code, by checking `panicking` when the `drop` is called. +/// +/// This is usually not needed when writing safe code, as [`Mutex`es][Mutex] +/// already poison themselves when a thread panics while holding the lock. +/// +/// This can also be used in multithreaded applications, in order to send a +/// message to other threads warning that a thread has panicked (e.g., for +/// monitoring purposes). +/// +/// # Examples +/// +/// ```should_panic +/// use std::thread; +/// +/// struct SomeStruct; +/// +/// impl Drop for SomeStruct { +/// fn drop(&mut self) { +/// if thread::panicking() { +/// println!("dropped while unwinding"); +/// } else { +/// println!("dropped while not unwinding"); +/// } +/// } +/// } +/// +/// { +/// print!("a: "); +/// let a = SomeStruct; +/// } +/// +/// { +/// print!("b: "); +/// let b = SomeStruct; +/// panic!() +/// } +/// ``` +/// +/// [Mutex]: crate::sync::Mutex +#[inline] +#[must_use] +#[stable(feature = "rust1", since = "1.0.0")] +pub fn panicking() -> bool { + panicking::panicking() +} + +/// Uses [`sleep`]. +/// +/// Puts the current thread to sleep for at least the specified amount of time. +/// +/// The thread may sleep longer than the duration specified due to scheduling +/// specifics or platform-dependent functionality. It will never sleep less. +/// +/// This function is blocking, and should not be used in `async` functions. +/// +/// # Platform-specific behavior +/// +/// On Unix platforms, the underlying syscall may be interrupted by a +/// spurious wakeup or signal handler. To ensure the sleep occurs for at least +/// the specified duration, this function may invoke that system call multiple +/// times. 
+/// +/// # Examples +/// +/// ```no_run +/// use std::thread; +/// +/// // Let's sleep for 2 seconds: +/// thread::sleep_ms(2000); +/// ``` +#[stable(feature = "rust1", since = "1.0.0")] +#[deprecated(since = "1.6.0", note = "replaced by `std::thread::sleep`")] +pub fn sleep_ms(ms: u32) { + sleep(Duration::from_millis(ms as u64)) +} + +/// Puts the current thread to sleep for at least the specified amount of time. +/// +/// The thread may sleep longer than the duration specified due to scheduling +/// specifics or platform-dependent functionality. It will never sleep less. +/// +/// This function is blocking, and should not be used in `async` functions. +/// +/// # Platform-specific behavior +/// +/// On Unix platforms, the underlying syscall may be interrupted by a +/// spurious wakeup or signal handler. To ensure the sleep occurs for at least +/// the specified duration, this function may invoke that system call multiple +/// times. +/// Platforms which do not support nanosecond precision for sleeping will +/// have `dur` rounded up to the nearest granularity of time they can sleep for. +/// +/// Currently, specifying a zero duration on Unix platforms returns immediately +/// without invoking the underlying [`nanosleep`] syscall, whereas on Windows +/// platforms the underlying [`Sleep`] syscall is always invoked. +/// If the intention is to yield the current time-slice you may want to use +/// [`yield_now`] instead. 
+/// +/// [`nanosleep`]: https://linux.die.net/man/2/nanosleep +/// [`Sleep`]: https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-sleep +/// +/// # Examples +/// +/// ```no_run +/// use std::{thread, time}; +/// +/// let ten_millis = time::Duration::from_millis(10); +/// let now = time::Instant::now(); +/// +/// thread::sleep(ten_millis); +/// +/// assert!(now.elapsed() >= ten_millis); +/// ``` +#[stable(feature = "thread_sleep", since = "1.4.0")] +pub fn sleep(dur: Duration) { + imp::sleep(dur) +} + +/// Puts the current thread to sleep until the specified deadline has passed. +/// +/// The thread may still be asleep after the deadline specified due to +/// scheduling specifics or platform-dependent functionality. It will never +/// wake before. +/// +/// This function is blocking, and should not be used in `async` functions. +/// +/// # Platform-specific behavior +/// +/// In most cases this function will call an OS specific function. Where that +/// is not supported [`sleep`] is used. Those platforms are referred to as other +/// in the table below. 
+/// +/// # Underlying System calls +/// +/// The following system calls are [currently] being used: +/// +/// | Platform | System call | +/// |-----------|----------------------------------------------------------------------| +/// | Linux | [clock_nanosleep] (Monotonic clock) | +/// | BSD except OpenBSD | [clock_nanosleep] (Monotonic Clock)] | +/// | Android | [clock_nanosleep] (Monotonic Clock)] | +/// | Solaris | [clock_nanosleep] (Monotonic Clock)] | +/// | Illumos | [clock_nanosleep] (Monotonic Clock)] | +/// | Dragonfly | [clock_nanosleep] (Monotonic Clock)] | +/// | Hurd | [clock_nanosleep] (Monotonic Clock)] | +/// | Fuchsia | [clock_nanosleep] (Monotonic Clock)] | +/// | Vxworks | [clock_nanosleep] (Monotonic Clock)] | +/// | Other | `sleep_until` uses [`sleep`] and does not issue a syscall itself | +/// +/// [currently]: crate::io#platform-specific-behavior +/// [clock_nanosleep]: https://linux.die.net/man/3/clock_nanosleep +/// +/// **Disclaimer:** These system calls might change over time. +/// +/// # Examples +/// +/// A simple game loop that limits the game to 60 frames per second. +/// +/// ```no_run +/// #![feature(thread_sleep_until)] +/// # use std::time::{Duration, Instant}; +/// # use std::thread; +/// # +/// # fn update() {} +/// # fn render() {} +/// # +/// let max_fps = 60.0; +/// let frame_time = Duration::from_secs_f32(1.0/max_fps); +/// let mut next_frame = Instant::now(); +/// loop { +/// thread::sleep_until(next_frame); +/// next_frame += frame_time; +/// update(); +/// render(); +/// } +/// ``` +/// +/// A slow API we must not call too fast and which takes a few +/// tries before succeeding. 
By using `sleep_until` the time the +/// API call takes does not influence when we retry or when we give up +/// +/// ```no_run +/// #![feature(thread_sleep_until)] +/// # use std::time::{Duration, Instant}; +/// # use std::thread; +/// # +/// # enum Status { +/// # Ready(usize), +/// # Waiting, +/// # } +/// # fn slow_web_api_call() -> Status { Status::Ready(42) } +/// # +/// # const MAX_DURATION: Duration = Duration::from_secs(10); +/// # +/// # fn try_api_call() -> Result { +/// let deadline = Instant::now() + MAX_DURATION; +/// let delay = Duration::from_millis(250); +/// let mut next_attempt = Instant::now(); +/// loop { +/// if Instant::now() > deadline { +/// break Err(()); +/// } +/// if let Status::Ready(data) = slow_web_api_call() { +/// break Ok(data); +/// } +/// +/// next_attempt = deadline.min(next_attempt + delay); +/// thread::sleep_until(next_attempt); +/// } +/// # } +/// # let _data = try_api_call(); +/// ``` +#[unstable(feature = "thread_sleep_until", issue = "113752")] +pub fn sleep_until(deadline: Instant) { + imp::sleep_until(deadline) +} + +/// Used to ensure that `park` and `park_timeout` do not unwind, as that can +/// cause undefined behavior if not handled correctly (see #102398 for context). +struct PanicGuard; + +impl Drop for PanicGuard { + fn drop(&mut self) { + rtabort!("an irrecoverable error occurred while synchronizing threads") + } +} + +/// Blocks unless or until the current thread's token is made available. +/// +/// A call to `park` does not guarantee that the thread will remain parked +/// forever, and callers should be prepared for this possibility. However, +/// it is guaranteed that this function will not panic (it may abort the +/// process if the implementation encounters some rare errors). +/// +/// # `park` and `unpark` +/// +/// Every thread is equipped with some basic low-level blocking support, via the +/// [`thread::park`][`park`] function and [`thread::Thread::unpark`][`unpark`] +/// method. 
[`park`] blocks the current thread, which can then be resumed from +/// another thread by calling the [`unpark`] method on the blocked thread's +/// handle. +/// +/// Conceptually, each [`Thread`] handle has an associated token, which is +/// initially not present: +/// +/// * The [`thread::park`][`park`] function blocks the current thread unless or +/// until the token is available for its thread handle, at which point it +/// atomically consumes the token. It may also return *spuriously*, without +/// consuming the token. [`thread::park_timeout`] does the same, but allows +/// specifying a maximum time to block the thread for. +/// +/// * The [`unpark`] method on a [`Thread`] atomically makes the token available +/// if it wasn't already. Because the token can be held by a thread even if it is currently not +/// parked, [`unpark`] followed by [`park`] will result in the second call returning immediately. +/// However, note that to rely on this guarantee, you need to make sure that your `unpark` happens +/// after all `park` that may be done by other data structures! +/// +/// The API is typically used by acquiring a handle to the current thread, placing that handle in a +/// shared data structure so that other threads can find it, and then `park`ing in a loop. When some +/// desired condition is met, another thread calls [`unpark`] on the handle. The last bullet point +/// above guarantees that even if the `unpark` occurs before the thread is finished `park`ing, it +/// will be woken up properly. +/// +/// Note that the coordination via the shared data structure is crucial: If you `unpark` a thread +/// without first establishing that it is about to be `park`ing within your code, that `unpark` may +/// get consumed by a *different* `park` in the same thread, leading to a deadlock. 
This also means +/// you must not call unknown code between setting up for parking and calling `park`; for instance, +/// if you invoke `println!`, that may itself call `park` and thus consume your `unpark` and cause a +/// deadlock. +/// +/// The motivation for this design is twofold: +/// +/// * It avoids the need to allocate mutexes and condvars when building new +/// synchronization primitives; the threads already provide basic +/// blocking/signaling. +/// +/// * It can be implemented very efficiently on many platforms. +/// +/// # Memory Ordering +/// +/// Calls to `unpark` _synchronize-with_ calls to `park`, meaning that memory +/// operations performed before a call to `unpark` are made visible to the thread that +/// consumes the token and returns from `park`. Note that all `park` and `unpark` +/// operations for a given thread form a total order and _all_ prior `unpark` operations +/// synchronize-with `park`. +/// +/// In atomic ordering terms, `unpark` performs a `Release` operation and `park` +/// performs the corresponding `Acquire` operation. Calls to `unpark` for the same +/// thread form a [release sequence]. +/// +/// Note that being unblocked does not imply a call was made to `unpark`, because +/// wakeups can also be spurious. For example, a valid, but inefficient, +/// implementation could have `park` and `unpark` return immediately without doing anything, +/// making *all* wakeups spurious. +/// +/// # Examples +/// +/// ``` +/// use std::thread; +/// use std::sync::atomic::{Ordering, AtomicBool}; +/// use std::time::Duration; +/// +/// static QUEUED: AtomicBool = AtomicBool::new(false); +/// static FLAG: AtomicBool = AtomicBool::new(false); +/// +/// let parked_thread = thread::spawn(move || { +/// println!("Thread spawned"); +/// // Signal that we are going to `park`. Between this store and our `park`, there may +/// // be no other `park`, or else that `park` could consume our `unpark` token! 
+/// QUEUED.store(true, Ordering::Release); +/// // We want to wait until the flag is set. We *could* just spin, but using +/// // park/unpark is more efficient. +/// while !FLAG.load(Ordering::Acquire) { +/// // We can *not* use `println!` here since that could use thread parking internally. +/// thread::park(); +/// // We *could* get here spuriously, i.e., way before the 10ms below are over! +/// // But that is no problem, we are in a loop until the flag is set anyway. +/// } +/// println!("Flag received"); +/// }); +/// +/// // Let some time pass for the thread to be spawned. +/// thread::sleep(Duration::from_millis(10)); +/// +/// // Ensure the thread is about to park. +/// // This is crucial! It guarantees that the `unpark` below is not consumed +/// // by some other code in the parked thread (e.g. inside `println!`). +/// while !QUEUED.load(Ordering::Acquire) { +/// // Spinning is of course inefficient; in practice, this would more likely be +/// // a dequeue where we have no work to do if there's nobody queued. +/// std::hint::spin_loop(); +/// } +/// +/// // Set the flag, and let the thread wake up. +/// // There is no race condition here: if `unpark` +/// // happens first, `park` will return immediately. +/// // There is also no other `park` that could consume this token, +/// // since we waited until the other thread got queued. +/// // Hence there is no risk of a deadlock. +/// FLAG.store(true, Ordering::Release); +/// println!("Unpark the thread"); +/// parked_thread.thread().unpark(); +/// +/// parked_thread.join().unwrap(); +/// ``` +/// +/// [`Thread`]: super::Thread +/// [`unpark`]: super::Thread::unpark +/// [`thread::park_timeout`]: park_timeout +/// [release sequence]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release_sequence +#[stable(feature = "rust1", since = "1.0.0")] +pub fn park() { + let guard = PanicGuard; + // SAFETY: `park` is called on the parker owned by this thread. 
+ unsafe { + current().park(); + } + // No panic occurred, do not abort. + forget(guard); +} + +/// Uses [`park_timeout`]. +/// +/// Blocks unless or until the current thread's token is made available or +/// the specified duration has been reached (may wake spuriously). +/// +/// The semantics of this function are equivalent to [`park`] except +/// that the thread will be blocked for roughly no longer than `dur`. This +/// method should not be used for precise timing due to anomalies such as +/// preemption or platform differences that might not cause the maximum +/// amount of time waited to be precisely `ms` long. +/// +/// See the [park documentation][`park`] for more detail. +#[stable(feature = "rust1", since = "1.0.0")] +#[deprecated(since = "1.6.0", note = "replaced by `std::thread::park_timeout`")] +pub fn park_timeout_ms(ms: u32) { + park_timeout(Duration::from_millis(ms as u64)) +} + +/// Blocks unless or until the current thread's token is made available or +/// the specified duration has been reached (may wake spuriously). +/// +/// The semantics of this function are equivalent to [`park`][park] except +/// that the thread will be blocked for roughly no longer than `dur`. This +/// method should not be used for precise timing due to anomalies such as +/// preemption or platform differences that might not cause the maximum +/// amount of time waited to be precisely `dur` long. +/// +/// See the [park documentation][park] for more details. +/// +/// # Platform-specific behavior +/// +/// Platforms which do not support nanosecond precision for sleeping will have +/// `dur` rounded up to the nearest granularity of time they can sleep for. 
+/// +/// # Examples +/// +/// Waiting for the complete expiration of the timeout: +/// +/// ```rust,no_run +/// use std::thread::park_timeout; +/// use std::time::{Instant, Duration}; +/// +/// let timeout = Duration::from_secs(2); +/// let beginning_park = Instant::now(); +/// +/// let mut timeout_remaining = timeout; +/// loop { +/// park_timeout(timeout_remaining); +/// let elapsed = beginning_park.elapsed(); +/// if elapsed >= timeout { +/// break; +/// } +/// println!("restarting park_timeout after {elapsed:?}"); +/// timeout_remaining = timeout - elapsed; +/// } +/// ``` +#[stable(feature = "park_timeout", since = "1.4.0")] +pub fn park_timeout(dur: Duration) { + let guard = PanicGuard; + // SAFETY: park_timeout is called on a handle owned by this thread. + unsafe { + current().park_timeout(dur); + } + // No panic occurred, do not abort. + forget(guard); +} + +/// Returns an estimate of the default amount of parallelism a program should use. +/// +/// Parallelism is a resource. A given machine provides a certain capacity for +/// parallelism, i.e., a bound on the number of computations it can perform +/// simultaneously. This number often corresponds to the amount of CPUs a +/// computer has, but it may diverge in various cases. +/// +/// Host environments such as VMs or container orchestrators may want to +/// restrict the amount of parallelism made available to programs in them. This +/// is often done to limit the potential impact of (unintentionally) +/// resource-intensive programs on other programs running on the same machine. +/// +/// # Limitations +/// +/// The purpose of this API is to provide an easy and portable way to query +/// the default amount of parallelism the program should use. 
Among other things it +/// does not expose information on NUMA regions, does not account for +/// differences in (co)processor capabilities or current system load, +/// and will not modify the program's global state in order to more accurately +/// query the amount of available parallelism. +/// +/// Where both fixed steady-state and burst limits are available the steady-state +/// capacity will be used to ensure more predictable latencies. +/// +/// Resource limits can be changed during the runtime of a program, therefore the value is +/// not cached and instead recomputed every time this function is called. It should not be +/// called from hot code. +/// +/// The value returned by this function should be considered a simplified +/// approximation of the actual amount of parallelism available at any given +/// time. To get a more detailed or precise overview of the amount of +/// parallelism available to the program, you may wish to use +/// platform-specific APIs as well. The following platform limitations currently +/// apply to `available_parallelism`: +/// +/// On Windows: +/// - It may undercount the amount of parallelism available on systems with more +/// than 64 logical CPUs. However, programs typically need specific support to +/// take advantage of more than 64 logical CPUs, and in the absence of such +/// support, the number returned by this function accurately reflects the +/// number of logical CPUs the program can use by default. +/// - It may overcount the amount of parallelism available on systems limited by +/// process-wide affinity masks, or job object limitations. +/// +/// On Linux: +/// - It may overcount the amount of parallelism available when limited by a +/// process-wide affinity mask or cgroup quotas and `sched_getaffinity()` or cgroup fs can't be +/// queried, e.g. due to sandboxing. +/// - It may undercount the amount of parallelism if the current thread's affinity mask +/// does not reflect the process' cpuset, e.g. 
due to pinned threads. +/// - If the process is in a cgroup v1 cpu controller, this may need to +/// scan mountpoints to find the corresponding cgroup v1 controller, +/// which may take time on systems with large numbers of mountpoints. +/// (This does not apply to cgroup v2, or to processes not in a +/// cgroup.) +/// - It does not attempt to take `ulimit` into account. If there is a limit set on the number of +/// threads, `available_parallelism` cannot know how much of that limit a Rust program should +/// take, or know in a reliable and race-free way how much of that limit is already taken. +/// +/// On all targets: +/// - It may overcount the amount of parallelism available when running in a VM +/// with CPU usage limits (e.g. an overcommitted host). +/// +/// # Errors +/// +/// This function will, but is not limited to, return errors in the following +/// cases: +/// +/// - If the amount of parallelism is not known for the target platform. +/// - If the program lacks permission to query the amount of parallelism made +/// available to it. +/// +/// # Examples +/// +/// ``` +/// # #![allow(dead_code)] +/// use std::{io, thread}; +/// +/// fn main() -> io::Result<()> { +/// let count = thread::available_parallelism()?.get(); +/// assert!(count >= 1_usize); +/// Ok(()) +/// } +/// ``` +#[doc(alias = "available_concurrency")] // Alias for a previous name we gave this API on unstable. +#[doc(alias = "hardware_concurrency")] // Alias for C++ `std::thread::hardware_concurrency`. +#[doc(alias = "num_cpus")] // Alias for a popular ecosystem crate which provides similar functionality. 
+#[stable(feature = "available_parallelism", since = "1.59.0")] +pub fn available_parallelism() -> io::Result<NonZero<usize>> { + imp::available_parallelism() +} diff --git a/library/std/src/thread/id.rs b/library/std/src/thread/id.rs new file mode 100644 index 0000000000000..ba7024327881b --- /dev/null +++ b/library/std/src/thread/id.rs @@ -0,0 +1,117 @@ +use crate::num::NonZero; +use crate::sync::atomic::{Atomic, Ordering}; + +/// A unique identifier for a running thread. +/// +/// A `ThreadId` is an opaque object that uniquely identifies each thread +/// created during the lifetime of a process. `ThreadId`s are guaranteed not to +/// be reused, even when a thread terminates. `ThreadId`s are under the control +/// of Rust's standard library and there may not be any relationship between +/// `ThreadId` and the underlying platform's notion of a thread identifier -- +/// the two concepts cannot, therefore, be used interchangeably. A `ThreadId` +/// can be retrieved from the [`id`] method on a [`Thread`]. +/// +/// # Examples +/// +/// ``` +/// use std::thread; +/// +/// let other_thread = thread::spawn(|| { +/// thread::current().id() +/// }); +/// +/// let other_thread_id = other_thread.join().unwrap(); +/// assert!(thread::current().id() != other_thread_id); +/// ``` +/// +/// [`Thread`]: super::Thread +/// [`id`]: super::Thread::id +#[stable(feature = "thread_id", since = "1.19.0")] +#[derive(Eq, PartialEq, Clone, Copy, Hash, Debug)] +pub struct ThreadId(NonZero<u64>); + +impl ThreadId { + // Generate a new unique thread ID. + pub(crate) fn new() -> ThreadId { + #[cold] + fn exhausted() -> ! { + panic!("failed to generate unique thread ID: bitspace exhausted") + } + + cfg_select! 
{ + target_has_atomic = "64" => { + use crate::sync::atomic::AtomicU64; + + static COUNTER: Atomic<u64> = AtomicU64::new(0); + + let mut last = COUNTER.load(Ordering::Relaxed); + loop { + let Some(id) = last.checked_add(1) else { + exhausted(); + }; + + match COUNTER.compare_exchange_weak(last, id, Ordering::Relaxed, Ordering::Relaxed) { + Ok(_) => return ThreadId(NonZero::new(id).unwrap()), + Err(id) => last = id, + } + } + } + _ => { + use crate::cell::SyncUnsafeCell; + use crate::hint::spin_loop; + use crate::sync::atomic::AtomicBool; + use crate::thread::yield_now; + + // If we don't have a 64-bit atomic we use a small spinlock. We don't use Mutex + // here as we might be trying to get the current thread id in the global allocator, + // and on some platforms Mutex requires allocation. + static COUNTER_LOCKED: Atomic<bool> = AtomicBool::new(false); + static COUNTER: SyncUnsafeCell<u64> = SyncUnsafeCell::new(0); + + // Acquire lock. + let mut spin = 0; + while COUNTER_LOCKED.compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed).is_err() { + if spin <= 3 { + for _ in 0..(1 << spin) { + spin_loop(); + } + } else { + yield_now(); + } + spin += 1; + } + + // SAFETY: we have an exclusive lock on the counter. + unsafe { + if let Some(id) = (*COUNTER.get()).checked_add(1) { + *COUNTER.get() = id; + COUNTER_LOCKED.store(false, Ordering::Release); + ThreadId(NonZero::new(id).unwrap()) + } else { + COUNTER_LOCKED.store(false, Ordering::Release); + exhausted() + } + } + } + } + } + + #[cfg(any(not(target_thread_local), target_has_atomic = "64"))] + pub(super) fn from_u64(v: u64) -> Option<ThreadId> { + NonZero::new(v).map(ThreadId) + } + + /// This returns a numeric identifier for the thread identified by this + /// `ThreadId`. + /// + /// As noted in the documentation for the type itself, it is essentially an + /// opaque ID, but is guaranteed to be unique for each thread. The returned + /// value is entirely opaque -- only equality testing is stable. 
Note that + /// it is not guaranteed which values new threads will return, and this may + /// change across Rust versions. + #[must_use] + #[unstable(feature = "thread_id_value", issue = "67939")] + pub fn as_u64(&self) -> NonZero { + self.0 + } +} diff --git a/library/std/src/thread/join_handle.rs b/library/std/src/thread/join_handle.rs new file mode 100644 index 0000000000000..8714ceeb4f467 --- /dev/null +++ b/library/std/src/thread/join_handle.rs @@ -0,0 +1,185 @@ +use super::Result; +use super::lifecycle::JoinInner; +use super::thread::Thread; +use crate::fmt; +use crate::sys::thread as imp; +use crate::sys_common::{AsInner, IntoInner}; + +/// An owned permission to join on a thread (block on its termination). +/// +/// A `JoinHandle` *detaches* the associated thread when it is dropped, which +/// means that there is no longer any handle to the thread and no way to `join` +/// on it. +/// +/// Due to platform restrictions, it is not possible to [`Clone`] this +/// handle: the ability to join a thread is a uniquely-owned permission. +/// +/// This `struct` is created by the [`thread::spawn`] function and the +/// [`thread::Builder::spawn`] method. +/// +/// # Examples +/// +/// Creation from [`thread::spawn`]: +/// +/// ``` +/// use std::thread; +/// +/// let join_handle: thread::JoinHandle<_> = thread::spawn(|| { +/// // some work here +/// }); +/// ``` +/// +/// Creation from [`thread::Builder::spawn`]: +/// +/// ``` +/// use std::thread; +/// +/// let builder = thread::Builder::new(); +/// +/// let join_handle: thread::JoinHandle<_> = builder.spawn(|| { +/// // some work here +/// }).unwrap(); +/// ``` +/// +/// A thread being detached and outliving the thread that spawned it: +/// +/// ```no_run +/// use std::thread; +/// use std::time::Duration; +/// +/// let original_thread = thread::spawn(|| { +/// let _detached_thread = thread::spawn(|| { +/// // Here we sleep to make sure that the first thread returns before. 
+/// thread::sleep(Duration::from_millis(10)); +/// // This will be called, even though the JoinHandle is dropped. +/// println!("♫ Still alive ♫"); +/// }); +/// }); +/// +/// original_thread.join().expect("The thread being joined has panicked"); +/// println!("Original thread is joined."); +/// +/// // We make sure that the new thread has time to run, before the main +/// // thread returns. +/// +/// thread::sleep(Duration::from_millis(1000)); +/// ``` +/// +/// [`thread::Builder::spawn`]: super::Builder::spawn +/// [`thread::spawn`]: super::spawn +#[stable(feature = "rust1", since = "1.0.0")] +#[cfg_attr(target_os = "teeos", must_use)] +pub struct JoinHandle(pub(super) JoinInner<'static, T>); + +#[stable(feature = "joinhandle_impl_send_sync", since = "1.29.0")] +unsafe impl Send for JoinHandle {} +#[stable(feature = "joinhandle_impl_send_sync", since = "1.29.0")] +unsafe impl Sync for JoinHandle {} + +impl JoinHandle { + /// Extracts a handle to the underlying thread. + /// + /// # Examples + /// + /// ``` + /// use std::thread; + /// + /// let builder = thread::Builder::new(); + /// + /// let join_handle: thread::JoinHandle<_> = builder.spawn(|| { + /// // some work here + /// }).unwrap(); + /// + /// let thread = join_handle.thread(); + /// println!("thread id: {:?}", thread.id()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[must_use] + pub fn thread(&self) -> &Thread { + self.0.thread() + } + + /// Waits for the associated thread to finish. + /// + /// This function will return immediately if the associated thread has already finished. + /// + /// In terms of [atomic memory orderings], the completion of the associated + /// thread synchronizes with this function returning. In other words, all + /// operations performed by that thread [happen + /// before](https://doc.rust-lang.org/nomicon/atomics.html#data-accesses) all + /// operations that happen after `join` returns. 
+ /// + /// If the associated thread panics, [`Err`] is returned with the parameter given + /// to [`panic!`] (though see the Notes below). + /// + /// [`Err`]: crate::result::Result::Err + /// [atomic memory orderings]: crate::sync::atomic + /// + /// # Panics + /// + /// This function may panic on some platforms if a thread attempts to join + /// itself or otherwise may create a deadlock with joining threads. + /// + /// # Examples + /// + /// ``` + /// use std::thread; + /// + /// let builder = thread::Builder::new(); + /// + /// let join_handle: thread::JoinHandle<_> = builder.spawn(|| { + /// // some work here + /// }).unwrap(); + /// join_handle.join().expect("Couldn't join on the associated thread"); + /// ``` + /// + /// # Notes + /// + /// If a "foreign" unwinding operation (e.g. an exception thrown from C++ + /// code, or a `panic!` in Rust code compiled or linked with a different + /// runtime) unwinds all the way to the thread root, the process may be + /// aborted; see the Notes on [`thread::spawn`]. If the process is not + /// aborted, this function will return a `Result::Err` containing an opaque + /// type. + /// + /// [`catch_unwind`]: ../../std/panic/fn.catch_unwind.html + /// [`thread::spawn`]: super::spawn + #[stable(feature = "rust1", since = "1.0.0")] + pub fn join(self) -> Result { + self.0.join() + } + + /// Checks if the associated thread has finished running its main function. + /// + /// `is_finished` supports implementing a non-blocking join operation, by checking + /// `is_finished`, and calling `join` if it returns `true`. This function does not block. To + /// block while waiting on the thread to finish, use [`join`][Self::join]. + /// + /// This might return `true` for a brief moment after the thread's main + /// function has returned, but before the thread itself has stopped running. 
+ /// However, once this returns `true`, [`join`][Self::join] can be expected + /// to return quickly, without blocking for any significant amount of time. + #[stable(feature = "thread_is_running", since = "1.61.0")] + pub fn is_finished(&self) -> bool { + self.0.is_finished() + } +} + +impl AsInner for JoinHandle { + fn as_inner(&self) -> &imp::Thread { + self.0.as_inner() + } +} + +impl IntoInner for JoinHandle { + fn into_inner(self) -> imp::Thread { + self.0.into_inner() + } +} + +#[stable(feature = "std_debug", since = "1.16.0")] +impl fmt::Debug for JoinHandle { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("JoinHandle").finish_non_exhaustive() + } +} diff --git a/library/std/src/thread/lifecycle.rs b/library/std/src/thread/lifecycle.rs new file mode 100644 index 0000000000000..119322b909b52 --- /dev/null +++ b/library/std/src/thread/lifecycle.rs @@ -0,0 +1,261 @@ +//! The inner logic for thread spawning and joining. + +use super::current::set_current; +use super::id::ThreadId; +use super::scoped::ScopeData; +use super::thread::Thread; +use super::{Result, spawnhook}; +use crate::cell::UnsafeCell; +use crate::marker::PhantomData; +use crate::mem::{ManuallyDrop, MaybeUninit}; +use crate::sync::Arc; +use crate::sync::atomic::{Atomic, AtomicUsize, Ordering}; +use crate::sys::thread as imp; +use crate::sys_common::{AsInner, IntoInner}; +use crate::{env, io, panic}; + +#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces +pub(super) unsafe fn spawn_unchecked<'scope, F, T>( + name: Option, + stack_size: Option, + no_hooks: bool, + scope_data: Option>, + f: F, +) -> io::Result> +where + F: FnOnce() -> T, + F: Send, + T: Send, +{ + let stack_size = stack_size.unwrap_or_else(|| { + static MIN: Atomic = AtomicUsize::new(0); + + match MIN.load(Ordering::Relaxed) { + 0 => {} + n => return n - 1, + } + + let amt = env::var_os("RUST_MIN_STACK") + .and_then(|s| s.to_str().and_then(|s| s.parse().ok())) + 
.unwrap_or(imp::DEFAULT_MIN_STACK_SIZE); + + // 0 is our sentinel value, so ensure that we'll never see 0 after + // initialization has run + MIN.store(amt + 1, Ordering::Relaxed); + amt + }); + + let id = ThreadId::new(); + let thread = Thread::new(id, name); + + let hooks = if no_hooks { + spawnhook::ChildSpawnHooks::default() + } else { + spawnhook::run_spawn_hooks(&thread) + }; + + let my_packet: Arc> = + Arc::new(Packet { scope: scope_data, result: UnsafeCell::new(None), _marker: PhantomData }); + let their_packet = my_packet.clone(); + + // Pass `f` in `MaybeUninit` because actually that closure might *run longer than the lifetime of `F`*. + // See for more details. + // To prevent leaks we use a wrapper that drops its contents. + #[repr(transparent)] + struct MaybeDangling(MaybeUninit); + impl MaybeDangling { + fn new(x: T) -> Self { + MaybeDangling(MaybeUninit::new(x)) + } + fn into_inner(self) -> T { + // Make sure we don't drop. + let this = ManuallyDrop::new(self); + // SAFETY: we are always initialized. + unsafe { this.0.assume_init_read() } + } + } + impl Drop for MaybeDangling { + fn drop(&mut self) { + // SAFETY: we are always initialized. + unsafe { self.0.assume_init_drop() }; + } + } + + let f = MaybeDangling::new(f); + + // The entrypoint of the Rust thread, after platform-specific thread + // initialization is done. + let rust_start = move || { + let f = f.into_inner(); + let try_result = panic::catch_unwind(panic::AssertUnwindSafe(|| { + crate::sys::backtrace::__rust_begin_short_backtrace(|| hooks.run()); + crate::sys::backtrace::__rust_begin_short_backtrace(f) + })); + // SAFETY: `their_packet` as been built just above and moved by the + // closure (it is an Arc<...>) and `my_packet` will be stored in the + // same `JoinInner` as this closure meaning the mutation will be + // safe (not modify it and affect a value far away). 
+ unsafe { *their_packet.result.get() = Some(try_result) }; + // Here `their_packet` gets dropped, and if this is the last `Arc` for that packet that + // will call `decrement_num_running_threads` and therefore signal that this thread is + // done. + drop(their_packet); + // Here, the lifetime `'scope` can end. `main` keeps running for a bit + // after that before returning itself. + }; + + if let Some(scope_data) = &my_packet.scope { + scope_data.increment_num_running_threads(); + } + + // SAFETY: dynamic size and alignment of the Box remain the same. See below for why the + // lifetime change is justified. + let rust_start = unsafe { + Box::from_raw(Box::into_raw(Box::new(rust_start)) as *mut (dyn FnOnce() + Send + 'static)) + }; + + let init = Box::new(ThreadInit { handle: thread.clone(), rust_start }); + + Ok(JoinInner { + // SAFETY: + // + // `imp::Thread::new` takes a closure with a `'static` lifetime, since it's passed + // through FFI or otherwise used with low-level threading primitives that have no + // notion of or way to enforce lifetimes. + // + // As mentioned in the `Safety` section of this function's documentation, the caller of + // this function needs to guarantee that the passed-in lifetime is sufficiently long + // for the lifetime of the thread. + // + // Similarly, the `sys` implementation must guarantee that no references to the closure + // exist after the thread has terminated, which is signaled by `Thread::join` + // returning. + native: unsafe { imp::Thread::new(stack_size, init)? }, + thread, + packet: my_packet, + }) +} + +/// The data passed to the spawned thread for thread initialization. Any thread +/// implementation should start a new thread by calling .init() on this before +/// doing anything else to ensure the current thread is properly initialized and +/// the global allocator works. 
+pub(crate) struct ThreadInit { + pub handle: Thread, + pub rust_start: Box, +} + +impl ThreadInit { + /// Initialize the 'current thread' mechanism on this thread, returning the + /// Rust entry point. + pub fn init(self: Box) -> Box { + // Set the current thread before any (de)allocations on the global allocator occur, + // so that it may call std::thread::current() in its implementation. This is also + // why we take Box, to ensure the Box is not destroyed until after this point. + // Cloning the handle does not invoke the global allocator, it is an Arc. + if let Err(_thread) = set_current(self.handle.clone()) { + // The current thread should not have set yet. Use an abort to save binary size (see #123356). + rtabort!("current thread handle already set during thread spawn"); + } + + if let Some(name) = self.handle.cname() { + imp::set_name(name); + } + + self.rust_start + } +} + +// This packet is used to communicate the return value between the spawned +// thread and the rest of the program. It is shared through an `Arc` and +// there's no need for a mutex here because synchronization happens with `join()` +// (the caller will never read this packet until the thread has exited). +// +// An Arc to the packet is stored into a `JoinInner` which in turns is placed +// in `JoinHandle`. +struct Packet<'scope, T> { + scope: Option>, + result: UnsafeCell>>, + _marker: PhantomData>, +} + +// Due to the usage of `UnsafeCell` we need to manually implement Sync. +// The type `T` should already always be Send (otherwise the thread could not +// have been created) and the Packet is Sync because all access to the +// `UnsafeCell` synchronized (by the `join()` boundary), and `ScopeData` is Sync. 
+unsafe impl<'scope, T: Send> Sync for Packet<'scope, T> {} + +impl<'scope, T> Drop for Packet<'scope, T> { + fn drop(&mut self) { + // If this packet was for a thread that ran in a scope, the thread + // panicked, and nobody consumed the panic payload, we make sure + // the scope function will panic. + let unhandled_panic = matches!(self.result.get_mut(), Some(Err(_))); + // Drop the result without causing unwinding. + // This is only relevant for threads that aren't join()ed, as + // join() will take the `result` and set it to None, such that + // there is nothing left to drop here. + // If this panics, we should handle that, because we're outside the + // outermost `catch_unwind` of our thread. + // We just abort in that case, since there's nothing else we can do. + // (And even if we tried to handle it somehow, we'd also need to handle + // the case where the panic payload we get out of it also panics on + // drop, and so on. See issue #86027.) + if let Err(_) = panic::catch_unwind(panic::AssertUnwindSafe(|| { + *self.result.get_mut() = None; + })) { + rtabort!("thread result panicked on drop"); + } + // Book-keeping so the scope knows when it's done. + if let Some(scope) = &self.scope { + // Now that there will be no more user code running on this thread + // that can use 'scope, mark the thread as 'finished'. + // It's important we only do this after the `result` has been dropped, + // since dropping it might still use things it borrowed from 'scope. 
+ scope.decrement_num_running_threads(unhandled_panic); + } + } +} + +/// Inner representation for JoinHandle +pub(super) struct JoinInner<'scope, T> { + native: imp::Thread, + thread: Thread, + packet: Arc>, +} + +impl<'scope, T> JoinInner<'scope, T> { + pub(super) fn is_finished(&self) -> bool { + Arc::strong_count(&self.packet) == 1 + } + + pub(super) fn thread(&self) -> &Thread { + &self.thread + } + + pub(super) fn join(mut self) -> Result { + self.native.join(); + Arc::get_mut(&mut self.packet) + // FIXME(fuzzypixelz): returning an error instead of panicking here + // would require updating the documentation of + // `std::thread::Result`; currently we can return `Err` if and only + // if the thread had panicked. + .expect("threads should not terminate unexpectedly") + .result + .get_mut() + .take() + .unwrap() + } +} + +impl AsInner for JoinInner<'static, T> { + fn as_inner(&self) -> &imp::Thread { + &self.native + } +} + +impl IntoInner for JoinInner<'static, T> { + fn into_inner(self) -> imp::Thread { + self.native + } +} diff --git a/library/std/src/thread/main_thread.rs b/library/std/src/thread/main_thread.rs new file mode 100644 index 0000000000000..394074a593674 --- /dev/null +++ b/library/std/src/thread/main_thread.rs @@ -0,0 +1,56 @@ +//! Store the ID of the main thread. +//! +//! The thread handle for the main thread is created lazily, and this might even +//! happen pre-main. Since not every platform has a way to identify the main +//! thread when that happens – macOS's `pthread_main_np` function being a notable +//! exception – we cannot assign it the right name right then. Instead, in our +//! runtime startup code, we remember the thread ID of the main thread (through +//! this modules `set` function) and use it to identify the main thread from then +//! on. This works reliably and has the additional advantage that we can report +//! the right thread name on main even after the thread handle has been destroyed. +//! 
Note however that this also means that the name reported in pre-main functions +//! will be incorrect, but that's just something we have to live with. + +cfg_select! { + target_has_atomic = "64" => { + use super::id::ThreadId; + use crate::sync::atomic::{Atomic, AtomicU64}; + use crate::sync::atomic::Ordering::Relaxed; + + static MAIN: Atomic = AtomicU64::new(0); + + pub(super) fn get() -> Option { + ThreadId::from_u64(MAIN.load(Relaxed)) + } + + /// # Safety + /// May only be called once. + pub(crate) unsafe fn set(id: ThreadId) { + MAIN.store(id.as_u64().get(), Relaxed) + } + } + _ => { + use super::id::ThreadId; + use crate::mem::MaybeUninit; + use crate::sync::atomic::{Atomic, AtomicBool}; + use crate::sync::atomic::Ordering::{Acquire, Release}; + + static INIT: Atomic = AtomicBool::new(false); + static mut MAIN: MaybeUninit = MaybeUninit::uninit(); + + pub(super) fn get() -> Option { + if INIT.load(Acquire) { + Some(unsafe { MAIN.assume_init() }) + } else { + None + } + } + + /// # Safety + /// May only be called once. + pub(crate) unsafe fn set(id: ThreadId) { + unsafe { MAIN = MaybeUninit::new(id) }; + INIT.store(true, Release); + } + } +} diff --git a/library/std/src/thread/mod.rs b/library/std/src/thread/mod.rs index 983d189b07024..00aeb70e6e076 100644 --- a/library/std/src/thread/mod.rs +++ b/library/std/src/thread/mod.rs @@ -137,6 +137,7 @@ //! Note that the stack size of the main thread is *not* determined by Rust. //! //! [channels]: crate::sync::mpsc +//! [`Arc`]: crate::sync::Arc //! [`join`]: JoinHandle::join //! [`Result`]: crate::result::Result //! [`Ok`]: crate::result::Result::Ok @@ -155,54 +156,56 @@ // Under `test`, `__FastLocalKeyInner` seems unused. 
#![cfg_attr(test, allow(dead_code))] -#[cfg(all(test, not(any(target_os = "emscripten", target_os = "wasi"))))] -mod tests; - -use crate::alloc::System; use crate::any::Any; -use crate::cell::UnsafeCell; -use crate::ffi::CStr; -use crate::marker::PhantomData; -use crate::mem::{self, ManuallyDrop, forget}; -use crate::num::NonZero; -use crate::pin::Pin; -use crate::sync::Arc; -use crate::sync::atomic::{Atomic, AtomicUsize, Ordering}; -use crate::sys::sync::Parker; -use crate::sys::thread as imp; -use crate::sys_common::{AsInner, IntoInner}; -use crate::time::{Duration, Instant}; -use crate::{env, fmt, io, panic, panicking, str}; -#[stable(feature = "scoped_threads", since = "1.63.0")] +#[macro_use] +mod local; +mod builder; +mod current; +mod functions; +mod id; +mod join_handle; +mod lifecycle; mod scoped; +mod spawnhook; +mod thread; -#[stable(feature = "scoped_threads", since = "1.63.0")] -pub use scoped::{Scope, ScopedJoinHandle, scope}; +pub(crate) mod main_thread; -mod current; +#[cfg(all(test, not(any(target_os = "emscripten", target_os = "wasi"))))] +mod tests; +#[stable(feature = "rust1", since = "1.0.0")] +pub use builder::Builder; #[stable(feature = "rust1", since = "1.0.0")] pub use current::current; #[unstable(feature = "current_thread_id", issue = "147194")] pub use current::current_id; -pub(crate) use current::{current_or_unnamed, current_os_id, drop_current}; -use current::{set_current, try_with_current}; - -mod spawnhook; - +pub(crate) use current::{current_or_unnamed, current_os_id, drop_current, with_current_name}; +#[stable(feature = "available_parallelism", since = "1.59.0")] +pub use functions::available_parallelism; +#[stable(feature = "park_timeout", since = "1.4.0")] +pub use functions::park_timeout; +#[stable(feature = "thread_sleep", since = "1.4.0")] +pub use functions::sleep; +#[unstable(feature = "thread_sleep_until", issue = "113752")] +pub use functions::sleep_until; +#[expect(deprecated)] +#[stable(feature = "rust1", since = 
"1.0.0")] +pub use functions::{panicking, park, park_timeout_ms, sleep_ms, spawn, yield_now}; +#[stable(feature = "thread_id", since = "1.19.0")] +pub use id::ThreadId; +#[stable(feature = "rust1", since = "1.0.0")] +pub use join_handle::JoinHandle; +pub(crate) use lifecycle::ThreadInit; +#[stable(feature = "rust1", since = "1.0.0")] +pub use local::{AccessError, LocalKey}; +#[stable(feature = "scoped_threads", since = "1.63.0")] +pub use scoped::{Scope, ScopedJoinHandle, scope}; #[unstable(feature = "thread_spawn_hook", issue = "132951")] pub use spawnhook::add_spawn_hook; - -//////////////////////////////////////////////////////////////////////////////// -// Thread-local storage -//////////////////////////////////////////////////////////////////////////////// - -#[macro_use] -mod local; - #[stable(feature = "rust1", since = "1.0.0")] -pub use self::local::{AccessError, LocalKey}; +pub use thread::Thread; // Implementation details used by the thread_local!{} macro. #[doc(hidden)] @@ -212,1542 +215,6 @@ pub mod local_impl { pub use crate::sys::thread_local::*; } -/// The data passed to the spawned thread for thread initialization. Any thread -/// implementation should start a new thread by calling .init() on this before -/// doing anything else to ensure the current thread is properly initialized and -/// the global allocator works. -pub(crate) struct ThreadInit { - pub handle: Thread, - pub rust_start: Box, -} - -impl ThreadInit { - /// Initialize the 'current thread' mechanism on this thread, returning the - /// Rust entry point. - pub fn init(self: Box) -> Box { - // Set the current thread before any (de)allocations on the global allocator occur, - // so that it may call std::thread::current() in its implementation. This is also - // why we take Box, to ensure the Box is not destroyed until after this point. - // Cloning the handle does not invoke the global allocator, it is an Arc. 
- if let Err(_thread) = set_current(self.handle.clone()) { - // The current thread should not have set yet. Use an abort to save binary size (see #123356). - rtabort!("current thread handle already set during thread spawn"); - } - - if let Some(name) = self.handle.cname() { - imp::set_name(name); - } - - self.rust_start - } -} - -//////////////////////////////////////////////////////////////////////////////// -// Builder -//////////////////////////////////////////////////////////////////////////////// - -/// Thread factory, which can be used in order to configure the properties of -/// a new thread. -/// -/// Methods can be chained on it in order to configure it. -/// -/// The two configurations available are: -/// -/// - [`name`]: specifies an [associated name for the thread][naming-threads] -/// - [`stack_size`]: specifies the [desired stack size for the thread][stack-size] -/// -/// The [`spawn`] method will take ownership of the builder and create an -/// [`io::Result`] to the thread handle with the given configuration. -/// -/// The [`thread::spawn`] free function uses a `Builder` with default -/// configuration and [`unwrap`]s its return value. -/// -/// You may want to use [`spawn`] instead of [`thread::spawn`], when you want -/// to recover from a failure to launch a thread, indeed the free function will -/// panic where the `Builder` method will return a [`io::Result`]. 
-/// -/// # Examples -/// -/// ``` -/// use std::thread; -/// -/// let builder = thread::Builder::new(); -/// -/// let handler = builder.spawn(|| { -/// // thread code -/// }).unwrap(); -/// -/// handler.join().unwrap(); -/// ``` -/// -/// [`stack_size`]: Builder::stack_size -/// [`name`]: Builder::name -/// [`spawn`]: Builder::spawn -/// [`thread::spawn`]: spawn -/// [`io::Result`]: crate::io::Result -/// [`unwrap`]: crate::result::Result::unwrap -/// [naming-threads]: ./index.html#naming-threads -/// [stack-size]: ./index.html#stack-size -#[must_use = "must eventually spawn the thread"] -#[stable(feature = "rust1", since = "1.0.0")] -#[derive(Debug)] -pub struct Builder { - // A name for the thread-to-be, for identification in panic messages - name: Option, - // The size of the stack for the spawned thread in bytes - stack_size: Option, - // Skip running and inheriting the thread spawn hooks - no_hooks: bool, -} - -impl Builder { - /// Generates the base configuration for spawning a thread, from which - /// configuration methods can be chained. - /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// - /// let builder = thread::Builder::new() - /// .name("foo".into()) - /// .stack_size(32 * 1024); - /// - /// let handler = builder.spawn(|| { - /// // thread code - /// }).unwrap(); - /// - /// handler.join().unwrap(); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn new() -> Builder { - Builder { name: None, stack_size: None, no_hooks: false } - } - - /// Names the thread-to-be. Currently the name is used for identification - /// only in panic messages. - /// - /// The name must not contain null bytes (`\0`). - /// - /// For more information about named threads, see - /// [this module-level documentation][naming-threads]. 
- /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// - /// let builder = thread::Builder::new() - /// .name("foo".into()); - /// - /// let handler = builder.spawn(|| { - /// assert_eq!(thread::current().name(), Some("foo")) - /// }).unwrap(); - /// - /// handler.join().unwrap(); - /// ``` - /// - /// [naming-threads]: ./index.html#naming-threads - #[stable(feature = "rust1", since = "1.0.0")] - pub fn name(mut self, name: String) -> Builder { - self.name = Some(name); - self - } - - /// Sets the size of the stack (in bytes) for the new thread. - /// - /// The actual stack size may be greater than this value if - /// the platform specifies a minimal stack size. - /// - /// For more information about the stack size for threads, see - /// [this module-level documentation][stack-size]. - /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// - /// let builder = thread::Builder::new().stack_size(32 * 1024); - /// ``` - /// - /// [stack-size]: ./index.html#stack-size - #[stable(feature = "rust1", since = "1.0.0")] - pub fn stack_size(mut self, size: usize) -> Builder { - self.stack_size = Some(size); - self - } - - /// Disables running and inheriting [spawn hooks](add_spawn_hook). - /// - /// Use this if the parent thread is in no way relevant for the child thread. - /// For example, when lazily spawning threads for a thread pool. - #[unstable(feature = "thread_spawn_hook", issue = "132951")] - pub fn no_hooks(mut self) -> Builder { - self.no_hooks = true; - self - } - - /// Spawns a new thread by taking ownership of the `Builder`, and returns an - /// [`io::Result`] to its [`JoinHandle`]. - /// - /// The spawned thread may outlive the caller (unless the caller thread - /// is the main thread; the whole process is terminated when the main - /// thread finishes). The join handle can be used to block on - /// termination of the spawned thread, including recovering its panics. 
- /// - /// For a more complete documentation see [`thread::spawn`][`spawn`]. - /// - /// # Errors - /// - /// Unlike the [`spawn`] free function, this method yields an - /// [`io::Result`] to capture any failure to create the thread at - /// the OS level. - /// - /// [`io::Result`]: crate::io::Result - /// - /// # Panics - /// - /// Panics if a thread name was set and it contained null bytes. - /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// - /// let builder = thread::Builder::new(); - /// - /// let handler = builder.spawn(|| { - /// // thread code - /// }).unwrap(); - /// - /// handler.join().unwrap(); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces - pub fn spawn(self, f: F) -> io::Result> - where - F: FnOnce() -> T, - F: Send + 'static, - T: Send + 'static, - { - unsafe { self.spawn_unchecked(f) } - } - - /// Spawns a new thread without any lifetime restrictions by taking ownership - /// of the `Builder`, and returns an [`io::Result`] to its [`JoinHandle`]. - /// - /// The spawned thread may outlive the caller (unless the caller thread - /// is the main thread; the whole process is terminated when the main - /// thread finishes). The join handle can be used to block on - /// termination of the spawned thread, including recovering its panics. - /// - /// This method is identical to [`thread::Builder::spawn`][`Builder::spawn`], - /// except for the relaxed lifetime bounds, which render it unsafe. - /// For a more complete documentation see [`thread::spawn`][`spawn`]. - /// - /// # Errors - /// - /// Unlike the [`spawn`] free function, this method yields an - /// [`io::Result`] to capture any failure to create the thread at - /// the OS level. - /// - /// # Panics - /// - /// Panics if a thread name was set and it contained null bytes. 
- /// - /// # Safety - /// - /// The caller has to ensure that the spawned thread does not outlive any - /// references in the supplied thread closure and its return type. - /// This can be guaranteed in two ways: - /// - /// - ensure that [`join`][`JoinHandle::join`] is called before any referenced - /// data is dropped - /// - use only types with `'static` lifetime bounds, i.e., those with no or only - /// `'static` references (both [`thread::Builder::spawn`][`Builder::spawn`] - /// and [`thread::spawn`][`spawn`] enforce this property statically) - /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// - /// let builder = thread::Builder::new(); - /// - /// let x = 1; - /// let thread_x = &x; - /// - /// let handler = unsafe { - /// builder.spawn_unchecked(move || { - /// println!("x = {}", *thread_x); - /// }).unwrap() - /// }; - /// - /// // caller has to ensure `join()` is called, otherwise - /// // it is possible to access freed memory if `x` gets - /// // dropped before the thread closure is executed! 
- /// handler.join().unwrap(); - /// ``` - /// - /// [`io::Result`]: crate::io::Result - #[stable(feature = "thread_spawn_unchecked", since = "1.82.0")] - #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces - pub unsafe fn spawn_unchecked(self, f: F) -> io::Result> - where - F: FnOnce() -> T, - F: Send, - T: Send, - { - Ok(JoinHandle(unsafe { self.spawn_unchecked_(f, None) }?)) - } - - #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces - unsafe fn spawn_unchecked_<'scope, F, T>( - self, - f: F, - scope_data: Option>, - ) -> io::Result> - where - F: FnOnce() -> T, - F: Send, - T: Send, - { - let Builder { name, stack_size, no_hooks } = self; - - let stack_size = stack_size.unwrap_or_else(|| { - static MIN: Atomic = AtomicUsize::new(0); - - match MIN.load(Ordering::Relaxed) { - 0 => {} - n => return n - 1, - } - - let amt = env::var_os("RUST_MIN_STACK") - .and_then(|s| s.to_str().and_then(|s| s.parse().ok())) - .unwrap_or(imp::DEFAULT_MIN_STACK_SIZE); - - // 0 is our sentinel value, so ensure that we'll never see 0 after - // initialization has run - MIN.store(amt + 1, Ordering::Relaxed); - amt - }); - - let id = ThreadId::new(); - let thread = Thread::new(id, name); - - let hooks = if no_hooks { - spawnhook::ChildSpawnHooks::default() - } else { - spawnhook::run_spawn_hooks(&thread) - }; - - let my_packet: Arc> = Arc::new(Packet { - scope: scope_data, - result: UnsafeCell::new(None), - _marker: PhantomData, - }); - let their_packet = my_packet.clone(); - - // Pass `f` in `MaybeUninit` because actually that closure might *run longer than the lifetime of `F`*. - // See for more details. - // To prevent leaks we use a wrapper that drops its contents. - #[repr(transparent)] - struct MaybeDangling(mem::MaybeUninit); - impl MaybeDangling { - fn new(x: T) -> Self { - MaybeDangling(mem::MaybeUninit::new(x)) - } - fn into_inner(self) -> T { - // Make sure we don't drop. 
- let this = ManuallyDrop::new(self); - // SAFETY: we are always initialized. - unsafe { this.0.assume_init_read() } - } - } - impl Drop for MaybeDangling { - fn drop(&mut self) { - // SAFETY: we are always initialized. - unsafe { self.0.assume_init_drop() }; - } - } - - let f = MaybeDangling::new(f); - - // The entrypoint of the Rust thread, after platform-specific thread - // initialization is done. - let rust_start = move || { - let f = f.into_inner(); - let try_result = panic::catch_unwind(panic::AssertUnwindSafe(|| { - crate::sys::backtrace::__rust_begin_short_backtrace(|| hooks.run()); - crate::sys::backtrace::__rust_begin_short_backtrace(f) - })); - // SAFETY: `their_packet` as been built just above and moved by the - // closure (it is an Arc<...>) and `my_packet` will be stored in the - // same `JoinInner` as this closure meaning the mutation will be - // safe (not modify it and affect a value far away). - unsafe { *their_packet.result.get() = Some(try_result) }; - // Here `their_packet` gets dropped, and if this is the last `Arc` for that packet that - // will call `decrement_num_running_threads` and therefore signal that this thread is - // done. - drop(their_packet); - // Here, the lifetime `'scope` can end. `main` keeps running for a bit - // after that before returning itself. - }; - - if let Some(scope_data) = &my_packet.scope { - scope_data.increment_num_running_threads(); - } - - // SAFETY: dynamic size and alignment of the Box remain the same. See below for why the - // lifetime change is justified. - let rust_start = unsafe { - Box::from_raw( - Box::into_raw(Box::new(rust_start)) as *mut (dyn FnOnce() + Send + 'static) - ) - }; - - let init = Box::new(ThreadInit { handle: thread.clone(), rust_start }); - - Ok(JoinInner { - // SAFETY: - // - // `imp::Thread::new` takes a closure with a `'static` lifetime, since it's passed - // through FFI or otherwise used with low-level threading primitives that have no - // notion of or way to enforce lifetimes. 
- // - // As mentioned in the `Safety` section of this function's documentation, the caller of - // this function needs to guarantee that the passed-in lifetime is sufficiently long - // for the lifetime of the thread. - // - // Similarly, the `sys` implementation must guarantee that no references to the closure - // exist after the thread has terminated, which is signaled by `Thread::join` - // returning. - native: unsafe { imp::Thread::new(stack_size, init)? }, - thread, - packet: my_packet, - }) - } -} - -//////////////////////////////////////////////////////////////////////////////// -// Free functions -//////////////////////////////////////////////////////////////////////////////// - -/// Spawns a new thread, returning a [`JoinHandle`] for it. -/// -/// The join handle provides a [`join`] method that can be used to join the spawned -/// thread. If the spawned thread panics, [`join`] will return an [`Err`] containing -/// the argument given to [`panic!`]. -/// -/// If the join handle is dropped, the spawned thread will implicitly be *detached*. -/// In this case, the spawned thread may no longer be joined. -/// (It is the responsibility of the program to either eventually join threads it -/// creates or detach them; otherwise, a resource leak will result.) -/// -/// This function creates a thread with the default parameters of [`Builder`]. -/// To specify the new thread's stack size or the name, use [`Builder::spawn`]. -/// -/// As you can see in the signature of `spawn` there are two constraints on -/// both the closure given to `spawn` and its return value, let's explain them: -/// -/// - The `'static` constraint means that the closure and its return value -/// must have a lifetime of the whole program execution. The reason for this -/// is that threads can outlive the lifetime they have been created in. 
-/// -/// Indeed if the thread, and by extension its return value, can outlive their -/// caller, we need to make sure that they will be valid afterwards, and since -/// we *can't* know when it will return we need to have them valid as long as -/// possible, that is until the end of the program, hence the `'static` -/// lifetime. -/// - The [`Send`] constraint is because the closure will need to be passed -/// *by value* from the thread where it is spawned to the new thread. Its -/// return value will need to be passed from the new thread to the thread -/// where it is `join`ed. -/// As a reminder, the [`Send`] marker trait expresses that it is safe to be -/// passed from thread to thread. [`Sync`] expresses that it is safe to have a -/// reference be passed from thread to thread. -/// -/// # Panics -/// -/// Panics if the OS fails to create a thread; use [`Builder::spawn`] -/// to recover from such errors. -/// -/// # Examples -/// -/// Creating a thread. -/// -/// ``` -/// use std::thread; -/// -/// let handler = thread::spawn(|| { -/// // thread code -/// }); -/// -/// handler.join().unwrap(); -/// ``` -/// -/// As mentioned in the module documentation, threads are usually made to -/// communicate using [`channels`], here is how it usually looks. -/// -/// This example also shows how to use `move`, in order to give ownership -/// of values to a thread. 
-/// -/// ``` -/// use std::thread; -/// use std::sync::mpsc::channel; -/// -/// let (tx, rx) = channel(); -/// -/// let sender = thread::spawn(move || { -/// tx.send("Hello, thread".to_owned()) -/// .expect("Unable to send on channel"); -/// }); -/// -/// let receiver = thread::spawn(move || { -/// let value = rx.recv().expect("Unable to receive from channel"); -/// println!("{value}"); -/// }); -/// -/// sender.join().expect("The sender thread has panicked"); -/// receiver.join().expect("The receiver thread has panicked"); -/// ``` -/// -/// A thread can also return a value through its [`JoinHandle`], you can use -/// this to make asynchronous computations (futures might be more appropriate -/// though). -/// -/// ``` -/// use std::thread; -/// -/// let computation = thread::spawn(|| { -/// // Some expensive computation. -/// 42 -/// }); -/// -/// let result = computation.join().unwrap(); -/// println!("{result}"); -/// ``` -/// -/// # Notes -/// -/// This function has the same minimal guarantee regarding "foreign" unwinding operations (e.g. -/// an exception thrown from C++ code, or a `panic!` in Rust code compiled or linked with a -/// different runtime) as [`catch_unwind`]; namely, if the thread created with `thread::spawn` -/// unwinds all the way to the root with such an exception, one of two behaviors are possible, -/// and it is unspecified which will occur: -/// -/// * The process aborts. -/// * The process does not abort, and [`join`] will return a `Result::Err` -/// containing an opaque type. 
-/// -/// [`catch_unwind`]: ../../std/panic/fn.catch_unwind.html -/// [`channels`]: crate::sync::mpsc -/// [`join`]: JoinHandle::join -/// [`Err`]: crate::result::Result::Err -#[stable(feature = "rust1", since = "1.0.0")] -#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces -pub fn spawn(f: F) -> JoinHandle -where - F: FnOnce() -> T, - F: Send + 'static, - T: Send + 'static, -{ - Builder::new().spawn(f).expect("failed to spawn thread") -} - -/// Cooperatively gives up a timeslice to the OS scheduler. -/// -/// This calls the underlying OS scheduler's yield primitive, signaling -/// that the calling thread is willing to give up its remaining timeslice -/// so that the OS may schedule other threads on the CPU. -/// -/// A drawback of yielding in a loop is that if the OS does not have any -/// other ready threads to run on the current CPU, the thread will effectively -/// busy-wait, which wastes CPU time and energy. -/// -/// Therefore, when waiting for events of interest, a programmer's first -/// choice should be to use synchronization devices such as [`channel`]s, -/// [`Condvar`]s, [`Mutex`]es or [`join`] since these primitives are -/// implemented in a blocking manner, giving up the CPU until the event -/// of interest has occurred which avoids repeated yielding. -/// -/// `yield_now` should thus be used only rarely, mostly in situations where -/// repeated polling is required because there is no other suitable way to -/// learn when an event of interest has occurred. -/// -/// # Examples -/// -/// ``` -/// use std::thread; -/// -/// thread::yield_now(); -/// ``` -/// -/// [`channel`]: crate::sync::mpsc -/// [`join`]: JoinHandle::join -/// [`Condvar`]: crate::sync::Condvar -/// [`Mutex`]: crate::sync::Mutex -#[stable(feature = "rust1", since = "1.0.0")] -pub fn yield_now() { - imp::yield_now() -} - -/// Determines whether the current thread is unwinding because of panic. 
-/// -/// A common use of this feature is to poison shared resources when writing -/// unsafe code, by checking `panicking` when the `drop` is called. -/// -/// This is usually not needed when writing safe code, as [`Mutex`es][Mutex] -/// already poison themselves when a thread panics while holding the lock. -/// -/// This can also be used in multithreaded applications, in order to send a -/// message to other threads warning that a thread has panicked (e.g., for -/// monitoring purposes). -/// -/// # Examples -/// -/// ```should_panic -/// use std::thread; -/// -/// struct SomeStruct; -/// -/// impl Drop for SomeStruct { -/// fn drop(&mut self) { -/// if thread::panicking() { -/// println!("dropped while unwinding"); -/// } else { -/// println!("dropped while not unwinding"); -/// } -/// } -/// } -/// -/// { -/// print!("a: "); -/// let a = SomeStruct; -/// } -/// -/// { -/// print!("b: "); -/// let b = SomeStruct; -/// panic!() -/// } -/// ``` -/// -/// [Mutex]: crate::sync::Mutex -#[inline] -#[must_use] -#[stable(feature = "rust1", since = "1.0.0")] -pub fn panicking() -> bool { - panicking::panicking() -} - -/// Uses [`sleep`]. -/// -/// Puts the current thread to sleep for at least the specified amount of time. -/// -/// The thread may sleep longer than the duration specified due to scheduling -/// specifics or platform-dependent functionality. It will never sleep less. -/// -/// This function is blocking, and should not be used in `async` functions. -/// -/// # Platform-specific behavior -/// -/// On Unix platforms, the underlying syscall may be interrupted by a -/// spurious wakeup or signal handler. To ensure the sleep occurs for at least -/// the specified duration, this function may invoke that system call multiple -/// times. 
-/// -/// # Examples -/// -/// ```no_run -/// use std::thread; -/// -/// // Let's sleep for 2 seconds: -/// thread::sleep_ms(2000); -/// ``` -#[stable(feature = "rust1", since = "1.0.0")] -#[deprecated(since = "1.6.0", note = "replaced by `std::thread::sleep`")] -pub fn sleep_ms(ms: u32) { - sleep(Duration::from_millis(ms as u64)) -} - -/// Puts the current thread to sleep for at least the specified amount of time. -/// -/// The thread may sleep longer than the duration specified due to scheduling -/// specifics or platform-dependent functionality. It will never sleep less. -/// -/// This function is blocking, and should not be used in `async` functions. -/// -/// # Platform-specific behavior -/// -/// On Unix platforms, the underlying syscall may be interrupted by a -/// spurious wakeup or signal handler. To ensure the sleep occurs for at least -/// the specified duration, this function may invoke that system call multiple -/// times. -/// Platforms which do not support nanosecond precision for sleeping will -/// have `dur` rounded up to the nearest granularity of time they can sleep for. -/// -/// Currently, specifying a zero duration on Unix platforms returns immediately -/// without invoking the underlying [`nanosleep`] syscall, whereas on Windows -/// platforms the underlying [`Sleep`] syscall is always invoked. -/// If the intention is to yield the current time-slice you may want to use -/// [`yield_now`] instead. 
-/// -/// [`nanosleep`]: https://linux.die.net/man/2/nanosleep -/// [`Sleep`]: https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-sleep -/// -/// # Examples -/// -/// ```no_run -/// use std::{thread, time}; -/// -/// let ten_millis = time::Duration::from_millis(10); -/// let now = time::Instant::now(); -/// -/// thread::sleep(ten_millis); -/// -/// assert!(now.elapsed() >= ten_millis); -/// ``` -#[stable(feature = "thread_sleep", since = "1.4.0")] -pub fn sleep(dur: Duration) { - imp::sleep(dur) -} - -/// Puts the current thread to sleep until the specified deadline has passed. -/// -/// The thread may still be asleep after the deadline specified due to -/// scheduling specifics or platform-dependent functionality. It will never -/// wake before. -/// -/// This function is blocking, and should not be used in `async` functions. -/// -/// # Platform-specific behavior -/// -/// In most cases this function will call an OS specific function. Where that -/// is not supported [`sleep`] is used. Those platforms are referred to as other -/// in the table below. 
-/// -/// # Underlying System calls -/// -/// The following system calls are [currently] being used: -/// -/// | Platform | System call | -/// |-----------|----------------------------------------------------------------------| -/// | Linux | [clock_nanosleep] (Monotonic clock) | -/// | BSD except OpenBSD | [clock_nanosleep] (Monotonic Clock)] | -/// | Android | [clock_nanosleep] (Monotonic Clock)] | -/// | Solaris | [clock_nanosleep] (Monotonic Clock)] | -/// | Illumos | [clock_nanosleep] (Monotonic Clock)] | -/// | Dragonfly | [clock_nanosleep] (Monotonic Clock)] | -/// | Hurd | [clock_nanosleep] (Monotonic Clock)] | -/// | Fuchsia | [clock_nanosleep] (Monotonic Clock)] | -/// | Vxworks | [clock_nanosleep] (Monotonic Clock)] | -/// | Other | `sleep_until` uses [`sleep`] and does not issue a syscall itself | -/// -/// [currently]: crate::io#platform-specific-behavior -/// [clock_nanosleep]: https://linux.die.net/man/3/clock_nanosleep -/// -/// **Disclaimer:** These system calls might change over time. -/// -/// # Examples -/// -/// A simple game loop that limits the game to 60 frames per second. -/// -/// ```no_run -/// #![feature(thread_sleep_until)] -/// # use std::time::{Duration, Instant}; -/// # use std::thread; -/// # -/// # fn update() {} -/// # fn render() {} -/// # -/// let max_fps = 60.0; -/// let frame_time = Duration::from_secs_f32(1.0/max_fps); -/// let mut next_frame = Instant::now(); -/// loop { -/// thread::sleep_until(next_frame); -/// next_frame += frame_time; -/// update(); -/// render(); -/// } -/// ``` -/// -/// A slow API we must not call too fast and which takes a few -/// tries before succeeding. 
By using `sleep_until` the time the -/// API call takes does not influence when we retry or when we give up -/// -/// ```no_run -/// #![feature(thread_sleep_until)] -/// # use std::time::{Duration, Instant}; -/// # use std::thread; -/// # -/// # enum Status { -/// # Ready(usize), -/// # Waiting, -/// # } -/// # fn slow_web_api_call() -> Status { Status::Ready(42) } -/// # -/// # const MAX_DURATION: Duration = Duration::from_secs(10); -/// # -/// # fn try_api_call() -> Result { -/// let deadline = Instant::now() + MAX_DURATION; -/// let delay = Duration::from_millis(250); -/// let mut next_attempt = Instant::now(); -/// loop { -/// if Instant::now() > deadline { -/// break Err(()); -/// } -/// if let Status::Ready(data) = slow_web_api_call() { -/// break Ok(data); -/// } -/// -/// next_attempt = deadline.min(next_attempt + delay); -/// thread::sleep_until(next_attempt); -/// } -/// # } -/// # let _data = try_api_call(); -/// ``` -#[unstable(feature = "thread_sleep_until", issue = "113752")] -pub fn sleep_until(deadline: Instant) { - imp::sleep_until(deadline) -} - -/// Used to ensure that `park` and `park_timeout` do not unwind, as that can -/// cause undefined behavior if not handled correctly (see #102398 for context). -struct PanicGuard; - -impl Drop for PanicGuard { - fn drop(&mut self) { - rtabort!("an irrecoverable error occurred while synchronizing threads") - } -} - -/// Blocks unless or until the current thread's token is made available. -/// -/// A call to `park` does not guarantee that the thread will remain parked -/// forever, and callers should be prepared for this possibility. However, -/// it is guaranteed that this function will not panic (it may abort the -/// process if the implementation encounters some rare errors). -/// -/// # `park` and `unpark` -/// -/// Every thread is equipped with some basic low-level blocking support, via the -/// [`thread::park`][`park`] function and [`thread::Thread::unpark`][`unpark`] -/// method. 
[`park`] blocks the current thread, which can then be resumed from -/// another thread by calling the [`unpark`] method on the blocked thread's -/// handle. -/// -/// Conceptually, each [`Thread`] handle has an associated token, which is -/// initially not present: -/// -/// * The [`thread::park`][`park`] function blocks the current thread unless or -/// until the token is available for its thread handle, at which point it -/// atomically consumes the token. It may also return *spuriously*, without -/// consuming the token. [`thread::park_timeout`] does the same, but allows -/// specifying a maximum time to block the thread for. -/// -/// * The [`unpark`] method on a [`Thread`] atomically makes the token available -/// if it wasn't already. Because the token can be held by a thread even if it is currently not -/// parked, [`unpark`] followed by [`park`] will result in the second call returning immediately. -/// However, note that to rely on this guarantee, you need to make sure that your `unpark` happens -/// after all `park` that may be done by other data structures! -/// -/// The API is typically used by acquiring a handle to the current thread, placing that handle in a -/// shared data structure so that other threads can find it, and then `park`ing in a loop. When some -/// desired condition is met, another thread calls [`unpark`] on the handle. The last bullet point -/// above guarantees that even if the `unpark` occurs before the thread is finished `park`ing, it -/// will be woken up properly. -/// -/// Note that the coordination via the shared data structure is crucial: If you `unpark` a thread -/// without first establishing that it is about to be `park`ing within your code, that `unpark` may -/// get consumed by a *different* `park` in the same thread, leading to a deadlock. 
This also means -/// you must not call unknown code between setting up for parking and calling `park`; for instance, -/// if you invoke `println!`, that may itself call `park` and thus consume your `unpark` and cause a -/// deadlock. -/// -/// The motivation for this design is twofold: -/// -/// * It avoids the need to allocate mutexes and condvars when building new -/// synchronization primitives; the threads already provide basic -/// blocking/signaling. -/// -/// * It can be implemented very efficiently on many platforms. -/// -/// # Memory Ordering -/// -/// Calls to `unpark` _synchronize-with_ calls to `park`, meaning that memory -/// operations performed before a call to `unpark` are made visible to the thread that -/// consumes the token and returns from `park`. Note that all `park` and `unpark` -/// operations for a given thread form a total order and _all_ prior `unpark` operations -/// synchronize-with `park`. -/// -/// In atomic ordering terms, `unpark` performs a `Release` operation and `park` -/// performs the corresponding `Acquire` operation. Calls to `unpark` for the same -/// thread form a [release sequence]. -/// -/// Note that being unblocked does not imply a call was made to `unpark`, because -/// wakeups can also be spurious. For example, a valid, but inefficient, -/// implementation could have `park` and `unpark` return immediately without doing anything, -/// making *all* wakeups spurious. -/// -/// # Examples -/// -/// ``` -/// use std::thread; -/// use std::sync::atomic::{Ordering, AtomicBool}; -/// use std::time::Duration; -/// -/// static QUEUED: AtomicBool = AtomicBool::new(false); -/// static FLAG: AtomicBool = AtomicBool::new(false); -/// -/// let parked_thread = thread::spawn(move || { -/// println!("Thread spawned"); -/// // Signal that we are going to `park`. Between this store and our `park`, there may -/// // be no other `park`, or else that `park` could consume our `unpark` token! 
-/// QUEUED.store(true, Ordering::Release); -/// // We want to wait until the flag is set. We *could* just spin, but using -/// // park/unpark is more efficient. -/// while !FLAG.load(Ordering::Acquire) { -/// // We can *not* use `println!` here since that could use thread parking internally. -/// thread::park(); -/// // We *could* get here spuriously, i.e., way before the 10ms below are over! -/// // But that is no problem, we are in a loop until the flag is set anyway. -/// } -/// println!("Flag received"); -/// }); -/// -/// // Let some time pass for the thread to be spawned. -/// thread::sleep(Duration::from_millis(10)); -/// -/// // Ensure the thread is about to park. -/// // This is crucial! It guarantees that the `unpark` below is not consumed -/// // by some other code in the parked thread (e.g. inside `println!`). -/// while !QUEUED.load(Ordering::Acquire) { -/// // Spinning is of course inefficient; in practice, this would more likely be -/// // a dequeue where we have no work to do if there's nobody queued. -/// std::hint::spin_loop(); -/// } -/// -/// // Set the flag, and let the thread wake up. -/// // There is no race condition here: if `unpark` -/// // happens first, `park` will return immediately. -/// // There is also no other `park` that could consume this token, -/// // since we waited until the other thread got queued. -/// // Hence there is no risk of a deadlock. -/// FLAG.store(true, Ordering::Release); -/// println!("Unpark the thread"); -/// parked_thread.thread().unpark(); -/// -/// parked_thread.join().unwrap(); -/// ``` -/// -/// [`unpark`]: Thread::unpark -/// [`thread::park_timeout`]: park_timeout -/// [release sequence]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release_sequence -#[stable(feature = "rust1", since = "1.0.0")] -pub fn park() { - let guard = PanicGuard; - // SAFETY: park_timeout is called on the parker owned by this thread. - unsafe { - current().park(); - } - // No panic occurred, do not abort. 
- forget(guard); -} - -/// Uses [`park_timeout`]. -/// -/// Blocks unless or until the current thread's token is made available or -/// the specified duration has been reached (may wake spuriously). -/// -/// The semantics of this function are equivalent to [`park`] except -/// that the thread will be blocked for roughly no longer than `dur`. This -/// method should not be used for precise timing due to anomalies such as -/// preemption or platform differences that might not cause the maximum -/// amount of time waited to be precisely `ms` long. -/// -/// See the [park documentation][`park`] for more detail. -#[stable(feature = "rust1", since = "1.0.0")] -#[deprecated(since = "1.6.0", note = "replaced by `std::thread::park_timeout`")] -pub fn park_timeout_ms(ms: u32) { - park_timeout(Duration::from_millis(ms as u64)) -} - -/// Blocks unless or until the current thread's token is made available or -/// the specified duration has been reached (may wake spuriously). -/// -/// The semantics of this function are equivalent to [`park`][park] except -/// that the thread will be blocked for roughly no longer than `dur`. This -/// method should not be used for precise timing due to anomalies such as -/// preemption or platform differences that might not cause the maximum -/// amount of time waited to be precisely `dur` long. -/// -/// See the [park documentation][park] for more details. -/// -/// # Platform-specific behavior -/// -/// Platforms which do not support nanosecond precision for sleeping will have -/// `dur` rounded up to the nearest granularity of time they can sleep for. 
-/// -/// # Examples -/// -/// Waiting for the complete expiration of the timeout: -/// -/// ```rust,no_run -/// use std::thread::park_timeout; -/// use std::time::{Instant, Duration}; -/// -/// let timeout = Duration::from_secs(2); -/// let beginning_park = Instant::now(); -/// -/// let mut timeout_remaining = timeout; -/// loop { -/// park_timeout(timeout_remaining); -/// let elapsed = beginning_park.elapsed(); -/// if elapsed >= timeout { -/// break; -/// } -/// println!("restarting park_timeout after {elapsed:?}"); -/// timeout_remaining = timeout - elapsed; -/// } -/// ``` -#[stable(feature = "park_timeout", since = "1.4.0")] -pub fn park_timeout(dur: Duration) { - let guard = PanicGuard; - // SAFETY: park_timeout is called on a handle owned by this thread. - unsafe { - current().park_timeout(dur); - } - // No panic occurred, do not abort. - forget(guard); -} - -//////////////////////////////////////////////////////////////////////////////// -// ThreadId -//////////////////////////////////////////////////////////////////////////////// - -/// A unique identifier for a running thread. -/// -/// A `ThreadId` is an opaque object that uniquely identifies each thread -/// created during the lifetime of a process. `ThreadId`s are guaranteed not to -/// be reused, even when a thread terminates. `ThreadId`s are under the control -/// of Rust's standard library and there may not be any relationship between -/// `ThreadId` and the underlying platform's notion of a thread identifier -- -/// the two concepts cannot, therefore, be used interchangeably. A `ThreadId` -/// can be retrieved from the [`id`] method on a [`Thread`]. 
-/// -/// # Examples -/// -/// ``` -/// use std::thread; -/// -/// let other_thread = thread::spawn(|| { -/// thread::current().id() -/// }); -/// -/// let other_thread_id = other_thread.join().unwrap(); -/// assert!(thread::current().id() != other_thread_id); -/// ``` -/// -/// [`id`]: Thread::id -#[stable(feature = "thread_id", since = "1.19.0")] -#[derive(Eq, PartialEq, Clone, Copy, Hash, Debug)] -pub struct ThreadId(NonZero); - -impl ThreadId { - // Generate a new unique thread ID. - pub(crate) fn new() -> ThreadId { - #[cold] - fn exhausted() -> ! { - panic!("failed to generate unique thread ID: bitspace exhausted") - } - - cfg_select! { - target_has_atomic = "64" => { - use crate::sync::atomic::{Atomic, AtomicU64}; - - static COUNTER: Atomic = AtomicU64::new(0); - - let mut last = COUNTER.load(Ordering::Relaxed); - loop { - let Some(id) = last.checked_add(1) else { - exhausted(); - }; - - match COUNTER.compare_exchange_weak(last, id, Ordering::Relaxed, Ordering::Relaxed) { - Ok(_) => return ThreadId(NonZero::new(id).unwrap()), - Err(id) => last = id, - } - } - } - _ => { - use crate::cell::SyncUnsafeCell; - use crate::hint::spin_loop; - use crate::sync::atomic::{Atomic, AtomicBool}; - use crate::thread::yield_now; - - // If we don't have a 64-bit atomic we use a small spinlock. We don't use Mutex - // here as we might be trying to get the current thread id in the global allocator, - // and on some platforms Mutex requires allocation. - static COUNTER_LOCKED: Atomic = AtomicBool::new(false); - static COUNTER: SyncUnsafeCell = SyncUnsafeCell::new(0); - - // Acquire lock. - let mut spin = 0; - while COUNTER_LOCKED.compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed).is_err() { - if spin <= 3 { - for _ in 0..(1 << spin) { - spin_loop(); - } - } else { - yield_now(); - } - spin += 1; - } - - // SAFETY: we have an exclusive lock on the counter. 
- unsafe { - if let Some(id) = (*COUNTER.get()).checked_add(1) { - *COUNTER.get() = id; - COUNTER_LOCKED.store(false, Ordering::Release); - ThreadId(NonZero::new(id).unwrap()) - } else { - COUNTER_LOCKED.store(false, Ordering::Release); - exhausted() - } - } - } - } - } - - #[cfg(any(not(target_thread_local), target_has_atomic = "64"))] - fn from_u64(v: u64) -> Option { - NonZero::new(v).map(ThreadId) - } - - /// This returns a numeric identifier for the thread identified by this - /// `ThreadId`. - /// - /// As noted in the documentation for the type itself, it is essentially an - /// opaque ID, but is guaranteed to be unique for each thread. The returned - /// value is entirely opaque -- only equality testing is stable. Note that - /// it is not guaranteed which values new threads will return, and this may - /// change across Rust versions. - #[must_use] - #[unstable(feature = "thread_id_value", issue = "67939")] - pub fn as_u64(&self) -> NonZero { - self.0 - } -} - -//////////////////////////////////////////////////////////////////////////////// -// Thread -//////////////////////////////////////////////////////////////////////////////// - -// This module ensures private fields are kept private, which is necessary to enforce the safety requirements. -mod thread_name_string { - use crate::ffi::{CStr, CString}; - use crate::str; - - /// Like a `String` it's guaranteed UTF-8 and like a `CString` it's null terminated. - pub(crate) struct ThreadNameString { - inner: CString, - } - - impl From for ThreadNameString { - fn from(s: String) -> Self { - Self { - inner: CString::new(s).expect("thread name may not contain interior null bytes"), - } - } - } - - impl ThreadNameString { - pub fn as_cstr(&self) -> &CStr { - &self.inner - } - - pub fn as_str(&self) -> &str { - // SAFETY: `ThreadNameString` is guaranteed to be UTF-8. 
- unsafe { str::from_utf8_unchecked(self.inner.to_bytes()) } - } - } -} - -use thread_name_string::ThreadNameString; - -/// Store the ID of the main thread. -/// -/// The thread handle for the main thread is created lazily, and this might even -/// happen pre-main. Since not every platform has a way to identify the main -/// thread when that happens – macOS's `pthread_main_np` function being a notable -/// exception – we cannot assign it the right name right then. Instead, in our -/// runtime startup code, we remember the thread ID of the main thread (through -/// this modules `set` function) and use it to identify the main thread from then -/// on. This works reliably and has the additional advantage that we can report -/// the right thread name on main even after the thread handle has been destroyed. -/// Note however that this also means that the name reported in pre-main functions -/// will be incorrect, but that's just something we have to live with. -pub(crate) mod main_thread { - cfg_select! { - target_has_atomic = "64" => { - use super::ThreadId; - use crate::sync::atomic::{Atomic, AtomicU64}; - use crate::sync::atomic::Ordering::Relaxed; - - static MAIN: Atomic = AtomicU64::new(0); - - pub(super) fn get() -> Option { - ThreadId::from_u64(MAIN.load(Relaxed)) - } - - /// # Safety - /// May only be called once. - pub(crate) unsafe fn set(id: ThreadId) { - MAIN.store(id.as_u64().get(), Relaxed) - } - } - _ => { - use super::ThreadId; - use crate::mem::MaybeUninit; - use crate::sync::atomic::{Atomic, AtomicBool}; - use crate::sync::atomic::Ordering::{Acquire, Release}; - - static INIT: Atomic = AtomicBool::new(false); - static mut MAIN: MaybeUninit = MaybeUninit::uninit(); - - pub(super) fn get() -> Option { - if INIT.load(Acquire) { - Some(unsafe { MAIN.assume_init() }) - } else { - None - } - } - - /// # Safety - /// May only be called once. 
- pub(crate) unsafe fn set(id: ThreadId) { - unsafe { MAIN = MaybeUninit::new(id) }; - INIT.store(true, Release); - } - } - } -} - -/// Run a function with the current thread's name. -/// -/// Modulo thread local accesses, this function is safe to call from signal -/// handlers and in similar circumstances where allocations are not possible. -pub(crate) fn with_current_name(f: F) -> R -where - F: FnOnce(Option<&str>) -> R, -{ - try_with_current(|thread| { - if let Some(thread) = thread { - // If there is a current thread handle, try to use the name stored - // there. - if let Some(name) = &thread.inner.name { - return f(Some(name.as_str())); - } else if Some(thread.inner.id) == main_thread::get() { - // The main thread doesn't store its name in the handle, we must - // identify it through its ID. Since we already have the `Thread`, - // we can retrieve the ID from it instead of going through another - // thread local. - return f(Some("main")); - } - } else if let Some(main) = main_thread::get() - && let Some(id) = current::id::get() - && id == main - { - // The main thread doesn't always have a thread handle, we must - // identify it through its ID instead. The checks are ordered so - // that the current ID is only loaded if it is actually needed, - // since loading it from TLS might need multiple expensive accesses. - return f(Some("main")); - } - - f(None) - }) -} - -/// The internal representation of a `Thread` handle -/// -/// We explicitly set the alignment for our guarantee in Thread::into_raw. This -/// allows applications to stuff extra metadata bits into the alignment, which -/// can be rather useful when working with atomics. -#[repr(align(8))] -struct Inner { - name: Option, - id: ThreadId, - parker: Parker, -} - -impl Inner { - fn parker(self: Pin<&Self>) -> Pin<&Parker> { - unsafe { Pin::map_unchecked(self, |inner| &inner.parker) } - } -} - -#[derive(Clone)] -#[stable(feature = "rust1", since = "1.0.0")] -/// A handle to a thread. 
-/// -/// Threads are represented via the `Thread` type, which you can get in one of -/// two ways: -/// -/// * By spawning a new thread, e.g., using the [`thread::spawn`][`spawn`] -/// function, and calling [`thread`][`JoinHandle::thread`] on the -/// [`JoinHandle`]. -/// * By requesting the current thread, using the [`thread::current`] function. -/// -/// The [`thread::current`] function is available even for threads not spawned -/// by the APIs of this module. -/// -/// There is usually no need to create a `Thread` struct yourself, one -/// should instead use a function like `spawn` to create new threads, see the -/// docs of [`Builder`] and [`spawn`] for more details. -/// -/// [`thread::current`]: current::current -pub struct Thread { - // We use the System allocator such that creating or dropping this handle - // does not interfere with a potential Global allocator using thread-local - // storage. - inner: Pin>, -} - -impl Thread { - pub(crate) fn new(id: ThreadId, name: Option) -> Thread { - let name = name.map(ThreadNameString::from); - - // We have to use `unsafe` here to construct the `Parker` in-place, - // which is required for the UNIX implementation. - // - // SAFETY: We pin the Arc immediately after creation, so its address never - // changes. - let inner = unsafe { - let mut arc = Arc::::new_uninit_in(System); - let ptr = Arc::get_mut_unchecked(&mut arc).as_mut_ptr(); - (&raw mut (*ptr).name).write(name); - (&raw mut (*ptr).id).write(id); - Parker::new_in_place(&raw mut (*ptr).parker); - Pin::new_unchecked(arc.assume_init()) - }; - - Thread { inner } - } - - /// Like the public [`park`], but callable on any handle. This is used to - /// allow parking in TLS destructors. - /// - /// # Safety - /// May only be called from the thread to which this handle belongs. - pub(crate) unsafe fn park(&self) { - unsafe { self.inner.as_ref().parker().park() } - } - - /// Like the public [`park_timeout`], but callable on any handle. 
This is - /// used to allow parking in TLS destructors. - /// - /// # Safety - /// May only be called from the thread to which this handle belongs. - pub(crate) unsafe fn park_timeout(&self, dur: Duration) { - unsafe { self.inner.as_ref().parker().park_timeout(dur) } - } - - /// Atomically makes the handle's token available if it is not already. - /// - /// Every thread is equipped with some basic low-level blocking support, via - /// the [`park`][park] function and the `unpark()` method. These can be - /// used as a more CPU-efficient implementation of a spinlock. - /// - /// See the [park documentation][park] for more details. - /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use std::time::Duration; - /// use std::sync::atomic::{AtomicBool, Ordering}; - /// - /// static QUEUED: AtomicBool = AtomicBool::new(false); - /// - /// let parked_thread = thread::Builder::new() - /// .spawn(|| { - /// println!("Parking thread"); - /// QUEUED.store(true, Ordering::Release); - /// thread::park(); - /// println!("Thread unparked"); - /// }) - /// .unwrap(); - /// - /// // Let some time pass for the thread to be spawned. - /// thread::sleep(Duration::from_millis(10)); - /// - /// // Wait until the other thread is queued. - /// // This is crucial! It guarantees that the `unpark` below is not consumed - /// // by some other code in the parked thread (e.g. inside `println!`). - /// while !QUEUED.load(Ordering::Acquire) { - /// // Spinning is of course inefficient; in practice, this would more likely be - /// // a dequeue where we have no work to do if there's nobody queued. - /// std::hint::spin_loop(); - /// } - /// - /// println!("Unpark the thread"); - /// parked_thread.thread().unpark(); - /// - /// parked_thread.join().unwrap(); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn unpark(&self) { - self.inner.as_ref().parker().unpark(); - } - - /// Gets the thread's unique identifier. 
- /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// - /// let other_thread = thread::spawn(|| { - /// thread::current().id() - /// }); - /// - /// let other_thread_id = other_thread.join().unwrap(); - /// assert!(thread::current().id() != other_thread_id); - /// ``` - #[stable(feature = "thread_id", since = "1.19.0")] - #[must_use] - pub fn id(&self) -> ThreadId { - self.inner.id - } - - /// Gets the thread's name. - /// - /// For more information about named threads, see - /// [this module-level documentation][naming-threads]. - /// - /// # Examples - /// - /// Threads by default have no name specified: - /// - /// ``` - /// use std::thread; - /// - /// let builder = thread::Builder::new(); - /// - /// let handler = builder.spawn(|| { - /// assert!(thread::current().name().is_none()); - /// }).unwrap(); - /// - /// handler.join().unwrap(); - /// ``` - /// - /// Thread with a specified name: - /// - /// ``` - /// use std::thread; - /// - /// let builder = thread::Builder::new() - /// .name("foo".into()); - /// - /// let handler = builder.spawn(|| { - /// assert_eq!(thread::current().name(), Some("foo")) - /// }).unwrap(); - /// - /// handler.join().unwrap(); - /// ``` - /// - /// [naming-threads]: ./index.html#naming-threads - #[stable(feature = "rust1", since = "1.0.0")] - #[must_use] - pub fn name(&self) -> Option<&str> { - if let Some(name) = &self.inner.name { - Some(name.as_str()) - } else if main_thread::get() == Some(self.inner.id) { - Some("main") - } else { - None - } - } - - /// Consumes the `Thread`, returning a raw pointer. - /// - /// To avoid a memory leak the pointer must be converted - /// back into a `Thread` using [`Thread::from_raw`]. The pointer is - /// guaranteed to be aligned to at least 8 bytes. 
- /// - /// # Examples - /// - /// ``` - /// #![feature(thread_raw)] - /// - /// use std::thread::{self, Thread}; - /// - /// let thread = thread::current(); - /// let id = thread.id(); - /// let ptr = Thread::into_raw(thread); - /// unsafe { - /// assert_eq!(Thread::from_raw(ptr).id(), id); - /// } - /// ``` - #[unstable(feature = "thread_raw", issue = "97523")] - pub fn into_raw(self) -> *const () { - // Safety: We only expose an opaque pointer, which maintains the `Pin` invariant. - let inner = unsafe { Pin::into_inner_unchecked(self.inner) }; - Arc::into_raw_with_allocator(inner).0 as *const () - } - - /// Constructs a `Thread` from a raw pointer. - /// - /// The raw pointer must have been previously returned - /// by a call to [`Thread::into_raw`]. - /// - /// # Safety - /// - /// This function is unsafe because improper use may lead - /// to memory unsafety, even if the returned `Thread` is never - /// accessed. - /// - /// Creating a `Thread` from a pointer other than one returned - /// from [`Thread::into_raw`] is **undefined behavior**. - /// - /// Calling this function twice on the same raw pointer can lead - /// to a double-free if both `Thread` instances are dropped. - #[unstable(feature = "thread_raw", issue = "97523")] - pub unsafe fn from_raw(ptr: *const ()) -> Thread { - // Safety: Upheld by caller. 
- unsafe { - Thread { inner: Pin::new_unchecked(Arc::from_raw_in(ptr as *const Inner, System)) } - } - } - - pub(crate) fn cname(&self) -> Option<&CStr> { - if let Some(name) = &self.inner.name { - Some(name.as_cstr()) - } else if main_thread::get() == Some(self.inner.id) { - Some(c"main") - } else { - None - } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl fmt::Debug for Thread { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Thread") - .field("id", &self.id()) - .field("name", &self.name()) - .finish_non_exhaustive() - } -} - -//////////////////////////////////////////////////////////////////////////////// -// JoinHandle -//////////////////////////////////////////////////////////////////////////////// - /// A specialized [`Result`] type for threads. /// /// Indicates the manner in which a thread exited. @@ -1794,353 +261,8 @@ impl fmt::Debug for Thread { #[doc(search_unbox)] pub type Result = crate::result::Result>; -// This packet is used to communicate the return value between the spawned -// thread and the rest of the program. It is shared through an `Arc` and -// there's no need for a mutex here because synchronization happens with `join()` -// (the caller will never read this packet until the thread has exited). -// -// An Arc to the packet is stored into a `JoinInner` which in turns is placed -// in `JoinHandle`. -struct Packet<'scope, T> { - scope: Option>, - result: UnsafeCell>>, - _marker: PhantomData>, -} - -// Due to the usage of `UnsafeCell` we need to manually implement Sync. -// The type `T` should already always be Send (otherwise the thread could not -// have been created) and the Packet is Sync because all access to the -// `UnsafeCell` synchronized (by the `join()` boundary), and `ScopeData` is Sync. 
-unsafe impl<'scope, T: Send> Sync for Packet<'scope, T> {} - -impl<'scope, T> Drop for Packet<'scope, T> { - fn drop(&mut self) { - // If this packet was for a thread that ran in a scope, the thread - // panicked, and nobody consumed the panic payload, we make sure - // the scope function will panic. - let unhandled_panic = matches!(self.result.get_mut(), Some(Err(_))); - // Drop the result without causing unwinding. - // This is only relevant for threads that aren't join()ed, as - // join() will take the `result` and set it to None, such that - // there is nothing left to drop here. - // If this panics, we should handle that, because we're outside the - // outermost `catch_unwind` of our thread. - // We just abort in that case, since there's nothing else we can do. - // (And even if we tried to handle it somehow, we'd also need to handle - // the case where the panic payload we get out of it also panics on - // drop, and so on. See issue #86027.) - if let Err(_) = panic::catch_unwind(panic::AssertUnwindSafe(|| { - *self.result.get_mut() = None; - })) { - rtabort!("thread result panicked on drop"); - } - // Book-keeping so the scope knows when it's done. - if let Some(scope) = &self.scope { - // Now that there will be no more user code running on this thread - // that can use 'scope, mark the thread as 'finished'. - // It's important we only do this after the `result` has been dropped, - // since dropping it might still use things it borrowed from 'scope. 
- scope.decrement_num_running_threads(unhandled_panic); - } - } -} - -/// Inner representation for JoinHandle -struct JoinInner<'scope, T> { - native: imp::Thread, - thread: Thread, - packet: Arc>, -} - -impl<'scope, T> JoinInner<'scope, T> { - fn join(mut self) -> Result { - self.native.join(); - Arc::get_mut(&mut self.packet) - // FIXME(fuzzypixelz): returning an error instead of panicking here - // would require updating the documentation of - // `std::thread::Result`; currently we can return `Err` if and only - // if the thread had panicked. - .expect("threads should not terminate unexpectedly") - .result - .get_mut() - .take() - .unwrap() - } -} - -/// An owned permission to join on a thread (block on its termination). -/// -/// A `JoinHandle` *detaches* the associated thread when it is dropped, which -/// means that there is no longer any handle to the thread and no way to `join` -/// on it. -/// -/// Due to platform restrictions, it is not possible to [`Clone`] this -/// handle: the ability to join a thread is a uniquely-owned permission. -/// -/// This `struct` is created by the [`thread::spawn`] function and the -/// [`thread::Builder::spawn`] method. -/// -/// # Examples -/// -/// Creation from [`thread::spawn`]: -/// -/// ``` -/// use std::thread; -/// -/// let join_handle: thread::JoinHandle<_> = thread::spawn(|| { -/// // some work here -/// }); -/// ``` -/// -/// Creation from [`thread::Builder::spawn`]: -/// -/// ``` -/// use std::thread; -/// -/// let builder = thread::Builder::new(); -/// -/// let join_handle: thread::JoinHandle<_> = builder.spawn(|| { -/// // some work here -/// }).unwrap(); -/// ``` -/// -/// A thread being detached and outliving the thread that spawned it: -/// -/// ```no_run -/// use std::thread; -/// use std::time::Duration; -/// -/// let original_thread = thread::spawn(|| { -/// let _detached_thread = thread::spawn(|| { -/// // Here we sleep to make sure that the first thread returns before. 
-/// thread::sleep(Duration::from_millis(10)); -/// // This will be called, even though the JoinHandle is dropped. -/// println!("♫ Still alive ♫"); -/// }); -/// }); -/// -/// original_thread.join().expect("The thread being joined has panicked"); -/// println!("Original thread is joined."); -/// -/// // We make sure that the new thread has time to run, before the main -/// // thread returns. -/// -/// thread::sleep(Duration::from_millis(1000)); -/// ``` -/// -/// [`thread::Builder::spawn`]: Builder::spawn -/// [`thread::spawn`]: spawn -#[stable(feature = "rust1", since = "1.0.0")] -#[cfg_attr(target_os = "teeos", must_use)] -pub struct JoinHandle(JoinInner<'static, T>); - -#[stable(feature = "joinhandle_impl_send_sync", since = "1.29.0")] -unsafe impl Send for JoinHandle {} -#[stable(feature = "joinhandle_impl_send_sync", since = "1.29.0")] -unsafe impl Sync for JoinHandle {} - -impl JoinHandle { - /// Extracts a handle to the underlying thread. - /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// - /// let builder = thread::Builder::new(); - /// - /// let join_handle: thread::JoinHandle<_> = builder.spawn(|| { - /// // some work here - /// }).unwrap(); - /// - /// let thread = join_handle.thread(); - /// println!("thread id: {:?}", thread.id()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[must_use] - pub fn thread(&self) -> &Thread { - &self.0.thread - } - - /// Waits for the associated thread to finish. - /// - /// This function will return immediately if the associated thread has already finished. - /// - /// In terms of [atomic memory orderings], the completion of the associated - /// thread synchronizes with this function returning. In other words, all - /// operations performed by that thread [happen - /// before](https://doc.rust-lang.org/nomicon/atomics.html#data-accesses) all - /// operations that happen after `join` returns. 
- /// - /// If the associated thread panics, [`Err`] is returned with the parameter given - /// to [`panic!`] (though see the Notes below). - /// - /// [`Err`]: crate::result::Result::Err - /// [atomic memory orderings]: crate::sync::atomic - /// - /// # Panics - /// - /// This function may panic on some platforms if a thread attempts to join - /// itself or otherwise may create a deadlock with joining threads. - /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// - /// let builder = thread::Builder::new(); - /// - /// let join_handle: thread::JoinHandle<_> = builder.spawn(|| { - /// // some work here - /// }).unwrap(); - /// join_handle.join().expect("Couldn't join on the associated thread"); - /// ``` - /// - /// # Notes - /// - /// If a "foreign" unwinding operation (e.g. an exception thrown from C++ - /// code, or a `panic!` in Rust code compiled or linked with a different - /// runtime) unwinds all the way to the thread root, the process may be - /// aborted; see the Notes on [`thread::spawn`]. If the process is not - /// aborted, this function will return a `Result::Err` containing an opaque - /// type. - /// - /// [`catch_unwind`]: ../../std/panic/fn.catch_unwind.html - /// [`thread::spawn`]: spawn - #[stable(feature = "rust1", since = "1.0.0")] - pub fn join(self) -> Result { - self.0.join() - } - - /// Checks if the associated thread has finished running its main function. - /// - /// `is_finished` supports implementing a non-blocking join operation, by checking - /// `is_finished`, and calling `join` if it returns `true`. This function does not block. To - /// block while waiting on the thread to finish, use [`join`][Self::join]. - /// - /// This might return `true` for a brief moment after the thread's main - /// function has returned, but before the thread itself has stopped running. - /// However, once this returns `true`, [`join`][Self::join] can be expected - /// to return quickly, without blocking for any significant amount of time. 
- #[stable(feature = "thread_is_running", since = "1.61.0")] - pub fn is_finished(&self) -> bool { - Arc::strong_count(&self.0.packet) == 1 - } -} - -impl AsInner for JoinHandle { - fn as_inner(&self) -> &imp::Thread { - &self.0.native - } -} - -impl IntoInner for JoinHandle { - fn into_inner(self) -> imp::Thread { - self.0.native - } -} - -#[stable(feature = "std_debug", since = "1.16.0")] -impl fmt::Debug for JoinHandle { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("JoinHandle").finish_non_exhaustive() - } -} - fn _assert_sync_and_send() { fn _assert_both() {} _assert_both::>(); _assert_both::(); } - -/// Returns an estimate of the default amount of parallelism a program should use. -/// -/// Parallelism is a resource. A given machine provides a certain capacity for -/// parallelism, i.e., a bound on the number of computations it can perform -/// simultaneously. This number often corresponds to the amount of CPUs a -/// computer has, but it may diverge in various cases. -/// -/// Host environments such as VMs or container orchestrators may want to -/// restrict the amount of parallelism made available to programs in them. This -/// is often done to limit the potential impact of (unintentionally) -/// resource-intensive programs on other programs running on the same machine. -/// -/// # Limitations -/// -/// The purpose of this API is to provide an easy and portable way to query -/// the default amount of parallelism the program should use. Among other things it -/// does not expose information on NUMA regions, does not account for -/// differences in (co)processor capabilities or current system load, -/// and will not modify the program's global state in order to more accurately -/// query the amount of available parallelism. -/// -/// Where both fixed steady-state and burst limits are available the steady-state -/// capacity will be used to ensure more predictable latencies. 
-/// -/// Resource limits can be changed during the runtime of a program, therefore the value is -/// not cached and instead recomputed every time this function is called. It should not be -/// called from hot code. -/// -/// The value returned by this function should be considered a simplified -/// approximation of the actual amount of parallelism available at any given -/// time. To get a more detailed or precise overview of the amount of -/// parallelism available to the program, you may wish to use -/// platform-specific APIs as well. The following platform limitations currently -/// apply to `available_parallelism`: -/// -/// On Windows: -/// - It may undercount the amount of parallelism available on systems with more -/// than 64 logical CPUs. However, programs typically need specific support to -/// take advantage of more than 64 logical CPUs, and in the absence of such -/// support, the number returned by this function accurately reflects the -/// number of logical CPUs the program can use by default. -/// - It may overcount the amount of parallelism available on systems limited by -/// process-wide affinity masks, or job object limitations. -/// -/// On Linux: -/// - It may overcount the amount of parallelism available when limited by a -/// process-wide affinity mask or cgroup quotas and `sched_getaffinity()` or cgroup fs can't be -/// queried, e.g. due to sandboxing. -/// - It may undercount the amount of parallelism if the current thread's affinity mask -/// does not reflect the process' cpuset, e.g. due to pinned threads. -/// - If the process is in a cgroup v1 cpu controller, this may need to -/// scan mountpoints to find the corresponding cgroup v1 controller, -/// which may take time on systems with large numbers of mountpoints. -/// (This does not apply to cgroup v2, or to processes not in a -/// cgroup.) -/// - It does not attempt to take `ulimit` into account. 
If there is a limit set on the number of -/// threads, `available_parallelism` cannot know how much of that limit a Rust program should -/// take, or know in a reliable and race-free way how much of that limit is already taken. -/// -/// On all targets: -/// - It may overcount the amount of parallelism available when running in a VM -/// with CPU usage limits (e.g. an overcommitted host). -/// -/// # Errors -/// -/// This function will, but is not limited to, return errors in the following -/// cases: -/// -/// - If the amount of parallelism is not known for the target platform. -/// - If the program lacks permission to query the amount of parallelism made -/// available to it. -/// -/// # Examples -/// -/// ``` -/// # #![allow(dead_code)] -/// use std::{io, thread}; -/// -/// fn main() -> io::Result<()> { -/// let count = thread::available_parallelism()?.get(); -/// assert!(count >= 1_usize); -/// Ok(()) -/// } -/// ``` -#[doc(alias = "available_concurrency")] // Alias for a previous name we gave this API on unstable. -#[doc(alias = "hardware_concurrency")] // Alias for C++ `std::thread::hardware_concurrency`. -#[doc(alias = "num_cpus")] // Alias for a popular ecosystem crate which provides similar functionality. 
-#[stable(feature = "available_parallelism", since = "1.59.0")] -pub fn available_parallelism() -> io::Result> { - imp::available_parallelism() -} diff --git a/library/std/src/thread/scoped.rs b/library/std/src/thread/scoped.rs index 75a5303fc321a..301f5e949cac3 100644 --- a/library/std/src/thread/scoped.rs +++ b/library/std/src/thread/scoped.rs @@ -1,4 +1,8 @@ -use super::{Builder, JoinInner, Result, Thread, current_or_unnamed}; +use super::Result; +use super::builder::Builder; +use super::current::current_or_unnamed; +use super::lifecycle::{JoinInner, spawn_unchecked}; +use super::thread::Thread; use crate::marker::PhantomData; use crate::panic::{AssertUnwindSafe, catch_unwind, resume_unwind}; use crate::sync::Arc; @@ -257,7 +261,10 @@ impl Builder { F: FnOnce() -> T + Send + 'scope, T: Send + 'scope, { - Ok(ScopedJoinHandle(unsafe { self.spawn_unchecked_(f, Some(scope.data.clone())) }?)) + let Builder { name, stack_size, no_hooks } = self; + Ok(ScopedJoinHandle(unsafe { + spawn_unchecked(name, stack_size, no_hooks, Some(scope.data.clone()), f) + }?)) } } @@ -279,7 +286,7 @@ impl<'scope, T> ScopedJoinHandle<'scope, T> { #[must_use] #[stable(feature = "scoped_threads", since = "1.63.0")] pub fn thread(&self) -> &Thread { - &self.0.thread + self.0.thread() } /// Waits for the associated thread to finish. @@ -325,7 +332,7 @@ impl<'scope, T> ScopedJoinHandle<'scope, T> { /// to return quickly, without blocking for any significant amount of time. #[stable(feature = "scoped_threads", since = "1.63.0")] pub fn is_finished(&self) -> bool { - Arc::strong_count(&self.0.packet) == 1 + self.0.is_finished() } } diff --git a/library/std/src/thread/spawnhook.rs b/library/std/src/thread/spawnhook.rs index c8a7bcf55c14e..254793ac33d08 100644 --- a/library/std/src/thread/spawnhook.rs +++ b/library/std/src/thread/spawnhook.rs @@ -1,7 +1,7 @@ +use super::thread::Thread; use crate::cell::Cell; use crate::iter; use crate::sync::Arc; -use crate::thread::Thread; crate::thread_local! 
{ /// A thread local linked list of spawn hooks. diff --git a/library/std/src/thread/tests.rs b/library/std/src/thread/tests.rs index 2117f5f93ce26..4b934c039a36f 100644 --- a/library/std/src/thread/tests.rs +++ b/library/std/src/thread/tests.rs @@ -1,11 +1,10 @@ -use super::Builder; use crate::any::Any; use crate::panic::panic_any; use crate::result; use crate::sync::atomic::{AtomicBool, Ordering}; use crate::sync::mpsc::{Sender, channel}; use crate::sync::{Arc, Barrier}; -use crate::thread::{self, Scope, ThreadId}; +use crate::thread::{self, Builder, Scope, ThreadId}; use crate::time::{Duration, Instant}; // !!! These tests are dangerous. If something is buggy, they will hang, !!! diff --git a/library/std/src/thread/thread.rs b/library/std/src/thread/thread.rs new file mode 100644 index 0000000000000..7c9c91c3b0c78 --- /dev/null +++ b/library/std/src/thread/thread.rs @@ -0,0 +1,326 @@ +use super::id::ThreadId; +use super::main_thread; +use crate::alloc::System; +use crate::ffi::CStr; +use crate::fmt; +use crate::pin::Pin; +use crate::sync::Arc; +use crate::sys::sync::Parker; +use crate::time::Duration; + +// This module ensures private fields are kept private, which is necessary to enforce the safety requirements. +mod thread_name_string { + use crate::ffi::{CStr, CString}; + use crate::str; + + /// Like a `String` it's guaranteed UTF-8 and like a `CString` it's null terminated. + pub(crate) struct ThreadNameString { + inner: CString, + } + + impl From for ThreadNameString { + fn from(s: String) -> Self { + Self { + inner: CString::new(s).expect("thread name may not contain interior null bytes"), + } + } + } + + impl ThreadNameString { + pub fn as_cstr(&self) -> &CStr { + &self.inner + } + + pub fn as_str(&self) -> &str { + // SAFETY: `ThreadNameString` is guaranteed to be UTF-8. 
+ unsafe { str::from_utf8_unchecked(self.inner.to_bytes()) } + } + } +} + +use thread_name_string::ThreadNameString; + +/// The internal representation of a `Thread` handle +/// +/// We explicitly set the alignment for our guarantee in Thread::into_raw. This +/// allows applications to stuff extra metadata bits into the alignment, which +/// can be rather useful when working with atomics. +#[repr(align(8))] +struct Inner { + name: Option, + id: ThreadId, + parker: Parker, +} + +impl Inner { + fn parker(self: Pin<&Self>) -> Pin<&Parker> { + unsafe { Pin::map_unchecked(self, |inner| &inner.parker) } + } +} + +#[derive(Clone)] +#[stable(feature = "rust1", since = "1.0.0")] +/// A handle to a thread. +/// +/// Threads are represented via the `Thread` type, which you can get in one of +/// two ways: +/// +/// * By spawning a new thread, e.g., using the [`thread::spawn`] +/// function, and calling [`thread`] on the [`JoinHandle`]. +/// * By requesting the current thread, using the [`thread::current`] function. +/// +/// The [`thread::current`] function is available even for threads not spawned +/// by the APIs of this module. +/// +/// There is usually no need to create a `Thread` struct yourself, one +/// should instead use a function like `spawn` to create new threads, see the +/// docs of [`Builder`] and [`spawn`] for more details. +/// +/// [`thread::spawn`]: super::spawn +/// [`thread`]: super::JoinHandle::thread +/// [`JoinHandle`]: super::JoinHandle +/// [`thread::current`]: super::current::current +/// [`Builder`]: super::Builder +/// [`spawn`]: super::spawn +pub struct Thread { + // We use the System allocator such that creating or dropping this handle + // does not interfere with a potential Global allocator using thread-local + // storage. 
+ inner: Pin>, +} + +impl Thread { + pub(crate) fn new(id: ThreadId, name: Option) -> Thread { + let name = name.map(ThreadNameString::from); + + // We have to use `unsafe` here to construct the `Parker` in-place, + // which is required for the UNIX implementation. + // + // SAFETY: We pin the Arc immediately after creation, so its address never + // changes. + let inner = unsafe { + let mut arc = Arc::::new_uninit_in(System); + let ptr = Arc::get_mut_unchecked(&mut arc).as_mut_ptr(); + (&raw mut (*ptr).name).write(name); + (&raw mut (*ptr).id).write(id); + Parker::new_in_place(&raw mut (*ptr).parker); + Pin::new_unchecked(arc.assume_init()) + }; + + Thread { inner } + } + + /// Like the public [`park`], but callable on any handle. This is used to + /// allow parking in TLS destructors. + /// + /// # Safety + /// May only be called from the thread to which this handle belongs. + /// + /// [`park`]: super::park + pub(crate) unsafe fn park(&self) { + unsafe { self.inner.as_ref().parker().park() } + } + + /// Like the public [`park_timeout`], but callable on any handle. This is + /// used to allow parking in TLS destructors. + /// + /// # Safety + /// May only be called from the thread to which this handle belongs. + /// + /// [`park_timeout`]: super::park_timeout + pub(crate) unsafe fn park_timeout(&self, dur: Duration) { + unsafe { self.inner.as_ref().parker().park_timeout(dur) } + } + + /// Atomically makes the handle's token available if it is not already. + /// + /// Every thread is equipped with some basic low-level blocking support, via + /// the [`park`] function and the `unpark()` method. These can be used as a + /// more CPU-efficient implementation of a spinlock. + /// + /// See the [park documentation] for more details. 
+ /// + /// # Examples + /// + /// ``` + /// use std::thread; + /// use std::time::Duration; + /// use std::sync::atomic::{AtomicBool, Ordering}; + /// + /// static QUEUED: AtomicBool = AtomicBool::new(false); + /// + /// let parked_thread = thread::Builder::new() + /// .spawn(|| { + /// println!("Parking thread"); + /// QUEUED.store(true, Ordering::Release); + /// thread::park(); + /// println!("Thread unparked"); + /// }) + /// .unwrap(); + /// + /// // Let some time pass for the thread to be spawned. + /// thread::sleep(Duration::from_millis(10)); + /// + /// // Wait until the other thread is queued. + /// // This is crucial! It guarantees that the `unpark` below is not consumed + /// // by some other code in the parked thread (e.g. inside `println!`). + /// while !QUEUED.load(Ordering::Acquire) { + /// // Spinning is of course inefficient; in practice, this would more likely be + /// // a dequeue where we have no work to do if there's nobody queued. + /// std::hint::spin_loop(); + /// } + /// + /// println!("Unpark the thread"); + /// parked_thread.thread().unpark(); + /// + /// parked_thread.join().unwrap(); + /// ``` + /// + /// [`park`]: super::park + /// [park documentation]: super::park + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn unpark(&self) { + self.inner.as_ref().parker().unpark(); + } + + /// Gets the thread's unique identifier. + /// + /// # Examples + /// + /// ``` + /// use std::thread; + /// + /// let other_thread = thread::spawn(|| { + /// thread::current().id() + /// }); + /// + /// let other_thread_id = other_thread.join().unwrap(); + /// assert!(thread::current().id() != other_thread_id); + /// ``` + #[stable(feature = "thread_id", since = "1.19.0")] + #[must_use] + pub fn id(&self) -> ThreadId { + self.inner.id + } + + /// Gets the thread's name. + /// + /// For more information about named threads, see + /// [this module-level documentation][naming-threads]. 
+ /// + /// # Examples + /// + /// Threads by default have no name specified: + /// + /// ``` + /// use std::thread; + /// + /// let builder = thread::Builder::new(); + /// + /// let handler = builder.spawn(|| { + /// assert!(thread::current().name().is_none()); + /// }).unwrap(); + /// + /// handler.join().unwrap(); + /// ``` + /// + /// Thread with a specified name: + /// + /// ``` + /// use std::thread; + /// + /// let builder = thread::Builder::new() + /// .name("foo".into()); + /// + /// let handler = builder.spawn(|| { + /// assert_eq!(thread::current().name(), Some("foo")) + /// }).unwrap(); + /// + /// handler.join().unwrap(); + /// ``` + /// + /// [naming-threads]: ./index.html#naming-threads + #[stable(feature = "rust1", since = "1.0.0")] + #[must_use] + pub fn name(&self) -> Option<&str> { + if let Some(name) = &self.inner.name { + Some(name.as_str()) + } else if main_thread::get() == Some(self.inner.id) { + Some("main") + } else { + None + } + } + + /// Consumes the `Thread`, returning a raw pointer. + /// + /// To avoid a memory leak the pointer must be converted + /// back into a `Thread` using [`Thread::from_raw`]. The pointer is + /// guaranteed to be aligned to at least 8 bytes. + /// + /// # Examples + /// + /// ``` + /// #![feature(thread_raw)] + /// + /// use std::thread::{self, Thread}; + /// + /// let thread = thread::current(); + /// let id = thread.id(); + /// let ptr = Thread::into_raw(thread); + /// unsafe { + /// assert_eq!(Thread::from_raw(ptr).id(), id); + /// } + /// ``` + #[unstable(feature = "thread_raw", issue = "97523")] + pub fn into_raw(self) -> *const () { + // Safety: We only expose an opaque pointer, which maintains the `Pin` invariant. + let inner = unsafe { Pin::into_inner_unchecked(self.inner) }; + Arc::into_raw_with_allocator(inner).0 as *const () + } + + /// Constructs a `Thread` from a raw pointer. + /// + /// The raw pointer must have been previously returned + /// by a call to [`Thread::into_raw`]. 
+ /// + /// # Safety + /// + /// This function is unsafe because improper use may lead + /// to memory unsafety, even if the returned `Thread` is never + /// accessed. + /// + /// Creating a `Thread` from a pointer other than one returned + /// from [`Thread::into_raw`] is **undefined behavior**. + /// + /// Calling this function twice on the same raw pointer can lead + /// to a double-free if both `Thread` instances are dropped. + #[unstable(feature = "thread_raw", issue = "97523")] + pub unsafe fn from_raw(ptr: *const ()) -> Thread { + // Safety: Upheld by caller. + unsafe { + Thread { inner: Pin::new_unchecked(Arc::from_raw_in(ptr as *const Inner, System)) } + } + } + + pub(crate) fn cname(&self) -> Option<&CStr> { + if let Some(name) = &self.inner.name { + Some(name.as_cstr()) + } else if main_thread::get() == Some(self.inner.id) { + Some(c"main") + } else { + None + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl fmt::Debug for Thread { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Thread") + .field("id", &self.id()) + .field("name", &self.name()) + .finish_non_exhaustive() + } +} diff --git a/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_mutex_deadlock.stderr b/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_mutex_deadlock.stderr index 582c6f11e3602..c80cbf835a386 100644 --- a/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_mutex_deadlock.stderr +++ b/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_mutex_deadlock.stderr @@ -12,8 +12,8 @@ LL | let ret = unsafe { libc::pthread_join(id, ptr::null_mut()) }; | = note: BACKTRACE: = note: inside `std::sys::thread::PLATFORM::Thread::join` at RUSTLIB/std/src/sys/thread/PLATFORM.rs:LL:CC - = note: inside `std::thread::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC - = note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC + = note: inside `std::thread::lifecycle::JoinInner::<'_, 
()>::join` at RUSTLIB/std/src/thread/lifecycle.rs:LL:CC + = note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/join_handle.rs:LL:CC note: inside `main` --> tests/fail-dep/concurrency/libc_pthread_mutex_deadlock.rs:LL:CC | diff --git a/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_rwlock_write_read_deadlock.stderr b/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_rwlock_write_read_deadlock.stderr index d8d0ff37e4100..f174d387f76e3 100644 --- a/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_rwlock_write_read_deadlock.stderr +++ b/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_rwlock_write_read_deadlock.stderr @@ -12,8 +12,8 @@ LL | let ret = unsafe { libc::pthread_join(id, ptr::null_mut()) }; | = note: BACKTRACE: = note: inside `std::sys::thread::PLATFORM::Thread::join` at RUSTLIB/std/src/sys/thread/PLATFORM.rs:LL:CC - = note: inside `std::thread::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC - = note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC + = note: inside `std::thread::lifecycle::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/lifecycle.rs:LL:CC + = note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/join_handle.rs:LL:CC note: inside `main` --> tests/fail-dep/concurrency/libc_pthread_rwlock_write_read_deadlock.rs:LL:CC | diff --git a/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_rwlock_write_write_deadlock.stderr b/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_rwlock_write_write_deadlock.stderr index c9a4004ebfb31..06b8e1246e016 100644 --- a/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_rwlock_write_write_deadlock.stderr +++ b/src/tools/miri/tests/fail-dep/concurrency/libc_pthread_rwlock_write_write_deadlock.stderr @@ -12,8 +12,8 @@ LL | let ret = unsafe { libc::pthread_join(id, ptr::null_mut()) }; | = note: BACKTRACE: = note: inside `std::sys::thread::PLATFORM::Thread::join` at 
RUSTLIB/std/src/sys/thread/PLATFORM.rs:LL:CC - = note: inside `std::thread::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC - = note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC + = note: inside `std::thread::lifecycle::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/lifecycle.rs:LL:CC + = note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/join_handle.rs:LL:CC note: inside `main` --> tests/fail-dep/concurrency/libc_pthread_rwlock_write_write_deadlock.rs:LL:CC | diff --git a/src/tools/miri/tests/fail-dep/concurrency/windows_join_detached.stderr b/src/tools/miri/tests/fail-dep/concurrency/windows_join_detached.stderr index 47a0ebdcfef82..52affb767db51 100644 --- a/src/tools/miri/tests/fail-dep/concurrency/windows_join_detached.stderr +++ b/src/tools/miri/tests/fail-dep/concurrency/windows_join_detached.stderr @@ -8,8 +8,8 @@ LL | let rc = unsafe { c::WaitForSingleObject(self.handle.as_raw_handle( = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information = note: BACKTRACE: = note: inside `std::sys::thread::PLATFORM::Thread::join` at RUSTLIB/std/src/sys/thread/PLATFORM.rs:LL:CC - = note: inside `std::thread::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC - = note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC + = note: inside `std::thread::lifecycle::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/lifecycle.rs:LL:CC + = note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/join_handle.rs:LL:CC note: inside `main` --> tests/fail-dep/concurrency/windows_join_detached.rs:LL:CC | diff --git a/src/tools/miri/tests/fail-dep/concurrency/windows_join_main.stderr b/src/tools/miri/tests/fail-dep/concurrency/windows_join_main.stderr index 93f800ecca331..0ab89676db370 100644 --- a/src/tools/miri/tests/fail-dep/concurrency/windows_join_main.stderr +++ 
b/src/tools/miri/tests/fail-dep/concurrency/windows_join_main.stderr @@ -14,8 +14,8 @@ LL | let rc = unsafe { c::WaitForSingleObject(self.handle.as_raw_handle( | = note: BACKTRACE: = note: inside `std::sys::thread::PLATFORM::Thread::join` at RUSTLIB/std/src/sys/thread/PLATFORM.rs:LL:CC - = note: inside `std::thread::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC - = note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC + = note: inside `std::thread::lifecycle::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/lifecycle.rs:LL:CC + = note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/join_handle.rs:LL:CC note: inside `main` --> tests/fail-dep/concurrency/windows_join_main.rs:LL:CC | diff --git a/src/tools/miri/tests/fail-dep/concurrency/windows_join_self.stderr b/src/tools/miri/tests/fail-dep/concurrency/windows_join_self.stderr index c76da9151b35e..bdfab966d6d98 100644 --- a/src/tools/miri/tests/fail-dep/concurrency/windows_join_self.stderr +++ b/src/tools/miri/tests/fail-dep/concurrency/windows_join_self.stderr @@ -12,8 +12,8 @@ LL | let rc = unsafe { c::WaitForSingleObject(self.handle.as_raw_handle( | = note: BACKTRACE: = note: inside `std::sys::thread::PLATFORM::Thread::join` at RUSTLIB/std/src/sys/thread/PLATFORM.rs:LL:CC - = note: inside `std::thread::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC - = note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC + = note: inside `std::thread::lifecycle::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/lifecycle.rs:LL:CC + = note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/join_handle.rs:LL:CC note: inside `main` --> tests/fail-dep/concurrency/windows_join_self.rs:LL:CC | diff --git a/src/tools/miri/tests/fail-dep/libc/eventfd_block_read_twice.stderr b/src/tools/miri/tests/fail-dep/libc/eventfd_block_read_twice.stderr index a7dfa0b6ea650..6253fe6d2c768 
100644 --- a/src/tools/miri/tests/fail-dep/libc/eventfd_block_read_twice.stderr +++ b/src/tools/miri/tests/fail-dep/libc/eventfd_block_read_twice.stderr @@ -6,8 +6,8 @@ LL | let ret = unsafe { libc::pthread_join(id, ptr::null_mut()) }; | = note: BACKTRACE: = note: inside `std::sys::thread::PLATFORM::Thread::join` at RUSTLIB/std/src/sys/thread/PLATFORM.rs:LL:CC - = note: inside `std::thread::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC - = note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC + = note: inside `std::thread::lifecycle::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/lifecycle.rs:LL:CC + = note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/join_handle.rs:LL:CC note: inside `main` --> tests/fail-dep/libc/eventfd_block_read_twice.rs:LL:CC | diff --git a/src/tools/miri/tests/fail-dep/libc/eventfd_block_write_twice.stderr b/src/tools/miri/tests/fail-dep/libc/eventfd_block_write_twice.stderr index 0e554598ecf56..aecc54c2fd882 100644 --- a/src/tools/miri/tests/fail-dep/libc/eventfd_block_write_twice.stderr +++ b/src/tools/miri/tests/fail-dep/libc/eventfd_block_write_twice.stderr @@ -6,8 +6,8 @@ LL | let ret = unsafe { libc::pthread_join(id, ptr::null_mut()) }; | = note: BACKTRACE: = note: inside `std::sys::thread::PLATFORM::Thread::join` at RUSTLIB/std/src/sys/thread/PLATFORM.rs:LL:CC - = note: inside `std::thread::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC - = note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC + = note: inside `std::thread::lifecycle::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/lifecycle.rs:LL:CC + = note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/join_handle.rs:LL:CC note: inside `main` --> tests/fail-dep/libc/eventfd_block_write_twice.rs:LL:CC | diff --git a/src/tools/miri/tests/fail-dep/libc/libc_epoll_block_two_thread.stderr 
b/src/tools/miri/tests/fail-dep/libc/libc_epoll_block_two_thread.stderr index 6740faedb3e9d..14390d632738d 100644 --- a/src/tools/miri/tests/fail-dep/libc/libc_epoll_block_two_thread.stderr +++ b/src/tools/miri/tests/fail-dep/libc/libc_epoll_block_two_thread.stderr @@ -6,8 +6,8 @@ LL | let ret = unsafe { libc::pthread_join(id, ptr::null_mut()) }; | = note: BACKTRACE: = note: inside `std::sys::thread::PLATFORM::Thread::join` at RUSTLIB/std/src/sys/thread/PLATFORM.rs:LL:CC - = note: inside `std::thread::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC - = note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC + = note: inside `std::thread::lifecycle::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/lifecycle.rs:LL:CC + = note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/join_handle.rs:LL:CC note: inside `main` --> tests/fail-dep/libc/libc_epoll_block_two_thread.rs:LL:CC | diff --git a/src/tools/miri/tests/fail-dep/libc/socketpair-close-while-blocked.stderr b/src/tools/miri/tests/fail-dep/libc/socketpair-close-while-blocked.stderr index 1e802209fd795..97b5df7fb17ff 100644 --- a/src/tools/miri/tests/fail-dep/libc/socketpair-close-while-blocked.stderr +++ b/src/tools/miri/tests/fail-dep/libc/socketpair-close-while-blocked.stderr @@ -6,8 +6,8 @@ LL | let ret = unsafe { libc::pthread_join(id, ptr::null_mut()) }; | = note: BACKTRACE: = note: inside `std::sys::thread::PLATFORM::Thread::join` at RUSTLIB/std/src/sys/thread/PLATFORM.rs:LL:CC - = note: inside `std::thread::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC - = note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC + = note: inside `std::thread::lifecycle::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/lifecycle.rs:LL:CC + = note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/join_handle.rs:LL:CC note: inside `main` --> 
tests/fail-dep/libc/socketpair-close-while-blocked.rs:LL:CC | diff --git a/src/tools/miri/tests/fail-dep/libc/socketpair_block_read_twice.stderr b/src/tools/miri/tests/fail-dep/libc/socketpair_block_read_twice.stderr index 3f7bbc779609f..38db735e9b4d4 100644 --- a/src/tools/miri/tests/fail-dep/libc/socketpair_block_read_twice.stderr +++ b/src/tools/miri/tests/fail-dep/libc/socketpair_block_read_twice.stderr @@ -6,8 +6,8 @@ LL | let ret = unsafe { libc::pthread_join(id, ptr::null_mut()) }; | = note: BACKTRACE: = note: inside `std::sys::thread::PLATFORM::Thread::join` at RUSTLIB/std/src/sys/thread/PLATFORM.rs:LL:CC - = note: inside `std::thread::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC - = note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC + = note: inside `std::thread::lifecycle::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/lifecycle.rs:LL:CC + = note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/join_handle.rs:LL:CC note: inside `main` --> tests/fail-dep/libc/socketpair_block_read_twice.rs:LL:CC | diff --git a/src/tools/miri/tests/fail-dep/libc/socketpair_block_write_twice.stderr b/src/tools/miri/tests/fail-dep/libc/socketpair_block_write_twice.stderr index b8dbb513da209..a2f1c67b5efc8 100644 --- a/src/tools/miri/tests/fail-dep/libc/socketpair_block_write_twice.stderr +++ b/src/tools/miri/tests/fail-dep/libc/socketpair_block_write_twice.stderr @@ -6,8 +6,8 @@ LL | let ret = unsafe { libc::pthread_join(id, ptr::null_mut()) }; | = note: BACKTRACE: = note: inside `std::sys::thread::PLATFORM::Thread::join` at RUSTLIB/std/src/sys/thread/PLATFORM.rs:LL:CC - = note: inside `std::thread::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC - = note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC + = note: inside `std::thread::lifecycle::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/lifecycle.rs:LL:CC + = note: inside 
`std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/join_handle.rs:LL:CC note: inside `main` --> tests/fail-dep/libc/socketpair_block_write_twice.rs:LL:CC | diff --git a/tests/debuginfo/thread.rs b/tests/debuginfo/thread.rs index 4274e449f1c9d..d78dad406ac6e 100644 --- a/tests/debuginfo/thread.rs +++ b/tests/debuginfo/thread.rs @@ -9,12 +9,12 @@ //@ cdb-command:g // //@ cdb-command:dx join_handle,d -//@ cdb-check:join_handle,d [Type: std::thread::JoinHandle >] -//@ cdb-check: [...] __0 [Type: std::thread::JoinInner >] +//@ cdb-check:join_handle,d [Type: std::thread::join_handle::JoinHandle >] +//@ cdb-check: [...] __0 [Type: std::thread::lifecycle::JoinInner >] // //@ cdb-command:dx t,d -//@ cdb-check:t,d : [...] [Type: std::thread::Thread *] -//@ cdb-check:[...] inner [...][Type: core::pin::Pin >] +//@ cdb-check:t,d : [...] [Type: std::thread::thread::Thread *] +//@ cdb-check:[...] inner [...][Type: core::pin::Pin >] use std::thread; diff --git a/tests/ui/binop/binary-op-not-allowed-issue-125631.stderr b/tests/ui/binop/binary-op-not-allowed-issue-125631.stderr index d95876659c7ac..a997fbee1f2a0 100644 --- a/tests/ui/binop/binary-op-not-allowed-issue-125631.stderr +++ b/tests/ui/binop/binary-op-not-allowed-issue-125631.stderr @@ -30,7 +30,7 @@ LL | == (Error::new(ErrorKind::Other, "2"), thread::current()); | ^^ ------------------------------------------------------ (std::io::Error, Thread) | note: `Thread` does not implement `PartialEq` - --> $SRC_DIR/std/src/thread/mod.rs:LL:COL + --> $SRC_DIR/std/src/thread/thread.rs:LL:COL | = note: `Thread` is defined in another crate note: `std::io::Error` does not implement `PartialEq` @@ -54,7 +54,7 @@ LL | struct T1; LL | struct T2; | ^^^^^^^^^ must implement `PartialEq` note: `Thread` does not implement `PartialEq` - --> $SRC_DIR/std/src/thread/mod.rs:LL:COL + --> $SRC_DIR/std/src/thread/thread.rs:LL:COL | = note: `Thread` is defined in another crate note: `std::io::Error` does not implement `PartialEq` diff 
--git a/tests/ui/closures/closure-move-sync.stderr b/tests/ui/closures/closure-move-sync.stderr index 2bb26b0c0b7d4..7241f78c3539e 100644 --- a/tests/ui/closures/closure-move-sync.stderr +++ b/tests/ui/closures/closure-move-sync.stderr @@ -18,7 +18,7 @@ note: required because it's used within this closure LL | let t = thread::spawn(|| { | ^^ note: required by a bound in `spawn` - --> $SRC_DIR/std/src/thread/mod.rs:LL:COL + --> $SRC_DIR/std/src/thread/functions.rs:LL:COL error: aborting due to 1 previous error diff --git a/tests/ui/consts/const-mut-refs/mut_ref_in_final.rs b/tests/ui/consts/const-mut-refs/mut_ref_in_final.rs index 9f9384adeb710..c620c2cf9fb8c 100644 --- a/tests/ui/consts/const-mut-refs/mut_ref_in_final.rs +++ b/tests/ui/consts/const-mut-refs/mut_ref_in_final.rs @@ -77,6 +77,36 @@ const RAW_MUT_CAST_C: SyncPtr = SyncPtr { x : &mut 42 as *mut _ as *const _ const RAW_MUT_COERCE_C: SyncPtr = SyncPtr { x: &mut 0 }; //~^ ERROR mutable borrows of temporaries +// Various cases of dangling references. +fn dangling() { + const fn helper_int2ptr() -> Option<&'static mut i32> { unsafe { + // Undefined behaviour (integer as pointer), who doesn't love tests like this. + Some(&mut *(42 as *mut i32)) + } } + const INT2PTR: Option<&mut i32> = helper_int2ptr(); //~ ERROR encountered a dangling reference + static INT2PTR_STATIC: Option<&mut i32> = helper_int2ptr(); //~ ERROR encountered a dangling reference + + const fn helper_dangling() -> Option<&'static mut i32> { unsafe { + // Undefined behaviour (dangling pointer), who doesn't love tests like this. + Some(&mut *(&mut 42 as *mut i32)) + } } + const DANGLING: Option<&mut i32> = helper_dangling(); //~ ERROR dangling reference + static DANGLING_STATIC: Option<&mut i32> = helper_dangling(); //~ ERROR dangling reference + +} + +// Allowed, because there is an explicit static mut. 
+static mut BUFFER: i32 = 42; +const fn ptr_to_buffer() -> Option<&'static mut i32> { unsafe { + Some(&mut *std::ptr::addr_of_mut!(BUFFER)) +} } +const MUT_TO_BUFFER: Option<&mut i32> = ptr_to_buffer(); + +// These are fine! Just statics pointing to mutable statics, nothing fundamentally wrong with this. +static MUT_STATIC: Option<&mut i32> = ptr_to_buffer(); +static mut MUT_ARRAY: &mut [u8] = &mut [42]; +static MUTEX: std::sync::Mutex<&mut [u8]> = std::sync::Mutex::new(unsafe { &mut *MUT_ARRAY }); + fn main() { println!("{}", unsafe { *A }); unsafe { *B = 4 } // Bad news diff --git a/tests/ui/consts/const-mut-refs/mut_ref_in_final.stderr b/tests/ui/consts/const-mut-refs/mut_ref_in_final.stderr index 08656776468f1..da6f2a28d5a8c 100644 --- a/tests/ui/consts/const-mut-refs/mut_ref_in_final.stderr +++ b/tests/ui/consts/const-mut-refs/mut_ref_in_final.stderr @@ -120,7 +120,51 @@ LL | const RAW_MUT_COERCE_C: SyncPtr = SyncPtr { x: &mut 0 }; = note: To avoid accidentally creating global mutable state, such temporaries must be immutable = help: If you really want global mutable state, try replacing the temporary by an interior mutable `static` or a `static mut` -error: aborting due to 12 previous errors +error[E0080]: constructing invalid value at ..0: encountered a dangling reference (0x2a[noalloc] has no provenance) + --> $DIR/mut_ref_in_final.rs:86:5 + | +LL | const INT2PTR: Option<&mut i32> = helper_int2ptr(); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ it is undefined behavior to use this value + | + = note: the rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rustc repository if you believe it should not be considered undefined behavior. 
+ = note: the raw bytes of the constant (size: $SIZE, align: $ALIGN) { + HEX_DUMP + } + +error[E0080]: constructing invalid value at ..0: encountered a dangling reference (0x2a[noalloc] has no provenance) + --> $DIR/mut_ref_in_final.rs:87:5 + | +LL | static INT2PTR_STATIC: Option<&mut i32> = helper_int2ptr(); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ it is undefined behavior to use this value + | + = note: the rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rustc repository if you believe it should not be considered undefined behavior. + = note: the raw bytes of the constant (size: $SIZE, align: $ALIGN) { + HEX_DUMP + } + +error[E0080]: constructing invalid value at ..0: encountered a dangling reference (use-after-free) + --> $DIR/mut_ref_in_final.rs:93:5 + | +LL | const DANGLING: Option<&mut i32> = helper_dangling(); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ it is undefined behavior to use this value + | + = note: the rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rustc repository if you believe it should not be considered undefined behavior. + = note: the raw bytes of the constant (size: $SIZE, align: $ALIGN) { + HEX_DUMP + } + +error[E0080]: constructing invalid value at ..0: encountered a dangling reference (use-after-free) + --> $DIR/mut_ref_in_final.rs:94:5 + | +LL | static DANGLING_STATIC: Option<&mut i32> = helper_dangling(); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ it is undefined behavior to use this value + | + = note: the rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rustc repository if you believe it should not be considered undefined behavior. 
+ = note: the raw bytes of the constant (size: $SIZE, align: $ALIGN) { + HEX_DUMP + } + +error: aborting due to 16 previous errors Some errors have detailed explanations: E0080, E0716, E0764. For more information about an error, try `rustc --explain E0080`. diff --git a/tests/ui/consts/const-mut-refs/mut_ref_in_final_dynamic_check.rs b/tests/ui/consts/const-mut-refs/mut_ref_in_final_dynamic_check.rs deleted file mode 100644 index 1ae901f1653a8..0000000000000 --- a/tests/ui/consts/const-mut-refs/mut_ref_in_final_dynamic_check.rs +++ /dev/null @@ -1,40 +0,0 @@ -//@ normalize-stderr: "(the raw bytes of the constant) \(size: [0-9]*, align: [0-9]*\)" -> "$1 (size: $$SIZE, align: $$ALIGN)" -//@ normalize-stderr: "( 0x[0-9a-f][0-9a-f] │)? ([0-9a-f][0-9a-f] |__ |╾─*ALLOC[0-9]+(\+[a-z0-9]+)?()?─*╼ )+ *│.*" -> " HEX_DUMP" -//@ normalize-stderr: "HEX_DUMP\s*\n\s*HEX_DUMP" -> "HEX_DUMP" -//@ dont-require-annotations: NOTE - -use std::sync::Mutex; - -// This file checks that our dynamic checks catch things that the static checks miss. -// We do not have static checks for these, because we do not look into function bodies. -// We treat all functions as not returning a mutable reference, because there is no way to -// do that without causing the borrow checker to complain (see the B4/helper test in -// mut_ref_in_final.rs). - -static mut BUFFER: i32 = 42; - -const fn helper() -> Option<&'static mut i32> { unsafe { - Some(&mut *std::ptr::addr_of_mut!(BUFFER)) -} } -const MUT: Option<&mut i32> = helper(); //~ ERROR encountered mutable reference - -const fn helper_int2ptr() -> Option<&'static mut i32> { unsafe { - // Undefined behaviour (integer as pointer), who doesn't love tests like this. 
- Some(&mut *(42 as *mut i32)) -} } -const INT2PTR: Option<&mut i32> = helper_int2ptr(); //~ ERROR encountered a dangling reference -static INT2PTR_STATIC: Option<&mut i32> = helper_int2ptr(); //~ ERROR encountered a dangling reference - -const fn helper_dangling() -> Option<&'static mut i32> { unsafe { - // Undefined behaviour (dangling pointer), who doesn't love tests like this. - Some(&mut *(&mut 42 as *mut i32)) -} } -const DANGLING: Option<&mut i32> = helper_dangling(); //~ ERROR dangling reference -static DANGLING_STATIC: Option<&mut i32> = helper_dangling(); //~ ERROR dangling reference - -// These are fine! Just statics pointing to mutable statics, nothing fundamentally wrong with this. -static MUT_STATIC: Option<&mut i32> = helper(); -static mut MUT_ARRAY: &mut [u8] = &mut [42]; -static MUTEX: Mutex<&mut [u8]> = Mutex::new(unsafe { &mut *MUT_ARRAY }); - -fn main() {} diff --git a/tests/ui/consts/const-mut-refs/mut_ref_in_final_dynamic_check.stderr b/tests/ui/consts/const-mut-refs/mut_ref_in_final_dynamic_check.stderr deleted file mode 100644 index 96263998ad42c..0000000000000 --- a/tests/ui/consts/const-mut-refs/mut_ref_in_final_dynamic_check.stderr +++ /dev/null @@ -1,58 +0,0 @@ -error[E0080]: constructing invalid value at ..0: encountered mutable reference in `const` value - --> $DIR/mut_ref_in_final_dynamic_check.rs:19:1 - | -LL | const MUT: Option<&mut i32> = helper(); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ it is undefined behavior to use this value - | - = note: the rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rustc repository if you believe it should not be considered undefined behavior. 
- = note: the raw bytes of the constant (size: $SIZE, align: $ALIGN) { - HEX_DUMP - } - -error[E0080]: constructing invalid value at ..0: encountered a dangling reference (0x2a[noalloc] has no provenance) - --> $DIR/mut_ref_in_final_dynamic_check.rs:25:1 - | -LL | const INT2PTR: Option<&mut i32> = helper_int2ptr(); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ it is undefined behavior to use this value - | - = note: the rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rustc repository if you believe it should not be considered undefined behavior. - = note: the raw bytes of the constant (size: $SIZE, align: $ALIGN) { - HEX_DUMP - } - -error[E0080]: constructing invalid value at ..0: encountered a dangling reference (0x2a[noalloc] has no provenance) - --> $DIR/mut_ref_in_final_dynamic_check.rs:26:1 - | -LL | static INT2PTR_STATIC: Option<&mut i32> = helper_int2ptr(); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ it is undefined behavior to use this value - | - = note: the rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rustc repository if you believe it should not be considered undefined behavior. - = note: the raw bytes of the constant (size: $SIZE, align: $ALIGN) { - HEX_DUMP - } - -error[E0080]: constructing invalid value at ..0: encountered a dangling reference (use-after-free) - --> $DIR/mut_ref_in_final_dynamic_check.rs:32:1 - | -LL | const DANGLING: Option<&mut i32> = helper_dangling(); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ it is undefined behavior to use this value - | - = note: the rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rustc repository if you believe it should not be considered undefined behavior. 
- = note: the raw bytes of the constant (size: $SIZE, align: $ALIGN) { - HEX_DUMP - } - -error[E0080]: constructing invalid value at ..0: encountered a dangling reference (use-after-free) - --> $DIR/mut_ref_in_final_dynamic_check.rs:33:1 - | -LL | static DANGLING_STATIC: Option<&mut i32> = helper_dangling(); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ it is undefined behavior to use this value - | - = note: the rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rustc repository if you believe it should not be considered undefined behavior. - = note: the raw bytes of the constant (size: $SIZE, align: $ALIGN) { - HEX_DUMP - } - -error: aborting due to 5 previous errors - -For more information about this error, try `rustc --explain E0080`. diff --git a/tests/ui/consts/issue-17718-const-bad-values.rs b/tests/ui/consts/issue-17718-const-bad-values.rs index a447350e35bf8..5b51c6186b130 100644 --- a/tests/ui/consts/issue-17718-const-bad-values.rs +++ b/tests/ui/consts/issue-17718-const-bad-values.rs @@ -9,6 +9,5 @@ const C1: &'static mut [usize] = &mut []; static mut S: i32 = 3; const C2: &'static mut i32 = unsafe { &mut S }; -//~^ ERROR: encountered mutable reference fn main() {} diff --git a/tests/ui/consts/issue-17718-const-bad-values.stderr b/tests/ui/consts/issue-17718-const-bad-values.stderr index 2c54200c8a2f2..11e11adcb5aee 100644 --- a/tests/ui/consts/issue-17718-const-bad-values.stderr +++ b/tests/ui/consts/issue-17718-const-bad-values.stderr @@ -8,18 +8,6 @@ LL | const C1: &'static mut [usize] = &mut []; = note: To avoid accidentally creating global mutable state, such temporaries must be immutable = help: If you really want global mutable state, try replacing the temporary by an interior mutable `static` or a `static mut` -error[E0080]: constructing invalid value: encountered mutable reference in `const` value - --> $DIR/issue-17718-const-bad-values.rs:11:1 - | -LL | const C2: &'static mut 
i32 = unsafe { &mut S }; - | ^^^^^^^^^^^^^^^^^^^^^^^^^^ it is undefined behavior to use this value - | - = note: the rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rustc repository if you believe it should not be considered undefined behavior. - = note: the raw bytes of the constant (size: $PTR, align: $PTR) { - HEX_DUMP - } - -error: aborting due to 2 previous errors +error: aborting due to 1 previous error -Some errors have detailed explanations: E0080, E0764. -For more information about an error, try `rustc --explain E0080`. +For more information about this error, try `rustc --explain E0764`. diff --git a/tests/ui/consts/miri_unleashed/mutable_references.rs b/tests/ui/consts/miri_unleashed/mutable_references.rs index 2e95393ccbf56..78f0cd520cb2b 100644 --- a/tests/ui/consts/miri_unleashed/mutable_references.rs +++ b/tests/ui/consts/miri_unleashed/mutable_references.rs @@ -25,8 +25,8 @@ static BOO: &mut Foo<()> = &mut Foo(()); const BLUNT: &mut i32 = &mut 42; //~^ ERROR: pointing to read-only memory +// This is fine, it points to a static so there are no questions of pointer identity. 
const SUBTLE: &mut i32 = unsafe { - //~^ ERROR: encountered mutable reference static mut STATIC: i32 = 0; &mut STATIC }; diff --git a/tests/ui/consts/miri_unleashed/mutable_references.stderr b/tests/ui/consts/miri_unleashed/mutable_references.stderr index 9bff2711e5f1f..4187a9e1c2bfa 100644 --- a/tests/ui/consts/miri_unleashed/mutable_references.stderr +++ b/tests/ui/consts/miri_unleashed/mutable_references.stderr @@ -43,17 +43,6 @@ LL | const BLUNT: &mut i32 = &mut 42; HEX_DUMP } -error[E0080]: constructing invalid value: encountered mutable reference in `const` value - --> $DIR/mutable_references.rs:28:1 - | -LL | const SUBTLE: &mut i32 = unsafe { - | ^^^^^^^^^^^^^^^^^^^^^^ it is undefined behavior to use this value - | - = note: the rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rustc repository if you believe it should not be considered undefined behavior. - = note: the raw bytes of the constant (size: $SIZE, align: $ALIGN) { - HEX_DUMP - } - error[E0080]: constructing invalid value at .x.: encountered `UnsafeCell` in read-only memory --> $DIR/mutable_references.rs:40:1 | @@ -210,7 +199,7 @@ help: skipping check that does not even have a feature gate LL | const RAW_MUT_COERCE: SyncPtr = SyncPtr { x: &mut 0 }; | ^^^^^^ -error: aborting due to 16 previous errors; 1 warning emitted +error: aborting due to 15 previous errors; 1 warning emitted Some errors have detailed explanations: E0080, E0594. For more information about an error, try `rustc --explain E0080`. 
diff --git a/tests/ui/errors/remap-path-prefix-sysroot.with-remap.stderr b/tests/ui/errors/remap-path-prefix-sysroot.with-remap.stderr index 88d713d2b048c..8540f492a662a 100644 --- a/tests/ui/errors/remap-path-prefix-sysroot.with-remap.stderr +++ b/tests/ui/errors/remap-path-prefix-sysroot.with-remap.stderr @@ -7,7 +7,7 @@ LL | self.thread.join().unwrap(); | move occurs because `self.thread` has type `JoinHandle<()>`, which does not implement the `Copy` trait | note: `JoinHandle::::join` takes ownership of the receiver `self`, which moves `self.thread` - --> remapped/library/std/src/thread/mod.rs:LL:COL + --> remapped/library/std/src/thread/join_handle.rs:LL:COL | LL | pub fn join(self) -> Result { | ^^^^ diff --git a/tests/ui/errors/remap-path-prefix-sysroot.without-remap.stderr b/tests/ui/errors/remap-path-prefix-sysroot.without-remap.stderr index 9b337699c3295..91a3d5b5d6c8a 100644 --- a/tests/ui/errors/remap-path-prefix-sysroot.without-remap.stderr +++ b/tests/ui/errors/remap-path-prefix-sysroot.without-remap.stderr @@ -7,7 +7,7 @@ LL | self.thread.join().unwrap(); | move occurs because `self.thread` has type `JoinHandle<()>`, which does not implement the `Copy` trait | note: `JoinHandle::::join` takes ownership of the receiver `self`, which moves `self.thread` - --> $SRC_DIR_REAL/std/src/thread/mod.rs:LL:COL + --> $SRC_DIR_REAL/std/src/thread/join_handle.rs:LL:COL | LL | pub fn join(self) -> Result { | ^^^^ diff --git a/tests/ui/threads-sendsync/rc-is-not-send.stderr b/tests/ui/threads-sendsync/rc-is-not-send.stderr index a06b683f729e0..784a0999c6011 100644 --- a/tests/ui/threads-sendsync/rc-is-not-send.stderr +++ b/tests/ui/threads-sendsync/rc-is-not-send.stderr @@ -30,7 +30,7 @@ note: required because it's used within this closure LL | thread::spawn(move || { | ^^^^^^^ note: required by a bound in `spawn` - --> $SRC_DIR/std/src/thread/mod.rs:LL:COL + --> $SRC_DIR/std/src/thread/functions.rs:LL:COL error: aborting due to 1 previous error