diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs index 209b8efa2c3b3..ac20f9c64fa1a 100644 --- a/compiler/rustc_codegen_llvm/src/attributes.rs +++ b/compiler/rustc_codegen_llvm/src/attributes.rs @@ -229,7 +229,7 @@ fn instrument_function_attr<'ll>( } fn nojumptables_attr<'ll>(cx: &SimpleCx<'ll>, sess: &Session) -> Option<&'ll Attribute> { - if !sess.opts.unstable_opts.no_jump_tables { + if sess.opts.cg.jump_tables { return None; } diff --git a/compiler/rustc_interface/src/tests.rs b/compiler/rustc_interface/src/tests.rs index be33db21d1df6..6c08b37dec083 100644 --- a/compiler/rustc_interface/src/tests.rs +++ b/compiler/rustc_interface/src/tests.rs @@ -620,6 +620,7 @@ fn test_codegen_options_tracking_hash() { tracked!(force_frame_pointers, FramePointer::Always); tracked!(force_unwind_tables, Some(true)); tracked!(instrument_coverage, InstrumentCoverage::Yes); + tracked!(jump_tables, false); tracked!(link_dead_code, Some(true)); tracked!(linker_plugin_lto, LinkerPluginLto::LinkerPluginAuto); tracked!(llvm_args, vec![String::from("1"), String::from("2")]); @@ -831,7 +832,6 @@ fn test_unstable_options_tracking_hash() { tracked!(mutable_noalias, false); tracked!(next_solver, NextSolverConfig { coherence: true, globally: true }); tracked!(no_generate_arange_section, true); - tracked!(no_jump_tables, true); tracked!(no_link, true); tracked!(no_profiler_runtime, true); tracked!(no_trait_vptr, true); diff --git a/compiler/rustc_session/src/options.rs b/compiler/rustc_session/src/options.rs index b89aec7d22a91..c9d73adf31d49 100644 --- a/compiler/rustc_session/src/options.rs +++ b/compiler/rustc_session/src/options.rs @@ -2093,6 +2093,8 @@ options! { "instrument the generated code to support LLVM source-based code coverage reports \ (note, the compiler build config must include `profiler = true`); \ implies `-C symbol-mangling-version=v0`"), + jump_tables: bool = (true, parse_bool, [TRACKED], + "allow jump table and lookup table generation from switch case lowering (default: yes)"), link_arg: (/* redirected to link_args */) = ((), parse_string_push, [UNTRACKED], "a single extra argument to append to the linker invocation (can be used several times)"), link_args: Vec = (Vec::new(), parse_list, [UNTRACKED], @@ -2475,8 +2477,6 @@ options! 
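For context on the option rename above: the unstable `-Z no-jump-tables` flag becomes the tracked codegen option `jump_tables` (default `yes`). The sketch below is illustrative only; the `-C jump-tables=no` command-line spelling is an assumption based on the option name added here, and `classify` is a made-up function of the kind whose `switch` lowering the option controls.

```
// A dense integer `match` like this is the sort of `switch` lowering that
// LLVM may turn into a jump table or lookup table. With this change that
// behaviour is governed by the tracked codegen option `jump_tables`
// (presumably `-C jump-tables=no` on the command line) instead of the
// unstable `-Z no-jump-tables`.
pub fn classify(x: u8) -> &'static str {
    match x {
        0 => "zero",
        1 => "one",
        2 => "two",
        3 => "three",
        4 => "four",
        _ => "many",
    }
}

fn main() {
    assert_eq!(classify(3), "three");
}
```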
{ "omit DWARF address ranges that give faster lookups"), no_implied_bounds_compat: bool = (false, parse_bool, [TRACKED], "disable the compatibility version of the `implied_bounds_ty` query"), - no_jump_tables: bool = (false, parse_no_value, [TRACKED], - "disable the jump tables and lookup tables that can be generated from a switch case lowering"), no_leak_check: bool = (false, parse_no_value, [UNTRACKED], "disable the 'leak check' for subtyping; unsound, but useful for tests"), no_link: bool = (false, parse_no_value, [TRACKED], diff --git a/compiler/rustc_target/src/spec/base/helenos.rs b/compiler/rustc_target/src/spec/base/helenos.rs new file mode 100644 index 0000000000000..8d6f406e41f73 --- /dev/null +++ b/compiler/rustc_target/src/spec/base/helenos.rs @@ -0,0 +1,17 @@ +use crate::spec::{PanicStrategy, RelroLevel, StackProbeType, TargetOptions}; + +pub(crate) fn opts() -> TargetOptions { + TargetOptions { + os: "helenos".into(), + + dynamic_linking: true, + // we need the linker to keep libgcc and friends + no_default_libraries: false, + has_rpath: true, + relro_level: RelroLevel::Full, + panic_strategy: PanicStrategy::Abort, + stack_probes: StackProbeType::Inline, + + ..Default::default() + } +} diff --git a/compiler/rustc_target/src/spec/base/mod.rs b/compiler/rustc_target/src/spec/base/mod.rs index 6ab8597a4ecb0..ca1c9649ee4d7 100644 --- a/compiler/rustc_target/src/spec/base/mod.rs +++ b/compiler/rustc_target/src/spec/base/mod.rs @@ -8,6 +8,7 @@ pub(crate) mod dragonfly; pub(crate) mod freebsd; pub(crate) mod fuchsia; pub(crate) mod haiku; +pub(crate) mod helenos; pub(crate) mod hermit; pub(crate) mod hurd; pub(crate) mod hurd_gnu; diff --git a/compiler/rustc_target/src/spec/mod.rs b/compiler/rustc_target/src/spec/mod.rs index 39a260f9a9b44..b49e7fc9cff66 100644 --- a/compiler/rustc_target/src/spec/mod.rs +++ b/compiler/rustc_target/src/spec/mod.rs @@ -1534,6 +1534,12 @@ supported_targets! 
{ ("i686-unknown-haiku", i686_unknown_haiku), ("x86_64-unknown-haiku", x86_64_unknown_haiku), + ("aarch64-unknown-helenos", aarch64_unknown_helenos), + ("i686-unknown-helenos", i686_unknown_helenos), + ("powerpc-unknown-helenos", powerpc_unknown_helenos), + ("sparc64-unknown-helenos", sparc64_unknown_helenos), + ("x86_64-unknown-helenos", x86_64_unknown_helenos), + ("i686-unknown-hurd-gnu", i686_unknown_hurd_gnu), ("x86_64-unknown-hurd-gnu", x86_64_unknown_hurd_gnu), diff --git a/compiler/rustc_target/src/spec/targets/aarch64_unknown_helenos.rs b/compiler/rustc_target/src/spec/targets/aarch64_unknown_helenos.rs new file mode 100644 index 0000000000000..31b4a2111cbf6 --- /dev/null +++ b/compiler/rustc_target/src/spec/targets/aarch64_unknown_helenos.rs @@ -0,0 +1,22 @@ +use crate::spec::{Target, base}; + +pub(crate) fn target() -> Target { + let mut base = base::helenos::opts(); + base.max_atomic_width = Some(128); + base.features = "+v8a".into(); + base.linker = Some("aarch64-helenos-gcc".into()); + + Target { + llvm_target: "aarch64-unknown-helenos".into(), + metadata: crate::spec::TargetMetadata { + description: Some("ARM64 HelenOS".into()), + tier: Some(3), + host_tools: Some(false), + std: Some(true), + }, + pointer_width: 64, + data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32".into(), + arch: "aarch64".into(), + options: base, + } +} diff --git a/compiler/rustc_target/src/spec/targets/i686_unknown_helenos.rs b/compiler/rustc_target/src/spec/targets/i686_unknown_helenos.rs new file mode 100644 index 0000000000000..1cd32d6f78d93 --- /dev/null +++ b/compiler/rustc_target/src/spec/targets/i686_unknown_helenos.rs @@ -0,0 +1,26 @@ +use crate::spec::{Cc, LinkerFlavor, Lld, RustcAbi, Target, base}; + +pub(crate) fn target() -> Target { + let mut base = base::helenos::opts(); + base.cpu = "pentium4".into(); + base.max_atomic_width = Some(64); + base.linker = Some("i686-helenos-gcc".into()); + base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m32"]); + base.rustc_abi = Some(RustcAbi::X86Sse2); + + Target { + llvm_target: "i686-unknown-helenos".into(), + metadata: crate::spec::TargetMetadata { + description: Some("IA-32 (i686) HelenOS".into()), + tier: Some(3), + host_tools: Some(false), + std: Some(true), + }, + pointer_width: 32, + data_layout: "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-\ + i128:128-f64:32:64-f80:32-n8:16:32-S128" + .into(), + arch: "x86".into(), + options: base, + } +} diff --git a/compiler/rustc_target/src/spec/targets/powerpc_unknown_helenos.rs b/compiler/rustc_target/src/spec/targets/powerpc_unknown_helenos.rs new file mode 100644 index 0000000000000..2b713e8a5ff2f --- /dev/null +++ b/compiler/rustc_target/src/spec/targets/powerpc_unknown_helenos.rs @@ -0,0 +1,24 @@ +use rustc_abi::Endian; + +use crate::spec::{Target, TargetMetadata, base}; + +pub(crate) fn target() -> Target { + let mut base = base::helenos::opts(); + base.endian = Endian::Big; + base.max_atomic_width = Some(32); + base.linker = Some("ppc-helenos-gcc".into()); + + Target { + llvm_target: "powerpc-unknown-helenos".into(), + metadata: TargetMetadata { + description: Some("PowerPC HelenOS".into()), + tier: Some(3), + host_tools: Some(false), + std: Some(true), + }, + pointer_width: 32, + data_layout: "E-m:e-p:32:32-Fn32-i64:64-n32".into(), + arch: "powerpc".into(), + options: base, + } +} diff --git a/compiler/rustc_target/src/spec/targets/sparc64_unknown_helenos.rs b/compiler/rustc_target/src/spec/targets/sparc64_unknown_helenos.rs new 
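Since the shared `base::helenos::opts()` above sets `os: "helenos"`, code built for any of the new triples can be gated in the usual way. A minimal sketch, not part of the change:

```
// With `os: "helenos"` in the base TargetOptions, builds for e.g.
// x86_64-unknown-helenos see `target_os = "helenos"`.
#[cfg(target_os = "helenos")]
fn platform() -> &'static str {
    "HelenOS"
}

#[cfg(not(target_os = "helenos"))]
fn platform() -> &'static str {
    "not HelenOS"
}

fn main() {
    println!("running on {}", platform());
}
```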
file mode 100644 index 0000000000000..8c3def57d0cf4 --- /dev/null +++ b/compiler/rustc_target/src/spec/targets/sparc64_unknown_helenos.rs @@ -0,0 +1,26 @@ +use rustc_abi::Endian; + +use crate::spec::{Cc, LinkerFlavor, Lld, Target, TargetMetadata, base}; + +pub(crate) fn target() -> Target { + let mut base = base::helenos::opts(); + base.endian = Endian::Big; + base.cpu = "v9".into(); + base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64"]); + base.max_atomic_width = Some(64); + base.linker = Some("sparc64-helenos-gcc".into()); + + Target { + llvm_target: "sparc64-unknown-helenos".into(), + metadata: TargetMetadata { + description: Some("SPARC HelenOS".into()), + tier: Some(3), + host_tools: Some(false), + std: Some(true), + }, + pointer_width: 64, + data_layout: "E-m:e-i64:64-i128:128-n32:64-S128".into(), + arch: "sparc64".into(), + options: base, + } +} diff --git a/compiler/rustc_target/src/spec/targets/x86_64_unknown_helenos.rs b/compiler/rustc_target/src/spec/targets/x86_64_unknown_helenos.rs new file mode 100644 index 0000000000000..82c9807f73e3e --- /dev/null +++ b/compiler/rustc_target/src/spec/targets/x86_64_unknown_helenos.rs @@ -0,0 +1,25 @@ +use crate::spec::{Cc, LinkerFlavor, Lld, Target, base}; + +pub(crate) fn target() -> Target { + let mut base = base::helenos::opts(); + base.cpu = "x86-64".into(); + base.plt_by_default = false; + base.max_atomic_width = Some(64); + base.linker = Some("amd64-helenos-gcc".into()); + base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64"]); + + Target { + llvm_target: "x86_64-unknown-helenos".into(), + metadata: crate::spec::TargetMetadata { + description: Some("64-bit HelenOS".into()), + tier: Some(3), + host_tools: Some(false), + std: Some(true), + }, + pointer_width: 64, + data_layout: + "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128".into(), + arch: "x86_64".into(), + options: base, + } +} diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs index 7d8077f231dde..7ad1679b1c822 100644 --- a/library/alloc/src/boxed.rs +++ b/library/alloc/src/boxed.rs @@ -192,11 +192,15 @@ use core::fmt; use core::future::Future; use core::hash::{Hash, Hasher}; use core::marker::{Tuple, Unsize}; +#[cfg(not(no_global_oom_handling))] +use core::mem::MaybeUninit; use core::mem::{self, SizedTypeProperties}; use core::ops::{ AsyncFn, AsyncFnMut, AsyncFnOnce, CoerceUnsized, Coroutine, CoroutineState, Deref, DerefMut, DerefPure, DispatchFromDyn, LegacyReceiver, }; +#[cfg(not(no_global_oom_handling))] +use core::ops::{Residual, Try}; use core::pin::{Pin, PinCoerceUnsized}; use core::ptr::{self, NonNull, Unique}; use core::task::{Context, Poll}; @@ -386,6 +390,82 @@ impl Box { pub fn try_new_zeroed() -> Result>, AllocError> { Box::try_new_zeroed_in(Global) } + + /// Maps the value in a box, reusing the allocation if possible. + /// + /// `f` is called on the value in the box, and the result is returned, also boxed. + /// + /// Note: this is an associated function, which means that you have + /// to call it as `Box::map(b, f)` instead of `b.map(f)`. This + /// is so that there is no conflict with a method on the inner type. 
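A quick sketch (not part of the change) of when `Box::map` can actually reuse the allocation: only when the source and target types have identical size and alignment, per the check in the body below; otherwise it falls back to a fresh `Box::new`.

```
#![feature(smart_pointer_try_map)]

fn main() {
    // i32 -> u32: identical size and alignment, so the original allocation
    // can be reused by `Box::map`.
    let b = Box::new(-1_i32);
    let bits: Box<u32> = Box::map(b, |i| i as u32);
    assert_eq!(*bits, u32::MAX);

    // i32 -> i64: the layouts differ, so `Box::map` falls back to `Box::new`.
    let wide: Box<i64> = Box::map(Box::new(7_i32), |i| i64::from(i));
    assert_eq!(*wide, 7);
}
```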
+ /// + /// # Examples + /// + /// ``` + /// #![feature(smart_pointer_try_map)] + /// + /// let b = Box::new(7); + /// let new = Box::map(b, |i| i + 7); + /// assert_eq!(*new, 14); + /// ``` + #[cfg(not(no_global_oom_handling))] + #[unstable(feature = "smart_pointer_try_map", issue = "144419")] + pub fn map(this: Self, f: impl FnOnce(T) -> U) -> Box { + if size_of::() == size_of::() && align_of::() == align_of::() { + let (value, allocation) = Box::take(this); + Box::write( + unsafe { mem::transmute::>, Box>>(allocation) }, + f(value), + ) + } else { + Box::new(f(*this)) + } + } + + /// Attempts to map the value in a box, reusing the allocation if possible. + /// + /// `f` is called on the value in the box, and if the operation succeeds, the result is + /// returned, also boxed. + /// + /// Note: this is an associated function, which means that you have + /// to call it as `Box::try_map(b, f)` instead of `b.try_map(f)`. This + /// is so that there is no conflict with a method on the inner type. + /// + /// # Examples + /// + /// ``` + /// #![feature(smart_pointer_try_map)] + /// + /// let b = Box::new(7); + /// let new = Box::try_map(b, u32::try_from).unwrap(); + /// assert_eq!(*new, 7); + /// ``` + #[cfg(not(no_global_oom_handling))] + #[unstable(feature = "smart_pointer_try_map", issue = "144419")] + pub fn try_map( + this: Self, + f: impl FnOnce(T) -> R, + ) -> >>::TryType + where + R: Try, + R::Residual: Residual>, + { + if size_of::() == size_of::() && align_of::() == align_of::() { + let (value, allocation) = Box::take(this); + try { + Box::write( + unsafe { + mem::transmute::>, Box>>( + allocation, + ) + }, + f(value)?, + ) + } + } else { + try { Box::new(f(*this)?) } + } + } } impl Box { diff --git a/library/alloc/src/collections/vec_deque/extract_if.rs b/library/alloc/src/collections/vec_deque/extract_if.rs new file mode 100644 index 0000000000000..bed7d46482cf4 --- /dev/null +++ b/library/alloc/src/collections/vec_deque/extract_if.rs @@ -0,0 +1,149 @@ +use core::ops::{Range, RangeBounds}; +use core::{fmt, ptr, slice}; + +use super::VecDeque; +use crate::alloc::{Allocator, Global}; + +/// An iterator which uses a closure to determine if an element should be removed. +/// +/// This struct is created by [`VecDeque::extract_if`]. +/// See its documentation for more. +/// +/// # Example +/// +/// ``` +/// #![feature(vec_deque_extract_if)] +/// +/// use std::collections::vec_deque::ExtractIf; +/// use std::collections::vec_deque::VecDeque; +/// +/// let mut v = VecDeque::from([0, 1, 2]); +/// let iter: ExtractIf<'_, _, _> = v.extract_if(.., |x| *x % 2 == 0); +/// ``` +#[unstable(feature = "vec_deque_extract_if", issue = "147750")] +#[must_use = "iterators are lazy and do nothing unless consumed"] +pub struct ExtractIf< + 'a, + T, + F, + #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global, +> { + vec: &'a mut VecDeque, + /// The index of the item that will be inspected by the next call to `next`. + idx: usize, + /// Elements at and beyond this point will be retained. Must be equal or smaller than `old_len`. + end: usize, + /// The number of items that have been drained (removed) thus far. + del: usize, + /// The original length of `vec` prior to draining. + old_len: usize, + /// The filter test predicate. 
+ pred: F, +} + +impl<'a, T, F, A: Allocator> ExtractIf<'a, T, F, A> { + pub(super) fn new>( + vec: &'a mut VecDeque, + pred: F, + range: R, + ) -> Self { + let old_len = vec.len(); + let Range { start, end } = slice::range(range, ..old_len); + + // Guard against the deque getting leaked (leak amplification) + vec.len = 0; + ExtractIf { vec, idx: start, del: 0, end, old_len, pred } + } + + /// Returns a reference to the underlying allocator. + #[unstable(feature = "allocator_api", issue = "32838")] + #[inline] + pub fn allocator(&self) -> &A { + self.vec.allocator() + } +} + +#[unstable(feature = "vec_deque_extract_if", issue = "147750")] +impl Iterator for ExtractIf<'_, T, F, A> +where + F: FnMut(&mut T) -> bool, +{ + type Item = T; + + fn next(&mut self) -> Option { + while self.idx < self.end { + let i = self.idx; + // SAFETY: + // We know that `i < self.end` from the if guard and that `self.end <= self.old_len` from + // the validity of `Self`. Therefore `i` points to an element within `vec`. + // + // Additionally, the i-th element is valid because each element is visited at most once + // and it is the first time we access vec[i]. + // + // Note: we can't use `vec.get_mut(i).unwrap()` here since the precondition for that + // function is that i < vec.len, but we've set vec's length to zero. + let idx = self.vec.to_physical_idx(i); + let cur = unsafe { &mut *self.vec.ptr().add(idx) }; + let drained = (self.pred)(cur); + // Update the index *after* the predicate is called. If the index + // is updated prior and the predicate panics, the element at this + // index would be leaked. + self.idx += 1; + if drained { + self.del += 1; + // SAFETY: We never touch this element again after returning it. + return Some(unsafe { ptr::read(cur) }); + } else if self.del > 0 { + let hole_slot = self.vec.to_physical_idx(i - self.del); + // SAFETY: `self.del` > 0, so the hole slot must not overlap with current element. + // We use copy for move, and never touch this element again. + unsafe { self.vec.wrap_copy(idx, hole_slot, 1) }; + } + } + None + } + + fn size_hint(&self) -> (usize, Option) { + (0, Some(self.end - self.idx)) + } +} + +#[unstable(feature = "vec_deque_extract_if", issue = "147750")] +impl Drop for ExtractIf<'_, T, F, A> { + fn drop(&mut self) { + if self.del > 0 { + let src = self.vec.to_physical_idx(self.idx); + let dst = self.vec.to_physical_idx(self.idx - self.del); + let len = self.old_len - self.idx; + // SAFETY: Trailing unchecked items must be valid since we never touch them. + unsafe { self.vec.wrap_copy(src, dst, len) }; + } + self.vec.len = self.old_len - self.del; + } +} + +#[unstable(feature = "vec_deque_extract_if", issue = "147750")] +impl fmt::Debug for ExtractIf<'_, T, F, A> +where + T: fmt::Debug, + A: Allocator, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let peek = if self.idx < self.end { + let idx = self.vec.to_physical_idx(self.idx); + // This has to use pointer arithmetic as `self.vec[self.idx]` or + // `self.vec.get_unchecked(self.idx)` wouldn't work since we + // temporarily set the length of `self.vec` to zero. + // + // SAFETY: + // Since `self.idx` is smaller than `self.end` and `self.end` is + // smaller than `self.old_len`, `idx` is valid for indexing the + // buffer. Also, per the invariant of `self.idx`, this element + // has not been inspected/moved out yet. 
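A sketch of what the `Drop` implementation above means for callers: dropping the iterator before it is exhausted backshifts the not-yet-visited tail over any holes and retains it.

```
#![feature(vec_deque_extract_if)]
use std::collections::VecDeque;

fn main() {
    let mut v = VecDeque::from([1, 2, 3, 4, 5, 6]);
    {
        let mut it = v.extract_if(.., |x| *x % 2 == 0);
        // Take only the first even element, then drop the iterator early.
        assert_eq!(it.next(), Some(2));
    }
    // The Drop impl backshifts the remaining elements over the hole left by
    // `2` and restores the length; nothing else is removed.
    assert_eq!(v, [1, 3, 4, 5, 6]);
}
```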
+ Some(unsafe { &*self.vec.ptr().add(idx) }) + } else { + None + }; + f.debug_struct("ExtractIf").field("peek", &peek).finish_non_exhaustive() + } +} diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs index ac619a42d356d..dc5aaa8726032 100644 --- a/library/alloc/src/collections/vec_deque/mod.rs +++ b/library/alloc/src/collections/vec_deque/mod.rs @@ -32,6 +32,11 @@ pub use self::drain::Drain; mod drain; +#[unstable(feature = "vec_deque_extract_if", issue = "147750")] +pub use self::extract_if::ExtractIf; + +mod extract_if; + #[stable(feature = "rust1", since = "1.0.0")] pub use self::iter_mut::IterMut; @@ -227,6 +232,78 @@ impl VecDeque { wrap_index(idx.wrapping_sub(subtrahend).wrapping_add(self.capacity()), self.capacity()) } + /// Get source, destination and count (like the arguments to [`ptr::copy_nonoverlapping`]) + /// for copying `count` values from index `src` to index `dst`. + /// One of the ranges can wrap around the physical buffer, for this reason 2 triples are returned. + /// + /// Use of the word "ranges" specifically refers to `src..src + count` and `dst..dst + count`. + /// + /// # Safety + /// + /// - Ranges must not overlap: `src.abs_diff(dst) >= count`. + /// - Ranges must be in bounds of the logical buffer: `src + count <= self.capacity()` and `dst + count <= self.capacity()`. + /// - `head` must be in bounds: `head < self.capacity()`. + #[cfg(not(no_global_oom_handling))] + unsafe fn nonoverlapping_ranges( + &mut self, + src: usize, + dst: usize, + count: usize, + head: usize, + ) -> [(*const T, *mut T, usize); 2] { + // "`src` and `dst` must be at least as far apart as `count`" + debug_assert!( + src.abs_diff(dst) >= count, + "`src` and `dst` must not overlap. src={src} dst={dst} count={count}", + ); + debug_assert!( + src.max(dst) + count <= self.capacity(), + "ranges must be in bounds. src={src} dst={dst} count={count} cap={}", + self.capacity(), + ); + + let wrapped_src = self.wrap_add(head, src); + let wrapped_dst = self.wrap_add(head, dst); + + let room_after_src = self.capacity() - wrapped_src; + let room_after_dst = self.capacity() - wrapped_dst; + + let src_wraps = room_after_src < count; + let dst_wraps = room_after_dst < count; + + // Wrapping occurs if `capacity` is contained within `wrapped_src..wrapped_src + count` or `wrapped_dst..wrapped_dst + count`. + // Since these two ranges must not overlap as per the safety invariants of this function, only one range can wrap. + debug_assert!( + !(src_wraps && dst_wraps), + "BUG: at most one of src and dst can wrap. src={src} dst={dst} count={count} cap={}", + self.capacity(), + ); + + unsafe { + let ptr = self.ptr(); + let src_ptr = ptr.add(wrapped_src); + let dst_ptr = ptr.add(wrapped_dst); + + if src_wraps { + [ + (src_ptr, dst_ptr, room_after_src), + (ptr, dst_ptr.add(room_after_src), count - room_after_src), + ] + } else if dst_wraps { + [ + (src_ptr, dst_ptr, room_after_dst), + (src_ptr.add(room_after_dst), ptr, count - room_after_dst), + ] + } else { + [ + (src_ptr, dst_ptr, count), + // null pointers are fine as long as the count is 0 + (ptr::null(), ptr::null_mut(), 0), + ] + } + } + } + /// Copies a contiguous block of memory len long from src to dst #[inline] unsafe fn copy(&mut self, src: usize, dst: usize, len: usize) { @@ -542,6 +619,95 @@ impl VecDeque { } debug_assert!(self.head < self.capacity() || self.capacity() == 0); } + + /// Creates an iterator which uses a closure to determine if an element in the range should be removed. 
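To make the wrapping case of `nonoverlapping_ranges` above concrete, here is a standalone re-derivation of the same split on plain indices. `split_ranges` is not part of the change; it only mirrors what the private helper computes with pointers.

```
// Mirrors `nonoverlapping_ranges`: returns two (src, dst, count) triples,
// where at most one of the two ranges crosses the end of the physical buffer.
fn split_ranges(
    cap: usize,
    head: usize,
    src: usize,
    dst: usize,
    count: usize,
) -> [(usize, usize, usize); 2] {
    let wrap = |i: usize| (head + i) % cap;
    let (src, dst) = (wrap(src), wrap(dst));
    let (room_src, room_dst) = (cap - src, cap - dst);
    if room_src < count {
        // The source range crosses the end of the physical buffer.
        [(src, dst, room_src), (0, dst + room_src, count - room_src)]
    } else if room_dst < count {
        // The destination range crosses the end of the physical buffer.
        [(src, dst, room_dst), (src + room_dst, 0, count - room_dst)]
    } else {
        // Neither range wraps; the second triple is the empty placeholder.
        [(src, dst, count), (0, 0, 0)]
    }
}

fn main() {
    // Capacity 8, head 6: logical indices 0..4 live at physical 6, 7, 0, 1,
    // so copying logical 0..3 to logical 4.. is split into two copies.
    assert_eq!(split_ranges(8, 6, 0, 4, 3), [(6, 2, 2), (0, 4, 1)]);
}
```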
+ /// + /// If the closure returns `true`, the element is removed from the deque and yielded. If the closure + /// returns `false`, or panics, the element remains in the deque and will not be yielded. + /// + /// Only elements that fall in the provided range are considered for extraction, but any elements + /// after the range will still have to be moved if any element has been extracted. + /// + /// If the returned `ExtractIf` is not exhausted, e.g. because it is dropped without iterating + /// or the iteration short-circuits, then the remaining elements will be retained. + /// Use [`retain_mut`] with a negated predicate if you do not need the returned iterator. + /// + /// [`retain_mut`]: VecDeque::retain_mut + /// + /// Using this method is equivalent to the following code: + /// + /// ``` + /// #![feature(vec_deque_extract_if)] + /// # use std::collections::VecDeque; + /// # let some_predicate = |x: &mut i32| { *x % 2 == 1 }; + /// # let mut deq: VecDeque<_> = (0..10).collect(); + /// # let mut deq2 = deq.clone(); + /// # let range = 1..5; + /// let mut i = range.start; + /// let end_items = deq.len() - range.end; + /// # let mut extracted = vec![]; + /// + /// while i < deq.len() - end_items { + /// if some_predicate(&mut deq[i]) { + /// let val = deq.remove(i).unwrap(); + /// // your code here + /// # extracted.push(val); + /// } else { + /// i += 1; + /// } + /// } + /// + /// # let extracted2: Vec<_> = deq2.extract_if(range, some_predicate).collect(); + /// # assert_eq!(deq, deq2); + /// # assert_eq!(extracted, extracted2); + /// ``` + /// + /// But `extract_if` is easier to use. `extract_if` is also more efficient, + /// because it can backshift the elements of the array in bulk. + /// + /// The iterator also lets you mutate the value of each element in the + /// closure, regardless of whether you choose to keep or remove it. + /// + /// # Panics + /// + /// If `range` is out of bounds. + /// + /// # Examples + /// + /// Splitting a deque into even and odd values, reusing the original deque: + /// + /// ``` + /// #![feature(vec_deque_extract_if)] + /// use std::collections::VecDeque; + /// + /// let mut numbers = VecDeque::from([1, 2, 3, 4, 5, 6, 8, 9, 11, 13, 14, 15]); + /// + /// let evens = numbers.extract_if(.., |x| *x % 2 == 0).collect::>(); + /// let odds = numbers; + /// + /// assert_eq!(evens, VecDeque::from([2, 4, 6, 8, 14])); + /// assert_eq!(odds, VecDeque::from([1, 3, 5, 9, 11, 13, 15])); + /// ``` + /// + /// Using the range argument to only process a part of the deque: + /// + /// ``` + /// #![feature(vec_deque_extract_if)] + /// use std::collections::VecDeque; + /// + /// let mut items = VecDeque::from([0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 2, 1, 2]); + /// let ones = items.extract_if(7.., |x| *x == 1).collect::>(); + /// assert_eq!(items, VecDeque::from([0, 0, 0, 0, 0, 0, 0, 2, 2, 2])); + /// assert_eq!(ones.len(), 3); + /// ``` + #[unstable(feature = "vec_deque_extract_if", issue = "147750")] + pub fn extract_if(&mut self, range: R, filter: F) -> ExtractIf<'_, T, F, A> + where + F: FnMut(&mut T) -> bool, + R: RangeBounds, + { + ExtractIf::new(self, filter, range) + } } impl VecDeque { @@ -2971,6 +3137,222 @@ impl VecDeque { self.truncate(new_len); } } + + /// Clones the elements at the range `src` and appends them to the end. + /// + /// # Panics + /// + /// Panics if the starting index is greater than the end index + /// or if either index is greater than the length of the vector. 
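The `extract_if` documentation above points to `retain_mut` with a negated predicate when the extracted values are not needed. A small sketch of that equivalence for the full range:

```
#![feature(vec_deque_extract_if)]
use std::collections::VecDeque;

fn main() {
    let mut a = VecDeque::from([1, 2, 3, 4, 5]);
    let mut b = a.clone();

    // Extract-and-discard over the full range...
    a.extract_if(.., |x| *x > 3).for_each(drop);
    // ...has the same effect on the deque as retaining the negated predicate.
    b.retain_mut(|x| !(*x > 3));

    assert_eq!(a, b);
    assert_eq!(a, [1, 2, 3]);
}
```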
+ /// + /// # Examples + /// + /// ``` + /// #![feature(deque_extend_front)] + /// use std::collections::VecDeque; + /// + /// let mut characters = VecDeque::from(['a', 'b', 'c', 'd', 'e']); + /// characters.extend_from_within(2..); + /// assert_eq!(characters, ['a', 'b', 'c', 'd', 'e', 'c', 'd', 'e']); + /// + /// let mut numbers = VecDeque::from([0, 1, 2, 3, 4]); + /// numbers.extend_from_within(..2); + /// assert_eq!(numbers, [0, 1, 2, 3, 4, 0, 1]); + /// + /// let mut strings = VecDeque::from([String::from("hello"), String::from("world"), String::from("!")]); + /// strings.extend_from_within(1..=2); + /// assert_eq!(strings, ["hello", "world", "!", "world", "!"]); + /// ``` + #[cfg(not(no_global_oom_handling))] + #[unstable(feature = "deque_extend_front", issue = "146975")] + pub fn extend_from_within(&mut self, src: R) + where + R: RangeBounds, + { + let range = slice::range(src, ..self.len()); + self.reserve(range.len()); + + // SAFETY: + // - `slice::range` guarantees that the given range is valid for indexing self + // - at least `range.len()` additional space is available + unsafe { + self.spec_extend_from_within(range); + } + } + + /// Clones the elements at the range `src` and prepends them to the front. + /// + /// # Panics + /// + /// Panics if the starting index is greater than the end index + /// or if either index is greater than the length of the vector. + /// + /// # Examples + /// + /// ``` + /// #![feature(deque_extend_front)] + /// use std::collections::VecDeque; + /// + /// let mut characters = VecDeque::from(['a', 'b', 'c', 'd', 'e']); + /// characters.prepend_from_within(2..); + /// assert_eq!(characters, ['c', 'd', 'e', 'a', 'b', 'c', 'd', 'e']); + /// + /// let mut numbers = VecDeque::from([0, 1, 2, 3, 4]); + /// numbers.prepend_from_within(..2); + /// assert_eq!(numbers, [0, 1, 0, 1, 2, 3, 4]); + /// + /// let mut strings = VecDeque::from([String::from("hello"), String::from("world"), String::from("!")]); + /// strings.prepend_from_within(1..=2); + /// assert_eq!(strings, ["world", "!", "hello", "world", "!"]); + /// ``` + #[cfg(not(no_global_oom_handling))] + #[unstable(feature = "deque_extend_front", issue = "146975")] + pub fn prepend_from_within(&mut self, src: R) + where + R: RangeBounds, + { + let range = slice::range(src, ..self.len()); + self.reserve(range.len()); + + // SAFETY: + // - `slice::range` guarantees that the given range is valid for indexing self + // - at least `range.len()` additional space is available + unsafe { + self.spec_prepend_from_within(range); + } + } +} + +/// Associated functions have the following preconditions: +/// +/// - `src` needs to be a valid range: `src.start <= src.end <= self.len()`. +/// - The buffer must have enough spare capacity: `self.capacity() - self.len() >= src.len()`. +#[cfg(not(no_global_oom_handling))] +trait SpecExtendFromWithin { + unsafe fn spec_extend_from_within(&mut self, src: Range); + + unsafe fn spec_prepend_from_within(&mut self, src: Range); +} + +#[cfg(not(no_global_oom_handling))] +impl SpecExtendFromWithin for VecDeque { + default unsafe fn spec_extend_from_within(&mut self, src: Range) { + let dst = self.len(); + let count = src.end - src.start; + let src = src.start; + + unsafe { + // SAFETY: + // - Ranges do not overlap: src entirely spans initialized values, dst entirely spans uninitialized values. + // - Ranges are in bounds: guaranteed by the caller. 
+ let ranges = self.nonoverlapping_ranges(src, dst, count, self.head); + + // `len` is updated after every clone to prevent leaking and + // leave the deque in the right state when a clone implementation panics + + for (src, dst, count) in ranges { + for offset in 0..count { + dst.add(offset).write((*src.add(offset)).clone()); + self.len += 1; + } + } + } + } + + default unsafe fn spec_prepend_from_within(&mut self, src: Range) { + let dst = 0; + let count = src.end - src.start; + let src = src.start + count; + + let new_head = self.wrap_sub(self.head, count); + let cap = self.capacity(); + + unsafe { + // SAFETY: + // - Ranges do not overlap: src entirely spans initialized values, dst entirely spans uninitialized values. + // - Ranges are in bounds: guaranteed by the caller. + let ranges = self.nonoverlapping_ranges(src, dst, count, new_head); + + // Cloning is done in reverse because we prepend to the front of the deque, + // we can't get holes in the *logical* buffer. + // `head` and `len` are updated after every clone to prevent leaking and + // leave the deque in the right state when a clone implementation panics + + // Clone the first range + let (src, dst, count) = ranges[1]; + for offset in (0..count).rev() { + dst.add(offset).write((*src.add(offset)).clone()); + self.head -= 1; + self.len += 1; + } + + // Clone the second range + let (src, dst, count) = ranges[0]; + let mut iter = (0..count).rev(); + if let Some(offset) = iter.next() { + dst.add(offset).write((*src.add(offset)).clone()); + // After the first clone of the second range, wrap `head` around + if self.head == 0 { + self.head = cap; + } + self.head -= 1; + self.len += 1; + + // Continue like normal + for offset in iter { + dst.add(offset).write((*src.add(offset)).clone()); + self.head -= 1; + self.len += 1; + } + } + } + } +} + +#[cfg(not(no_global_oom_handling))] +impl SpecExtendFromWithin for VecDeque { + unsafe fn spec_extend_from_within(&mut self, src: Range) { + let dst = self.len(); + let count = src.end - src.start; + let src = src.start; + + unsafe { + // SAFETY: + // - Ranges do not overlap: src entirely spans initialized values, dst entirely spans uninitialized values. + // - Ranges are in bounds: guaranteed by the caller. + let ranges = self.nonoverlapping_ranges(src, dst, count, self.head); + for (src, dst, count) in ranges { + ptr::copy_nonoverlapping(src, dst, count); + } + } + + // SAFETY: + // - The elements were just initialized by `copy_nonoverlapping` + self.len += count; + } + + unsafe fn spec_prepend_from_within(&mut self, src: Range) { + let dst = 0; + let count = src.end - src.start; + let src = src.start + count; + + let new_head = self.wrap_sub(self.head, count); + + unsafe { + // SAFETY: + // - Ranges do not overlap: src entirely spans initialized values, dst entirely spans uninitialized values. + // - Ranges are in bounds: guaranteed by the caller. + let ranges = self.nonoverlapping_ranges(src, dst, count, new_head); + for (src, dst, count) in ranges { + ptr::copy_nonoverlapping(src, dst, count); + } + } + + // SAFETY: + // - The elements were just initialized by `copy_nonoverlapping` + self.head = new_head; + self.len += count; + } } /// Returns the index in the underlying buffer for a given logical element index. 
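An observable consequence of the "`len` is updated after every clone" comments above (the `test_extend_from_within_clone_panic` test further down exercises the same property): if a clone panics part-way through, the elements cloned so far are kept and nothing is leaked or double-dropped. `Bomb` below is purely illustrative.

```
#![feature(deque_extend_front)]
use std::collections::VecDeque;
use std::panic::{AssertUnwindSafe, catch_unwind};

// Illustrative type: cloning the value 2 panics.
struct Bomb(u32);

impl Clone for Bomb {
    fn clone(&self) -> Self {
        if self.0 == 2 {
            panic!("clone failed");
        }
        Bomb(self.0)
    }
}

fn main() {
    let mut d: VecDeque<Bomb> = (0..4).map(Bomb).collect();
    // The clone of element 2 panics part-way through the extension.
    catch_unwind(AssertUnwindSafe(|| d.extend_from_within(..))).unwrap_err();
    // Because `len` is bumped after every successful clone, the two elements
    // cloned before the panic are kept.
    assert_eq!(d.iter().map(|b| b.0).collect::<Vec<_>>(), [0, 1, 2, 3, 0, 1]);
}
```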
diff --git a/library/alloc/src/collections/vec_deque/tests.rs b/library/alloc/src/collections/vec_deque/tests.rs index 2501534e95080..dc50cc34d9dac 100644 --- a/library/alloc/src/collections/vec_deque/tests.rs +++ b/library/alloc/src/collections/vec_deque/tests.rs @@ -1,6 +1,8 @@ -use core::iter::TrustedLen; +use std::iter::TrustedLen; +use std::panic::{AssertUnwindSafe, catch_unwind}; use super::*; +use crate::testing::crash_test::{CrashTestDummy, Panic}; use crate::testing::macros::struct_with_counted_drop; #[bench] @@ -1161,3 +1163,271 @@ fn issue_80303() { assert_eq!(vda, vdb); assert_eq!(hash_code(vda), hash_code(vdb)); } + +#[test] +fn extract_if_test() { + let mut m: VecDeque = VecDeque::from([1, 2, 3, 4, 5, 6]); + let deleted = m.extract_if(.., |v| *v < 4).collect::>(); + + assert_eq!(deleted, &[1, 2, 3]); + assert_eq!(m, &[4, 5, 6]); +} + +#[test] +fn drain_to_empty_test() { + let mut m: VecDeque = VecDeque::from([1, 2, 3, 4, 5, 6]); + let deleted = m.extract_if(.., |_| true).collect::>(); + + assert_eq!(deleted, &[1, 2, 3, 4, 5, 6]); + assert_eq!(m, &[]); +} + +#[test] +fn extract_if_empty() { + let mut list: VecDeque = VecDeque::new(); + + { + let mut iter = list.extract_if(.., |_| true); + assert_eq!(iter.size_hint(), (0, Some(0))); + assert_eq!(iter.next(), None); + assert_eq!(iter.size_hint(), (0, Some(0))); + assert_eq!(iter.next(), None); + assert_eq!(iter.size_hint(), (0, Some(0))); + } + + assert_eq!(list.len(), 0); + assert_eq!(list, vec![]); +} + +#[test] +fn extract_if_zst() { + let mut list: VecDeque<_> = [(), (), (), (), ()].into_iter().collect(); + let initial_len = list.len(); + let mut count = 0; + + { + let mut iter = list.extract_if(.., |_| true); + assert_eq!(iter.size_hint(), (0, Some(initial_len))); + while let Some(_) = iter.next() { + count += 1; + assert_eq!(iter.size_hint(), (0, Some(initial_len - count))); + } + assert_eq!(iter.size_hint(), (0, Some(0))); + assert_eq!(iter.next(), None); + assert_eq!(iter.size_hint(), (0, Some(0))); + } + + assert_eq!(count, initial_len); + assert_eq!(list.len(), 0); + assert_eq!(list, vec![]); +} + +#[test] +fn extract_if_false() { + let mut list: VecDeque<_> = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect(); + + let initial_len = list.len(); + let mut count = 0; + + { + let mut iter = list.extract_if(.., |_| false); + assert_eq!(iter.size_hint(), (0, Some(initial_len))); + for _ in iter.by_ref() { + count += 1; + } + assert_eq!(iter.size_hint(), (0, Some(0))); + assert_eq!(iter.next(), None); + assert_eq!(iter.size_hint(), (0, Some(0))); + } + + assert_eq!(count, 0); + assert_eq!(list.len(), initial_len); + assert_eq!(list, vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]); +} + +#[test] +fn extract_if_true() { + let mut list: VecDeque<_> = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect(); + + let initial_len = list.len(); + let mut count = 0; + + { + let mut iter = list.extract_if(.., |_| true); + assert_eq!(iter.size_hint(), (0, Some(initial_len))); + while let Some(_) = iter.next() { + count += 1; + assert_eq!(iter.size_hint(), (0, Some(initial_len - count))); + } + assert_eq!(iter.size_hint(), (0, Some(0))); + assert_eq!(iter.next(), None); + assert_eq!(iter.size_hint(), (0, Some(0))); + } + + assert_eq!(count, initial_len); + assert_eq!(list.len(), 0); + assert_eq!(list, vec![]); +} + +#[test] +fn extract_if_non_contiguous() { + let mut list = + [1, 2, 4, 6, 7, 9, 11, 13, 15, 17, 18, 20, 22, 24, 26, 27, 29, 31, 33, 34, 35, 36, 37, 39] + .into_iter() + .collect::>(); + list.rotate_left(3); + + 
assert!(!list.is_contiguous()); + assert_eq!( + list, + [6, 7, 9, 11, 13, 15, 17, 18, 20, 22, 24, 26, 27, 29, 31, 33, 34, 35, 36, 37, 39, 1, 2, 4] + ); + + let removed = list.extract_if(.., |x| *x % 2 == 0).collect::>(); + assert_eq!(removed.len(), 10); + assert_eq!(removed, vec![6, 18, 20, 22, 24, 26, 34, 36, 2, 4]); + + assert_eq!(list.len(), 14); + assert_eq!(list, vec![7, 9, 11, 13, 15, 17, 27, 29, 31, 33, 35, 37, 39, 1]); +} + +#[test] +fn extract_if_complex() { + { + // [+xxx++++++xxxxx++++x+x++] + let mut list = [ + 1, 2, 4, 6, 7, 9, 11, 13, 15, 17, 18, 20, 22, 24, 26, 27, 29, 31, 33, 34, 35, 36, 37, + 39, + ] + .into_iter() + .collect::>(); + + let removed = list.extract_if(.., |x| *x % 2 == 0).collect::>(); + assert_eq!(removed.len(), 10); + assert_eq!(removed, vec![2, 4, 6, 18, 20, 22, 24, 26, 34, 36]); + + assert_eq!(list.len(), 14); + assert_eq!(list, vec![1, 7, 9, 11, 13, 15, 17, 27, 29, 31, 33, 35, 37, 39]); + } + + { + // [xxx++++++xxxxx++++x+x++] + let mut list = + [2, 4, 6, 7, 9, 11, 13, 15, 17, 18, 20, 22, 24, 26, 27, 29, 31, 33, 34, 35, 36, 37, 39] + .into_iter() + .collect::>(); + + let removed = list.extract_if(.., |x| *x % 2 == 0).collect::>(); + assert_eq!(removed.len(), 10); + assert_eq!(removed, vec![2, 4, 6, 18, 20, 22, 24, 26, 34, 36]); + + assert_eq!(list.len(), 13); + assert_eq!(list, vec![7, 9, 11, 13, 15, 17, 27, 29, 31, 33, 35, 37, 39]); + } + + { + // [xxx++++++xxxxx++++x+x] + let mut list = + [2, 4, 6, 7, 9, 11, 13, 15, 17, 18, 20, 22, 24, 26, 27, 29, 31, 33, 34, 35, 36] + .into_iter() + .collect::>(); + + let removed = list.extract_if(.., |x| *x % 2 == 0).collect::>(); + assert_eq!(removed.len(), 10); + assert_eq!(removed, vec![2, 4, 6, 18, 20, 22, 24, 26, 34, 36]); + + assert_eq!(list.len(), 11); + assert_eq!(list, vec![7, 9, 11, 13, 15, 17, 27, 29, 31, 33, 35]); + } + + { + // [xxxxxxxxxx+++++++++++] + let mut list = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19] + .into_iter() + .collect::>(); + + let removed = list.extract_if(.., |x| *x % 2 == 0).collect::>(); + assert_eq!(removed.len(), 10); + assert_eq!(removed, vec![2, 4, 6, 8, 10, 12, 14, 16, 18, 20]); + + assert_eq!(list.len(), 10); + assert_eq!(list, vec![1, 3, 5, 7, 9, 11, 13, 15, 17, 19]); + } + + { + // [+++++++++++xxxxxxxxxx] + let mut list = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20] + .into_iter() + .collect::>(); + + let removed = list.extract_if(.., |x| *x % 2 == 0).collect::>(); + assert_eq!(removed.len(), 10); + assert_eq!(removed, vec![2, 4, 6, 8, 10, 12, 14, 16, 18, 20]); + + assert_eq!(list.len(), 10); + assert_eq!(list, vec![1, 3, 5, 7, 9, 11, 13, 15, 17, 19]); + } +} + +#[test] +#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] +fn extract_if_drop_panic_leak() { + let d0 = CrashTestDummy::new(0); + let d1 = CrashTestDummy::new(1); + let d2 = CrashTestDummy::new(2); + let d3 = CrashTestDummy::new(3); + let d4 = CrashTestDummy::new(4); + let d5 = CrashTestDummy::new(5); + let d6 = CrashTestDummy::new(6); + let d7 = CrashTestDummy::new(7); + let mut q = VecDeque::new(); + q.push_back(d3.spawn(Panic::Never)); + q.push_back(d4.spawn(Panic::Never)); + q.push_back(d5.spawn(Panic::Never)); + q.push_back(d6.spawn(Panic::Never)); + q.push_back(d7.spawn(Panic::Never)); + q.push_front(d2.spawn(Panic::Never)); + q.push_front(d1.spawn(Panic::InDrop)); + q.push_front(d0.spawn(Panic::Never)); + + catch_unwind(AssertUnwindSafe(|| q.extract_if(.., |_| true).for_each(drop))).unwrap_err(); + + assert_eq!(d0.dropped(), 
1); + assert_eq!(d1.dropped(), 1); + assert_eq!(d2.dropped(), 0); + assert_eq!(d3.dropped(), 0); + assert_eq!(d4.dropped(), 0); + assert_eq!(d5.dropped(), 0); + assert_eq!(d6.dropped(), 0); + assert_eq!(d7.dropped(), 0); + drop(q); + assert_eq!(d2.dropped(), 1); + assert_eq!(d3.dropped(), 1); + assert_eq!(d4.dropped(), 1); + assert_eq!(d5.dropped(), 1); + assert_eq!(d6.dropped(), 1); + assert_eq!(d7.dropped(), 1); +} + +#[test] +#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")] +fn extract_if_pred_panic_leak() { + struct_with_counted_drop!(D(u32), DROPS); + + let mut q = VecDeque::new(); + q.push_back(D(3)); + q.push_back(D(4)); + q.push_back(D(5)); + q.push_back(D(6)); + q.push_back(D(7)); + q.push_front(D(2)); + q.push_front(D(1)); + q.push_front(D(0)); + + _ = catch_unwind(AssertUnwindSafe(|| { + q.extract_if(.., |item| if item.0 >= 2 { panic!() } else { true }).for_each(drop) + })); + + assert_eq!(DROPS.get(), 2); // 0 and 1 + assert_eq!(q.len(), 6); +} diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs index fd54a375f3ea9..786f88c29ef46 100644 --- a/library/alloc/src/lib.rs +++ b/library/alloc/src/lib.rs @@ -147,7 +147,9 @@ #![feature(trusted_fused)] #![feature(trusted_len)] #![feature(trusted_random_access)] +#![feature(try_blocks)] #![feature(try_trait_v2)] +#![feature(try_trait_v2_residual)] #![feature(try_with_capacity)] #![feature(tuple_trait)] #![feature(ub_checks)] diff --git a/library/alloc/src/rc.rs b/library/alloc/src/rc.rs index 0baae0b314eb2..a24ea6e526c4b 100644 --- a/library/alloc/src/rc.rs +++ b/library/alloc/src/rc.rs @@ -255,6 +255,8 @@ use core::marker::{PhantomData, Unsize}; use core::mem::{self, ManuallyDrop, align_of_val_raw}; use core::num::NonZeroUsize; use core::ops::{CoerceUnsized, Deref, DerefMut, DerefPure, DispatchFromDyn, LegacyReceiver}; +#[cfg(not(no_global_oom_handling))] +use core::ops::{Residual, Try}; use core::panic::{RefUnwindSafe, UnwindSafe}; #[cfg(not(no_global_oom_handling))] use core::pin::Pin; @@ -639,6 +641,93 @@ impl Rc { pub fn pin(value: T) -> Pin> { unsafe { Pin::new_unchecked(Rc::new(value)) } } + + /// Maps the value in an `Rc`, reusing the allocation if possible. + /// + /// `f` is called on a reference to the value in the `Rc`, and the result is returned, also in + /// an `Rc`. + /// + /// Note: this is an associated function, which means that you have + /// to call it as `Rc::map(r, f)` instead of `r.map(f)`. This + /// is so that there is no conflict with a method on the inner type. + /// + /// # Examples + /// + /// ``` + /// #![feature(smart_pointer_try_map)] + /// + /// use std::rc::Rc; + /// + /// let r = Rc::new(7); + /// let new = Rc::map(r, |i| i + 7); + /// assert_eq!(*new, 14); + /// ``` + #[cfg(not(no_global_oom_handling))] + #[unstable(feature = "smart_pointer_try_map", issue = "144419")] + pub fn map(this: Self, f: impl FnOnce(&T) -> U) -> Rc { + if size_of::() == size_of::() + && align_of::() == align_of::() + && Rc::is_unique(&this) + { + unsafe { + let ptr = Rc::into_raw(this); + let value = ptr.read(); + let mut allocation = Rc::from_raw(ptr.cast::>()); + + Rc::get_mut_unchecked(&mut allocation).write(f(&value)); + allocation.assume_init() + } + } else { + Rc::new(f(&*this)) + } + } + + /// Attempts to map the value in an `Rc`, reusing the allocation if possible. + /// + /// `f` is called on a reference to the value in the `Rc`, and if the operation succeeds, the + /// result is returned, also in an `Rc`. 
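A sketch of the fallback path of `Rc::map` above: the closure takes the value by reference, and the allocation is only reused when the `Rc` is unique and the layouts match; with an outstanding clone it simply allocates a new `Rc`.

```
#![feature(smart_pointer_try_map)]
use std::rc::Rc;

fn main() {
    let a = Rc::new(String::from("hi"));
    let keep = Rc::clone(&a); // `a` is no longer unique

    // Still fine: the non-unique (and differently-sized) case goes through
    // `Rc::new` instead of reusing the allocation.
    let len = Rc::map(a, |s| s.len());

    assert_eq!(*len, 2);
    assert_eq!(keep.as_str(), "hi");
}
```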
+ /// + /// Note: this is an associated function, which means that you have + /// to call it as `Rc::try_map(r, f)` instead of `r.try_map(f)`. This + /// is so that there is no conflict with a method on the inner type. + /// + /// # Examples + /// + /// ``` + /// #![feature(smart_pointer_try_map)] + /// + /// use std::rc::Rc; + /// + /// let b = Rc::new(7); + /// let new = Rc::try_map(b, |&i| u32::try_from(i)).unwrap(); + /// assert_eq!(*new, 7); + /// ``` + #[cfg(not(no_global_oom_handling))] + #[unstable(feature = "smart_pointer_try_map", issue = "144419")] + pub fn try_map( + this: Self, + f: impl FnOnce(&T) -> R, + ) -> >>::TryType + where + R: Try, + R::Residual: Residual>, + { + if size_of::() == size_of::() + && align_of::() == align_of::() + && Rc::is_unique(&this) + { + unsafe { + let ptr = Rc::into_raw(this); + let value = ptr.read(); + let mut allocation = Rc::from_raw(ptr.cast::>()); + + Rc::get_mut_unchecked(&mut allocation).write(f(&value)?); + try { allocation.assume_init() } + } + } else { + try { Rc::new(f(&*this)?) } + } + } } impl Rc { @@ -3991,6 +4080,128 @@ impl UniqueRc { pub fn new(value: T) -> Self { Self::new_in(value, Global) } + + /// Maps the value in a `UniqueRc`, reusing the allocation if possible. + /// + /// `f` is called on a reference to the value in the `UniqueRc`, and the result is returned, + /// also in a `UniqueRc`. + /// + /// Note: this is an associated function, which means that you have + /// to call it as `UniqueRc::map(u, f)` instead of `u.map(f)`. This + /// is so that there is no conflict with a method on the inner type. + /// + /// # Examples + /// + /// ``` + /// #![feature(smart_pointer_try_map)] + /// #![feature(unique_rc_arc)] + /// + /// use std::rc::UniqueRc; + /// + /// let r = UniqueRc::new(7); + /// let new = UniqueRc::map(r, |i| i + 7); + /// assert_eq!(*new, 14); + /// ``` + #[cfg(not(no_global_oom_handling))] + #[unstable(feature = "smart_pointer_try_map", issue = "144419")] + pub fn map(this: Self, f: impl FnOnce(T) -> U) -> UniqueRc { + if size_of::() == size_of::() + && align_of::() == align_of::() + && UniqueRc::weak_count(&this) == 0 + { + unsafe { + let ptr = UniqueRc::into_raw(this); + let value = ptr.read(); + let mut allocation = UniqueRc::from_raw(ptr.cast::>()); + + allocation.write(f(value)); + allocation.assume_init() + } + } else { + UniqueRc::new(f(UniqueRc::unwrap(this))) + } + } + + /// Attempts to map the value in a `UniqueRc`, reusing the allocation if possible. + /// + /// `f` is called on a reference to the value in the `UniqueRc`, and if the operation succeeds, + /// the result is returned, also in a `UniqueRc`. + /// + /// Note: this is an associated function, which means that you have + /// to call it as `UniqueRc::try_map(u, f)` instead of `u.try_map(f)`. This + /// is so that there is no conflict with a method on the inner type. 
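Unlike `Rc::map`, the `UniqueRc::map` above hands the value to the closure by value, so it also works for types that are neither `Copy` nor `Clone`. A sketch; `NotClone` is made up for illustration:

```
#![feature(smart_pointer_try_map)]
#![feature(unique_rc_arc)]
use std::rc::UniqueRc;

// Illustrative type that is neither Copy nor Clone.
struct NotClone(u32);

fn main() {
    let u = UniqueRc::new(NotClone(7));
    // The closure receives the value by value, not by reference.
    let doubled = UniqueRc::map(u, |NotClone(n)| NotClone(n * 2));
    assert_eq!(doubled.0, 14);
}
```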
+ /// + /// # Examples + /// + /// ``` + /// #![feature(smart_pointer_try_map)] + /// #![feature(unique_rc_arc)] + /// + /// use std::rc::UniqueRc; + /// + /// let b = UniqueRc::new(7); + /// let new = UniqueRc::try_map(b, u32::try_from).unwrap(); + /// assert_eq!(*new, 7); + /// ``` + #[cfg(not(no_global_oom_handling))] + #[unstable(feature = "smart_pointer_try_map", issue = "144419")] + pub fn try_map( + this: Self, + f: impl FnOnce(T) -> R, + ) -> >>::TryType + where + R: Try, + R::Residual: Residual>, + { + if size_of::() == size_of::() + && align_of::() == align_of::() + && UniqueRc::weak_count(&this) == 0 + { + unsafe { + let ptr = UniqueRc::into_raw(this); + let value = ptr.read(); + let mut allocation = UniqueRc::from_raw(ptr.cast::>()); + + allocation.write(f(value)?); + try { allocation.assume_init() } + } + } else { + try { UniqueRc::new(f(UniqueRc::unwrap(this))?) } + } + } + + #[cfg(not(no_global_oom_handling))] + fn unwrap(this: Self) -> T { + let this = ManuallyDrop::new(this); + let val: T = unsafe { ptr::read(&**this) }; + + let _weak = Weak { ptr: this.ptr, alloc: Global }; + + val + } +} + +impl UniqueRc { + #[cfg(not(no_global_oom_handling))] + unsafe fn from_raw(ptr: *const T) -> Self { + let offset = unsafe { data_offset(ptr) }; + + // Reverse the offset to find the original RcInner. + let rc_ptr = unsafe { ptr.byte_sub(offset) as *mut RcInner }; + + Self { + ptr: unsafe { NonNull::new_unchecked(rc_ptr) }, + _marker: PhantomData, + _marker2: PhantomData, + alloc: Global, + } + } + + #[cfg(not(no_global_oom_handling))] + fn into_raw(this: Self) -> *const T { + let this = ManuallyDrop::new(this); + Self::as_ptr(&*this) + } } impl UniqueRc { @@ -4041,6 +4252,40 @@ impl UniqueRc { Rc::from_inner_in(this.ptr, alloc) } } + + #[cfg(not(no_global_oom_handling))] + fn weak_count(this: &Self) -> usize { + this.inner().weak() - 1 + } + + #[cfg(not(no_global_oom_handling))] + fn inner(&self) -> &RcInner { + // SAFETY: while this UniqueRc is alive we're guaranteed that the inner pointer is valid. + unsafe { self.ptr.as_ref() } + } + + #[cfg(not(no_global_oom_handling))] + fn as_ptr(this: &Self) -> *const T { + let ptr: *mut RcInner = NonNull::as_ptr(this.ptr); + + // SAFETY: This cannot go through Deref::deref or UniqueRc::inner because + // this is required to retain raw/mut provenance such that e.g. `get_mut` can + // write through the pointer after the Rc is recovered through `from_raw`. 
+ unsafe { &raw mut (*ptr).value } + } + + #[inline] + #[cfg(not(no_global_oom_handling))] + fn into_inner_with_allocator(this: Self) -> (NonNull>, A) { + let this = mem::ManuallyDrop::new(this); + (this.ptr, unsafe { ptr::read(&this.alloc) }) + } + + #[inline] + #[cfg(not(no_global_oom_handling))] + unsafe fn from_inner_in(ptr: NonNull>, alloc: A) -> Self { + Self { ptr, _marker: PhantomData, _marker2: PhantomData, alloc } + } } impl UniqueRc { @@ -4059,6 +4304,14 @@ impl UniqueRc { } } +#[cfg(not(no_global_oom_handling))] +impl UniqueRc, A> { + unsafe fn assume_init(self) -> UniqueRc { + let (ptr, alloc) = UniqueRc::into_inner_with_allocator(self); + unsafe { UniqueRc::from_inner_in(ptr.cast(), alloc) } + } +} + #[unstable(feature = "unique_rc_arc", issue = "112566")] impl Deref for UniqueRc { type Target = T; diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs index c6b85ca5b30b3..13b5cf23e72d8 100644 --- a/library/alloc/src/sync.rs +++ b/library/alloc/src/sync.rs @@ -22,6 +22,8 @@ use core::marker::{PhantomData, Unsize}; use core::mem::{self, ManuallyDrop, align_of_val_raw}; use core::num::NonZeroUsize; use core::ops::{CoerceUnsized, Deref, DerefMut, DerefPure, DispatchFromDyn, LegacyReceiver}; +#[cfg(not(no_global_oom_handling))] +use core::ops::{Residual, Try}; use core::panic::{RefUnwindSafe, UnwindSafe}; use core::pin::{Pin, PinCoerceUnsized}; use core::ptr::{self, NonNull}; @@ -650,6 +652,93 @@ impl Arc { )?)) } } + + /// Maps the value in an `Arc`, reusing the allocation if possible. + /// + /// `f` is called on a reference to the value in the `Arc`, and the result is returned, also in + /// an `Arc`. + /// + /// Note: this is an associated function, which means that you have + /// to call it as `Arc::map(a, f)` instead of `r.map(a)`. This + /// is so that there is no conflict with a method on the inner type. + /// + /// # Examples + /// + /// ``` + /// #![feature(smart_pointer_try_map)] + /// + /// use std::sync::Arc; + /// + /// let r = Arc::new(7); + /// let new = Arc::map(r, |i| i + 7); + /// assert_eq!(*new, 14); + /// ``` + #[cfg(not(no_global_oom_handling))] + #[unstable(feature = "smart_pointer_try_map", issue = "144419")] + pub fn map(this: Self, f: impl FnOnce(&T) -> U) -> Arc { + if size_of::() == size_of::() + && align_of::() == align_of::() + && Arc::is_unique(&this) + { + unsafe { + let ptr = Arc::into_raw(this); + let value = ptr.read(); + let mut allocation = Arc::from_raw(ptr.cast::>()); + + Arc::get_mut_unchecked(&mut allocation).write(f(&value)); + allocation.assume_init() + } + } else { + Arc::new(f(&*this)) + } + } + + /// Attempts to map the value in an `Arc`, reusing the allocation if possible. + /// + /// `f` is called on a reference to the value in the `Arc`, and if the operation succeeds, the + /// result is returned, also in an `Arc`. + /// + /// Note: this is an associated function, which means that you have + /// to call it as `Arc::try_map(a, f)` instead of `a.try_map(f)`. This + /// is so that there is no conflict with a method on the inner type. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(smart_pointer_try_map)] + /// + /// use std::sync::Arc; + /// + /// let b = Arc::new(7); + /// let new = Arc::try_map(b, |&i| u32::try_from(i)).unwrap(); + /// assert_eq!(*new, 7); + /// ``` + #[cfg(not(no_global_oom_handling))] + #[unstable(feature = "smart_pointer_try_map", issue = "144419")] + pub fn try_map( + this: Self, + f: impl FnOnce(&T) -> R, + ) -> >>::TryType + where + R: Try, + R::Residual: Residual>, + { + if size_of::() == size_of::() + && align_of::() == align_of::() + && Arc::is_unique(&this) + { + unsafe { + let ptr = Arc::into_raw(this); + let value = ptr.read(); + let mut allocation = Arc::from_raw(ptr.cast::>()); + + Arc::get_mut_unchecked(&mut allocation).write(f(&value)?); + try { allocation.assume_init() } + } + } else { + try { Arc::new(f(&*this)?) } + } + } } impl Arc { @@ -4403,6 +4492,128 @@ impl UniqueArc { pub fn new(value: T) -> Self { Self::new_in(value, Global) } + + /// Maps the value in a `UniqueArc`, reusing the allocation if possible. + /// + /// `f` is called on a reference to the value in the `UniqueArc`, and the result is returned, + /// also in a `UniqueArc`. + /// + /// Note: this is an associated function, which means that you have + /// to call it as `UniqueArc::map(u, f)` instead of `u.map(f)`. This + /// is so that there is no conflict with a method on the inner type. + /// + /// # Examples + /// + /// ``` + /// #![feature(smart_pointer_try_map)] + /// #![feature(unique_rc_arc)] + /// + /// use std::sync::UniqueArc; + /// + /// let r = UniqueArc::new(7); + /// let new = UniqueArc::map(r, |i| i + 7); + /// assert_eq!(*new, 14); + /// ``` + #[cfg(not(no_global_oom_handling))] + #[unstable(feature = "smart_pointer_try_map", issue = "144419")] + pub fn map(this: Self, f: impl FnOnce(T) -> U) -> UniqueArc { + if size_of::() == size_of::() + && align_of::() == align_of::() + && UniqueArc::weak_count(&this) == 0 + { + unsafe { + let ptr = UniqueArc::into_raw(this); + let value = ptr.read(); + let mut allocation = UniqueArc::from_raw(ptr.cast::>()); + + allocation.write(f(value)); + allocation.assume_init() + } + } else { + UniqueArc::new(f(UniqueArc::unwrap(this))) + } + } + + /// Attempts to map the value in a `UniqueArc`, reusing the allocation if possible. + /// + /// `f` is called on a reference to the value in the `UniqueArc`, and if the operation succeeds, + /// the result is returned, also in a `UniqueArc`. + /// + /// Note: this is an associated function, which means that you have + /// to call it as `UniqueArc::try_map(u, f)` instead of `u.try_map(f)`. This + /// is so that there is no conflict with a method on the inner type. 
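A sketch of the failure path of `Arc::try_map` above: when the closure's result is a failure, that failure is returned as-is and the original value is simply dropped; on success the new value comes back in an `Arc`.

```
#![feature(smart_pointer_try_map)]
use std::sync::Arc;

fn main() {
    let a = Arc::new(-3_i32);
    // `u32::try_from(-3)` fails, so the whole call returns the error.
    let res: Result<Arc<u32>, _> = Arc::try_map(a, |&i| u32::try_from(i));
    assert!(res.is_err());

    // On success the value comes back in a (possibly reused) Arc.
    let ok: Result<Arc<u32>, _> = Arc::try_map(Arc::new(3_i32), |&i| u32::try_from(i));
    assert_eq!(*ok.unwrap(), 3);
}
```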
+ /// + /// # Examples + /// + /// ``` + /// #![feature(smart_pointer_try_map)] + /// #![feature(unique_rc_arc)] + /// + /// use std::sync::UniqueArc; + /// + /// let b = UniqueArc::new(7); + /// let new = UniqueArc::try_map(b, u32::try_from).unwrap(); + /// assert_eq!(*new, 7); + /// ``` + #[cfg(not(no_global_oom_handling))] + #[unstable(feature = "smart_pointer_try_map", issue = "144419")] + pub fn try_map( + this: Self, + f: impl FnOnce(T) -> R, + ) -> >>::TryType + where + R: Try, + R::Residual: Residual>, + { + if size_of::() == size_of::() + && align_of::() == align_of::() + && UniqueArc::weak_count(&this) == 0 + { + unsafe { + let ptr = UniqueArc::into_raw(this); + let value = ptr.read(); + let mut allocation = UniqueArc::from_raw(ptr.cast::>()); + + allocation.write(f(value)?); + try { allocation.assume_init() } + } + } else { + try { UniqueArc::new(f(UniqueArc::unwrap(this))?) } + } + } + + #[cfg(not(no_global_oom_handling))] + fn unwrap(this: Self) -> T { + let this = ManuallyDrop::new(this); + let val: T = unsafe { ptr::read(&**this) }; + + let _weak = Weak { ptr: this.ptr, alloc: Global }; + + val + } +} + +impl UniqueArc { + #[cfg(not(no_global_oom_handling))] + unsafe fn from_raw(ptr: *const T) -> Self { + let offset = unsafe { data_offset(ptr) }; + + // Reverse the offset to find the original ArcInner. + let rc_ptr = unsafe { ptr.byte_sub(offset) as *mut ArcInner }; + + Self { + ptr: unsafe { NonNull::new_unchecked(rc_ptr) }, + _marker: PhantomData, + _marker2: PhantomData, + alloc: Global, + } + } + + #[cfg(not(no_global_oom_handling))] + fn into_raw(this: Self) -> *const T { + let this = ManuallyDrop::new(this); + Self::as_ptr(&*this) + } } impl UniqueArc { @@ -4456,6 +4667,40 @@ impl UniqueArc { Arc::from_inner_in(this.ptr, alloc) } } + + #[cfg(not(no_global_oom_handling))] + fn weak_count(this: &Self) -> usize { + this.inner().weak.load(Acquire) - 1 + } + + #[cfg(not(no_global_oom_handling))] + fn inner(&self) -> &ArcInner { + // SAFETY: while this UniqueArc is alive we're guaranteed that the inner pointer is valid. + unsafe { self.ptr.as_ref() } + } + + #[cfg(not(no_global_oom_handling))] + fn as_ptr(this: &Self) -> *const T { + let ptr: *mut ArcInner = NonNull::as_ptr(this.ptr); + + // SAFETY: This cannot go through Deref::deref or UniqueArc::inner because + // this is required to retain raw/mut provenance such that e.g. `get_mut` can + // write through the pointer after the Rc is recovered through `from_raw`. 
+ unsafe { &raw mut (*ptr).data } + } + + #[inline] + #[cfg(not(no_global_oom_handling))] + fn into_inner_with_allocator(this: Self) -> (NonNull>, A) { + let this = mem::ManuallyDrop::new(this); + (this.ptr, unsafe { ptr::read(&this.alloc) }) + } + + #[inline] + #[cfg(not(no_global_oom_handling))] + unsafe fn from_inner_in(ptr: NonNull>, alloc: A) -> Self { + Self { ptr, _marker: PhantomData, _marker2: PhantomData, alloc } + } } impl UniqueArc { @@ -4486,6 +4731,14 @@ impl UniqueArc { } } +#[cfg(not(no_global_oom_handling))] +impl UniqueArc, A> { + unsafe fn assume_init(self) -> UniqueArc { + let (ptr, alloc) = UniqueArc::into_inner_with_allocator(self); + unsafe { UniqueArc::from_inner_in(ptr.cast(), alloc) } + } +} + #[unstable(feature = "unique_rc_arc", issue = "112566")] impl Deref for UniqueArc { type Target = T; diff --git a/library/alloctests/tests/lib.rs b/library/alloctests/tests/lib.rs index f94f92397bb18..c2649be0558a1 100644 --- a/library/alloctests/tests/lib.rs +++ b/library/alloctests/tests/lib.rs @@ -6,6 +6,7 @@ #![feature(char_max_len)] #![feature(cow_is_borrowed)] #![feature(core_intrinsics)] +#![feature(deque_extend_front)] #![feature(downcast_unchecked)] #![feature(exact_size_is_empty)] #![feature(hashmap_internals)] diff --git a/library/alloctests/tests/vec_deque.rs b/library/alloctests/tests/vec_deque.rs index a82906d55e5d0..0a4a0e0cac4d7 100644 --- a/library/alloctests/tests/vec_deque.rs +++ b/library/alloctests/tests/vec_deque.rs @@ -1,3 +1,4 @@ +use core::cell::Cell; use core::num::NonZero; use std::assert_matches::assert_matches; use std::collections::TryReserveErrorKind::*; @@ -1849,3 +1850,234 @@ fn test_truncate_front() { v.truncate_front(5); assert_eq!(v.as_slices(), ([2, 3, 4, 5, 6].as_slice(), [].as_slice())); } + +#[test] +fn test_extend_from_within() { + let mut v = VecDeque::with_capacity(8); + v.extend(0..6); + v.truncate_front(4); + assert_eq!(v, [2, 3, 4, 5]); + v.extend_from_within(1..4); + assert_eq!(v, [2, 3, 4, 5, 3, 4, 5]); + // check it really wrapped + assert_eq!(v.as_slices(), ([2, 3, 4, 5, 3, 4].as_slice(), [5].as_slice())); + v.extend_from_within(1..=2); + assert_eq!(v, [2, 3, 4, 5, 3, 4, 5, 3, 4]); + v.extend_from_within(..3); + assert_eq!(v, [2, 3, 4, 5, 3, 4, 5, 3, 4, 2, 3, 4]); +} + +/// Struct that allows tracking clone and drop calls and can be set to panic on calling clone. +struct CloneTracker<'a> { + id: usize, + // Counters can be set to None if not needed. 
+    clone: Option<&'a Cell<usize>>,
+    drop: Option<&'a Cell<usize>>,
+    panic: bool,
+}
+
+impl<'a> CloneTracker<'a> {
+    pub const DUMMY: Self = Self { id: 999, clone: None, drop: None, panic: false };
+}
+
+impl<'a> Clone for CloneTracker<'a> {
+    fn clone(&self) -> Self {
+        if self.panic {
+            panic!();
+        }
+
+        if let Some(clone_count) = self.clone {
+            clone_count.update(|c| c + 1);
+        }
+
+        Self { id: self.id, clone: self.clone, drop: self.drop, panic: false }
+    }
+}
+
+impl<'a> Drop for CloneTracker<'a> {
+    fn drop(&mut self) {
+        if let Some(drop_count) = self.drop {
+            drop_count.update(|c| c + 1);
+        }
+    }
+}
+
+#[test]
+fn test_extend_from_within_clone() {
+    let clone_counts = [const { Cell::new(0) }; 4];
+    let mut v = VecDeque::with_capacity(10);
+    // insert 2 dummy elements to have the buffer wrap later
+    v.extend([CloneTracker::DUMMY; 2]);
+    v.extend(clone_counts.iter().enumerate().map(|(id, clone_count)| CloneTracker {
+        id,
+        clone: Some(clone_count),
+        drop: None,
+        panic: false,
+    }));
+    // remove the dummy elements
+    v.truncate_front(4);
+    assert_eq!(v.iter().map(|tr| tr.id).collect::<Vec<_>>(), [0, 1, 2, 3]);
+
+    v.extend_from_within(2..);
+    assert_eq!(v.iter().map(|tr| tr.id).collect::<Vec<_>>(), [0, 1, 2, 3, 2, 3]);
+    // elements at index 2 and 3 should have been cloned once
+    assert_eq!(clone_counts.each_ref().map(Cell::get), [0, 0, 1, 1]);
+    // it is important that the deque wraps because of this operation, we want to test if wrapping is handled correctly
+    v.extend_from_within(1..5);
+    // total length is 10, 8 in the first part and 2 in the second part
+    assert_eq!(v.as_slices().0.len(), 8);
+    assert_eq!(v.iter().map(|tr| tr.id).collect::<Vec<_>>(), [0, 1, 2, 3, 2, 3, 1, 2, 3, 2]);
+    // the new elements are from indices 1, 2, 3 and 2, those elements should have their clone count
+    // incremented (clone count at index 2 gets incremented twice so ends up at 3)
+    assert_eq!(clone_counts.each_ref().map(Cell::get), [0, 1, 3, 2]);
+}
+
+#[test]
+#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
+fn test_extend_from_within_clone_panic() {
+    let clone_counts = [const { Cell::new(0) }; 4];
+    let drop_count = Cell::new(0);
+    let mut v = VecDeque::with_capacity(8);
+    // insert 2 dummy elements to have the buffer wrap later
+    v.extend([CloneTracker::DUMMY; 2]);
+    v.extend(clone_counts.iter().enumerate().map(|(id, clone_count)| CloneTracker {
+        id,
+        clone: Some(clone_count),
+        drop: Some(&drop_count),
+        panic: false,
+    }));
+    // remove the dummy elements
+    v.truncate_front(4);
+    assert_eq!(v.iter().map(|tr| tr.id).collect::<Vec<_>>(), [0, 1, 2, 3]);
+
+    // panic after wrapping
+    v[2].panic = true;
+    catch_unwind(AssertUnwindSafe(|| {
+        v.extend_from_within(..);
+    }))
+    .unwrap_err();
+    v[2].panic = false;
+    assert_eq!(v.iter().map(|tr| tr.id).collect::<Vec<_>>(), [0, 1, 2, 3, 0, 1]);
+    // the first 2 elements were cloned
+    assert_eq!(clone_counts.each_ref().map(Cell::get), [1, 1, 0, 0]);
+    // nothing should have been dropped
+    assert_eq!(drop_count.get(), 0);
+
+    v.truncate_front(2);
+    assert_eq!(drop_count.get(), 4);
+    assert_eq!(v.iter().map(|tr| tr.id).collect::<Vec<_>>(), [0, 1]);
+
+    // panic before wrapping
+    v[1].panic = true;
+    catch_unwind(AssertUnwindSafe(|| {
+        v.extend_from_within(..);
+    }))
+    .unwrap_err();
+    v[1].panic = false;
+    assert_eq!(v.iter().map(|tr| tr.id).collect::<Vec<_>>(), [0, 1, 0]);
+    // only the first element was cloned
+    assert_eq!(clone_counts.each_ref().map(Cell::get), [2, 1, 0, 0]);
+    // nothing more should have been dropped
+    assert_eq!(drop_count.get(), 4);
+}
+
+#[test]
+fn test_prepend_from_within() {
+    let mut v = VecDeque::with_capacity(8);
+    v.extend(0..6);
+    v.truncate_front(4);
+    v.prepend_from_within(..=0);
+    assert_eq!(v.as_slices(), ([2, 2, 3, 4, 5].as_slice(), [].as_slice()));
+    v.prepend_from_within(2..);
+    assert_eq!(v.as_slices(), ([3, 4].as_slice(), [5, 2, 2, 3, 4, 5].as_slice()));
+    v.prepend_from_within(..);
+    assert_eq!(v, [[3, 4, 5, 2, 2, 3, 4, 5]; 2].as_flattened());
+}
+
+#[test]
+fn test_prepend_from_within_clone() {
+    let clone_counts = [const { Cell::new(0) }; 4];
+    // insert 2 dummy elements to have the buffer wrap later
+    let mut v = VecDeque::with_capacity(10);
+    v.extend([CloneTracker::DUMMY; 2]);
+    v.extend(clone_counts.iter().enumerate().map(|(id, clone_count)| CloneTracker {
+        id,
+        clone: Some(clone_count),
+        drop: None,
+        panic: false,
+    }));
+    // remove the dummy elements
+    v.truncate_front(4);
+    assert_eq!(v.iter().map(|tr| tr.id).collect::<Vec<_>>(), [0, 1, 2, 3]);
+
+    v.prepend_from_within(..2);
+    assert_eq!(v.iter().map(|tr| tr.id).collect::<Vec<_>>(), [0, 1, 0, 1, 2, 3]);
+    v.prepend_from_within(1..5);
+    assert_eq!(v.iter().map(|tr| tr.id).collect::<Vec<_>>(), [1, 0, 1, 2, 0, 1, 0, 1, 2, 3]);
+    // count the number of each element and subtract one (clone should have been called n-1 times if we have n elements)
+    // example: 0 appears 3 times so should have been cloned twice, 1 appears 4 times so cloned 3 times, etc
+    assert_eq!(clone_counts.each_ref().map(Cell::get), [2, 3, 1, 0]);
+}
+
+#[test]
+#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
+fn test_prepend_from_within_clone_panic() {
+    let clone_counts = [const { Cell::new(0) }; 4];
+    let drop_count = Cell::new(0);
+    let mut v = VecDeque::with_capacity(8);
+    // insert 2 dummy elements to have the buffer wrap later
+    v.extend([CloneTracker::DUMMY; 2]);
+    v.extend(clone_counts.iter().enumerate().map(|(id, clone_count)| CloneTracker {
+        id,
+        clone: Some(clone_count),
+        drop: Some(&drop_count),
+        panic: false,
+    }));
+    // remove the dummy elements
+    v.truncate_front(4);
+    assert_eq!(v.iter().map(|tr| tr.id).collect::<Vec<_>>(), [0, 1, 2, 3]);
+
+    // panic after wrapping
+    v[1].panic = true;
+    catch_unwind(AssertUnwindSafe(|| {
+        v.prepend_from_within(..);
+    }))
+    .unwrap_err();
+    v[1].panic = false;
+    assert_eq!(v.iter().map(|tr| tr.id).collect::<Vec<_>>(), [2, 3, 0, 1, 2, 3]);
+    // the last 2 elements were cloned
+    assert_eq!(clone_counts.each_ref().map(Cell::get), [0, 0, 1, 1]);
+    // nothing should have been dropped
+    assert_eq!(drop_count.get(), 0);
+
+    v.truncate_front(2);
+    assert_eq!(drop_count.get(), 4);
+    assert_eq!(v.iter().map(|tr| tr.id).collect::<Vec<_>>(), [2, 3]);
+
+    // panic before wrapping
+    v[0].panic = true;
+    catch_unwind(AssertUnwindSafe(|| {
+        v.prepend_from_within(..);
+    }))
+    .unwrap_err();
+    v[0].panic = false;
+    assert_eq!(v.iter().map(|tr| tr.id).collect::<Vec<_>>(), [3, 2, 3]);
+    // only the last element of the range was cloned (prepending clones back to front)
+    assert_eq!(clone_counts.each_ref().map(Cell::get), [0, 0, 1, 2]);
+    // nothing more should have been dropped
+    assert_eq!(drop_count.get(), 4);
+}
+
+#[test]
+fn test_extend_and_prepend_from_within() {
+    let mut v = ('0'..='9').map(String::from).collect::<VecDeque<_>>();
+    v.truncate_front(5);
+    v.extend_from_within(4..);
+    v.prepend_from_within(..2);
+    assert_eq!(v.iter().map(|s| &**s).collect::<String>(), "56567899");
+    v.clear();
+    v.extend(['1', '2', '3'].map(String::from));
+    v.prepend_from_within(..);
+    v.extend_from_within(..);
+    assert_eq!(v.iter().map(|s| &**s).collect::<String>(), "123123123123");
+}
diff --git a/library/core/src/num/int_macros.rs
b/library/core/src/num/int_macros.rs index c3460a6409069..509d7e335fb23 100644 --- a/library/core/src/num/int_macros.rs +++ b/library/core/src/num/int_macros.rs @@ -2705,7 +2705,7 @@ macro_rules! int_impl { Self::carrying_mul_add(self, rhs, carry, 0) } - /// Calculates the "full multiplication" `self * rhs + carry1 + carry2` + /// Calculates the "full multiplication" `self * rhs + carry + add` /// without the possibility to overflow. /// /// This returns the low-order (wrapping) bits and the high-order (overflow) bits diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs index b5b768cf677aa..793d84d5139cc 100644 --- a/library/core/src/num/uint_macros.rs +++ b/library/core/src/num/uint_macros.rs @@ -3000,7 +3000,7 @@ macro_rules! uint_impl { Self::carrying_mul_add(self, rhs, carry, 0) } - /// Calculates the "full multiplication" `self * rhs + carry1 + carry2`. + /// Calculates the "full multiplication" `self * rhs + carry + add`. /// /// This returns the low-order (wrapping) bits and the high-order (overflow) bits /// of the result as two separate values, in that order. diff --git a/library/std/src/fs/tests.rs b/library/std/src/fs/tests.rs index 4d67ba9248998..0517760c35501 100644 --- a/library/std/src/fs/tests.rs +++ b/library/std/src/fs/tests.rs @@ -5,6 +5,7 @@ use rand::RngCore; target_os = "freebsd", target_os = "linux", target_os = "netbsd", + target_os = "illumos", target_vendor = "apple", ))] use crate::assert_matches::assert_matches; @@ -14,6 +15,7 @@ use crate::char::MAX_LEN_UTF8; target_os = "freebsd", target_os = "linux", target_os = "netbsd", + target_os = "illumos", target_vendor = "apple", ))] use crate::fs::TryLockError; @@ -227,6 +229,7 @@ fn file_test_io_seek_and_write() { target_os = "linux", target_os = "netbsd", target_os = "solaris", + target_os = "illumos", target_vendor = "apple", ))] fn file_lock_multiple_shared() { @@ -251,6 +254,7 @@ fn file_lock_multiple_shared() { target_os = "linux", target_os = "netbsd", target_os = "solaris", + target_os = "illumos", target_vendor = "apple", ))] fn file_lock_blocking() { @@ -276,6 +280,7 @@ fn file_lock_blocking() { target_os = "linux", target_os = "netbsd", target_os = "solaris", + target_os = "illumos", target_vendor = "apple", ))] fn file_lock_drop() { @@ -298,6 +303,7 @@ fn file_lock_drop() { target_os = "linux", target_os = "netbsd", target_os = "solaris", + target_os = "illumos", target_vendor = "apple", ))] fn file_lock_dup() { diff --git a/library/std/src/sys/fs/unix.rs b/library/std/src/sys/fs/unix.rs index d9a7fcb0e2d39..3efe67390d7dd 100644 --- a/library/std/src/sys/fs/unix.rs +++ b/library/std/src/sys/fs/unix.rs @@ -1292,6 +1292,7 @@ impl File { target_os = "netbsd", target_os = "openbsd", target_os = "cygwin", + target_os = "illumos", target_vendor = "apple", ))] pub fn lock(&self) -> io::Result<()> { @@ -1316,6 +1317,7 @@ impl File { target_os = "openbsd", target_os = "cygwin", target_os = "solaris", + target_os = "illumos", target_vendor = "apple", )))] pub fn lock(&self) -> io::Result<()> { @@ -1329,6 +1331,7 @@ impl File { target_os = "netbsd", target_os = "openbsd", target_os = "cygwin", + target_os = "illumos", target_vendor = "apple", ))] pub fn lock_shared(&self) -> io::Result<()> { @@ -1353,6 +1356,7 @@ impl File { target_os = "openbsd", target_os = "cygwin", target_os = "solaris", + target_os = "illumos", target_vendor = "apple", )))] pub fn lock_shared(&self) -> io::Result<()> { @@ -1366,6 +1370,7 @@ impl File { target_os = "netbsd", target_os = "openbsd", target_os = 
"cygwin", + target_os = "illumos", target_vendor = "apple", ))] pub fn try_lock(&self) -> Result<(), TryLockError> { @@ -1406,6 +1411,7 @@ impl File { target_os = "openbsd", target_os = "cygwin", target_os = "solaris", + target_os = "illumos", target_vendor = "apple", )))] pub fn try_lock(&self) -> Result<(), TryLockError> { @@ -1422,6 +1428,7 @@ impl File { target_os = "netbsd", target_os = "openbsd", target_os = "cygwin", + target_os = "illumos", target_vendor = "apple", ))] pub fn try_lock_shared(&self) -> Result<(), TryLockError> { @@ -1462,6 +1469,7 @@ impl File { target_os = "openbsd", target_os = "cygwin", target_os = "solaris", + target_os = "illumos", target_vendor = "apple", )))] pub fn try_lock_shared(&self) -> Result<(), TryLockError> { @@ -1478,6 +1486,7 @@ impl File { target_os = "netbsd", target_os = "openbsd", target_os = "cygwin", + target_os = "illumos", target_vendor = "apple", ))] pub fn unlock(&self) -> io::Result<()> { @@ -1502,6 +1511,7 @@ impl File { target_os = "openbsd", target_os = "cygwin", target_os = "solaris", + target_os = "illumos", target_vendor = "apple", )))] pub fn unlock(&self) -> io::Result<()> { diff --git a/src/bootstrap/src/core/sanity.rs b/src/bootstrap/src/core/sanity.rs index 08e5abc0a03b1..ed63b2aae452c 100644 --- a/src/bootstrap/src/core/sanity.rs +++ b/src/bootstrap/src/core/sanity.rs @@ -33,6 +33,11 @@ pub struct Finder { // // Targets can be removed from this list once they are present in the stage0 compiler (usually by updating the beta compiler of the bootstrap). const STAGE0_MISSING_TARGETS: &[&str] = &[ + "aarch64-unknown-helenos", + "i686-unknown-helenos", + "x86_64-unknown-helenos", + "powerpc-unknown-helenos", + "sparc64-unknown-helenos", // just a dummy comment so the list doesn't get onelined "riscv64gc-unknown-redox", ]; diff --git a/src/doc/rustc/src/SUMMARY.md b/src/doc/rustc/src/SUMMARY.md index 6622ef2cf82f0..e4623a2a87f48 100644 --- a/src/doc/rustc/src/SUMMARY.md +++ b/src/doc/rustc/src/SUMMARY.md @@ -120,6 +120,7 @@ - [solaris](platform-support/solaris.md) - [\*-nto-qnx-\*](platform-support/nto-qnx.md) - [\*-unikraft-linux-musl](platform-support/unikraft-linux-musl.md) + - [\*-unknown-helenos](platform-support/helenos.md) - [\*-unknown-hermit](platform-support/hermit.md) - [\*-unknown-freebsd](platform-support/freebsd.md) - [\*-unknown-managarm-mlibc](platform-support/managarm.md) diff --git a/src/doc/rustc/src/codegen-options/index.md b/src/doc/rustc/src/codegen-options/index.md index 0e340de4daa27..bdd490058e32b 100644 --- a/src/doc/rustc/src/codegen-options/index.md +++ b/src/doc/rustc/src/codegen-options/index.md @@ -209,6 +209,27 @@ Note that while the `-C instrument-coverage` option is stable, the profile data format produced by the resulting instrumentation may change, and may not work with coverage tools other than those built and shipped with the compiler. +## jump-tables + +This option is used to allow or prevent the LLVM codegen backend from creating +jump tables when lowering switches from Rust code. + +* `y`, `yes`, `on`, `true` or no value: allow jump tables (the default). +* `n`, `no`, `off` or `false`: disable jump tables. + +To prevent jump tables being created from Rust code, a target must ensure +all crates are compiled with jump tables disabled. + +Note, in many cases the Rust toolchain is distributed with precompiled +crates, such as the core and std crates, which could possibly include +jump tables. Furthermore, this option does not guarantee a target will +be free of jump tables. 
They could arise from external dependencies, inline asm, or other complicated interactions when using crates that were compiled with jump tables enabled.
+
+Disabling jump tables can help provide protection against jump-oriented programming (JOP) attacks.
+
 ## link-arg
 
 This flag lets you append a single extra argument to the linker invocation.
diff --git a/src/doc/rustc/src/platform-support.md b/src/doc/rustc/src/platform-support.md
index 99739ee734e4c..0d91095fbbb92 100644
--- a/src/doc/rustc/src/platform-support.md
+++ b/src/doc/rustc/src/platform-support.md
@@ -255,6 +255,7 @@ target | std | host | notes
 [`aarch64-kmc-solid_asp3`](platform-support/kmc-solid.md) | ✓ | | ARM64 SOLID with TOPPERS/ASP3
 [`aarch64-nintendo-switch-freestanding`](platform-support/aarch64-nintendo-switch-freestanding.md) | * | | ARM64 Nintendo Switch, Horizon
 [`aarch64-unknown-freebsd`](platform-support/freebsd.md) | ✓ | ✓ | ARM64 FreeBSD
+[`aarch64-unknown-helenos`](platform-support/helenos.md) | ✓ | | ARM64 HelenOS
 [`aarch64-unknown-hermit`](platform-support/hermit.md) | ✓ | | ARM64 Hermit
 [`aarch64-unknown-illumos`](platform-support/illumos.md) | ✓ | ✓ | ARM64 illumos
 `aarch64-unknown-linux-gnu_ilp32` | ✓ | ✓ | ARM64 Linux (ILP32 ABI)
@@ -320,6 +321,7 @@ target | std | host | notes
 [`i686-apple-darwin`](platform-support/apple-darwin.md) | ✓ | ✓ | 32-bit macOS (10.12+, Sierra+, Penryn) [^x86_32-floats-return-ABI]
 [`i686-pc-nto-qnx700`](platform-support/nto-qnx.md) | * | | 32-bit x86 QNX Neutrino 7.0 RTOS (Pentium 4) [^x86_32-floats-return-ABI]
 `i686-unknown-haiku` | ✓ | ✓ | 32-bit Haiku (Pentium 4) [^x86_32-floats-return-ABI]
+[`i686-unknown-helenos`](platform-support/helenos.md) | ✓ | | HelenOS IA-32 (see docs for pending issues)
 [`i686-unknown-hurd-gnu`](platform-support/hurd.md) | ✓ | ✓ | 32-bit GNU/Hurd (Pentium 4) [^x86_32-floats-return-ABI]
 [`i686-unknown-netbsd`](platform-support/netbsd.md) | ✓ | ✓ | NetBSD/i386 (Pentium 4) [^x86_32-floats-return-ABI]
 [`i686-unknown-openbsd`](platform-support/openbsd.md) | ✓ | ✓ | 32-bit OpenBSD (Pentium 4) [^x86_32-floats-return-ABI]
@@ -356,6 +358,7 @@ target | std | host | notes
 [`mipsisa64r6el-unknown-linux-gnuabi64`](platform-support/mips-release-6.md) | ✓ | ✓ | 64-bit MIPS Release 6 Little Endian
 `msp430-none-elf` | * | | 16-bit MSP430 microcontrollers
 [`powerpc-unknown-freebsd`](platform-support/freebsd.md) | ? | | PowerPC FreeBSD
+[`powerpc-unknown-helenos`](platform-support/helenos.md) | ✓ | | PowerPC HelenOS
 [`powerpc-unknown-linux-gnuspe`](platform-support/powerpc-unknown-linux-gnuspe.md) | ✓ | | PowerPC SPE Linux
 `powerpc-unknown-linux-musl` | ? | | PowerPC Linux with musl 1.2.3
 [`powerpc-unknown-linux-muslspe`](platform-support/powerpc-unknown-linux-muslspe.md) | ? | | PowerPC SPE Linux with musl 1.2.3
@@ -399,6 +402,7 @@ target | std | host | notes
 [`s390x-unknown-linux-musl`](platform-support/s390x-unknown-linux-musl.md) | ✓ | | S390x Linux (kernel 3.2, musl 1.2.3)
 `sparc-unknown-linux-gnu` | ✓ | | 32-bit SPARC Linux
 [`sparc-unknown-none-elf`](./platform-support/sparc-unknown-none-elf.md) | * | | Bare 32-bit SPARC V7+
+[`sparc64-unknown-helenos`](platform-support/helenos.md) | ✓ | | sparc64 HelenOS
 [`sparc64-unknown-netbsd`](platform-support/netbsd.md) | ✓ | ✓ | NetBSD/sparc64
 [`sparc64-unknown-openbsd`](platform-support/openbsd.md) | ✓ | ✓ | OpenBSD/sparc64
 [`thumbv4t-none-eabi`](platform-support/armv4t-none-eabi.md) | * | | Thumb-mode Bare Armv4T
@@ -429,6 +433,7 @@ target | std | host | notes
 `x86_64-unknown-dragonfly` | ✓ | ✓ | 64-bit DragonFlyBSD
 `x86_64-unknown-haiku` | ✓ | ✓ | 64-bit Haiku
 [`x86_64-unknown-hermit`](platform-support/hermit.md) | ✓ | | x86_64 Hermit
+[`x86_64-unknown-helenos`](platform-support/helenos.md) | ✓ | | x86_64 (amd64) HelenOS
 [`x86_64-unknown-hurd-gnu`](platform-support/hurd.md) | ✓ | ✓ | 64-bit GNU/Hurd
 `x86_64-unknown-l4re-uclibc` | ? | |
 [`x86_64-unknown-linux-none`](platform-support/x86_64-unknown-linux-none.md) | * | | 64-bit Linux with no libc
diff --git a/src/doc/rustc/src/platform-support/helenos.md b/src/doc/rustc/src/platform-support/helenos.md
new file mode 100644
index 0000000000000..0dce4f6928ffb
--- /dev/null
+++ b/src/doc/rustc/src/platform-support/helenos.md
@@ -0,0 +1,68 @@
+# `*-unknown-helenos`
+
+**Tier: 3**
+
+Targets for [HelenOS](https://www.helenos.org).
+These targets allow compiling user-space applications that you can then copy into your HelenOS ISO image and run.
+
+Target triplets available:
+
+- `x86_64-unknown-helenos`
+- `sparc64-unknown-helenos`
+- `powerpc-unknown-helenos`
+- `aarch64-unknown-helenos`
+- `i686-unknown-helenos`*
+
+On i686, some portions of the native HelenOS libraries run into issues with vector instructions accessing stack variables that appear to be misaligned. It is not clear whether this is the fault of HelenOS or of Rust. Most programs work, but, for example, calling `ui_window_create` from HelenOS libui does not.
+
+## Target maintainers
+
+- Matěj Volf ([@mvolfik](https://github.com/mvolfik))
+
+## Requirements
+
+These targets only support cross-compilation. The targets will[^helenos-libstd-pending] support libstd, although support for some platform features (filesystem, networking) may be limited.
+
+You need to have a local clone of the HelenOS repository and the HelenOS toolchain set up; no prebuilt HelenOS Rust development artifacts are available.
+
+[^helenos-libstd-pending]: libstd is not yet available; it will be added in a separate PR, since compiler support needs to be merged first to allow creating the libc bindings.
+
+## Building
+
+If you want to avoid the full setup, a fully automated Docker-based build system is available at https://github.com/mvolfik/helenos-rust-autobuild
+
+### HelenOS toolchain setup
+
+To compile the standard library, you need to build the HelenOS toolchain (because Rust needs to use `*-helenos-gcc` as the linker) and its libraries (libc and a few others). See [this HelenOS wiki page](https://www.helenos.org/wiki/UsersGuide/CompilingFromSource#a2.Buildasupportedcross-compiler) for instructions on setting up the build. At the end of step 4 (_Configure and build_), after `ninja image_path`, invoke `ninja export-dev` to build the shared libraries.
+
+Copy the libraries to the path where the compiler automatically searches for them. This will be the directory where you installed the toolchain (for example `~/.local/share/HelenOS/cross/i686-helenos/lib`). In the folder where you built HelenOS, you can run these commands:
+
+```sh
+touch /tmp/test.c
+HELENOS_LIB_PATH="$(realpath "$(amd64-helenos-gcc -v -c /tmp/test.c 2>&1 | grep LIBRARY_PATH | cut -d= -f2 | cut -d: -f2)")"
+# use sparc64-helenos-gcc above for the SPARC toolchain, etc
+cp -P export-dev/lib/* "$HELENOS_LIB_PATH"
+```
+
+### Building the target
+
+When you have the HelenOS toolchain set up and installed in your path, you can build the Rust toolchain using the standard procedure. See the [rustc dev guide](https://rustc-dev-guide.rust-lang.org/building/how-to-build-and-run.html).
+
+In the simplest case, this means that you can run `./x build library --stage 1 --target x86_64-unknown-linux-gnu,<arch>-unknown-helenos` (the first target triple should be your host machine; adjust accordingly). Then run `rustup toolchain link mytoolchain build/host/stage1` to allow using your toolchain for building Rust programs.
+
+### Building Rust programs
+
+If you linked the toolchain above as `mytoolchain`, run `cargo +mytoolchain build --target <arch>-unknown-helenos`.
+
+## Testing
+
+After you build a Rust program for HelenOS, you can put it into the `dist` directory of the HelenOS build, build the ISO image, and then run it either in an emulator or on real hardware. See the HelenOS wiki for further instructions on running the OS.
+
+Running the Rust test suite has not been attempted yet due to missing host tools (so the test suite cannot be run natively) and insufficient networking support (so we cannot use the `remote-test-server` tool).
+
+## Cross-compilation toolchains and C code
+
+You should be able to cross-compile and link any needed C code using the `<arch>-helenos-gcc` that you built above. However, note that clang support is largely missing. Therefore, to run tools such as `bindgen`, you will need to pass the `-nostdinc` flag and manually specify the include paths to the HelenOS headers, which you will find in the `export-dev` folder in the cross-compilation toolchain (e.g. `~/.local/share/HelenOS/cross/lib/gcc/i686-helenos/14.2.0/include`). You can see an example of a proper `build.rs` at https://github.com/mvolfik/helenos-ui-rs/blob/master/build.rs
diff --git a/src/doc/unstable-book/src/compiler-flags/no-jump-tables.md b/src/doc/unstable-book/src/compiler-flags/no-jump-tables.md
deleted file mode 100644
index f096c20f4bd54..0000000000000
--- a/src/doc/unstable-book/src/compiler-flags/no-jump-tables.md
+++ /dev/null
@@ -1,19 +0,0 @@
-# `no-jump-tables`
-
-The tracking issue for this feature is [#116592](https://github.com/rust-lang/rust/issues/116592)
-
----
-
-This option enables the `-fno-jump-tables` flag for LLVM, which makes the
-codegen backend avoid generating jump tables when lowering switches.
-
-This option adds the LLVM `no-jump-tables=true` attribute to every function.
-
-The option can be used to help provide protection against
-jump-oriented-programming (JOP) attacks, such as with the linux kernel's [IBT].
- -```sh -RUSTFLAGS="-Zno-jump-tables" cargo +nightly build -Z build-std -``` - -[IBT]: https://www.phoronix.com/news/Linux-IBT-By-Default-Tip diff --git a/tests/assembly-llvm/targets/targets-elf.rs b/tests/assembly-llvm/targets/targets-elf.rs index 6c85dbcfed1a5..d2f22fd8d1a50 100644 --- a/tests/assembly-llvm/targets/targets-elf.rs +++ b/tests/assembly-llvm/targets/targets-elf.rs @@ -34,6 +34,9 @@ //@ revisions: aarch64_unknown_fuchsia //@ [aarch64_unknown_fuchsia] compile-flags: --target aarch64-unknown-fuchsia //@ [aarch64_unknown_fuchsia] needs-llvm-components: aarch64 +//@ revisions: aarch64_unknown_helenos +//@ [aarch64_unknown_helenos] compile-flags: --target aarch64-unknown-helenos +//@ [aarch64_unknown_helenos] needs-llvm-components: aarch64 //@ revisions: aarch64_unknown_hermit //@ [aarch64_unknown_hermit] compile-flags: --target aarch64-unknown-hermit //@ [aarch64_unknown_hermit] needs-llvm-components: aarch64 @@ -256,6 +259,9 @@ //@ revisions: i686_unknown_haiku //@ [i686_unknown_haiku] compile-flags: --target i686-unknown-haiku //@ [i686_unknown_haiku] needs-llvm-components: x86 +//@ revisions: i686_unknown_helenos +//@ [i686_unknown_helenos] compile-flags: --target i686-unknown-helenos +//@ [i686_unknown_helenos] needs-llvm-components: x86 //@ revisions: i686_unknown_hurd_gnu //@ [i686_unknown_hurd_gnu] compile-flags: --target i686-unknown-hurd-gnu //@ [i686_unknown_hurd_gnu] needs-llvm-components: x86 @@ -394,6 +400,9 @@ //@ revisions: powerpc_unknown_freebsd //@ [powerpc_unknown_freebsd] compile-flags: --target powerpc-unknown-freebsd //@ [powerpc_unknown_freebsd] needs-llvm-components: powerpc +//@ revisions: powerpc_unknown_helenos +//@ [powerpc_unknown_helenos] compile-flags: --target powerpc-unknown-helenos +//@ [powerpc_unknown_helenos] needs-llvm-components: powerpc //@ revisions: powerpc_unknown_linux_gnu //@ [powerpc_unknown_linux_gnu] compile-flags: --target powerpc-unknown-linux-gnu //@ [powerpc_unknown_linux_gnu] needs-llvm-components: powerpc @@ -517,6 +526,9 @@ //@ revisions: s390x_unknown_linux_musl //@ [s390x_unknown_linux_musl] compile-flags: --target s390x-unknown-linux-musl //@ [s390x_unknown_linux_musl] needs-llvm-components: systemz +//@ revisions: sparc64_unknown_helenos +//@ [sparc64_unknown_helenos] compile-flags: --target sparc64-unknown-helenos +//@ [sparc64_unknown_helenos] needs-llvm-components: sparc //@ revisions: sparc64_unknown_linux_gnu //@ [sparc64_unknown_linux_gnu] compile-flags: --target sparc64-unknown-linux-gnu //@ [sparc64_unknown_linux_gnu] needs-llvm-components: sparc @@ -634,6 +646,9 @@ //@ revisions: x86_64_unknown_haiku //@ [x86_64_unknown_haiku] compile-flags: --target x86_64-unknown-haiku //@ [x86_64_unknown_haiku] needs-llvm-components: x86 +//@ revisions: x86_64_unknown_helenos +//@ [x86_64_unknown_helenos] compile-flags: --target x86_64-unknown-helenos +//@ [x86_64_unknown_helenos] needs-llvm-components: x86 //@ revisions: x86_64_unknown_hurd_gnu //@ [x86_64_unknown_hurd_gnu] compile-flags: --target x86_64-unknown-hurd-gnu //@ [x86_64_unknown_hurd_gnu] needs-llvm-components: x86 diff --git a/tests/assembly-llvm/x86_64-no-jump-tables.rs b/tests/assembly-llvm/x86_64-no-jump-tables.rs index bb10042d8f629..e469aee7ed945 100644 --- a/tests/assembly-llvm/x86_64-no-jump-tables.rs +++ b/tests/assembly-llvm/x86_64-no-jump-tables.rs @@ -1,10 +1,10 @@ -// Test that jump tables are (not) emitted when the `-Zno-jump-tables` +// Test that jump tables are (not) emitted when the `-Cjump-tables=no` // flag is (not) set. 
//@ revisions: unset set //@ assembly-output: emit-asm //@ compile-flags: -Copt-level=3 -//@ [set] compile-flags: -Zno-jump-tables +//@ [set] compile-flags: -Cjump-tables=no //@ only-x86_64 //@ ignore-sgx diff --git a/tests/codegen-llvm/no-jump-tables.rs b/tests/codegen-llvm/no-jump-tables.rs index e49de7e9dc1e9..92f9c1f204a64 100644 --- a/tests/codegen-llvm/no-jump-tables.rs +++ b/tests/codegen-llvm/no-jump-tables.rs @@ -1,11 +1,12 @@ // Test that the `no-jump-tables` function attribute are (not) emitted when -// the `-Zno-jump-tables` flag is (not) set. +// the `-Cjump-tables=no` flag is (not) set. //@ add-core-stubs -//@ revisions: unset set +//@ revisions: unset set_no set_yes //@ needs-llvm-components: x86 //@ compile-flags: --target x86_64-unknown-linux-gnu -//@ [set] compile-flags: -Zno-jump-tables +//@ [set_no] compile-flags: -Cjump-tables=no +//@ [set_yes] compile-flags: -Cjump-tables=yes #![crate_type = "lib"] #![feature(no_core, lang_items)] @@ -19,5 +20,6 @@ pub fn foo() { // CHECK: @foo() unnamed_addr #0 // unset-NOT: attributes #0 = { {{.*}}"no-jump-tables"="true"{{.*}} } - // set: attributes #0 = { {{.*}}"no-jump-tables"="true"{{.*}} } + // set_yes-NOT: attributes #0 = { {{.*}}"no-jump-tables"="true"{{.*}} } + // set_no: attributes #0 = { {{.*}}"no-jump-tables"="true"{{.*}} } } diff --git a/tests/ui/check-cfg/cfg-crate-features.stderr b/tests/ui/check-cfg/cfg-crate-features.stderr index 39fee52a909b6..38301f470bf73 100644 --- a/tests/ui/check-cfg/cfg-crate-features.stderr +++ b/tests/ui/check-cfg/cfg-crate-features.stderr @@ -24,7 +24,7 @@ warning: unexpected `cfg` condition value: `does_not_exist` LL | #![cfg(not(target(os = "does_not_exist")))] | ^^^^^^^^^^^^^^^^^^^^^ | - = note: expected values for `target_os` are: `aix`, `amdhsa`, `android`, `cuda`, `cygwin`, `dragonfly`, `emscripten`, `espidf`, `freebsd`, `fuchsia`, `haiku`, `hermit`, `horizon`, `hurd`, `illumos`, `ios`, `l4re`, `linux`, `lynxos178`, `macos`, `managarm`, `motor`, `netbsd`, `none`, `nto`, `nuttx`, `openbsd`, `psp`, `psx`, `redox`, `rtems`, `solaris`, `solid_asp3`, `teeos`, and `trusty` and 12 more + = note: expected values for `target_os` are: `aix`, `amdhsa`, `android`, `cuda`, `cygwin`, `dragonfly`, `emscripten`, `espidf`, `freebsd`, `fuchsia`, `haiku`, `helenos`, `hermit`, `horizon`, `hurd`, `illumos`, `ios`, `l4re`, `linux`, `lynxos178`, `macos`, `managarm`, `motor`, `netbsd`, `none`, `nto`, `nuttx`, `openbsd`, `psp`, `psx`, `redox`, `rtems`, `solaris`, `solid_asp3`, and `teeos` and 13 more = note: see for more information about checking conditional configuration = note: `#[warn(unexpected_cfgs)]` on by default diff --git a/tests/ui/check-cfg/well-known-values.stderr b/tests/ui/check-cfg/well-known-values.stderr index 3f14c7b08eac6..8205756d64dd1 100644 --- a/tests/ui/check-cfg/well-known-values.stderr +++ b/tests/ui/check-cfg/well-known-values.stderr @@ -201,7 +201,7 @@ warning: unexpected `cfg` condition value: `_UNEXPECTED_VALUE` LL | target_os = "_UNEXPECTED_VALUE", | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | - = note: expected values for `target_os` are: `aix`, `amdhsa`, `android`, `cuda`, `cygwin`, `dragonfly`, `emscripten`, `espidf`, `freebsd`, `fuchsia`, `haiku`, `hermit`, `horizon`, `hurd`, `illumos`, `ios`, `l4re`, `linux`, `lynxos178`, `macos`, `managarm`, `motor`, `netbsd`, `none`, `nto`, `nuttx`, `openbsd`, `psp`, `psx`, `redox`, `rtems`, `solaris`, `solid_asp3`, `teeos`, `trusty`, `tvos`, `uefi`, `unknown`, `vexos`, `visionos`, `vita`, `vxworks`, `wasi`, `watchos`, `windows`, `xous`, and `zkvm` 
+ = note: expected values for `target_os` are: `aix`, `amdhsa`, `android`, `cuda`, `cygwin`, `dragonfly`, `emscripten`, `espidf`, `freebsd`, `fuchsia`, `haiku`, `helenos`, `hermit`, `horizon`, `hurd`, `illumos`, `ios`, `l4re`, `linux`, `lynxos178`, `macos`, `managarm`, `motor`, `netbsd`, `none`, `nto`, `nuttx`, `openbsd`, `psp`, `psx`, `redox`, `rtems`, `solaris`, `solid_asp3`, `teeos`, `trusty`, `tvos`, `uefi`, `unknown`, `vexos`, `visionos`, `vita`, `vxworks`, `wasi`, `watchos`, `windows`, `xous`, and `zkvm` = note: see for more information about checking conditional configuration warning: unexpected `cfg` condition value: `_UNEXPECTED_VALUE` @@ -274,7 +274,7 @@ LL | #[cfg(target_os = "linuz")] // testing that we suggest `linux` | | | help: there is a expected value with a similar name: `"linux"` | - = note: expected values for `target_os` are: `aix`, `amdhsa`, `android`, `cuda`, `cygwin`, `dragonfly`, `emscripten`, `espidf`, `freebsd`, `fuchsia`, `haiku`, `hermit`, `horizon`, `hurd`, `illumos`, `ios`, `l4re`, `linux`, `lynxos178`, `macos`, `managarm`, `motor`, `netbsd`, `none`, `nto`, `nuttx`, `openbsd`, `psp`, `psx`, `redox`, `rtems`, `solaris`, `solid_asp3`, `teeos`, `trusty`, `tvos`, `uefi`, `unknown`, `vexos`, `visionos`, `vita`, `vxworks`, `wasi`, `watchos`, `windows`, `xous`, and `zkvm` + = note: expected values for `target_os` are: `aix`, `amdhsa`, `android`, `cuda`, `cygwin`, `dragonfly`, `emscripten`, `espidf`, `freebsd`, `fuchsia`, `haiku`, `helenos`, `hermit`, `horizon`, `hurd`, `illumos`, `ios`, `l4re`, `linux`, `lynxos178`, `macos`, `managarm`, `motor`, `netbsd`, `none`, `nto`, `nuttx`, `openbsd`, `psp`, `psx`, `redox`, `rtems`, `solaris`, `solid_asp3`, `teeos`, `trusty`, `tvos`, `uefi`, `unknown`, `vexos`, `visionos`, `vita`, `vxworks`, `wasi`, `watchos`, `windows`, `xous`, and `zkvm` = note: see for more information about checking conditional configuration warning: 28 warnings emitted diff --git a/tests/ui/parallel-rustc/ty-variance-issue-124423.stderr b/tests/ui/parallel-rustc/ty-variance-issue-124423.stderr index 7ba89f75bd1b5..81ba66c42faa3 100644 --- a/tests/ui/parallel-rustc/ty-variance-issue-124423.stderr +++ b/tests/ui/parallel-rustc/ty-variance-issue-124423.stderr @@ -278,10 +278,9 @@ note: if you're trying to build a new `Box<_, _>` consider using one of the foll Box::::new_uninit Box::::new_zeroed Box::::try_new - and 22 others --> $SRC_DIR/alloc/src/boxed.rs:LL:COL error: aborting due to 30 previous errors Some errors have detailed explanations: E0121, E0224, E0261, E0412, E0599. -For more information about an error, try `rustc --explain E0121`. +For more information about an error, try `rustc --explain E0121`. \ No newline at end of file diff --git a/tests/ui/parallel-rustc/ty-variance-issue-127971.stderr b/tests/ui/parallel-rustc/ty-variance-issue-127971.stderr index 9929d3ee22ced..55d52d35f4a8d 100644 --- a/tests/ui/parallel-rustc/ty-variance-issue-127971.stderr +++ b/tests/ui/parallel-rustc/ty-variance-issue-127971.stderr @@ -104,10 +104,9 @@ note: if you're trying to build a new `Box<_, _>` consider using one of the foll Box::::new_uninit Box::::new_zeroed Box::::try_new - and 22 others --> $SRC_DIR/alloc/src/boxed.rs:LL:COL error: aborting due to 11 previous errors Some errors have detailed explanations: E0121, E0224, E0261, E0599. -For more information about an error, try `rustc --explain E0121`. +For more information about an error, try `rustc --explain E0121`. 
\ No newline at end of file diff --git a/tests/ui/privacy/suggest-box-new.stderr b/tests/ui/privacy/suggest-box-new.stderr index 37b2989dcc148..7df293e1e1d94 100644 --- a/tests/ui/privacy/suggest-box-new.stderr +++ b/tests/ui/privacy/suggest-box-new.stderr @@ -63,7 +63,7 @@ LL - x: (), LL - })), LL + wtf: Some(Box::new_in(_, _)), | - = and 12 other candidates + = and 13 other candidates help: consider using the `Default` trait | LL - wtf: Some(Box(U { @@ -118,7 +118,7 @@ LL + let _ = Box::new_zeroed(); LL - let _ = Box {}; LL + let _ = Box::new_in(_, _); | - = and 13 other candidates + = and 14 other candidates help: consider using the `Default` trait | LL - let _ = Box {}; @@ -141,12 +141,12 @@ LL - let _ = Box:: {}; LL + let _ = Box::::new_in(_, _); | LL - let _ = Box:: {}; -LL + let _ = Box::::into_inner(_); +LL + let _ = Box::::map(_, _); | LL - let _ = Box:: {}; -LL + let _ = Box::::write(_, _); +LL + let _ = Box::::into_inner(_); | - = and 4 other candidates + = and 5 other candidates help: consider using the `Default` trait | LL - let _ = Box:: {}; diff --git a/tests/ui/suggestions/multi-suggestion.ascii.stderr b/tests/ui/suggestions/multi-suggestion.ascii.stderr index 9c8867a17711e..4bd6c19e0829b 100644 --- a/tests/ui/suggestions/multi-suggestion.ascii.stderr +++ b/tests/ui/suggestions/multi-suggestion.ascii.stderr @@ -63,7 +63,7 @@ LL - x: (), LL - })), LL + wtf: Some(Box::new_in(_, _)), | - = and 12 other candidates + = and 13 other candidates help: consider using the `Default` trait | LL - wtf: Some(Box(U { @@ -118,7 +118,7 @@ LL + let _ = Box::new_zeroed(); LL - let _ = Box {}; LL + let _ = Box::new_in(_, _); | - = and 13 other candidates + = and 14 other candidates help: consider using the `Default` trait | LL - let _ = Box {}; diff --git a/tests/ui/suggestions/multi-suggestion.unicode.stderr b/tests/ui/suggestions/multi-suggestion.unicode.stderr index 4fdab51493e26..b11570f34161c 100644 --- a/tests/ui/suggestions/multi-suggestion.unicode.stderr +++ b/tests/ui/suggestions/multi-suggestion.unicode.stderr @@ -63,7 +63,7 @@ LL - x: (), LL - })), LL + wtf: Some(Box::new_in(_, _)), │ - ╰ and 12 other candidates + ╰ and 13 other candidates help: consider using the `Default` trait ╭╴ LL - wtf: Some(Box(U { @@ -118,7 +118,7 @@ LL + let _ = Box::new_zeroed(); LL - let _ = Box {}; LL + let _ = Box::new_in(_, _); │ - ╰ and 13 other candidates + ╰ and 14 other candidates help: consider using the `Default` trait ╭╴ LL - let _ = Box {};
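
As a closing usage note for the `-C jump-tables` option documented above: the following is a minimal, hypothetical sketch (the function, arm values, and file name are illustrative only and are not part of this patch) of the kind of dense `match` whose switch lowering the flag controls. Per the codegen test above, building with `-Cjump-tables=no` attaches the LLVM `"no-jump-tables"="true"` attribute to functions, so the backend avoids jump-table and lookup-table lowering for code like this.

```rust
// Hypothetical example, not part of this patch: a dense `match` that the LLVM
// backend may lower to a jump table or lookup table at -Copt-level=3.
//
// Possible build commands (flag values as documented above):
//     rustc -Copt-level=3 -Cjump-tables=no  dispatch.rs   # avoid table lowering
//     rustc -Copt-level=3 -Cjump-tables=yes dispatch.rs   # default behaviour

#[no_mangle]
pub fn dispatch(x: u32) -> u32 {
    // Irregular arm values make a table the natural lowering; with
    // `-Cjump-tables=no` the backend avoids the table and uses branches instead.
    match x {
        0 => 7,
        1 => 13,
        2 => 42,
        3 => 5,
        4 => 91,
        5 => 23,
        6 => 68,
        7 => 3,
        _ => 0,
    }
}

fn main() {
    // `black_box` keeps the call from being constant-folded away.
    println!("{}", dispatch(std::hint::black_box(3)));
}
```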