diff --git a/src/liballoc/collections/btree/map.rs b/src/liballoc/collections/btree/map.rs
index 099687bd6b082..61d2295ba8c1c 100644
--- a/src/liballoc/collections/btree/map.rs
+++ b/src/liballoc/collections/btree/map.rs
@@ -215,59 +215,6 @@ impl<K: Clone, V: Clone> Clone for BTreeMap<K, V> {
             clone_subtree(self.root.as_ref().unwrap().as_ref())
         }
     }
-
-    fn clone_from(&mut self, other: &Self) {
-        BTreeClone::clone_from(self, other);
-    }
-}
-
-trait BTreeClone {
-    fn clone_from(&mut self, other: &Self);
-}
-
-impl<K: Clone, V: Clone> BTreeClone for BTreeMap<K, V> {
-    default fn clone_from(&mut self, other: &Self) {
-        *self = other.clone();
-    }
-}
-
-impl<K: Clone + Ord, V: Clone> BTreeClone for BTreeMap<K, V> {
-    fn clone_from(&mut self, other: &Self) {
-        // This truncates `self` to `other.len()` by calling `split_off` on
-        // the first key after `other.len()` elements if it exists.
-        let split_off_key = if self.len() > other.len() {
-            let diff = self.len() - other.len();
-            if diff <= other.len() {
-                self.iter().nth_back(diff - 1).map(|pair| (*pair.0).clone())
-            } else {
-                self.iter().nth(other.len()).map(|pair| (*pair.0).clone())
-            }
-        } else {
-            None
-        };
-        if let Some(key) = split_off_key {
-            self.split_off(&key);
-        }
-
-        let mut siter = self.range_mut(..);
-        let mut oiter = other.iter();
-        // After truncation, `self` is at most as long as `other` so this loop
-        // replaces every key-value pair in `self`. Since `oiter` is in sorted
-        // order and the structure of the `BTreeMap` stays the same,
-        // the BTree invariants are maintained at the end of the loop.
-        while !siter.is_empty() {
-            if let Some((ok, ov)) = oiter.next() {
-                // SAFETY: This is safe because `siter` is nonempty.
-                let (sk, sv) = unsafe { siter.next_unchecked() };
-                sk.clone_from(ok);
-                sv.clone_from(ov);
-            } else {
-                break;
-            }
-        }
-        // If `other` is longer than `self`, the remaining elements are inserted.
-        self.extend(oiter.map(|(k, v)| ((*k).clone(), (*v).clone())));
-    }
 }
 
 impl<K, Q: ?Sized> super::Recover<Q> for BTreeMap<K, ()>
diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs
index ecec1fb039b33..3ebb5662350a7 100644
--- a/src/liballoc/lib.rs
+++ b/src/liballoc/lib.rs
@@ -108,7 +108,7 @@
 #![feature(ptr_offset_from)]
 #![feature(rustc_attrs)]
 #![feature(receiver_trait)]
-#![feature(specialization)]
+#![feature(min_specialization)]
 #![feature(staged_api)]
 #![feature(std_internals)]
 #![feature(str_internals)]
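The `BTreeClone` removal above is forced by the feature switch: under `min_specialization` a specializing impl may only add bounds on designated specialization marker traits (or apply to a more concrete type), and this impl specialized on `K: Ord`, an ordinary trait with methods. Callers of `Clone::clone_from` see no API change; only the internal node-reuse optimization is gone. A minimal sketch of the unchanged caller-side behavior, using arbitrary example types:

```rust
use std::collections::BTreeMap;

fn main() {
    let mut dst: BTreeMap<i32, String> = (0..4).map(|i| (i, i.to_string())).collect();
    let src: BTreeMap<i32, String> = (10..12).map(|i| (i, i.to_string())).collect();

    // `clone_from` now goes through the blanket `*self = other.clone()` path;
    // the result is the same, the removed impl only reused `dst`'s existing nodes.
    dst.clone_from(&src);
    assert_eq!(dst, src);
}
```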
#[stable(feature = "rust1", since = "1.0.0")] -impl RcEqIdent for Rc { +impl RcEqIdent for Rc { #[inline] fn eq(&self, other: &Rc) -> bool { Rc::ptr_eq(self, other) || **self == **other @@ -1548,25 +1554,25 @@ impl iter::FromIterator for Rc<[T]> { /// # assert_eq!(&*evens, &*(0..10).collect::>()); /// ``` fn from_iter>(iter: I) -> Self { - RcFromIter::from_iter(iter.into_iter()) + ToRcSlice::to_rc_slice(iter.into_iter()) } } /// Specialization trait used for collecting into `Rc<[T]>`. -trait RcFromIter { - fn from_iter(iter: I) -> Self; +trait ToRcSlice: Iterator + Sized { + fn to_rc_slice(self) -> Rc<[T]>; } -impl> RcFromIter for Rc<[T]> { - default fn from_iter(iter: I) -> Self { - iter.collect::>().into() +impl> ToRcSlice for I { + default fn to_rc_slice(self) -> Rc<[T]> { + self.collect::>().into() } } -impl> RcFromIter for Rc<[T]> { - default fn from_iter(iter: I) -> Self { +impl> ToRcSlice for I { + fn to_rc_slice(self) -> Rc<[T]> { // This is the case for a `TrustedLen` iterator. - let (low, high) = iter.size_hint(); + let (low, high) = self.size_hint(); if let Some(high) = high { debug_assert_eq!( low, @@ -1577,29 +1583,15 @@ impl> RcFromIter for Rc<[T]> { unsafe { // SAFETY: We need to ensure that the iterator has an exact length and we have. - Rc::from_iter_exact(iter, low) + Rc::from_iter_exact(self, low) } } else { // Fall back to normal implementation. - iter.collect::>().into() + self.collect::>().into() } } } -impl<'a, T: 'a + Clone> RcFromIter<&'a T, slice::Iter<'a, T>> for Rc<[T]> { - fn from_iter(iter: slice::Iter<'a, T>) -> Self { - // Delegate to `impl From<&[T]> for Rc<[T]>`. - // - // In the case that `T: Copy`, we get to use `ptr::copy_nonoverlapping` - // which is even more performant. - // - // In the fall-back case we have `T: Clone`. This is still better - // than the `TrustedLen` implementation as slices have a known length - // and so we get to avoid calling `size_hint` and avoid the branching. - iter.as_slice().into() - } -} - /// `Weak` is a version of [`Rc`] that holds a non-owning reference to the /// managed allocation. The allocation is accessed by calling [`upgrade`] on the `Weak` /// pointer, which returns an [`Option`]`<`[`Rc`]`>`. diff --git a/src/liballoc/sync.rs b/src/liballoc/sync.rs index a81e0cf7e1d2a..2b71378619c05 100644 --- a/src/liballoc/sync.rs +++ b/src/liballoc/sync.rs @@ -20,7 +20,7 @@ use core::mem::{self, align_of, align_of_val, size_of_val}; use core::ops::{CoerceUnsized, Deref, DispatchFromDyn, Receiver}; use core::pin::Pin; use core::ptr::{self, NonNull}; -use core::slice::{self, from_raw_parts_mut}; +use core::slice::from_raw_parts_mut; use core::sync::atomic; use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst}; @@ -1779,7 +1779,7 @@ impl ArcEqIdent for Arc { /// /// We can only do this when `T: Eq` as a `PartialEq` might be deliberately irreflexive. #[stable(feature = "rust1", since = "1.0.0")] -impl ArcEqIdent for Arc { +impl ArcEqIdent for Arc { #[inline] fn eq(&self, other: &Arc) -> bool { Arc::ptr_eq(self, other) || **self == **other @@ -2105,25 +2105,25 @@ impl iter::FromIterator for Arc<[T]> { /// # assert_eq!(&*evens, &*(0..10).collect::>()); /// ``` fn from_iter>(iter: I) -> Self { - ArcFromIter::from_iter(iter.into_iter()) + ToArcSlice::to_arc_slice(iter.into_iter()) } } /// Specialization trait used for collecting into `Arc<[T]>`. 
diff --git a/src/liballoc/sync.rs b/src/liballoc/sync.rs
index a81e0cf7e1d2a..2b71378619c05 100644
--- a/src/liballoc/sync.rs
+++ b/src/liballoc/sync.rs
@@ -20,7 +20,7 @@ use core::mem::{self, align_of, align_of_val, size_of_val};
 use core::ops::{CoerceUnsized, Deref, DispatchFromDyn, Receiver};
 use core::pin::Pin;
 use core::ptr::{self, NonNull};
-use core::slice::{self, from_raw_parts_mut};
+use core::slice::from_raw_parts_mut;
 use core::sync::atomic;
 use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};
@@ -1779,7 +1779,7 @@ impl<T: ?Sized + PartialEq> ArcEqIdent<T> for Arc<T> {
 ///
 /// We can only do this when `T: Eq` as a `PartialEq` might be deliberately irreflexive.
 #[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized + Eq> ArcEqIdent<T> for Arc<T> {
+impl<T: ?Sized + crate::rc::MarkerEq> ArcEqIdent<T> for Arc<T> {
     #[inline]
     fn eq(&self, other: &Arc<T>) -> bool {
         Arc::ptr_eq(self, other) || **self == **other
@@ -2105,25 +2105,25 @@ impl<T> iter::FromIterator<T> for Arc<[T]> {
     /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
     /// ```
     fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
-        ArcFromIter::from_iter(iter.into_iter())
+        ToArcSlice::to_arc_slice(iter.into_iter())
     }
 }
 
 /// Specialization trait used for collecting into `Arc<[T]>`.
-trait ArcFromIter<T, I> {
-    fn from_iter(iter: I) -> Self;
+trait ToArcSlice<T>: Iterator<Item = T> + Sized {
+    fn to_arc_slice(self) -> Arc<[T]>;
 }
 
-impl<T, I: Iterator<Item = T>> ArcFromIter<T, I> for Arc<[T]> {
-    default fn from_iter(iter: I) -> Self {
-        iter.collect::<Vec<T>>().into()
+impl<T, I: Iterator<Item = T>> ToArcSlice<T> for I {
+    default fn to_arc_slice(self) -> Arc<[T]> {
+        self.collect::<Vec<T>>().into()
     }
 }
 
-impl<T, I: iter::TrustedLen<Item = T>> ArcFromIter<T, I> for Arc<[T]> {
-    default fn from_iter(iter: I) -> Self {
+impl<T, I: iter::TrustedLen<Item = T>> ToArcSlice<T> for I {
+    fn to_arc_slice(self) -> Arc<[T]> {
         // This is the case for a `TrustedLen` iterator.
-        let (low, high) = iter.size_hint();
+        let (low, high) = self.size_hint();
         if let Some(high) = high {
             debug_assert_eq!(
                 low,
@@ -2134,29 +2134,15 @@ impl<T, I: iter::TrustedLen<Item = T>> ArcFromIter<T, I> for Arc<[T]> {
 
             unsafe {
                 // SAFETY: We need to ensure that the iterator has an exact length and we have.
-                Arc::from_iter_exact(iter, low)
+                Arc::from_iter_exact(self, low)
             }
         } else {
             // Fall back to normal implementation.
-            iter.collect::<Vec<T>>().into()
+            self.collect::<Vec<T>>().into()
         }
     }
 }
 
-impl<'a, T: 'a + Clone> ArcFromIter<&'a T, slice::Iter<'a, T>> for Arc<[T]> {
-    fn from_iter(iter: slice::Iter<'a, T>) -> Self {
-        // Delegate to `impl From<&[T]> for Arc<[T]>`.
-        //
-        // In the case that `T: Copy`, we get to use `ptr::copy_nonoverlapping`
-        // which is even more performant.
-        //
-        // In the fall-back case we have `T: Clone`. This is still better
-        // than the `TrustedLen` implementation as slices have a known length
-        // and so we get to avoid calling `size_hint` and avoid the branching.
-        iter.as_slice().into()
-    }
-}
-
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
     fn borrow(&self) -> &T {
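With the dedicated `slice::Iter` impls deleted in both `rc.rs` and `sync.rs`, collecting a slice iterator into `Rc<[T]>` or `Arc<[T]>` now goes through the `TrustedLen` impl instead, and the `From<&[T]>` copy path stays reachable explicitly. A rough usage sketch with arbitrary values:

```rust
use std::sync::Arc;

fn main() {
    let src = vec![1_u32, 2, 3];

    // `slice::Iter` (and `Copied` of it) is `TrustedLen`, so this collect can
    // still take the single-allocation fast path in `to_arc_slice`.
    let collected: Arc<[u32]> = src.iter().copied().collect();

    // The `From<&[T]>` route that the removed impl delegated to is still
    // available when asked for directly.
    let converted: Arc<[u32]> = Arc::from(&src[..]);

    assert_eq!(collected, converted);
}
```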
diff --git a/src/liballoc/vec.rs b/src/liballoc/vec.rs
index b4a9da847876d..624ca2820d758 100644
--- a/src/liballoc/vec.rs
+++ b/src/liballoc/vec.rs
@@ -1619,8 +1619,8 @@ impl<T: Default> Vec<T> {
     #[unstable(feature = "vec_resize_default", issue = "41758")]
     #[rustc_deprecated(
         reason = "This is moving towards being removed in favor \
-        of `.resize_with(Default::default)`. If you disagree, please comment \
-        in the tracking issue.",
+                  of `.resize_with(Default::default)`. If you disagree, please comment \
+                  in the tracking issue.",
         since = "1.33.0"
     )]
     pub fn resize_default(&mut self, new_len: usize) {
@@ -1825,6 +1825,7 @@ impl<T: Clone + IsZero> SpecFromElem for T {
     }
 }
 
+#[rustc_specialization_trait]
 unsafe trait IsZero {
     /// Whether this value is zero
     fn is_zero(&self) -> bool;
@@ -1874,9 +1875,12 @@ unsafe impl<T> IsZero for *mut T {
     }
 }
 
-// `Option<&T>`, `Option<&mut T>` and `Option<Box<T>>` are guaranteed to represent `None` as null.
-// For fat pointers, the bytes that would be the pointer metadata in the `Some` variant
-// are padding in the `None` variant, so ignoring them and zero-initializing instead is ok.
+// `Option<&T>` and `Option<Box<T>>` are guaranteed to represent `None` as null.
+// For fat pointers, the bytes that would be the pointer metadata in the `Some`
+// variant are padding in the `None` variant, so ignoring them and
+// zero-initializing instead is ok.
+// `Option<&mut T>` never implements `Clone`, so there's no need for an impl of
+// `SpecFromElem`.
 
 unsafe impl<T: ?Sized> IsZero for Option<&T> {
     #[inline]
@@ -1885,13 +1889,6 @@ unsafe impl<T: ?Sized> IsZero for Option<&T> {
     }
 }
 
-unsafe impl<T: ?Sized> IsZero for Option<&mut T> {
-    #[inline]
-    fn is_zero(&self) -> bool {
-        self.is_none()
-    }
-}
-
 unsafe impl<T: ?Sized> IsZero for Option<Box<T>> {
     #[inline]
     fn is_zero(&self) -> bool {
diff --git a/src/libcore/iter/traits/marker.rs b/src/libcore/iter/traits/marker.rs
index 404cc84495c96..a9ba3908c3898 100644
--- a/src/libcore/iter/traits/marker.rs
+++ b/src/libcore/iter/traits/marker.rs
@@ -13,6 +13,7 @@
 /// [`Iterator::fuse`]: ../../std/iter/trait.Iterator.html#method.fuse
 /// [`Fuse`]: ../../std/iter/struct.Fuse.html
 #[stable(feature = "fused", since = "1.26.0")]
+#[rustc_unsafe_specialization_marker]
 pub trait FusedIterator: Iterator {}
 
 #[stable(feature = "fused", since = "1.26.0")]
@@ -38,6 +39,7 @@ impl<I: FusedIterator + ?Sized> FusedIterator for &mut I {}
 /// [`usize::MAX`]: ../../std/usize/constant.MAX.html
 /// [`.size_hint`]: ../../std/iter/trait.Iterator.html#method.size_hint
 #[unstable(feature = "trusted_len", issue = "37572")]
+#[rustc_unsafe_specialization_marker]
 pub unsafe trait TrustedLen: Iterator {}
 
 #[unstable(feature = "trusted_len", issue = "37572")]
diff --git a/src/libcore/marker.rs b/src/libcore/marker.rs
index 09a8b417e6e22..339b07119c6d5 100644
--- a/src/libcore/marker.rs
+++ b/src/libcore/marker.rs
@@ -363,6 +363,13 @@ pub trait StructuralEq {
 /// [impls]: #implementors
 #[stable(feature = "rust1", since = "1.0.0")]
 #[lang = "copy"]
+// FIXME(matthewjasper) This allows copying a type that doesn't implement
+// `Copy` because of unsatisfied lifetime bounds (copying `A<'_>` when only
+// `A<'static>: Copy` and `A<'_>: Clone`).
+// We have this attribute here for now only because there are quite a few
+// existing specializations on `Copy` that already exist in the standard
+// library, and there's no way to safely have this behavior right now.
+#[rustc_unsafe_specialization_marker]
 pub trait Copy: Clone {
     // Empty.
 }
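On the `vec.rs` side, dropping `IsZero for Option<&mut T>` costs nothing because the only consumer, `SpecFromElem` (the `vec![x; n]` path), already requires `Clone`, and `&mut T` is never `Clone`. A small stable-Rust sketch of the behavior that is kept; the zeroed-allocation note describes the current internal fast path, not a guaranteed API:

```rust
fn main() {
    // `Option<&T>` is `Clone` and its `None` is all zero bytes, so
    // `vec![None; n]` can still take the zeroed-allocation path via `IsZero`.
    let v: Vec<Option<&u8>> = vec![None; 4];
    assert!(v.iter().all(|x| x.is_none()));

    // `Option<&mut u8>` is not `Clone`, so the equivalent line would not
    // compile, which is why its `IsZero` impl could be removed:
    // let w: Vec<Option<&mut u8>> = vec![None; 4];
}
```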