From de72f4459e300b3d4e476bbedfa298b92da045de Mon Sep 17 00:00:00 2001 From: Stein Somers Date: Wed, 18 Nov 2020 18:19:38 +0100 Subject: [PATCH] BTree: add drain methods --- library/alloc/benches/btree/map.rs | 66 +++- library/alloc/benches/lib.rs | 1 + library/alloc/src/collections/btree/map.rs | 91 +++++ .../alloc/src/collections/btree/map/tests.rs | 1 + .../src/collections/btree/map/tests/drain.rs | 370 ++++++++++++++++++ library/alloc/src/collections/btree/set.rs | 79 ++++ .../alloc/src/collections/btree/set/tests.rs | 28 ++ .../issue-52202-use-suggestions.stderr | 6 +- 8 files changed, 630 insertions(+), 12 deletions(-) create mode 100644 library/alloc/src/collections/btree/map/tests/drain.rs diff --git a/library/alloc/benches/btree/map.rs b/library/alloc/benches/btree/map.rs index 89c21929dbcda..bc85f5b087f1e 100644 --- a/library/alloc/benches/btree/map.rs +++ b/library/alloc/benches/btree/map.rs @@ -384,17 +384,33 @@ pub fn clone_slim_100_and_clear(b: &mut Bencher) { } #[bench] -pub fn clone_slim_100_and_drain_all(b: &mut Bencher) { +pub fn clone_slim_100_and_drain_range_all(b: &mut Bencher) { + let src = slim_map(100); + b.iter(|| src.clone().drain(..).count()) +} + +#[bench] +pub fn clone_slim_100_and_drain_filter_all(b: &mut Bencher) { let src = slim_map(100); b.iter(|| src.clone().drain_filter(|_, _| true).count()) } #[bench] -pub fn clone_slim_100_and_drain_half(b: &mut Bencher) { +pub fn clone_slim_100_and_drain_range_half(b: &mut Bencher) { + let src = slim_map(100); + b.iter(|| { + let mut map = src.clone(); + assert_eq!(map.drain(25..75).count(), 100 / 2); + assert_eq!(map.len(), 100 / 2); + }) +} + +#[bench] +pub fn clone_slim_100_and_drain_filter_half(b: &mut Bencher) { let src = slim_map(100); b.iter(|| { let mut map = src.clone(); - assert_eq!(map.drain_filter(|i, _| i % 2 == 0).count(), 100 / 2); + assert_eq!(map.drain_filter(|i, _| (25..75).contains(i)).count(), 100 / 2); assert_eq!(map.len(), 100 / 2); }) } @@ -455,17 +471,33 @@ pub fn 
clone_slim_10k_and_clear(b: &mut Bencher) { } #[bench] -pub fn clone_slim_10k_and_drain_all(b: &mut Bencher) { +pub fn clone_slim_10k_and_drain_range_all(b: &mut Bencher) { + let src = slim_map(10_000); + b.iter(|| src.clone().drain(..).count()) +} + +#[bench] +pub fn clone_slim_10k_and_drain_filter_all(b: &mut Bencher) { let src = slim_map(10_000); b.iter(|| src.clone().drain_filter(|_, _| true).count()) } #[bench] -pub fn clone_slim_10k_and_drain_half(b: &mut Bencher) { +pub fn clone_slim_10k_and_drain_range_half(b: &mut Bencher) { + let src = slim_map(10_000); + b.iter(|| { + let mut map = src.clone(); + assert_eq!(map.drain(2500..7500).count(), 10_000 / 2); + assert_eq!(map.len(), 10_000 / 2); + }) +} + +#[bench] +pub fn clone_slim_10k_and_drain_filter_half(b: &mut Bencher) { let src = slim_map(10_000); b.iter(|| { let mut map = src.clone(); - assert_eq!(map.drain_filter(|i, _| i % 2 == 0).count(), 10_000 / 2); + assert_eq!(map.drain_filter(|i, _| (2500..7500).contains(i)).count(), 10_000 / 2); assert_eq!(map.len(), 10_000 / 2); }) } @@ -526,17 +558,33 @@ pub fn clone_fat_val_100_and_clear(b: &mut Bencher) { } #[bench] -pub fn clone_fat_val_100_and_drain_all(b: &mut Bencher) { +pub fn clone_fat_val_100_and_drain_range_all(b: &mut Bencher) { + let src = fat_val_map(100); + b.iter(|| src.clone().drain(..).count()) +} + +#[bench] +pub fn clone_fat_val_100_and_drain_filter_all(b: &mut Bencher) { let src = fat_val_map(100); b.iter(|| src.clone().drain_filter(|_, _| true).count()) } #[bench] -pub fn clone_fat_val_100_and_drain_half(b: &mut Bencher) { +pub fn clone_fat_val_100_and_drain_range_half(b: &mut Bencher) { + let src = fat_val_map(100); + b.iter(|| { + let mut map = src.clone(); + assert_eq!(map.drain(25..75).count(), 100 / 2); + assert_eq!(map.len(), 100 / 2); + }) +} + +#[bench] +pub fn clone_fat_val_100_and_drain_filter_half(b: &mut Bencher) { let src = fat_val_map(100); b.iter(|| { let mut map = src.clone(); - assert_eq!(map.drain_filter(|i, _| i % 2 == 
0).count(), 100 / 2); + assert_eq!(map.drain_filter(|i, _| (25..75).contains(i)).count(), 100 / 2); assert_eq!(map.len(), 100 / 2); }) } diff --git a/library/alloc/benches/lib.rs b/library/alloc/benches/lib.rs index 38a8f65f1695a..e793c0cffb773 100644 --- a/library/alloc/benches/lib.rs +++ b/library/alloc/benches/lib.rs @@ -1,6 +1,7 @@ // Disabling on android for the time being // See https://github.com/rust-lang/rust/issues/73535#event-3477699747 #![cfg(not(target_os = "android"))] +#![feature(btree_drain)] #![feature(btree_drain_filter)] #![feature(map_first_last)] #![feature(repr_simd)] diff --git a/library/alloc/src/collections/btree/map.rs b/library/alloc/src/collections/btree/map.rs index b6dd585052226..60792dadc1a2e 100644 --- a/library/alloc/src/collections/btree/map.rs +++ b/library/alloc/src/collections/btree/map.rs @@ -1264,6 +1264,66 @@ impl BTreeMap { BTreeMap { root: Some(right_root), length: right_len } } + /// Removes at once all keys within the range from the map, returning the + /// removed key-value pairs as an iterator in ascending key order. If the + /// iterator is dropped before being fully consumed, it drops the remaining + /// removed key-value pairs. + /// + /// The returned iterator keeps a mutable borrow on the map to allow + /// optimizing its implementation. + /// + /// # Panics + /// + /// Panics if range `start > end`. + /// Panics if range `start == end` and both bounds are `Excluded`. + /// May panic if the [`Ord`] implementation of type `T` is ill-defined, + /// either because it does not form a total order or because it does not + /// correspond to the [`Ord`] implementation of type `K`. + /// + /// # Leaking + /// + /// If the returned iterator goes out of scope without being dropped (due to + /// [`mem::forget`], for example), the map may have lost and leaked + /// key-value pairs arbitrarily, including key-value pairs outside the range. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(btree_drain)] + /// use std::collections::BTreeMap; + /// + /// let mut a = BTreeMap::new(); + /// a.insert(1, "a"); + /// a.insert(2, "b"); + /// a.insert(3, "c"); + /// a.insert(17, "d"); + /// a.insert(41, "e"); + /// + /// let b: Vec<_> = a.drain(3..33).collect(); + /// assert_eq!(b, vec![(3, "c"), (17, "d")]); + /// assert_eq!(a.len(), 3); + /// ``` + #[unstable(feature = "btree_drain", issue = "81074")] + pub fn drain(&mut self, range: R) -> Drain<'_, K, V> + where + T: Ord, + K: Borrow + Ord, + R: RangeBounds, + { + let inner = if let Some(left_root) = self.root.as_mut() { + let total_num = self.length; + let right_root = left_root.split_off_range(range); + let (new_left_len, right_len) = + Root::calc_split_length(total_num, &left_root, &right_root); + self.length = new_left_len; + let right_range = right_root.into_dying().full_range(); + IntoIter { range: right_range, length: right_len } + } else { + IntoIter { range: LazyLeafRange::none(), length: 0 } + }; + Drain { inner, _marker: PhantomData } + } + /// Creates an iterator that visits all elements (key-value pairs) in /// ascending key order and uses a closure to determine if an element should /// be removed. If the closure returns `true`, the element is removed from @@ -1712,6 +1772,37 @@ impl Clone for Values<'_, K, V> { } } +/// An iterator produced by calling `drain` on BTreeMap. 
+#[unstable(feature = "btree_drain", issue = "81074")] +#[derive(Debug)] +pub struct Drain<'a, K, V> { + inner: IntoIter, + _marker: PhantomData<&'a mut BTreeMap>, +} + +#[unstable(feature = "btree_drain", issue = "81074")] +impl Iterator for Drain<'_, K, V> { + type Item = (K, V); + + fn next(&mut self) -> Option<(K, V)> { + self.inner.next() + } + + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Drain<'_, K, V> { + fn len(&self) -> usize { + self.inner.len() + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for Drain<'_, K, V> {} + /// An iterator produced by calling `drain_filter` on BTreeMap. #[unstable(feature = "btree_drain_filter", issue = "70530")] pub struct DrainFilter<'a, K, V, F> diff --git a/library/alloc/src/collections/btree/map/tests.rs b/library/alloc/src/collections/btree/map/tests.rs index 3d6baaa393554..85d0aaa64043a 100644 --- a/library/alloc/src/collections/btree/map/tests.rs +++ b/library/alloc/src/collections/btree/map/tests.rs @@ -17,6 +17,7 @@ use std::ops::RangeBounds; use std::panic::{catch_unwind, AssertUnwindSafe}; use std::sync::atomic::{AtomicUsize, Ordering::SeqCst}; +mod drain; mod split_off_range; // Minimum number of elements to insert, to guarantee a tree with 2 levels, diff --git a/library/alloc/src/collections/btree/map/tests/drain.rs b/library/alloc/src/collections/btree/map/tests/drain.rs new file mode 100644 index 0000000000000..9717e25dcadd3 --- /dev/null +++ b/library/alloc/src/collections/btree/map/tests/drain.rs @@ -0,0 +1,370 @@ +use super::*; +use std::cmp::{max, min}; + +#[test] +fn empty() { + let mut map: BTreeMap = BTreeMap::new(); + map.drain(..); + assert!(map.is_empty()); + map.check(); +} + +// Drop the iterator, where most test cases consume it entirely. 
+#[test] +fn dropped_keeping_all() { + let pairs = (0..3).map(|i| (i, i)); + let mut map = BTreeMap::from_iter(pairs); + map.drain(..0); + assert!(map.keys().copied().eq(0..3)); + map.check(); +} + +// Drop the iterator, where most test cases consume it entirely. +#[test] +fn dropped_removing_all() { + let pairs = (0..3).map(|i| (i, i)); + let mut map = BTreeMap::from_iter(pairs.clone()); + map.drain(..); + assert!(map.is_empty()); + map.check(); +} + +#[test] +fn consumed_keeping_all() { + let pairs = (0..3).map(|i| (i, ())); + let mut map = BTreeMap::from_iter(pairs); + assert!(map.drain(..0).eq(iter::empty())); + assert!(map.keys().copied().eq(0..3)); + map.check(); +} + +#[test] +fn range_small() { + fn range_keys(size: i32, range: impl RangeBounds) -> Vec { + let mut map = BTreeMap::from_iter((1..=size).map(|i| (i, ()))); + map.drain(range).map(|kv| kv.0).collect() + } + + let size = 4; + let all = Vec::from_iter(1..=size); + let (first, last) = (vec![all[0]], vec![all[size as usize - 1]]); + + assert_eq!(range_keys(size, (Excluded(0), Excluded(size + 1))), all); + assert_eq!(range_keys(size, (Excluded(0), Included(size + 1))), all); + assert_eq!(range_keys(size, (Excluded(0), Included(size))), all); + assert_eq!(range_keys(size, (Excluded(0), Unbounded)), all); + assert_eq!(range_keys(size, (Included(0), Excluded(size + 1))), all); + assert_eq!(range_keys(size, (Included(0), Included(size + 1))), all); + assert_eq!(range_keys(size, (Included(0), Included(size))), all); + assert_eq!(range_keys(size, (Included(0), Unbounded)), all); + assert_eq!(range_keys(size, (Included(1), Excluded(size + 1))), all); + assert_eq!(range_keys(size, (Included(1), Included(size + 1))), all); + assert_eq!(range_keys(size, (Included(1), Included(size))), all); + assert_eq!(range_keys(size, (Included(1), Unbounded)), all); + assert_eq!(range_keys(size, (Unbounded, Excluded(size + 1))), all); + assert_eq!(range_keys(size, (Unbounded, Included(size + 1))), all); + 
assert_eq!(range_keys(size, (Unbounded, Included(size))), all); + assert_eq!(range_keys(size, ..), all); + + assert_eq!(range_keys(size, (Excluded(0), Excluded(1))), vec![]); + assert_eq!(range_keys(size, (Excluded(0), Included(0))), vec![]); + assert_eq!(range_keys(size, (Included(0), Included(0))), vec![]); + assert_eq!(range_keys(size, (Included(0), Excluded(1))), vec![]); + assert_eq!(range_keys(size, (Unbounded, Excluded(1))), vec![]); + assert_eq!(range_keys(size, (Unbounded, Included(0))), vec![]); + assert_eq!(range_keys(size, (Excluded(0), Excluded(2))), first); + assert_eq!(range_keys(size, (Excluded(0), Included(1))), first); + assert_eq!(range_keys(size, (Included(0), Excluded(2))), first); + assert_eq!(range_keys(size, (Included(0), Included(1))), first); + assert_eq!(range_keys(size, (Included(1), Excluded(2))), first); + assert_eq!(range_keys(size, (Included(1), Included(1))), first); + assert_eq!(range_keys(size, (Unbounded, Excluded(2))), first); + assert_eq!(range_keys(size, (Unbounded, Included(1))), first); + assert_eq!(range_keys(size, (Excluded(size - 1), Excluded(size + 1))), last); + assert_eq!(range_keys(size, (Excluded(size - 1), Included(size + 1))), last); + assert_eq!(range_keys(size, (Excluded(size - 1), Included(size))), last); + assert_eq!(range_keys(size, (Excluded(size - 1), Unbounded)), last); + assert_eq!(range_keys(size, (Included(size), Excluded(size + 1))), last); + assert_eq!(range_keys(size, (Included(size), Included(size + 1))), last); + assert_eq!(range_keys(size, (Included(size), Included(size))), last); + assert_eq!(range_keys(size, (Included(size), Unbounded)), last); + assert_eq!(range_keys(size, (Excluded(size), Excluded(size + 1))), vec![]); + assert_eq!(range_keys(size, (Excluded(size), Included(size))), vec![]); + assert_eq!(range_keys(size, (Excluded(size), Unbounded)), vec![]); + assert_eq!(range_keys(size, (Included(size + 1), Excluded(size + 1))), vec![]); + assert_eq!(range_keys(size, (Included(size + 1), 
Included(size + 1))), vec![]); + assert_eq!(range_keys(size, (Included(size + 1), Unbounded)), vec![]); +} + +fn test_size_range>( + size: usize, + height: usize, + compact: bool, + range: R, + keep: usize, +) { + let mut map = if compact { + BTreeMap::from_iter((0..size).map(|i| (i, ()))) + } else { + let mut map = BTreeMap::new(); + for i in 0..size { + map.insert(i, ()); + } + map + }; + assert_eq!(map.len(), size); + assert_eq!(map.height(), Some(height), "{}", map.dump_keys()); + assert_eq!(map.drain(range).count(), size - keep); + assert_eq!(map.len(), keep); + map.check(); +} + +fn test_size_keeping_n(size: usize, height: usize, compact: bool, keep: usize) { + for doomed_start in 0..keep + 1 { + test_size_range(size, height, compact, doomed_start..(doomed_start + size - keep), keep); + } +} + +fn test_size_all(size: usize, height: usize, compact: bool) { + for keep in 0..size + 1 { + test_size_keeping_n(size, height, compact, keep) + } +} + +fn test_size_some(size: usize, height: usize, compact: bool) { + test_size_keeping_n(size, height, compact, 0); + test_size_keeping_n(size, height, compact, 1); + test_size_keeping_n(size, height, compact, 2); + test_size_keeping_n(size, height, compact, size / 4); + test_size_keeping_n(size, height, compact, size / 2); + test_size_keeping_n(size, height, compact, size - 2); + test_size_keeping_n(size, height, compact, size - 1); +} + +#[test] +fn height_0_underfull_all() { + test_size_all(3, 0, false) +} + +#[test] +fn height_0_max_some() { + test_size_some(node::CAPACITY, 0, false) +} + +#[cfg(not(miri))] // Miri is too slow +#[test] +fn height_0_max_all() { + test_size_all(node::CAPACITY, 0, false) +} + +#[test] +fn height_1_min_keeping_0() { + test_size_keeping_n(MIN_INSERTS_HEIGHT_1, 1, false, 0) +} + +#[test] +fn height_1_min_keeping_1() { + test_size_keeping_n(MIN_INSERTS_HEIGHT_1, 1, false, 1) +} + +#[test] +fn height_1_min_keeping_2() { + test_size_keeping_n(MIN_INSERTS_HEIGHT_1, 1, false, 2) +} + +#[test] +fn 
height_1_min_keeping_7() { + test_size_keeping_n(MIN_INSERTS_HEIGHT_1, 1, false, 7) +} + +#[cfg(not(miri))] // Miri is too slow +#[test] +fn height_1_min_all() { + test_size_all(MIN_INSERTS_HEIGHT_1, 1, false) +} + +#[cfg(not(miri))] // Miri is too slow +#[test] +fn height_1_more_all() { + for size in MIN_INSERTS_HEIGHT_1 + 1..MIN_INSERTS_HEIGHT_2 { + test_size_all(size, 1, false) + } +} + +#[test] +fn height_2_min_keeping_0() { + test_size_keeping_n(MIN_INSERTS_HEIGHT_2, 2, false, 0) +} + +#[test] +fn height_2_min_keeping_1() { + test_size_keeping_n(MIN_INSERTS_HEIGHT_2, 2, false, 1) +} + +#[test] +fn height_2_min_keeping_12_left() { + test_size_range(MIN_INSERTS_HEIGHT_2, 2, false, 0..77, 12); +} + +#[test] +fn height_2_min_keeping_12_mid() { + test_size_range(MIN_INSERTS_HEIGHT_2, 2, false, 6..83, 12); +} + +#[test] +fn height_2_min_keeping_12_right() { + test_size_range(MIN_INSERTS_HEIGHT_2, 2, false, 12..89, 12) +} + +#[cfg(not(miri))] // Miri is too slow +#[test] +fn height_2_min_all() { + test_size_all(MIN_INSERTS_HEIGHT_2, 2, false) +} + +#[cfg(not(miri))] // Miri is too slow +#[ignore] +#[test] +fn height_2_more_some() { + for size in MIN_INSERTS_HEIGHT_2 + 1..MIN_INSERTS_HEIGHT_3 { + println!("size {}", size); + test_size_some(size, 2, false) + } +} + +// Simplest case of `fix_opposite_borders` encountering unmergeable +// internal children of which one ends up underfull. +#[test] +fn size_180() { + test_size_range(180, 2, false, 36..127, 89) +} + +// Simplest case of `fix_opposite_borders` encountering unmergeable +// internal children of which one is empty (and the other full). 
+#[test] +fn size_181_zero_vs_full() { + test_size_range(181, 2, false, 7..98, 90) +} + +#[cfg(not(miri))] // Miri is too slow +#[test] +fn height_3_min_some() { + test_size_some(MIN_INSERTS_HEIGHT_3, 3, false) +} + +#[cfg(not(miri))] // Miri is too slow +#[ignore] +#[test] +fn height_3_min_all() { + test_size_all(MIN_INSERTS_HEIGHT_3, 3, false) +} + +#[cfg(not(miri))] // Miri is too slow +#[ignore] +#[test] +fn height_4_min_some() { + test_size_some(MIN_INSERTS_HEIGHT_4, 4, false) +} + +#[test] +fn size_143_compact_keeping_1() { + test_size_keeping_n(143, 1, true, 1) +} + +#[cfg(not(miri))] // Miri is too slow +#[test] +fn size_143_compact_all() { + test_size_all(143, 1, true) +} + +#[test] +fn size_144_compact_keeping_1() { + test_size_keeping_n(144, 2, true, 1) +} + +#[cfg(not(miri))] // Miri is too slow +#[test] +fn size_144_compact_all() { + test_size_all(144, 2, true) +} + +#[cfg(not(miri))] // Miri is too slow +#[test] +fn size_1727_compact_some() { + test_size_some(1727, 2, true) +} + +#[cfg(not(miri))] // Miri is too slow +#[test] +fn size_1728_compact_some() { + test_size_some(1728, 3, true) +} + +#[cfg(not(miri))] // Miri is too slow +#[ignore] +#[test] +fn size_20735_compact_some() { + test_size_some(20735, 3, true) +} + +#[cfg(not(miri))] // Miri is too slow +#[ignore] +#[test] +fn size_20736_compact_some() { + test_size_some(20736, 4, true) +} + +#[cfg(not(miri))] // Miri is too slow +#[test] +fn sub_size_143_compact_some() { + for size in node::CAPACITY + 1..143 { + test_size_some(size, 1, true) + } +} + +#[cfg(not(miri))] // Miri is too slow +#[ignore] +#[test] +fn sub_size_1727_compact_some() { + for size in (144 + 1..1727).step_by(10) { + test_size_some(size, 2, true) + } +} + +#[test] +fn random_1() { + let mut rng = DeterministicRng::new(); + for _ in 0..if cfg!(miri) { 1 } else { 140 } { + let size = rng.next() as usize % 1024; + let mut map: BTreeMap<_, ()> = BTreeMap::new(); + for _ in 0..size { + map.insert(rng.next(), ()); + } + 
assert_eq!(map.len(), size); + let (x, y) = (rng.next(), rng.next()); + let bounds = min(x, y)..max(x, y); + let mut drainage = map.drain(bounds.clone()); + let drained = drainage.len(); + assert!(drainage.all(|(k, _)| bounds.contains(&k))); + assert_eq!(drained + map.len(), size); + map.check(); + assert!(!map.into_keys().any(|k| bounds.contains(&k))); + } +} + +#[test] +fn drop_panic_leak() { + let a = CrashTestDummy::new(0); + let b = CrashTestDummy::new(1); + let c = CrashTestDummy::new(2); + let mut map = BTreeMap::new(); + map.insert(a.spawn(Panic::Never), ()); + map.insert(b.spawn(Panic::InDrop), ()); + map.insert(c.spawn(Panic::Never), ()); + + catch_unwind(move || drop(map.drain(..))).unwrap_err(); + + assert_eq!(a.dropped(), 1); + assert_eq!(b.dropped(), 1); + assert_eq!(c.dropped(), 1); +} diff --git a/library/alloc/src/collections/btree/set.rs b/library/alloc/src/collections/btree/set.rs index 1ac3a7a06c29f..3dbcee29a441a 100644 --- a/library/alloc/src/collections/btree/set.rs +++ b/library/alloc/src/collections/btree/set.rs @@ -1007,6 +1007,55 @@ impl BTreeSet { BTreeSet { map: self.map.split_off_range(range) } } + /// Removes at once all elements within the range from the set, returning + /// the removed elements as an iterator in ascending order. If the iterator + /// is dropped before being fully consumed, it drops the remaining removed + /// elements. + /// + /// The returned iterator keeps a mutable borrow on the set to allow + /// optimizing its implementation. + /// + /// # Panics + /// + /// Panics if range `start > end`. + /// Panics if range `start == end` and both bounds are `Excluded`. + /// May panic if the [`Ord`] implementation of type `K` is ill-defined, + /// either because it does not form a total order or because it does not + /// correspond to the [`Ord`] implementation of type `T`.
+ /// + /// # Leaking + /// + /// If the returned iterator goes out of scope without being dropped (due to + /// [`core::mem::forget`], for example), the set may have lost and leaked + /// elements arbitrarily, including elements outside the range. + /// + /// # Examples + /// + /// ``` + /// #![feature(btree_drain)] + /// use std::collections::BTreeSet; + /// + /// let mut a = BTreeSet::new(); + /// a.insert(1); + /// a.insert(2); + /// a.insert(3); + /// a.insert(17); + /// a.insert(41); + /// + /// let b: Vec<_> = a.drain(3..33).collect(); + /// assert_eq!(b, vec![3, 17]); + /// assert_eq!(a.len(), 3); + /// ``` + #[unstable(feature = "btree_drain", issue = "81074")] + pub fn drain(&mut self, range: R) -> Drain<'_, T> + where + K: Ord, + T: Borrow + Ord, + R: RangeBounds, + { + Drain { iter: self.map.drain(range) } + } + /// Creates an iterator that visits all elements in ascending order and + /// uses a closure to determine if an element should be removed. + /// @@ -1190,6 +1239,36 @@ impl<'a, T> IntoIterator for &'a BTreeSet { } } +/// An iterator produced by calling `drain` on BTreeSet. +#[unstable(feature = "btree_drain", issue = "81074")] +#[derive(Debug)] +pub struct Drain<'a, T> { + iter: super::map::Drain<'a, T, ()>, +} + +#[unstable(feature = "btree_drain", issue = "81074")] +impl Iterator for Drain<'_, T> { + type Item = T; + + fn next(&mut self) -> Option { + self.iter.next().map(|(k, _)| k) + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +#[unstable(feature = "btree_drain", issue = "81074")] +impl ExactSizeIterator for Drain<'_, T> { + fn len(&self) -> usize { + self.iter.len() + } +} + +#[unstable(feature = "btree_drain", issue = "81074")] +impl FusedIterator for Drain<'_, T> {} + /// An iterator produced by calling `drain_filter` on BTreeSet.
#[unstable(feature = "btree_drain_filter", issue = "70530")] pub struct DrainFilter<'a, T, F> diff --git a/library/alloc/src/collections/btree/set/tests.rs b/library/alloc/src/collections/btree/set/tests.rs index 9c29b2982e653..49dd27cf0e95d 100644 --- a/library/alloc/src/collections/btree/set/tests.rs +++ b/library/alloc/src/collections/btree/set/tests.rs @@ -329,6 +329,34 @@ fn test_retain() { assert!(set.contains(&6)); } +#[test] +fn test_drain() { + let mut set: BTreeSet<_> = (1..=3).collect(); + let mut drained = set.drain(2..=2); + assert_eq!(drained.next(), Some(2)); + assert_eq!(drained.next(), None); + assert_eq!(set.len(), 2); + assert_eq!(set.first(), Some(&1)); + assert_eq!(set.last(), Some(&3)); +} + +#[test] +fn test_drain_drop_panic_leak() { + let a = CrashTestDummy::new(0); + let b = CrashTestDummy::new(1); + let c = CrashTestDummy::new(2); + let mut set = BTreeSet::new(); + set.insert(a.spawn(Panic::Never)); + set.insert(b.spawn(Panic::InDrop)); + set.insert(c.spawn(Panic::Never)); + + catch_unwind(move || drop(set.drain(..))).ok(); + + assert_eq!(a.dropped(), 1); + assert_eq!(b.dropped(), 1); + assert_eq!(c.dropped(), 1); +} + #[test] fn test_drain_filter() { let mut x = BTreeSet::from([1]); diff --git a/src/test/ui/rust-2018/issue-52202-use-suggestions.stderr b/src/test/ui/rust-2018/issue-52202-use-suggestions.stderr index 38cd9713d1a13..c2fe0244a0e01 100644 --- a/src/test/ui/rust-2018/issue-52202-use-suggestions.stderr +++ b/src/test/ui/rust-2018/issue-52202-use-suggestions.stderr @@ -10,11 +10,11 @@ LL | use crate::plumbing::Drain; | LL | use std::collections::binary_heap::Drain; | -LL | use std::collections::hash_map::Drain; +LL | use std::collections::btree_map::Drain; | -LL | use std::collections::hash_set::Drain; +LL | use std::collections::btree_set::Drain; | - and 3 other candidates + and 5 other candidates error: aborting due to previous error