Auto merge of rust-lang#118273 - AngelicosPhosphoros:dedup_2_loops_version_77772_2, r=<try>

Split `Vec::dedup_by` into 2 loops

The first loop runs until it finds two equal adjacent elements; the second loop runs only if the first one found any. This avoids all memory writes until we find an item that we actually want to remove.

This leads to significant performance gains when all `Vec` items are kept: about -40% on my benchmark with unique integers.
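
For illustration, here is a minimal safe-Rust sketch of the two-loop idea; the function name `dedup_two_pass` and the use of `swap` + `truncate` in place of raw pointer copies are simplifications invented for this sketch, while the real implementation below works in place with raw pointers and a panic guard:

```rust
fn dedup_two_pass<T: PartialEq>(v: &mut Vec<T>) {
    // Loop 1: scan for the first duplicate; performs no writes at all.
    let mut first_dup = 1;
    while first_dup < v.len() && v[first_dup] != v[first_dup - 1] {
        first_dup += 1;
    }
    if first_dup >= v.len() {
        return; // No duplicates: the vector is left untouched.
    }
    // Loop 2: compact the tail, starting right after the first duplicate.
    let mut write = first_dup;
    for read in (first_dup + 1)..v.len() {
        if v[read] != v[write - 1] {
            v.swap(read, write); // stands in for the non-overlapping copy
            write += 1;
        }
    }
    v.truncate(write); // drops the leftover duplicates
}
```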

Benchmark results before the change (including the new `dedup_none` benchmarks, where nothing needs to be removed):
 *   vec::bench_dedup_all_100                 74.00ns/iter  +/- 13.00ns
 *   vec::bench_dedup_all_1000               572.00ns/iter +/- 272.00ns
 *   vec::bench_dedup_all_100000              64.42µs/iter  +/- 19.47µs
 *   __vec::bench_dedup_none_100                67.00ns/iter  +/- 17.00ns__
 *   __vec::bench_dedup_none_1000              662.00ns/iter  +/- 86.00ns__
 *   __vec::bench_dedup_none_10000               9.16µs/iter   +/- 2.71µs__
 *   __vec::bench_dedup_none_100000             91.25µs/iter   +/- 1.82µs__
 *   vec::bench_dedup_random_100             105.00ns/iter  +/- 11.00ns
 *   vec::bench_dedup_random_1000            781.00ns/iter  +/- 10.00ns
 *   vec::bench_dedup_random_10000             9.00µs/iter   +/- 5.62µs
 *   vec::bench_dedup_random_100000          449.81µs/iter  +/- 74.99µs
 *   vec::bench_dedup_slice_truncate_100     105.00ns/iter  +/- 16.00ns
 *   vec::bench_dedup_slice_truncate_1000      2.65µs/iter +/- 481.00ns
 *   vec::bench_dedup_slice_truncate_10000    18.33µs/iter   +/- 5.23µs
 *   vec::bench_dedup_slice_truncate_100000  501.12µs/iter  +/- 46.97µs

Results after the change:
 *   vec::bench_dedup_all_100                 75.00ns/iter   +/- 9.00ns
 *   vec::bench_dedup_all_1000               494.00ns/iter +/- 117.00ns
 *   vec::bench_dedup_all_100000              58.13µs/iter   +/- 8.78µs
 *   __vec::bench_dedup_none_100                52.00ns/iter  +/- 22.00ns__
 *   __vec::bench_dedup_none_1000              417.00ns/iter +/- 116.00ns__
 *   __vec::bench_dedup_none_10000               4.11µs/iter +/- 546.00ns__
 *   __vec::bench_dedup_none_100000             40.47µs/iter   +/- 5.36µs__
 *   vec::bench_dedup_random_100              77.00ns/iter  +/- 15.00ns
 *   vec::bench_dedup_random_1000            681.00ns/iter  +/- 86.00ns
 *   vec::bench_dedup_random_10000            11.66µs/iter   +/- 2.22µs
 *   vec::bench_dedup_random_100000          469.35µs/iter  +/- 20.53µs
 *   vec::bench_dedup_slice_truncate_100     100.00ns/iter   +/- 5.00ns
 *   vec::bench_dedup_slice_truncate_1000      2.55µs/iter +/- 224.00ns
 *   vec::bench_dedup_slice_truncate_10000    18.95µs/iter   +/- 2.59µs
 *   vec::bench_dedup_slice_truncate_100000  492.85µs/iter  +/- 72.84µs
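
The gains are concentrated in the highlighted `bench_dedup_none_*` rows, which run roughly twice as fast at the larger sizes; the remaining benchmarks are mostly unchanged within their error margins.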

Resolves rust-lang#77772

P.S. Note that this is the same PR as rust-lang#92104; I missed the review back then and forgot about it.
Also, I cannot reopen that pull request, so I am creating a new one.
I have responded to the remaining questions by adding comments directly to my code.
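
For context, here is the `Vec::dedup_by` behavior that the new code comments refer to when they say "references are reversed": the closure receives the later element first and the last retained element second, and the first argument is the one removed on a match (this mirrors the example in the standard library docs):

```rust
fn main() {
    let mut v = vec!["foo", "FOO", "bar", "baz", "BAZ"];
    // `a` is the later element; it is removed when the closure returns true.
    v.dedup_by(|a, b| a.eq_ignore_ascii_case(b));
    assert_eq!(v, ["foo", "bar", "baz"]);
}
```
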
bors committed Nov 25, 2023
2 parents 3166210 + f0c9274 commit d340b8b
Showing 2 changed files with 149 additions and 30 deletions.
123 changes: 105 additions & 18 deletions library/alloc/benches/vec.rs
@@ -658,73 +658,160 @@ fn random_sorted_fill(mut seed: u32, buf: &mut [u32]) {
buf.sort();
}

fn bench_vec_dedup_old(b: &mut Bencher, sz: usize) {
// Measures performance of the slice dedup impl,
// the "old" implementation of Vec::dedup.
fn bench_dedup_slice_truncate(b: &mut Bencher, sz: usize) {
let mut template = vec![0u32; sz];
b.bytes = std::mem::size_of_val(template.as_slice()) as u64;
random_sorted_fill(0x43, &mut template);

let mut vec = template.clone();
b.iter(|| {
let vec = black_box(&mut vec);
let len = {
let (dedup, _) = vec.partition_dedup();
dedup.len()
};
vec.truncate(len);

black_box(vec.first());
let vec = black_box(vec);
vec.clear();
vec.extend_from_slice(&template);
});
}

fn bench_vec_dedup_new(b: &mut Bencher, sz: usize) {
// Measures performance of Vec::dedup on random data.
fn bench_vec_dedup_random(b: &mut Bencher, sz: usize) {
let mut template = vec![0u32; sz];
b.bytes = std::mem::size_of_val(template.as_slice()) as u64;
random_sorted_fill(0x43, &mut template);

let mut vec = template.clone();
b.iter(|| {
let vec = black_box(&mut vec);
vec.dedup();
black_box(vec.first());
let vec = black_box(vec);
vec.clear();
vec.extend_from_slice(&template);
});
}

// Measures performance of Vec::dedup when no items are removed
fn bench_vec_dedup_none(b: &mut Bencher, sz: usize) {
let mut template = vec![0u32; sz];
b.bytes = std::mem::size_of_val(template.as_slice()) as u64;
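// Fill with an alternating 0, 5, 0, 5, ... pattern so that no two
// consecutive elements are equal and `dedup` removes nothing.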
template.chunks_exact_mut(2).for_each(|w| {
w[0] = black_box(0);
w[1] = black_box(5);
});

let mut vec = template.clone();
b.iter(|| {
let vec = black_box(&mut vec);
vec.dedup();
black_box(vec.first());
// Unlike the other `dedup` benches,
// this one doesn't reinitialize the vec,
// because we measure how efficient dedup is
// when no memory is written.
});
}

// Measures performance of Vec::dedup when all items are removed
fn bench_vec_dedup_all(b: &mut Bencher, sz: usize) {
let mut template = vec![0u32; sz];
b.bytes = std::mem::size_of_val(template.as_slice()) as u64;
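// Fill with zeros only, so every element duplicates the previous one
// and `dedup` removes all but the first.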
template.iter_mut().for_each(|w| {
*w = black_box(0);
});

let mut vec = template.clone();
b.iter(|| {
let vec = black_box(&mut vec);
vec.dedup();
black_box(vec.first());
let vec = black_box(vec);
vec.clear();
vec.extend_from_slice(&template);
});
}

#[bench]
fn bench_dedup_old_100(b: &mut Bencher) {
bench_vec_dedup_old(b, 100);
fn bench_dedup_slice_truncate_100(b: &mut Bencher) {
bench_dedup_slice_truncate(b, 100);
}
#[bench]
fn bench_dedup_new_100(b: &mut Bencher) {
bench_vec_dedup_new(b, 100);
fn bench_dedup_random_100(b: &mut Bencher) {
bench_vec_dedup_random(b, 100);
}

#[bench]
fn bench_dedup_old_1000(b: &mut Bencher) {
bench_vec_dedup_old(b, 1000);
fn bench_dedup_none_100(b: &mut Bencher) {
bench_vec_dedup_none(b, 100);
}

#[bench]
fn bench_dedup_all_100(b: &mut Bencher) {
bench_vec_dedup_all(b, 100);
}

#[bench]
fn bench_dedup_slice_truncate_1000(b: &mut Bencher) {
bench_dedup_slice_truncate(b, 1000);
}
#[bench]
fn bench_dedup_random_1000(b: &mut Bencher) {
bench_vec_dedup_random(b, 1000);
}

#[bench]
fn bench_dedup_none_1000(b: &mut Bencher) {
bench_vec_dedup_none(b, 1000);
}

#[bench]
fn bench_dedup_new_1000(b: &mut Bencher) {
bench_vec_dedup_new(b, 1000);
fn bench_dedup_all_1000(b: &mut Bencher) {
bench_vec_dedup_all(b, 1000);
}

#[bench]
fn bench_dedup_old_10000(b: &mut Bencher) {
bench_vec_dedup_old(b, 10000);
fn bench_dedup_slice_truncate_10000(b: &mut Bencher) {
bench_dedup_slice_truncate(b, 10000);
}
#[bench]
fn bench_dedup_new_10000(b: &mut Bencher) {
bench_vec_dedup_new(b, 10000);
fn bench_dedup_random_10000(b: &mut Bencher) {
bench_vec_dedup_random(b, 10000);
}

#[bench]
fn bench_dedup_old_100000(b: &mut Bencher) {
bench_vec_dedup_old(b, 100000);
fn bench_dedup_none_10000(b: &mut Bencher) {
bench_vec_dedup_none(b, 10000);
}

#[bench]
fn bench_dedup_all_10000(b: &mut Bencher) {
bench_vec_dedup_all(b, 10000);
}

#[bench]
fn bench_dedup_slice_truncate_100000(b: &mut Bencher) {
bench_dedup_slice_truncate(b, 100000);
}
#[bench]
fn bench_dedup_random_100000(b: &mut Bencher) {
bench_vec_dedup_random(b, 100000);
}

#[bench]
fn bench_dedup_none_100000(b: &mut Bencher) {
bench_vec_dedup_none(b, 100000);
}

#[bench]
fn bench_dedup_new_100000(b: &mut Bencher) {
bench_vec_dedup_new(b, 100000);
fn bench_dedup_all_100000(b: &mut Bencher) {
bench_vec_dedup_all(b, 100000);
}

#[bench]
56 changes: 44 additions & 12 deletions library/alloc/src/vec/mod.rs
@@ -1775,7 +1775,31 @@ impl<T, A: Allocator> Vec<T, A> {
return;
}

/* INVARIANT: vec.len() > read >= write > write-1 >= 0 */
// Check if we ever want to remove anything.
// This allows using `copy_nonoverlapping` in the next loop
// and avoids any memory writes if we don't need to remove anything.
let mut possible_remove_idx = 1;
let start = self.as_mut_ptr();
while possible_remove_idx < len {
let found_duplicate = unsafe {
// SAFETY: `possible_remove_idx` is always in the range [1..len).
// Note that we start iterating from 1, so the subtraction never overflows.
let prev = start.add(possible_remove_idx.wrapping_sub(1));
let current = start.add(possible_remove_idx);
// We explicitly say in docs that references are reversed.
same_bucket(&mut *current, &mut *prev)
};
if found_duplicate {
break;
}
possible_remove_idx += 1;
}
// No duplicates found: nothing needs to be removed.
if possible_remove_idx >= len {
return;
}

/* INVARIANT: vec.len() > read > write > write-1 >= 0 */
struct FillGapOnDrop<'a, T, A: core::alloc::Allocator> {
/* Offset of the element we want to check if it is duplicate */
read: usize,
@@ -1821,31 +1845,39 @@ impl<T, A: Allocator> Vec<T, A> {
}
}

let mut gap = FillGapOnDrop { read: 1, write: 1, vec: self };
let ptr = gap.vec.as_mut_ptr();

/* Drop items while going through the Vec; it should be more efficient than
* doing slice partition_dedup + truncate */

// Construct gap first and then drop item to avoid memory corruption if `T::drop` panics.
let mut gap =
FillGapOnDrop { read: possible_remove_idx + 1, write: possible_remove_idx, vec: self };
unsafe {
// SAFETY: we checked above that possible_remove_idx < len.
// If the drop panics, `gap` will remove this item without dropping it again.
ptr::drop_in_place(start.add(possible_remove_idx));
}

/* SAFETY: Because of the invariant, read_ptr, prev_ptr and write_ptr
* are always in-bounds and read_ptr never aliases prev_ptr */
unsafe {
while gap.read < len {
let read_ptr = ptr.add(gap.read);
let prev_ptr = ptr.add(gap.write.wrapping_sub(1));
let read_ptr = start.add(gap.read);
let prev_ptr = start.add(gap.write.wrapping_sub(1));

if same_bucket(&mut *read_ptr, &mut *prev_ptr) {
// We explicitly say in docs that references are reversed.
let found_duplicate = same_bucket(&mut *read_ptr, &mut *prev_ptr);
if found_duplicate {
// Increase `gap.read` now since the drop may panic.
gap.read += 1;
/* We have found duplicate, drop it in-place */
ptr::drop_in_place(read_ptr);
} else {
let write_ptr = ptr.add(gap.write);
let write_ptr = start.add(gap.write);

/* Because `read_ptr` can be equal to `write_ptr`, we either
* have to use `copy` or conditional `copy_nonoverlapping`.
* Looks like the first option is faster. */
ptr::copy(read_ptr, write_ptr, 1);
/* read_ptr cannot be equal to write_ptr, because at this point
* we are guaranteed to have skipped at least one element
* (before the loop starts). */
ptr::copy_nonoverlapping(read_ptr, write_ptr, 1);

/* We have filled that place, so go further */
gap.write += 1;
