Skip to content

Commit

Permalink
PR abseil#1618: inlined_vector: Use trivial relocation for `SwapInlinedElements`
Browse files Browse the repository at this point in the history

Imported from GitHub PR abseil#1618

I noticed while working on abseil#1615 that `inlined_vector` could use the trivial relocatability trait here, too.
Here the memcpy codepath already exists; we just have to opt in to using it.
Merge 567a1dd into a7012a5

Merging this change closes abseil#1618

COPYBARA_INTEGRATE_REVIEW=abseil#1618 from Quuxplusone:trivial-swap 567a1dd
PiperOrigin-RevId: 609019296
Change-Id: I4055ab790245752179e405b490fcd479e7389726
  • Loading branch information
Quuxplusone authored and netkex committed Apr 3, 2024
1 parent a40f45f commit 7b0d408
Show file tree
Hide file tree
Showing 2 changed files with 47 additions and 17 deletions.
33 changes: 32 additions & 1 deletion absl/container/inlined_vector_test.cc
Expand Up @@ -304,6 +304,35 @@ TEST(UniquePtr, MoveAssign) {
}
}

// Verifies that swapping two InlinedVectors of unique pointers neither leaks
// nor double-frees, even though unique_ptr is trivially relocatable while NOT
// being trivially destructible (so the memcpy swap path must stay safe).
// TODO(absl-team): Using unique_ptr here is technically correct, but
// a trivially relocatable struct would be less semantically confusing.
TEST(UniquePtr, Swap) {
  for (size_t lhs_size = 0; lhs_size < 5; ++lhs_size) {
    for (size_t rhs_size = 0; rhs_size < 5; ++rhs_size) {
      absl::InlinedVector<std::unique_ptr<size_t>, 2> lhs;
      absl::InlinedVector<std::unique_ptr<size_t>, 2> rhs;
      // Distinct value ranges (10+ vs 20+) let us tell whose elements
      // ended up where after the swap.
      for (size_t j = 0; j < lhs_size; ++j) {
        lhs.push_back(std::make_unique<size_t>(j + 10));
      }
      for (size_t j = 0; j < rhs_size; ++j) {
        rhs.push_back(std::make_unique<size_t>(j + 20));
      }
      lhs.swap(rhs);
      // Sizes must have traded places...
      ASSERT_THAT(lhs, SizeIs(rhs_size));
      ASSERT_THAT(rhs, SizeIs(lhs_size));
      // ...and every pointee must have moved intact, in order.
      for (size_t j = 0; j < lhs.size(); ++j) {
        ASSERT_THAT(lhs[j], Pointee(j + 20));
      }
      for (size_t j = 0; j < rhs.size(); ++j) {
        ASSERT_THAT(rhs[j], Pointee(j + 10));
      }
    }
  }
}

// At the end of this test loop, the elements between [erase_begin, erase_end)
// should have reference counts == 0, and all others elements should have
// reference counts == 1.
Expand Down Expand Up @@ -783,7 +812,9 @@ TEST(OverheadTest, Storage) {
// The union should be absorbing some of the allocation bookkeeping overhead
// in the larger vectors, leaving only the size_ field as overhead.

struct T { void* val; };
struct T {
void* val;
};
size_t expected_overhead = sizeof(T);

EXPECT_EQ((2 * expected_overhead),
Expand Down
31 changes: 15 additions & 16 deletions absl/container/internal/inlined_vector.h
Expand Up @@ -322,14 +322,13 @@ class Storage {

// The policy to be used specifically when swapping inlined elements.
using SwapInlinedElementsPolicy = absl::conditional_t<
// Fast path: if the value type can be trivially move constructed/assigned
// and destroyed, and we know the allocator doesn't do anything fancy,
// then it's safe for us to simply swap the bytes in the inline storage.
// It's as if we had move-constructed a temporary vector, move-assigned
// one to the other, then move-assigned the first from the temporary.
absl::conjunction<absl::is_trivially_move_constructible<ValueType<A>>,
absl::is_trivially_move_assignable<ValueType<A>>,
absl::is_trivially_destructible<ValueType<A>>,
// Fast path: if the value type can be trivially relocated, and we
// know the allocator doesn't do anything fancy, then it's safe for us
// to simply swap the bytes in the inline storage. It's as if we had
// relocated the first vector's elements into temporary storage,
// relocated the second's elements into the (now-empty) first's,
// and then relocated from temporary storage into the second.
absl::conjunction<absl::is_trivially_relocatable<ValueType<A>>,
std::is_same<A, std::allocator<ValueType<A>>>>::value,
MemcpyPolicy,
absl::conditional_t<IsSwapOk<A>::value, ElementwiseSwapPolicy,
Expand Down Expand Up @@ -624,8 +623,8 @@ void Storage<T, N, A>::InitFrom(const Storage& other) {

template <typename T, size_t N, typename A>
template <typename ValueAdapter>
auto Storage<T, N, A>::Initialize(ValueAdapter values, SizeType<A> new_size)
-> void {
auto Storage<T, N, A>::Initialize(ValueAdapter values,
SizeType<A> new_size) -> void {
// Only callable from constructors!
ABSL_HARDENING_ASSERT(!GetIsAllocated());
ABSL_HARDENING_ASSERT(GetSize() == 0);
Expand Down Expand Up @@ -656,8 +655,8 @@ auto Storage<T, N, A>::Initialize(ValueAdapter values, SizeType<A> new_size)

template <typename T, size_t N, typename A>
template <typename ValueAdapter>
auto Storage<T, N, A>::Assign(ValueAdapter values, SizeType<A> new_size)
-> void {
auto Storage<T, N, A>::Assign(ValueAdapter values,
SizeType<A> new_size) -> void {
StorageView<A> storage_view = MakeStorageView();

AllocationTransaction<A> allocation_tx(GetAllocator());
Expand Down Expand Up @@ -699,8 +698,8 @@ auto Storage<T, N, A>::Assign(ValueAdapter values, SizeType<A> new_size)

template <typename T, size_t N, typename A>
template <typename ValueAdapter>
auto Storage<T, N, A>::Resize(ValueAdapter values, SizeType<A> new_size)
-> void {
auto Storage<T, N, A>::Resize(ValueAdapter values,
SizeType<A> new_size) -> void {
StorageView<A> storage_view = MakeStorageView();
Pointer<A> const base = storage_view.data;
const SizeType<A> size = storage_view.size;
Expand Down Expand Up @@ -885,8 +884,8 @@ auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> Reference<A> {
}

template <typename T, size_t N, typename A>
auto Storage<T, N, A>::Erase(ConstIterator<A> from, ConstIterator<A> to)
-> Iterator<A> {
auto Storage<T, N, A>::Erase(ConstIterator<A> from,
ConstIterator<A> to) -> Iterator<A> {
StorageView<A> storage_view = MakeStorageView();

auto erase_size = static_cast<SizeType<A>>(std::distance(from, to));
Expand Down

0 comments on commit 7b0d408

Please sign in to comment.