Merged: llvm/include/llvm/ADT/DenseMap.h (33 additions, 42 deletions)
@@ -413,9 +413,7 @@ class DenseMapBase : public DebugEpochBase {
     return NextPowerOf2(NumEntries * 4 / 3 + 1);
   }
 
-  void moveFromOldBuckets(iterator_range<BucketT *> OldBuckets) {
-    initEmpty();
-
+  void moveFromImpl(iterator_range<BucketT *> OldBuckets) {
     // Insert all the old elements.
     const KeyT EmptyKey = KeyInfoT::getEmptyKey();
     const KeyT TombstoneKey = KeyInfoT::getTombstoneKey();
@@ -438,6 +436,14 @@ class DenseMapBase : public DebugEpochBase {
     }
   }
 
+  void moveFromOldBuckets(iterator_range<BucketT *> OldBuckets) {
+    initEmpty();
+    moveFromImpl(OldBuckets);
+  }
+
+  // Move key/value from Other to *this. Other will be in a zombie state.
+  void moveFrom(DerivedT &Other) { moveFromImpl(Other.buckets()); }
+
   void copyFrom(const DerivedT &other) {
     this->destroyAll();
     derived().deallocateBuckets();
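Note: the new moveFrom leaves its argument in a "zombie" state, meaning every element has been moved out and destroyed in the source, so the only cleanup the source still owes is releasing its raw bucket storage, without running element destructors again. Below is a minimal standalone sketch of that contract; TinyTable, allocate, and deallocate are illustrative names, not LLVM APIs.

```cpp
#include <memory>
#include <new>
#include <string>
#include <utility>

struct TinyTable {
  std::string *Slots = nullptr;
  unsigned Num = 0;

  void allocate(unsigned N) {
    Slots = static_cast<std::string *>(::operator new(N * sizeof(std::string)));
    Num = N;
  }
  // Move every element out of Other, destroying it in place. Afterwards
  // Other is a "zombie": its buffer is still allocated but holds no live
  // elements, so its teardown must be deallocation only.
  void moveFrom(TinyTable &Other) {
    for (unsigned I = 0; I != Other.Num; ++I) {
      ::new (&Slots[I]) std::string(std::move(Other.Slots[I]));
      std::destroy_at(&Other.Slots[I]);
    }
  }
  void deallocate() {
    ::operator delete(Slots);
    Slots = nullptr;
    Num = 0;
  }
};

int main() {
  TinyTable A, B;
  A.allocate(2);
  ::new (&A.Slots[0]) std::string("key");
  ::new (&A.Slots[1]) std::string("value");
  B.allocate(2);
  B.moveFrom(A);
  A.deallocate(); // zombie teardown: free memory, run no destructors
  std::destroy_at(&B.Slots[0]);
  std::destroy_at(&B.Slots[1]);
  B.deallocate();
}
```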
@@ -889,6 +895,12 @@ class SmallDenseMap
   /// a large bucket. This union will be discriminated by the 'Small' bit.
   AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
 
+  struct ExactBucketCount {};
+  SmallDenseMap(unsigned NumBuckets, ExactBucketCount) {
+    allocateBuckets(NumBuckets);
+    this->BaseT::initEmpty();
+  }
+
 public:
   explicit SmallDenseMap(unsigned NumElementsToReserve = 0) {
     init(NumElementsToReserve);
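Note: the private ExactBucketCount tag lets grow() build a map whose constructor argument is taken literally as a bucket count, without colliding with the public constructor, which interprets its argument as an element count to reserve. A minimal sketch of the tag-dispatch idiom follows; Table, withExactBuckets, and the doubling policy are assumptions for illustration, not DenseMap's actual sizing policy.

```cpp
#include <cstdio>

class Table {
  unsigned NumBuckets;

  struct ExactBucketCount {}; // private tag: outside callers can't select this
  Table(unsigned Buckets, ExactBucketCount) : NumBuckets(Buckets) {}

public:
  explicit Table(unsigned NumElementsToReserve = 0)
      // Hypothetical growth policy standing in for the real one.
      : NumBuckets(NumElementsToReserve * 2) {}

  // Static member, so it may use the private tagged constructor.
  static Table withExactBuckets(unsigned Buckets) {
    return Table(Buckets, ExactBucketCount{});
  }
  unsigned buckets() const { return NumBuckets; }
};

int main() {
  Table A(8);                           // argument is an element count
  Table B = Table::withExactBuckets(8); // argument is the bucket count
  std::printf("%u %u\n", A.buckets(), B.buckets()); // prints: 16 8
}
```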
@@ -1065,7 +1077,10 @@ class SmallDenseMap
   }
 
   void deallocateBuckets() {
-    if (Small)
+    // Fast path to deallocateBuckets in case getLargeRep()->NumBuckets == 0,
+    // just like destroyAll. This path is used to destruct zombie instances
+    // after moves.
+    if (Small || getLargeRep()->NumBuckets == 0)
       return;
 
     deallocate_buffer(getLargeRep()->Buckets,
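Note: the extra NumBuckets == 0 test is what makes zombie teardown safe. After grow() steals a temporary's heap buffer, the zombie stays in large mode but has its bucket count zeroed, and its destructor must not free the stolen buffer. A standalone sketch of that guard; Rep and its members are illustrative names, not the real LargeRep.

```cpp
#include <new>

struct Rep {
  char *Buckets = nullptr;
  unsigned NumBuckets = 0;

  void deallocateBuckets() {
    if (NumBuckets == 0)
      return; // zombie (or never allocated): nothing to free
    ::operator delete(Buckets);
    Buckets = nullptr;
    NumBuckets = 0;
  }
};

int main() {
  Rep A;
  A.Buckets = static_cast<char *>(::operator new(64));
  A.NumBuckets = 64;

  Rep B = A;             // B "steals" the buffer, as in grow()'s else branch
  A.NumBuckets = 0;      // mark A as a zombie
  A.deallocateBuckets(); // no-op: the guard prevents a double free
  B.deallocateBuckets(); // actually frees the buffer
}
```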
@@ -1096,46 +1111,22 @@ class SmallDenseMap
     if (AtLeast > InlineBuckets)
       AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast - 1));
 
-    if (Small) {
-      // First move the inline buckets into a temporary storage.
-      AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
-      BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
-      BucketT *TmpEnd = TmpBegin;
+    SmallDenseMap Tmp(AtLeast, ExactBucketCount{});
+    Tmp.moveFrom(*this);
 
-      // Loop over the buckets, moving non-empty, non-tombstones into the
-      // temporary storage. Have the loop move the TmpEnd forward as it goes.
-      const KeyT EmptyKey = KeyInfoT::getEmptyKey();
-      const KeyT TombstoneKey = KeyInfoT::getTombstoneKey();
-      for (BucketT &B : inlineBuckets()) {
-        if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey) &&
-            !KeyInfoT::isEqual(B.getFirst(), TombstoneKey)) {
-          assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
-                 "Too many inline buckets!");
-          ::new (&TmpEnd->getFirst()) KeyT(std::move(B.getFirst()));
-          ::new (&TmpEnd->getSecond()) ValueT(std::move(B.getSecond()));
-          ++TmpEnd;
-          B.getSecond().~ValueT();
-        }
-        B.getFirst().~KeyT();
-      }
-
-      // AtLeast == InlineBuckets can happen if there are many tombstones,
-      // and grow() is used to remove them. Usually we always switch to the
-      // large rep here.
-      allocateBuckets(AtLeast);
-      this->moveFromOldBuckets(llvm::make_range(TmpBegin, TmpEnd));
-      return;
+    if (Tmp.Small) {
+      // Use moveFrom in those rare cases where we stay in the small mode. This
+      // can happen when we have many tombstones.
+      this->BaseT::initEmpty();
+      this->moveFrom(Tmp);
+      Tmp.Small = false;
+      Tmp.getLargeRep()->NumBuckets = 0;
+    } else {
+      Small = false;
+      NumTombstones = 0;
+      *getLargeRep() = std::move(*Tmp.getLargeRep());
+      Tmp.getLargeRep()->NumBuckets = 0;
     }
-
-    LargeRep OldRep = std::move(*getLargeRep());
-    getLargeRep()->~LargeRep();
-    allocateBuckets(AtLeast);
-
-    this->moveFromOldBuckets(OldRep.buckets());
-
-    // Free the old table.
-    deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
-                      alignof(BucketT));
   }
 
   // Plan how to shrink the bucket table. Return:
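Note: at the call-site level nothing changes; growth past the inline capacity still rehashes into a larger table, it just routes through the ExactBucketCount temporary now. A small usage-level sketch, assuming (as the patch intends) that observable behavior is unchanged:

```cpp
#include "llvm/ADT/DenseMap.h"
#include <cstdio>

int main() {
  // Four inline buckets; inserting more elements than they can hold
  // triggers grow(), which now rehashes into the exact-sized temporary
  // map and steals its representation.
  llvm::SmallDenseMap<int, int, 4> M;
  for (int I = 0; I < 16; ++I)
    M[I] = I * I;
  std::printf("size = %u\n", M.size()); // size = 16
}
```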