diff --git a/llvm/include/llvm/ADT/DenseMap.h b/llvm/include/llvm/ADT/DenseMap.h
index d5b13e7731550..9d61a91631fab 100644
--- a/llvm/include/llvm/ADT/DenseMap.h
+++ b/llvm/include/llvm/ADT/DenseMap.h
@@ -413,9 +413,7 @@ class DenseMapBase : public DebugEpochBase {
     return NextPowerOf2(NumEntries * 4 / 3 + 1);
   }
 
-  void moveFromOldBuckets(iterator_range<BucketT *> OldBuckets) {
-    initEmpty();
-
+  void moveFromImpl(iterator_range<BucketT *> OldBuckets) {
     // Insert all the old elements.
     const KeyT EmptyKey = KeyInfoT::getEmptyKey();
     const KeyT TombstoneKey = KeyInfoT::getTombstoneKey();
@@ -438,6 +436,14 @@ class DenseMapBase : public DebugEpochBase {
     }
   }
 
+  void moveFromOldBuckets(iterator_range<BucketT *> OldBuckets) {
+    initEmpty();
+    moveFromImpl(OldBuckets);
+  }
+
+  // Move key/value from Other to *this. Other will be in a zombie state.
+  void moveFrom(DerivedT &Other) { moveFromImpl(Other.buckets()); }
+
   void copyFrom(const DerivedT &other) {
     this->destroyAll();
     derived().deallocateBuckets();
@@ -889,6 +895,12 @@ class SmallDenseMap
   /// a large bucket. This union will be discriminated by the 'Small' bit.
   AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
 
+  struct ExactBucketCount {};
+  SmallDenseMap(unsigned NumBuckets, ExactBucketCount) {
+    allocateBuckets(NumBuckets);
+    this->BaseT::initEmpty();
+  }
+
 public:
   explicit SmallDenseMap(unsigned NumElementsToReserve = 0) {
     init(NumElementsToReserve);
@@ -1065,7 +1077,10 @@ class SmallDenseMap
   }
 
   void deallocateBuckets() {
-    if (Small)
+    // Fast path to deallocateBuckets in case getLargeRep()->NumBuckets == 0,
+    // just like destroyAll. This path is used to destruct zombie instances
+    // after moves.
+    if (Small || getLargeRep()->NumBuckets == 0)
       return;
 
     deallocate_buffer(getLargeRep()->Buckets,
@@ -1096,46 +1111,22 @@ class SmallDenseMap
     if (AtLeast > InlineBuckets)
      AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast - 1));
 
-    if (Small) {
-      // First move the inline buckets into a temporary storage.
-      AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
-      BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
-      BucketT *TmpEnd = TmpBegin;
+    SmallDenseMap Tmp(AtLeast, ExactBucketCount{});
+    Tmp.moveFrom(*this);
 
-      // Loop over the buckets, moving non-empty, non-tombstones into the
-      // temporary storage. Have the loop move the TmpEnd forward as it goes.
-      const KeyT EmptyKey = KeyInfoT::getEmptyKey();
-      const KeyT TombstoneKey = KeyInfoT::getTombstoneKey();
-      for (BucketT &B : inlineBuckets()) {
-        if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey) &&
-            !KeyInfoT::isEqual(B.getFirst(), TombstoneKey)) {
-          assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
-                 "Too many inline buckets!");
-          ::new (&TmpEnd->getFirst()) KeyT(std::move(B.getFirst()));
-          ::new (&TmpEnd->getSecond()) ValueT(std::move(B.getSecond()));
-          ++TmpEnd;
-          B.getSecond().~ValueT();
-        }
-        B.getFirst().~KeyT();
-      }
-
-      // AtLeast == InlineBuckets can happen if there are many tombstones,
-      // and grow() is used to remove them. Usually we always switch to the
-      // large rep here.
-      allocateBuckets(AtLeast);
-      this->moveFromOldBuckets(llvm::make_range(TmpBegin, TmpEnd));
-      return;
+    if (Tmp.Small) {
+      // Use moveFrom in those rare cases where we stay in the small mode. This
+      // can happen when we have many tombstones.
+      this->BaseT::initEmpty();
+      this->moveFrom(Tmp);
+      Tmp.Small = false;
+      Tmp.getLargeRep()->NumBuckets = 0;
+    } else {
+      Small = false;
+      NumTombstones = 0;
+      *getLargeRep() = std::move(*Tmp.getLargeRep());
+      Tmp.getLargeRep()->NumBuckets = 0;
     }
-
-    LargeRep OldRep = std::move(*getLargeRep());
-    getLargeRep()->~LargeRep();
-    allocateBuckets(AtLeast);
-
-    this->moveFromOldBuckets(OldRep.buckets());
-
-    // Free the old table.
-    deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
-                      alignof(BucketT));
   }
 
   // Plan how to shrink the bucket table. Return:
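
Note on the ExactBucketCount constructor (not part of the patch): below is a
minimal, standalone sketch of the tag-dispatch idiom it relies on, using a
hypothetical Table class rather than the real SmallDenseMap. The empty tag
struct exists only to select an overload: the public constructor interprets
its argument as an element count to reserve (rounded up via the
NumEntries * 4 / 3 + 1 policy seen above), while the private tagged
constructor takes an exact, already-computed bucket count, as grow() does
with SmallDenseMap Tmp(AtLeast, ExactBucketCount{}).

    #include <cstdio>

    class Table {
      unsigned NumBuckets = 0;

      struct ExactBucketCount {}; // Tag type; carries no data, only picks the overload.
      Table(unsigned Buckets, ExactBucketCount) : NumBuckets(Buckets) {}

    public:
      // Public constructor: the argument is an *element* count, rounded up to
      // a power-of-two bucket count (simplified from the real growth policy).
      explicit Table(unsigned NumElementsToReserve = 0) {
        NumBuckets = 1;
        while (NumBuckets < NumElementsToReserve * 4 / 3 + 1)
          NumBuckets *= 2;
      }

      // grow() can build a temporary with an exact bucket count it has
      // already computed, mirroring Tmp(AtLeast, ExactBucketCount{}) above.
      Table grown(unsigned AtLeast) const {
        return Table(AtLeast, ExactBucketCount{});
      }

      unsigned buckets() const { return NumBuckets; }
    };

    int main() {
      Table T(10);           // reserving 10 elements yields 16 buckets
      Table G = T.grown(64); // exactly 64 buckets, no rounding
      std::printf("%u %u\n", T.buckets(), G.buckets());
    }

Because the tagged constructor is private, only SmallDenseMap itself (here,
grow()) can request an exact bucket count; public callers keep the reserving
semantics of the existing unsigned constructor.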
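The deallocateBuckets() change supports the same scheme: once grow() has moved
the buckets out of a map (in either direction), the donor is left in the
"zombie state" the new moveFrom comment describes, forced into the large
representation with NumBuckets == 0 so that teardown is a cheap early return.
A sketch of that invariant, again with a hypothetical MiniMap rather than the
real class:

    #include <cstdlib>
    #include <utility>

    struct MiniMap {
      int *Buckets = nullptr;
      unsigned NumBuckets = 0;

      explicit MiniMap(unsigned N)
          : Buckets(static_cast<int *>(std::calloc(N, sizeof(int)))),
            NumBuckets(N) {}

      // Steal Other's storage. Afterwards Other is a "zombie": the only
      // valid operation left on it is destruction.
      void moveFrom(MiniMap &Other) {
        std::free(Buckets);
        Buckets = std::exchange(Other.Buckets, nullptr);
        NumBuckets = std::exchange(Other.NumBuckets, 0);
      }

      // NumBuckets == 0 doubles as the "nothing to free" marker, so
      // destroying a zombie is an early return, like the patched
      // deallocateBuckets() fast path.
      ~MiniMap() {
        if (NumBuckets == 0)
          return;
        std::free(Buckets);
      }
    };

    int main() {
      MiniMap Big(64), Old(16);
      Big.moveFrom(Old); // Old is now a zombie; its destructor is a no-op
    }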