[Reland][ADT][ConcurrentHashTable] adapt ConcurrentHashTable and its users to LLVM_ENABLE_THREADS=0 mode.

This patch hides the thread-specific handling behind the LLVM_ENABLE_THREADS
guard. It also removes the uses of thread_local variables, since thread_local
has weak support on some platforms. Instead, the patch guards the allocator
with a single mutex. That may later be replaced with a more efficient
allocator, e.g. D142318.

Differential Revision: https://reviews.llvm.org/D147649
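In essence, each allocator now keeps one shared BumpPtrAllocator and serializes Allocate() with a mutex only when threads are enabled. A condensed sketch of the idiom, mirroring the StringAllocator change below (the class name and includes are illustrative, not part of the commit):

#include <mutex>
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/Allocator.h"

// Illustrative sketch only: with LLVM_ENABLE_THREADS=0 the mutex member and
// the lock compile away, so single-threaded builds need no thread support.
class GuardedAllocator : public llvm::AllocatorBase<GuardedAllocator> {
public:
  void *Allocate(size_t Size, size_t Alignment) {
#if LLVM_ENABLE_THREADS
    std::lock_guard<std::mutex> Guard(AllocatorMutex); // serialize allocation
#endif
    return Allocator.Allocate(Size, llvm::Align(Alignment));
  }
  using llvm::AllocatorBase<GuardedAllocator>::Allocate;

private:
#if LLVM_ENABLE_THREADS
  std::mutex AllocatorMutex;
#endif
  llvm::BumpPtrAllocator Allocator;
};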
avl-llvm committed Apr 12, 2023
1 parent b00fc5a commit 33c4421
Showing 4 changed files with 59 additions and 36 deletions.
8 changes: 8 additions & 0 deletions llvm/include/llvm/ADT/ConcurrentHashtable.h
@@ -175,8 +175,10 @@ class ConcurrentHashTableByPtr {
     Bucket &CurBucket = BucketsArray[getBucketIdx(Hash)];
     uint32_t ExtHashBits = getExtHashBits(Hash);
 
+#if LLVM_ENABLE_THREADS
     // Lock bucket.
     CurBucket.Guard.lock();
+#endif
 
     HashesPtr BucketHashes = CurBucket.Hashes;
     DataPtr BucketEntries = CurBucket.Entries;
@@ -194,7 +196,9 @@ class ConcurrentHashTableByPtr {
       CurBucket.NumberOfEntries++;
       RehashBucket(CurBucket);
 
+#if LLVM_ENABLE_THREADS
       CurBucket.Guard.unlock();
+#endif
 
       return {NewData, true};
     }
@@ -204,7 +208,9 @@ class ConcurrentHashTableByPtr {
       KeyDataTy *EntryData = BucketEntries[CurEntryIdx];
       if (Info::isEqual(Info::getKey(*EntryData), NewValue)) {
         // Already existed entry matched with inserted data is found.
+#if LLVM_ENABLE_THREADS
         CurBucket.Guard.unlock();
+#endif
 
         return {EntryData, false};
       }
@@ -283,8 +289,10 @@ class ConcurrentHashTableByPtr {
     // [Size] entries.
     DataPtr Entries = nullptr;
 
+#if LLVM_ENABLE_THREADS
     // Mutex for this bucket.
     std::mutex Guard;
+#endif
   };
 
   // Reallocate and rehash bucket if this is full enough.
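Note that the bucket lock covers the whole probe-and-insert sequence, and the return statements above indicate that insert() reports its outcome as a {Data, Inserted} pair. A minimal usage sketch, borrowing the String and SimpleAllocator types from the unit tests further down (the call site itself is illustrative):

// Illustrative only; types and the Allocator object come from the tests below.
ConcurrentHashTableByPtr<
    std::string, String, SimpleAllocator,
    ConcurrentHashTableInfoByPtr<std::string, String, SimpleAllocator>>
    HashTable(Allocator, 10);

std::pair<String *, bool> Res = HashTable.insert("foo");
// Res.second == true  -> a new entry was created for "foo".
// A second insert("foo") returns {existing entry, false}.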
39 changes: 22 additions & 17 deletions llvm/include/llvm/DWARFLinkerParallel/StringPool.h
@@ -22,19 +22,26 @@ namespace dwarflinker_parallel {
 /// and a string body which is placed right after StringEntry.
 using StringEntry = StringMapEntry<DwarfStringPoolEntry *>;
 
-class PerThreadStringAllocator
-    : public AllocatorBase<PerThreadStringAllocator> {
+class StringAllocator : public AllocatorBase<StringAllocator> {
 public:
   inline LLVM_ATTRIBUTE_RETURNS_NONNULL void *Allocate(size_t Size,
                                                        size_t Alignment) {
-    return ThreadLocalAllocator.Allocate(Size, Align(Alignment));
+#if LLVM_ENABLE_THREADS
+    std::lock_guard<std::mutex> Guard(AllocatorMutex);
+#endif
+
+    return Allocator.Allocate(Size, Align(Alignment));
   }
 
   // Pull in base class overloads.
-  using AllocatorBase<PerThreadStringAllocator>::Allocate;
+  using AllocatorBase<StringAllocator>::Allocate;
 
 private:
-  static thread_local BumpPtrAllocator ThreadLocalAllocator;
+#if LLVM_ENABLE_THREADS
+  std::mutex AllocatorMutex;
+#endif
+
+  BumpPtrAllocator Allocator;
 };
 
 class StringPoolEntryInfo {
@@ -56,29 +63,27 @@ class StringPoolEntryInfo {
 
   /// \returns newly created object of KeyDataTy type.
   static inline StringEntry *create(const StringRef &Key,
-                                    PerThreadStringAllocator &Allocator) {
+                                    StringAllocator &Allocator) {
     return StringEntry::create(Key, Allocator);
   }
 };
 
-class StringPool : public ConcurrentHashTableByPtr<StringRef, StringEntry,
-                                                   PerThreadStringAllocator,
-                                                   StringPoolEntryInfo> {
+class StringPool
+    : public ConcurrentHashTableByPtr<StringRef, StringEntry, StringAllocator,
+                                      StringPoolEntryInfo> {
 public:
   StringPool()
-      : ConcurrentHashTableByPtr<StringRef, StringEntry,
-                                 PerThreadStringAllocator, StringPoolEntryInfo>(
-            Allocator) {}
+      : ConcurrentHashTableByPtr<StringRef, StringEntry, StringAllocator,
+                                 StringPoolEntryInfo>(Allocator) {}
 
   StringPool(size_t InitialSize)
-      : ConcurrentHashTableByPtr<StringRef, StringEntry,
-                                 PerThreadStringAllocator, StringPoolEntryInfo>(
-            Allocator, InitialSize) {}
+      : ConcurrentHashTableByPtr<StringRef, StringEntry, StringAllocator,
+                                 StringPoolEntryInfo>(Allocator, InitialSize) {}
 
-  PerThreadStringAllocator &getAllocatorRef() { return Allocator; }
+  StringAllocator &getAllocatorRef() { return Allocator; }
 
 private:
-  PerThreadStringAllocator Allocator;
+  StringAllocator Allocator;
 };
 
 } // end of namespace dwarflinker_parallel
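One detail worth noting: AllocatorBase is a CRTP mixin, so the "using AllocatorBase<StringAllocator>::Allocate;" line re-exports the typed convenience overloads, which all route through the mutex-guarded raw Allocate(Size, Alignment) above. A hypothetical caller:

// Hypothetical caller code (not part of the commit): the typed overload
// inherited from AllocatorBase forwards to the guarded raw Allocate above.
StringAllocator Alloc;
uint64_t *Scratch = Alloc.Allocate<uint64_t>(8); // room for 8 uint64_t values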
3 changes: 0 additions & 3 deletions llvm/lib/DWARFLinkerParallel/StringPool.cpp
@@ -7,6 +7,3 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/DWARFLinkerParallel/StringPool.h"
-
-thread_local llvm::BumpPtrAllocator
-    llvm::dwarflinker_parallel::PerThreadStringAllocator::ThreadLocalAllocator;
45 changes: 29 additions & 16 deletions llvm/unittests/ADT/ConcurrentHashtableTest.cpp
@@ -36,25 +36,38 @@ class String {
   std::array<char, 0x20> ExtraData;
 };
 
-static thread_local BumpPtrAllocator ThreadLocalAllocator;
-class PerThreadAllocator : public AllocatorBase<PerThreadAllocator> {
+class SimpleAllocator : public AllocatorBase<SimpleAllocator> {
 public:
   inline LLVM_ATTRIBUTE_RETURNS_NONNULL void *Allocate(size_t Size,
                                                        size_t Alignment) {
-    return ThreadLocalAllocator.Allocate(Size, Align(Alignment));
+#if LLVM_ENABLE_THREADS
+    std::lock_guard<std::mutex> Guard(AllocatorMutex);
+#endif
+
+    return Allocator.Allocate(Size, Align(Alignment));
   }
-  inline size_t getBytesAllocated() const {
-    return ThreadLocalAllocator.getBytesAllocated();
+  inline size_t getBytesAllocated() {
+#if LLVM_ENABLE_THREADS
+    std::lock_guard<std::mutex> Guard(AllocatorMutex);
+#endif
+
+    return Allocator.getBytesAllocated();
   }
 
   // Pull in base class overloads.
-  using AllocatorBase<PerThreadAllocator>::Allocate;
+  using AllocatorBase<SimpleAllocator>::Allocate;
+
+protected:
+#if LLVM_ENABLE_THREADS
+  std::mutex AllocatorMutex;
+#endif
+  BumpPtrAllocator Allocator;
 } Allocator;
 
 TEST(ConcurrentHashTableTest, AddStringEntries) {
   ConcurrentHashTableByPtr<
-      std::string, String, PerThreadAllocator,
-      ConcurrentHashTableInfoByPtr<std::string, String, PerThreadAllocator>>
+      std::string, String, SimpleAllocator,
+      ConcurrentHashTableInfoByPtr<std::string, String, SimpleAllocator>>
       HashTable(Allocator, 10);
 
   size_t AllocatedBytesAtStart = Allocator.getBytesAllocated();
@@ -102,8 +115,8 @@ TEST(ConcurrentHashTableTest, AddStringEntries) {
 TEST(ConcurrentHashTableTest, AddStringMultiplueEntries) {
   const size_t NumElements = 10000;
   ConcurrentHashTableByPtr<
-      std::string, String, PerThreadAllocator,
-      ConcurrentHashTableInfoByPtr<std::string, String, PerThreadAllocator>>
+      std::string, String, SimpleAllocator,
+      ConcurrentHashTableInfoByPtr<std::string, String, SimpleAllocator>>
       HashTable(Allocator);
 
   // Check insertion.
@@ -147,8 +160,8 @@ TEST(ConcurrentHashTableTest, AddStringMultiplueEntriesWithResize) {
   // Number of elements exceeds original size, thus hashtable should be resized.
   const size_t NumElements = 20000;
   ConcurrentHashTableByPtr<
-      std::string, String, PerThreadAllocator,
-      ConcurrentHashTableInfoByPtr<std::string, String, PerThreadAllocator>>
+      std::string, String, SimpleAllocator,
+      ConcurrentHashTableInfoByPtr<std::string, String, SimpleAllocator>>
       HashTable(Allocator, 100);
 
   // Check insertion.
@@ -191,8 +204,8 @@ TEST(ConcurrentHashTableTest, AddStringMultiplueEntriesWithResize) {
 TEST(ConcurrentHashTableTest, AddStringEntriesParallel) {
   const size_t NumElements = 10000;
   ConcurrentHashTableByPtr<
-      std::string, String, PerThreadAllocator,
-      ConcurrentHashTableInfoByPtr<std::string, String, PerThreadAllocator>>
+      std::string, String, SimpleAllocator,
+      ConcurrentHashTableInfoByPtr<std::string, String, SimpleAllocator>>
       HashTable(Allocator);
 
   // Check parallel insertion.
@@ -235,8 +248,8 @@ TEST(ConcurrentHashTableTest, AddStringEntriesParallel) {
 TEST(ConcurrentHashTableTest, AddStringEntriesParallelWithResize) {
   const size_t NumElements = 20000;
   ConcurrentHashTableByPtr<
-      std::string, String, PerThreadAllocator,
-      ConcurrentHashTableInfoByPtr<std::string, String, PerThreadAllocator>>
+      std::string, String, SimpleAllocator,
+      ConcurrentHashTableInfoByPtr<std::string, String, SimpleAllocator>>
      HashTable(Allocator, 100);
 
   // Check parallel insertion.
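The test bodies themselves are collapsed on this page. A plausible shape for the parallel checks, assuming llvm::parallelFor from llvm/Support/Parallel.h (an assumption; the real bodies are hidden above), which stresses both the bucket locks and the mutex-guarded allocator:

// Hypothetical test body sketch; the actual code is collapsed above.
parallelFor(0, NumElements, [&](size_t I) {
  // First insertion of each distinct key must report "inserted".
  EXPECT_TRUE(HashTable.insert(std::to_string(I)).second);
  // Re-inserting the same key must return the existing entry instead.
  EXPECT_FALSE(HashTable.insert(std::to_string(I)).second);
});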
