diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.cpp
index bf52bba9ade1eb..b5d26b8c32be1a 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.cpp
@@ -33,9 +33,7 @@ struct ChainedOriginDepotNode {
 
   bool eq(hash_type hash, const args_type &args) const;
 
-  static uptr allocated();
-
-  static ChainedOriginDepotNode *allocate(const args_type &args);
+  static uptr allocated() { return 0; }
 
   static hash_type hash(const args_type &args);
 
@@ -62,21 +60,12 @@ struct ChainedOriginDepotNode {
 
 }  // namespace
 
-static PersistentAllocator<ChainedOriginDepotNode> allocator;
-
 static StackDepotBase<ChainedOriginDepotNode, 4, 20> depot;
 
 bool ChainedOriginDepotNode::eq(hash_type hash, const args_type &args) const {
   return here_id == args.here_id && prev_id == args.prev_id;
 }
 
-uptr ChainedOriginDepotNode::allocated() { return allocator.allocated(); }
-
-ChainedOriginDepotNode *ChainedOriginDepotNode::allocate(
-    const args_type &args) {
-  return allocator.alloc();
-}
-
 /* This is murmur2 hash for the 64->32 bit case.
    It does not behave all that well because the keys have a very biased
    distribution (I've seen 7-element buckets with the table only 14% full).
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_flat_map.h b/compiler-rt/lib/sanitizer_common/sanitizer_flat_map.h
index a2cb0ea71c1948..dcd1bfdf81282e 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_flat_map.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_flat_map.h
@@ -79,11 +79,22 @@ class TwoLevelMap {
       T *p = Get(i);
       if (!p)
         continue;
-      MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), kSize2 * sizeof(T));
+      MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), MmapSize());
       UnmapOrDie(p, kSize2);
     }
   }
 
+  uptr MemoryUsage() const {
+    uptr res = 0;
+    for (uptr i = 0; i < kSize1; i++) {
+      T *p = Get(i);
+      if (!p)
+        continue;
+      res += MmapSize();
+    }
+    return res;
+  }
+
   constexpr uptr size() const { return kSize1 * kSize2; }
   constexpr uptr size1() const { return kSize1; }
   constexpr uptr size2() const { return kSize2; }
@@ -106,6 +117,10 @@ class TwoLevelMap {
   }
 
  private:
+  constexpr uptr MmapSize() const {
+    return RoundUpTo(kSize2 * sizeof(T), GetPageSizeCached());
+  }
+
   T *Get(uptr idx) const {
     CHECK_LT(idx, kSize1);
     return reinterpret_cast<T *>(
@@ -123,7 +138,7 @@ class TwoLevelMap {
     SpinMutexLock l(&mu_);
     T *res = Get(idx);
     if (!res) {
-      res = reinterpret_cast<T *>(MmapOrDie(kSize2 * sizeof(T), "TwoLevelMap"));
+      res = reinterpret_cast<T *>(MmapOrDie(MmapSize(), "TwoLevelMap"));
      MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);
       atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
                    memory_order_release);
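
The sanitizer_flat_map.h hunks above make TwoLevelMap account for what is actually mapped: an mmap covers whole pages, so MmapSize() rounds kSize2 * sizeof(T) up to the page size, and both OnUnmap() and the new MemoryUsage() report that rounded figure once per live second-level block. A minimal self-contained sketch of the arithmetic, with a local stand-in for the sanitizer RoundUpTo and an assumed 4 KiB page size (the real code uses GetPageSizeCached()):

// Sketch only, not part of the patch. RoundUpTo is a local stand-in for
// the sanitizer utility; the page size and block sizes are assumptions
// chosen so the rounding is visible.
#include <cstdint>
#include <cstdio>

static uint64_t RoundUpTo(uint64_t size, uint64_t boundary) {
  // boundary must be a power of two, as a page size is.
  return (size + boundary - 1) & ~(boundary - 1);
}

int main() {
  const uint64_t kPageSize = 4096;  // assumed GetPageSizeCached() result
  const uint64_t kSize2 = 128;      // entries per second-level block
  const uint64_t kNodeSize = 24;    // stand-in for sizeof(T)
  uint64_t requested = kSize2 * kNodeSize;            // 3072 bytes
  uint64_t mapped = RoundUpTo(requested, kPageSize);  // 4096 bytes
  // MemoryUsage() accumulates the rounded size (MmapSize()) per mapped
  // block, so the stats match the pages the OS actually reserved.
  std::printf("requested=%llu mapped=%llu\n", (unsigned long long)requested,
              (unsigned long long)mapped);
  return 0;
}

For block sizes that are already page multiples the two figures coincide; the example deliberately picks a small block so the difference shows.
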
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
index a067463f2f67d6..a0475220678fab 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
@@ -19,7 +19,6 @@
 
 namespace __sanitizer {
 
-static PersistentAllocator<StackDepotNode> allocator;
 static PersistentAllocator<uptr> traceAllocator;
 
 struct StackDepotNode {
@@ -39,12 +38,7 @@ struct StackDepotNode {
   bool eq(hash_type hash, const args_type &args) const {
     return hash == stack_hash;
   }
-  static uptr allocated() {
-    return allocator.allocated() + traceAllocator.allocated();
-  }
-  static StackDepotNode *allocate(const args_type &args) {
-    return allocator.alloc();
-  }
+  static uptr allocated() { return traceAllocator.allocated(); }
   static hash_type hash(const args_type &args) {
     MurMur2Hash64Builder H(args.size * sizeof(uptr));
     for (uptr i = 0; i < args.size; i++) H.add(args.trace[i]);
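
The sanitizer_stackdepotbase.h diff below drops the per-part id generators (kPartBits, kPartShift, seq[]) in favor of one atomic counter, and keeps nodes in a TwoLevelMap indexed by the id itself, so every id denotes a fixed slot. A sketch of the resulting index space, assuming kReservedBits == 4 as the depots above pass; the level split mirrors the kNodesSize1Log/kNodesSize2Log constants conceptually, not TwoLevelMap's private layout:

// Sketch only: how the roughly 28-bit ids split across the two map levels.
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kReservedBits = 4;  // assumed template argument
  const uint32_t kIdSizeLog = sizeof(uint32_t) * 8 - kReservedBits;  // 28
  const uint32_t kNodesSize1Log = kIdSizeLog / 2;                    // 14
  const uint32_t kNodesSize2Log = kIdSizeLog - kNodesSize1Log;       // 14
  const uint64_t kNodesSize1 = 1ull << kNodesSize1Log;  // first-level slots
  const uint64_t kNodesSize2 = 1ull << kNodesSize2Log;  // nodes per block

  uint32_t id = 12345;  // ids come from one atomic_fetch_add, starting at 1
  // Put() checks CHECK_EQ(id & (((u32)-1) >> kReservedBits), id), i.e. an
  // id never spills into the reserved top bits:
  assert((id & (~0u >> kReservedBits)) == id);
  // A fixed slot per id: which second-level block, and the offset inside it.
  uint64_t level1 = id / kNodesSize2;
  uint64_t level2 = id % kNodesSize2;
  assert(level1 < kNodesSize1 && level2 < kNodesSize2);
  (void)level1;
  (void)level2;
  return 0;
}

A single counter also retires the old per-part capacity check CHECK_LT(id, kMaxId): previously one hot part could exhaust its 1 << kPartShift ids while other parts sat empty, whereas now ids only run out when the whole reserved-bit-limited space does.
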
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h
index 397f19bedb9f6c..4d2c6c6a30a4dd 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h
@@ -16,6 +16,7 @@
 #include <stdio.h>
 
 #include "sanitizer_atomic.h"
+#include "sanitizer_flat_map.h"
 #include "sanitizer_internal_defs.h"
 #include "sanitizer_mutex.h"
 
@@ -23,16 +24,29 @@ namespace __sanitizer {
 
 template <class Node, int kReservedBits, int kTabSizeLog>
 class StackDepotBase {
+  static const u32 kIdSizeLog = sizeof(u32) * 8 - kReservedBits;
+  static const u32 kNodesSize1Log = kIdSizeLog / 2;
+  static const u32 kNodesSize2Log = kIdSizeLog - kNodesSize1Log;
+
  public:
   typedef typename Node::args_type args_type;
   typedef typename Node::handle_type handle_type;
   typedef typename Node::hash_type hash_type;
+
+  static const u64 kNodesSize1 = 1ull << kNodesSize1Log;
+  static const u64 kNodesSize2 = 1ull << kNodesSize2Log;
+
   // Maps stack trace to an unique id.
   handle_type Put(args_type args, bool *inserted = nullptr);
   // Retrieves a stored stack trace by the id.
   args_type Get(u32 id);
 
-  StackDepotStats GetStats() const { return {n_uniq_ids, Node::allocated()}; }
+  StackDepotStats GetStats() const {
+    return {
+        atomic_load_relaxed(&n_uniq_ids),
+        nodes.MemoryUsage() + Node::allocated(),
+    };
+  }
 
   void LockAll();
   void UnlockAll();
@@ -44,17 +58,12 @@ class StackDepotBase {
   static void unlock(atomic_uintptr_t *p, Node *s);
 
   static const int kTabSize = 1 << kTabSizeLog;  // Hash table size.
-  static const int kPartBits = 8;
-  static const int kPartShift = sizeof(u32) * 8 - kPartBits - kReservedBits;
-  static const int kPartCount =
-      1 << kPartBits;  // Number of subparts in the table.
-  static const int kPartSize = kTabSize / kPartCount;
-  static const int kMaxId = 1 << kPartShift;
 
-  atomic_uintptr_t tab[kTabSize];   // Hash table of Node's.
-  atomic_uint32_t seq[kPartCount];  // Unique id generators.
+  atomic_uintptr_t tab[kTabSize];  // Hash table of Node's.
 
-  uptr n_uniq_ids;
+  atomic_uint32_t n_uniq_ids;
+
+  TwoLevelMap<Node, kNodesSize1, kNodesSize2> nodes;
 
   friend class StackDepotReverseMap;
 };
@@ -120,14 +129,10 @@ StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,
       return node->get_handle();
     }
   }
-  uptr part = (h % kTabSize) / kPartSize;
-  u32 id = atomic_fetch_add(&seq[part], 1, memory_order_relaxed) + 1;
-  n_uniq_ids++;
-  CHECK_LT(id, kMaxId);
-  id |= part << kPartShift;
+  u32 id = atomic_fetch_add(&n_uniq_ids, 1, memory_order_relaxed) + 1;
   CHECK_NE(id, 0);
   CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
-  s = Node::allocate(args);
+  s = &nodes[id];
   s->id = id;
   s->store(args, h);
   s->link = s2;
@@ -139,25 +144,15 @@
 template <class Node, int kReservedBits, int kTabSizeLog>
 typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::args_type
 StackDepotBase<Node, kReservedBits, kTabSizeLog>::Get(u32 id) {
-  if (id == 0) {
+  if (id == 0)
     return args_type();
-  }
   CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
-  // High kPartBits contain part id, so we need to scan at most kPartSize lists.
-  uptr part = id >> kPartShift;
-  for (int i = 0; i != kPartSize; i++) {
-    uptr idx = part * kPartSize + i;
-    CHECK_LT(idx, kTabSize);
-    atomic_uintptr_t *p = &tab[idx];
-    uptr v = atomic_load(p, memory_order_consume);
-    Node *s = (Node *)(v & ~uptr(1));
-    for (; s; s = s->link) {
-      if (s->id == id) {
-        return s->load();
-      }
-    }
-  }
-  return args_type();
+  if (!nodes.contains(id))
+    return args_type();
+  const Node &node = nodes[id];
+  if (node.id != id)
+    return args_type();
+  return node.load();
 }
 
 template <class Node, int kReservedBits, int kTabSizeLog>
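
With nodes addressable by id, Get() becomes a direct lookup: a containment check on the two-level map, then a node.id comparison that rejects slots whose enclosing block was mapped (and therefore zero-filled) for some neighboring id but never written. A sketch of that lookup shape, with std::map standing in for TwoLevelMap and a hypothetical Node in place of the real StackDepotNode:

// Sketch only: why Get() re-checks node.id after the containment test.
#include <cstdint>
#include <map>

struct Node {
  uint32_t id = 0;  // 0 means "slot mapped but never stored to"
};

static std::map<uint32_t, Node> nodes;  // stand-in for TwoLevelMap

static const Node *Lookup(uint32_t id) {
  if (id == 0)
    return nullptr;  // id 0 is the empty sentinel, as in Get()
  auto it = nodes.find(id);  // plays the role of nodes.contains(id)
  if (it == nodes.end())
    return nullptr;
  if (it->second.id != id)  // slot exists but holds no published node
    return nullptr;
  return &it->second;
}

int main() {
  nodes[7];         // block mapped, slot zero-filled: id stays 0
  nodes[9].id = 9;  // slot actually stored, as Put() does via s->id = id
  return (Lookup(7) == nullptr && Lookup(9) != nullptr) ? 0 : 1;
}

Compared with the removed code, this replaces a scan of up to kPartSize hash-table chains with a constant-time indexed read.
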