
Commit aa54750

8281015: Further simplify NMT backend
Reviewed-by: lucy
Backport-of: b96b743727a628c1b33cc9b3374f010c2ea30b78
1 parent fa40b5f commit aa54750

10 files changed: +129 -219 lines

Diff for: src/hotspot/share/runtime/os.cpp

+11 -18

@@ -669,23 +669,19 @@ void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
     return NULL;
   }
 
-  const NMT_TrackingLevel level = MemTracker::tracking_level();
-  const size_t nmt_overhead =
-      MemTracker::malloc_header_size(level) + MemTracker::malloc_footer_size(level);
-
-  const size_t outer_size = size + nmt_overhead;
+  const size_t outer_size = size + MemTracker::overhead_per_malloc();
 
   // Check for overflow.
   if (outer_size < size) {
     return NULL;
   }
 
-  void* const outer_ptr = (u_char*)::malloc(outer_size);
+  void* const outer_ptr = ::malloc(outer_size);
   if (outer_ptr == NULL) {
     return NULL;
   }
 
-  void* inner_ptr = MemTracker::record_malloc((address)outer_ptr, size, memflags, stack, level);
+  void* const inner_ptr = MemTracker::record_malloc((address)outer_ptr, size, memflags, stack);
 
   DEBUG_ONLY(::memset(inner_ptr, uninitBlockPad, size);)
   DEBUG_ONLY(break_if_ptr_caught(inner_ptr);)
@@ -724,19 +720,17 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCa
     return NULL;
   }
 
-  const NMT_TrackingLevel level = MemTracker::tracking_level();
-  const size_t nmt_overhead =
-      MemTracker::malloc_header_size(level) + MemTracker::malloc_footer_size(level);
-
-  const size_t new_outer_size = size + nmt_overhead;
+  const size_t new_outer_size = size + MemTracker::overhead_per_malloc();
 
   // If NMT is enabled, this checks for heap overwrites, then de-accounts the old block.
-  void* const old_outer_ptr = MemTracker::record_free(memblock, level);
+  void* const old_outer_ptr = MemTracker::record_free(memblock);
 
   void* const new_outer_ptr = ::realloc(old_outer_ptr, new_outer_size);
+  if (new_outer_ptr == NULL) {
+    return NULL;
+  }
 
-  // If NMT is enabled, this checks for heap overwrites, then de-accounts the old block.
-  void* const new_inner_ptr = MemTracker::record_malloc(new_outer_ptr, size, memflags, stack, level);
+  void* const new_inner_ptr = MemTracker::record_malloc(new_outer_ptr, size, memflags, stack);
 
   DEBUG_ONLY(break_if_ptr_caught(new_inner_ptr);)
 
@@ -757,10 +751,9 @@ void os::free(void *memblock) {
 
   DEBUG_ONLY(break_if_ptr_caught(memblock);)
 
-  const NMT_TrackingLevel level = MemTracker::tracking_level();
-
   // If NMT is enabled, this checks for heap overwrites, then de-accounts the old block.
-  void* const old_outer_ptr = MemTracker::record_free(memblock, level);
+  void* const old_outer_ptr = MemTracker::record_free(memblock);
+
   ::free(old_outer_ptr);
 }
 
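
The os.cpp change folds the per-call header/footer size computation into a single MemTracker::overhead_per_malloc() query, drops the NMT_TrackingLevel argument from the record_malloc/record_free calls, and adds an explicit NULL check after ::realloc. As a minimal standalone sketch (not HotSpot code; the constant and wrapper names below are illustrative), the outer/inner block arithmetic these wrappers perform looks like this:

// Standalone illustration of the outer/inner block layout used by os::malloc()
// and os::free(): a fixed per-allocation overhead is reserved in front of the
// user payload. kOverheadPerMalloc is an assumed stand-in for
// MemTracker::overhead_per_malloc(), not the real value.
#include <cstdlib>
#include <cstddef>

static const size_t kOverheadPerMalloc = 16;   // assumption for this sketch

void* wrapped_malloc(size_t size) {
  const size_t outer_size = size + kOverheadPerMalloc;
  if (outer_size < size) {                     // overflow check, as in os::malloc()
    return nullptr;
  }
  void* const outer_ptr = ::malloc(outer_size);
  if (outer_ptr == nullptr) {
    return nullptr;
  }
  // The caller gets the address just past the reserved header area.
  return static_cast<char*>(outer_ptr) + kOverheadPerMalloc;
}

void wrapped_free(void* inner_ptr) {
  if (inner_ptr == nullptr) {
    return;
  }
  // Walk back to the start of the outer block before releasing it.
  ::free(static_cast<char*>(inner_ptr) - kOverheadPerMalloc);
}

int main() {
  void* p = wrapped_malloc(100);
  wrapped_free(p);
  return 0;
}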

Diff for: src/hotspot/share/services/mallocSiteTable.cpp

+13 -9

@@ -106,13 +106,11 @@ bool MallocSiteTable::walk(MallocSiteWalker* walker) {
  * 2. Overflow hash bucket.
  * Under any of above circumstances, caller should handle the situation.
  */
-MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, size_t* bucket_idx,
-                                           size_t* pos_idx, MEMFLAGS flags) {
+MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, uint32_t* marker, MEMFLAGS flags) {
   assert(flags != mtNone, "Should have a real memory type");
   const unsigned int hash = key.calculate_hash();
   const unsigned int index = hash_to_index(hash);
-  *bucket_idx = (size_t)index;
-  *pos_idx = 0;
+  *marker = 0;
 
   // First entry for this hash bucket
   if (_table[index] == NULL) {
@@ -122,41 +120,47 @@ MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, size_t* b
 
     // swap in the head
     if (Atomic::replace_if_null(&_table[index], entry)) {
+      *marker = build_marker(index, 0);
       return entry->data();
     }
 
     delete entry;
   }
 
+  unsigned pos_idx = 0;
   MallocSiteHashtableEntry* head = _table[index];
-  while (head != NULL && (*pos_idx) <= MAX_BUCKET_LENGTH) {
+  while (head != NULL && pos_idx < MAX_BUCKET_LENGTH) {
     if (head->hash() == hash) {
       MallocSite* site = head->data();
       if (site->flag() == flags && site->equals(key)) {
+        *marker = build_marker(index, pos_idx);
         return head->data();
       }
     }
 
-    if (head->next() == NULL && (*pos_idx) < MAX_BUCKET_LENGTH) {
+    if (head->next() == NULL && pos_idx < (MAX_BUCKET_LENGTH - 1)) {
       MallocSiteHashtableEntry* entry = new_entry(key, flags);
       // OOM check
       if (entry == NULL) return NULL;
       if (head->atomic_insert(entry)) {
-        (*pos_idx) ++;
+        pos_idx ++;
+        *marker = build_marker(index, pos_idx);
        return entry->data();
      }
      // contended, other thread won
      delete entry;
    }
    head = (MallocSiteHashtableEntry*)head->next();
-    (*pos_idx) ++;
+    pos_idx ++;
  }
  return NULL;
}
 
 // Access malloc site
-MallocSite* MallocSiteTable::malloc_site(size_t bucket_idx, size_t pos_idx) {
+MallocSite* MallocSiteTable::malloc_site(uint32_t marker) {
+  uint16_t bucket_idx = bucket_idx_from_marker(marker);
   assert(bucket_idx < table_size, "Invalid bucket index");
+  const uint16_t pos_idx = pos_idx_from_marker(marker);
   MallocSiteHashtableEntry* head = _table[bucket_idx];
   for (size_t index = 0;
        index < pos_idx && head != NULL;
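
In the rewritten lookup_or_add() the chain position is a plain local (pos_idx) and the caller-visible result is the packed marker, written on every successful return path. The append branch now tests pos_idx < (MAX_BUCKET_LENGTH - 1), so the position recorded after the increment still fits the cap. A minimal standalone sketch of that capped chain-append pattern (illustrative names, single-threaded, no atomics, not HotSpot code):

#include <cstdio>

struct Node {
  int value;
  Node* next;
};

static const unsigned kMaxChainLength = 4;   // stand-in for MAX_BUCKET_LENGTH

// Walk the chain; append 'value' at the tail only if the resulting position
// stays below the cap. Returns the zero-based position used, or -1 if full.
// (Nodes are intentionally leaked; this is only a control-flow sketch.)
int append_capped(Node* head, int value) {
  unsigned pos = 0;
  while (head != nullptr && pos < kMaxChainLength) {
    if (head->next == nullptr && pos < kMaxChainLength - 1) {
      head->next = new Node{value, nullptr};
      pos++;                       // recorded position is still < kMaxChainLength
      return (int)pos;
    }
    head = head->next;
    pos++;
  }
  return -1;                       // chain is full
}

int main() {
  Node head{0, nullptr};
  printf("%d\n", append_capped(&head, 1));   // 1
  printf("%d\n", append_capped(&head, 2));   // 2
  printf("%d\n", append_capped(&head, 3));   // 3
  printf("%d\n", append_capped(&head, 4));   // -1: cap of 4 positions reached
}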

Diff for: src/hotspot/share/services/mallocSiteTable.hpp

+24 -15

@@ -114,21 +114,31 @@ class MallocSiteTable : AllStatic {
     table_size = (table_base_size * NMT_TrackingStackDepth - 1)
   };
 
-  // The table must not be wider than the maximum value the bucket_idx field
-  // in the malloc header can hold.
+  // Table cannot be wider than a 16bit bucket idx can hold
+#define MAX_MALLOCSITE_TABLE_SIZE (USHRT_MAX - 1)
+  // Each bucket chain cannot be longer than what a 16 bit pos idx can hold (hopefully way shorter)
+#define MAX_BUCKET_LENGTH (USHRT_MAX - 1)
+
   STATIC_ASSERT(table_size <= MAX_MALLOCSITE_TABLE_SIZE);
 
+  static uint32_t build_marker(unsigned bucket_idx, unsigned pos_idx) {
+    assert(bucket_idx <= MAX_MALLOCSITE_TABLE_SIZE && pos_idx < MAX_BUCKET_LENGTH, "overflow");
+    return (uint32_t)bucket_idx << 16 | pos_idx;
+  }
+  static uint16_t bucket_idx_from_marker(uint32_t marker) { return marker >> 16; }
+  static uint16_t pos_idx_from_marker(uint32_t marker) { return marker & 0xFFFF; }
+
 public:
+
   static bool initialize();
 
   // Number of hash buckets
   static inline int hash_buckets() { return (int)table_size; }
 
   // Access and copy a call stack from this table. Shared lock should be
   // acquired before access the entry.
-  static inline bool access_stack(NativeCallStack& stack, size_t bucket_idx,
-                                  size_t pos_idx) {
-    MallocSite* site = malloc_site(bucket_idx, pos_idx);
+  static inline bool access_stack(NativeCallStack& stack, uint32_t marker) {
+    MallocSite* site = malloc_site(marker);
     if (site != NULL) {
       stack = *site->call_stack();
       return true;
@@ -137,23 +147,22 @@ class MallocSiteTable : AllStatic {
   }
 
   // Record a new allocation from specified call path.
-  // Return true if the allocation is recorded successfully, bucket_idx
-  // and pos_idx are also updated to indicate the entry where the allocation
-  // information was recorded.
+  // Return true if the allocation is recorded successfully and updates marker
+  // to indicate the entry where the allocation information was recorded.
   // Return false only occurs under rare scenarios:
   //  1. out of memory
   //  2. overflow hash bucket
   static inline bool allocation_at(const NativeCallStack& stack, size_t size,
-    size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags) {
-    MallocSite* site = lookup_or_add(stack, bucket_idx, pos_idx, flags);
+    uint32_t* marker, MEMFLAGS flags) {
+    MallocSite* site = lookup_or_add(stack, marker, flags);
     if (site != NULL) site->allocate(size);
     return site != NULL;
   }
 
-  // Record memory deallocation. bucket_idx and pos_idx indicate where the allocation
+  // Record memory deallocation. marker indicates where the allocation
   // information was recorded.
-  static inline bool deallocation_at(size_t size, size_t bucket_idx, size_t pos_idx) {
-    MallocSite* site = malloc_site(bucket_idx, pos_idx);
+  static inline bool deallocation_at(size_t size, uint32_t marker) {
+    MallocSite* site = malloc_site(marker);
     if (site != NULL) {
       site->deallocate(size);
       return true;
@@ -173,8 +182,8 @@ class MallocSiteTable : AllStatic {
   // Delete a bucket linked list
   static void delete_linked_list(MallocSiteHashtableEntry* head);
 
-  static MallocSite* lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags);
-  static MallocSite* malloc_site(size_t bucket_idx, size_t pos_idx);
+  static MallocSite* lookup_or_add(const NativeCallStack& key, uint32_t* marker, MEMFLAGS flags);
+  static MallocSite* malloc_site(uint32_t marker);
   static bool walk(MallocSiteWalker* walker);
 
   static inline unsigned int hash_to_index(unsigned int hash) {
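
The header now carries a single 32-bit marker instead of two size_t indices: bucket index in the upper 16 bits, chain position in the lower 16, with the STATIC_ASSERT and the bucket-length cap keeping both within range. A standalone round-trip check of the same packing scheme (mirroring build_marker() and the *_from_marker() helpers above; uses the standard <cassert> assert rather than HotSpot's):

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <climits>

// Same 16+16 bit packing as build_marker()/bucket_idx_from_marker()/pos_idx_from_marker().
static uint32_t build_marker(unsigned bucket_idx, unsigned pos_idx) {
  assert(bucket_idx <= USHRT_MAX - 1 && pos_idx < USHRT_MAX - 1);
  return (uint32_t)bucket_idx << 16 | pos_idx;
}
static uint16_t bucket_idx_from_marker(uint32_t marker) { return (uint16_t)(marker >> 16); }
static uint16_t pos_idx_from_marker(uint32_t marker)    { return (uint16_t)(marker & 0xFFFF); }

int main() {
  const uint32_t m = build_marker(513, 7);
  assert(bucket_idx_from_marker(m) == 513);
  assert(pos_idx_from_marker(m) == 7);
  printf("marker=0x%08x bucket=%u pos=%u\n",
         (unsigned)m, (unsigned)bucket_idx_from_marker(m), (unsigned)pos_idx_from_marker(m));
  return 0;
}

Sixteen bits per field is comfortable here: table_size is bounded at compile time by the STATIC_ASSERT, and the bucket chains are expected to stay far below MAX_BUCKET_LENGTH, as the comment in the patch notes.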

Diff for: src/hotspot/share/services/mallocTracker.cpp

+33 -38

@@ -26,7 +26,6 @@
 #include "runtime/os.hpp"
 #include "services/mallocSiteTable.hpp"
 #include "services/mallocTracker.hpp"
-#include "services/mallocTracker.inline.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/ostream.hpp"
@@ -112,20 +111,6 @@ void MallocHeader::mark_block_as_dead() {
   set_footer(_footer_canary_dead_mark);
 }
 
-void MallocHeader::release() {
-  assert(MemTracker::enabled(), "Sanity");
-
-  check_block_integrity();
-
-  MallocMemorySummary::record_free(size(), flags());
-  MallocMemorySummary::record_free_malloc_header(sizeof(MallocHeader));
-  if (MemTracker::tracking_level() == NMT_detail) {
-    MallocSiteTable::deallocation_at(size(), _bucket_idx, _pos_idx);
-  }
-
-  mark_block_as_dead();
-}
-
 void MallocHeader::print_block_on_error(outputStream* st, address bad_address) const {
   assert(bad_address >= (address)this, "sanity");
 
@@ -219,13 +204,8 @@ void MallocHeader::check_block_integrity() const {
 #undef PREFIX
 }
 
-bool MallocHeader::record_malloc_site(const NativeCallStack& stack, size_t size,
-  size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags) const {
-  return MallocSiteTable::allocation_at(stack, size, bucket_idx, pos_idx, flags);
-}
-
 bool MallocHeader::get_stack(NativeCallStack& stack) const {
-  return MallocSiteTable::access_stack(stack, _bucket_idx, _pos_idx);
+  return MallocSiteTable::access_stack(stack, _mst_marker);
 }
 
 bool MallocTracker::initialize(NMT_TrackingLevel level) {
@@ -241,38 +221,53 @@ bool MallocTracker::initialize(NMT_TrackingLevel level) {
 
 // Record a malloc memory allocation
 void* MallocTracker::record_malloc(void* malloc_base, size_t size, MEMFLAGS flags,
-  const NativeCallStack& stack, NMT_TrackingLevel level) {
-  assert(level != NMT_off, "precondition");
-  void* memblock;      // the address for user data
-  MallocHeader* header = NULL;
-
-  if (malloc_base == NULL) {
-    return NULL;
+  const NativeCallStack& stack)
+{
+  assert(MemTracker::enabled(), "precondition");
+  assert(malloc_base != NULL, "precondition");
+
+  MallocMemorySummary::record_malloc(size, flags);
+  MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
+  uint32_t mst_marker = 0;
+  if (MemTracker::tracking_level() == NMT_detail) {
+    MallocSiteTable::allocation_at(stack, size, &mst_marker, flags);
   }
 
   // Uses placement global new operator to initialize malloc header
-
-  header = ::new (malloc_base)MallocHeader(size, flags, stack, level);
-  memblock = (void*)((char*)malloc_base + sizeof(MallocHeader));
+  MallocHeader* const header = ::new (malloc_base)MallocHeader(size, flags, stack, mst_marker);
+  void* const memblock = (void*)((char*)malloc_base + sizeof(MallocHeader));
 
   // The alignment check: 8 bytes alignment for 32 bit systems.
   //                      16 bytes alignment for 64-bit systems.
   assert(((size_t)memblock & (sizeof(size_t) * 2 - 1)) == 0, "Alignment check");
 
 #ifdef ASSERT
-  if (level > NMT_off) {
-    // Read back
-    assert(get_size(memblock) == size, "Wrong size");
-    assert(get_flags(memblock) == flags, "Wrong flags");
+  // Read back
+  {
+    MallocHeader* const header2 = malloc_header(memblock);
+    assert(header2->size() == size, "Wrong size");
+    assert(header2->flags() == flags, "Wrong flags");
+    header2->check_block_integrity();
   }
 #endif
 
   return memblock;
 }
 
 void* MallocTracker::record_free(void* memblock) {
-  assert(MemTracker::tracking_level() != NMT_off && memblock != NULL, "precondition");
-  MallocHeader* header = malloc_header(memblock);
-  header->release();
+  assert(MemTracker::enabled(), "Sanity");
+  assert(memblock != NULL, "precondition");
+
+  MallocHeader* const header = malloc_header(memblock);
+  header->check_block_integrity();
+
+  MallocMemorySummary::record_free(header->size(), header->flags());
+  MallocMemorySummary::record_free_malloc_header(sizeof(MallocHeader));
+  if (MemTracker::tracking_level() == NMT_detail) {
+    MallocSiteTable::deallocation_at(header->size(), header->mst_marker());
+  }
+
+  header->mark_block_as_dead();
+
   return (void*)header;
 }
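
With MallocHeader::release() removed, record_free() now performs the integrity check, summary accounting, and site-table deallocation inline, while record_malloc() still lays the header down with placement new directly in front of the user block. A minimal standalone sketch of that header-in-front-of-payload layout (toy header and sketch function names, not the real MallocHeader or MallocTracker API):

#include <cassert>
#include <cstdlib>
#include <new>

// Toy stand-in for MallocHeader: only remembers the payload size.
struct ToyHeader {
  size_t _size;
  explicit ToyHeader(size_t size) : _size(size) {}
  size_t size() const { return _size; }
};

// Construct the header at the start of the raw block and return the address
// just behind it, the way MallocTracker::record_malloc() hands out memblock.
void* record_malloc_sketch(void* malloc_base, size_t size) {
  assert(malloc_base != nullptr);
  ::new (malloc_base) ToyHeader(size);
  return (void*)((char*)malloc_base + sizeof(ToyHeader));
}

// Walk back from the user pointer to the header and return the raw block,
// the way MallocTracker::record_free() does before the caller passes it to ::free().
void* record_free_sketch(void* memblock) {
  ToyHeader* const header = (ToyHeader*)((char*)memblock - sizeof(ToyHeader));
  assert(header->size() > 0);
  return (void*)header;
}

int main() {
  void* raw = ::malloc(100 + sizeof(ToyHeader));
  void* user = record_malloc_sketch(raw, 100);
  ::free(record_free_sketch(user));
  return 0;
}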
