8281015: Further simplify NMT backend
Reviewed-by: lucy
Backport-of: b96b743727a628c1b33cc9b3374f010c2ea30b78
GoeLin committed Oct 4, 2023
1 parent fa40b5f commit aa54750
Showing 10 changed files with 129 additions and 219 deletions.
29 changes: 11 additions & 18 deletions src/hotspot/share/runtime/os.cpp
@@ -669,23 +669,19 @@ void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
     return NULL;
   }
 
-  const NMT_TrackingLevel level = MemTracker::tracking_level();
-  const size_t nmt_overhead =
-      MemTracker::malloc_header_size(level) + MemTracker::malloc_footer_size(level);
-
-  const size_t outer_size = size + nmt_overhead;
+  const size_t outer_size = size + MemTracker::overhead_per_malloc();
 
   // Check for overflow.
   if (outer_size < size) {
     return NULL;
   }
 
-  void* const outer_ptr = (u_char*)::malloc(outer_size);
+  void* const outer_ptr = ::malloc(outer_size);
   if (outer_ptr == NULL) {
     return NULL;
   }
 
-  void* inner_ptr = MemTracker::record_malloc((address)outer_ptr, size, memflags, stack, level);
+  void* const inner_ptr = MemTracker::record_malloc((address)outer_ptr, size, memflags, stack);
 
   DEBUG_ONLY(::memset(inner_ptr, uninitBlockPad, size);)
   DEBUG_ONLY(break_if_ptr_caught(inner_ptr);)
@@ -724,19 +720,17 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
     return NULL;
   }
 
-  const NMT_TrackingLevel level = MemTracker::tracking_level();
-  const size_t nmt_overhead =
-      MemTracker::malloc_header_size(level) + MemTracker::malloc_footer_size(level);
-
-  const size_t new_outer_size = size + nmt_overhead;
+  const size_t new_outer_size = size + MemTracker::overhead_per_malloc();
 
   // If NMT is enabled, this checks for heap overwrites, then de-accounts the old block.
-  void* const old_outer_ptr = MemTracker::record_free(memblock, level);
+  void* const old_outer_ptr = MemTracker::record_free(memblock);
 
   void* const new_outer_ptr = ::realloc(old_outer_ptr, new_outer_size);
   if (new_outer_ptr == NULL) {
     return NULL;
   }
 
   // If NMT is enabled, this checks for heap overwrites, then de-accounts the old block.
-  void* const new_inner_ptr = MemTracker::record_malloc(new_outer_ptr, size, memflags, stack, level);
+  void* const new_inner_ptr = MemTracker::record_malloc(new_outer_ptr, size, memflags, stack);
 
   DEBUG_ONLY(break_if_ptr_caught(new_inner_ptr);)
 
@@ -757,10 +751,9 @@ void os::free(void *memblock) {
 
   DEBUG_ONLY(break_if_ptr_caught(memblock);)
 
-  const NMT_TrackingLevel level = MemTracker::tracking_level();
 
   // If NMT is enabled, this checks for heap overwrites, then de-accounts the old block.
-  void* const old_outer_ptr = MemTracker::record_free(memblock, level);
+  void* const old_outer_ptr = MemTracker::record_free(memblock);
 
   ::free(old_outer_ptr);
 }
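Note on the os.cpp changes above: with the tracking level no longer threaded through each call, the allocation path reduces to "reserve room for a header, let the tracker account it, hand out the interior pointer". A minimal standalone sketch of that header-wrapping idea — ToyHeader, toy_malloc and toy_free are invented stand-ins, not the HotSpot types:

#include <cstdint>
#include <cstdlib>

// Invented stand-in for MallocHeader; the real layout differs.
struct ToyHeader {
  uint64_t size;     // user-visible allocation size
  uint32_t flags;    // memory type tag
  uint32_t canary;   // integrity marker, verified on free
};

static const size_t toy_overhead = sizeof(ToyHeader); // the real code adds a footer too

void* toy_malloc(size_t size) {
  const size_t outer_size = size + toy_overhead;
  if (outer_size < size) return nullptr;               // overflow check, as in os::malloc
  void* const outer_ptr = ::malloc(outer_size);
  if (outer_ptr == nullptr) return nullptr;
  ToyHeader* const h = static_cast<ToyHeader*>(outer_ptr);
  h->size = size; h->flags = 0; h->canary = 0xE1A0;    // the "record_malloc" step
  return static_cast<char*>(outer_ptr) + sizeof(ToyHeader); // inner pointer for the caller
}

void toy_free(void* inner_ptr) {
  if (inner_ptr == nullptr) return;
  ToyHeader* const h =
      reinterpret_cast<ToyHeader*>(static_cast<char*>(inner_ptr) - sizeof(ToyHeader));
  // The real record_free verifies the canaries (check_block_integrity) at this point.
  ::free(h);                                           // the outer pointer goes back to libc
}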

22 changes: 13 additions & 9 deletions src/hotspot/share/services/mallocSiteTable.cpp
@@ -106,13 +106,11 @@ bool MallocSiteTable::walk(MallocSiteWalker* walker) {
  * 2. Overflow hash bucket.
  * Under any of above circumstances, caller should handle the situation.
  */
-MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, size_t* bucket_idx,
-                                           size_t* pos_idx, MEMFLAGS flags) {
+MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, uint32_t* marker, MEMFLAGS flags) {
   assert(flags != mtNone, "Should have a real memory type");
   const unsigned int hash = key.calculate_hash();
   const unsigned int index = hash_to_index(hash);
-  *bucket_idx = (size_t)index;
-  *pos_idx = 0;
+  *marker = 0;
 
   // First entry for this hash bucket
   if (_table[index] == NULL) {
@@ -122,41 +120,47 @@ MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, size_t* bucket_idx,
 
     // swap in the head
     if (Atomic::replace_if_null(&_table[index], entry)) {
+      *marker = build_marker(index, 0);
       return entry->data();
     }
 
     delete entry;
   }
 
+  unsigned pos_idx = 0;
   MallocSiteHashtableEntry* head = _table[index];
-  while (head != NULL && (*pos_idx) <= MAX_BUCKET_LENGTH) {
+  while (head != NULL && pos_idx < MAX_BUCKET_LENGTH) {
     if (head->hash() == hash) {
       MallocSite* site = head->data();
       if (site->flag() == flags && site->equals(key)) {
+        *marker = build_marker(index, pos_idx);
         return head->data();
       }
     }
 
-    if (head->next() == NULL && (*pos_idx) < MAX_BUCKET_LENGTH) {
+    if (head->next() == NULL && pos_idx < (MAX_BUCKET_LENGTH - 1)) {
       MallocSiteHashtableEntry* entry = new_entry(key, flags);
       // OOM check
       if (entry == NULL) return NULL;
       if (head->atomic_insert(entry)) {
-        (*pos_idx) ++;
+        pos_idx ++;
+        *marker = build_marker(index, pos_idx);
        return entry->data();
       }
       // contended, other thread won
       delete entry;
     }
     head = (MallocSiteHashtableEntry*)head->next();
-    (*pos_idx) ++;
+    pos_idx ++;
   }
   return NULL;
 }
 
 // Access malloc site
-MallocSite* MallocSiteTable::malloc_site(size_t bucket_idx, size_t pos_idx) {
+MallocSite* MallocSiteTable::malloc_site(uint32_t marker) {
+  uint16_t bucket_idx = bucket_idx_from_marker(marker);
   assert(bucket_idx < table_size, "Invalid bucket index");
+  const uint16_t pos_idx = pos_idx_from_marker(marker);
   MallocSiteHashtableEntry* head = _table[bucket_idx];
   for (size_t index = 0;
        index < pos_idx && head != NULL;
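The insertion logic above is lock-free: Atomic::replace_if_null swaps in a new bucket head, atomic_insert appends to the chain, and a thread that loses the race deletes its speculative entry and keeps walking. A minimal sketch of that pattern with std::atomic — Node and try_append are invented names, not the HotSpot entry types:

#include <atomic>

struct Node {
  int value;
  std::atomic<Node*> next{nullptr};
};

// Mirrors the role of MallocSiteHashtableEntry::atomic_insert: CAS the next
// pointer from null to the new node and report whether this thread won.
bool try_append(Node* tail, Node* n) {
  Node* expected = nullptr;
  return tail->next.compare_exchange_strong(expected, n);
}
// On failure the caller deletes its speculative node and continues down the
// chain, exactly as lookup_or_add does ("contended, other thread won").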
39 changes: 24 additions & 15 deletions src/hotspot/share/services/mallocSiteTable.hpp
@@ -114,21 +114,31 @@ class MallocSiteTable : AllStatic {
     table_size = (table_base_size * NMT_TrackingStackDepth - 1)
   };
 
-  // The table must not be wider than the maximum value the bucket_idx field
-  // in the malloc header can hold.
+  // Table cannot be wider than a 16bit bucket idx can hold
+#define MAX_MALLOCSITE_TABLE_SIZE (USHRT_MAX - 1)
+  // Each bucket chain cannot be longer than what a 16 bit pos idx can hold (hopefully way shorter)
+#define MAX_BUCKET_LENGTH (USHRT_MAX - 1)
 
   STATIC_ASSERT(table_size <= MAX_MALLOCSITE_TABLE_SIZE);
 
+  static uint32_t build_marker(unsigned bucket_idx, unsigned pos_idx) {
+    assert(bucket_idx <= MAX_MALLOCSITE_TABLE_SIZE && pos_idx < MAX_BUCKET_LENGTH, "overflow");
+    return (uint32_t)bucket_idx << 16 | pos_idx;
+  }
+  static uint16_t bucket_idx_from_marker(uint32_t marker) { return marker >> 16; }
+  static uint16_t pos_idx_from_marker(uint32_t marker) { return marker & 0xFFFF; }
+
  public:
 
   static bool initialize();
 
   // Number of hash buckets
   static inline int hash_buckets() { return (int)table_size; }
 
   // Access and copy a call stack from this table. Shared lock should be
   // acquired before access the entry.
-  static inline bool access_stack(NativeCallStack& stack, size_t bucket_idx,
-                                  size_t pos_idx) {
-    MallocSite* site = malloc_site(bucket_idx, pos_idx);
+  static inline bool access_stack(NativeCallStack& stack, uint32_t marker) {
+    MallocSite* site = malloc_site(marker);
     if (site != NULL) {
       stack = *site->call_stack();
       return true;
@@ -137,23 +147,22 @@ class MallocSiteTable : AllStatic {
   }
 
   // Record a new allocation from specified call path.
-  // Return true if the allocation is recorded successfully, bucket_idx
-  // and pos_idx are also updated to indicate the entry where the allocation
-  // information was recorded.
+  // Return true if the allocation is recorded successfully and updates marker
+  // to indicate the entry where the allocation information was recorded.
   // Return false only occurs under rare scenarios:
   // 1. out of memory
   // 2. overflow hash bucket
   static inline bool allocation_at(const NativeCallStack& stack, size_t size,
-                                   size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags) {
-    MallocSite* site = lookup_or_add(stack, bucket_idx, pos_idx, flags);
+                                   uint32_t* marker, MEMFLAGS flags) {
+    MallocSite* site = lookup_or_add(stack, marker, flags);
     if (site != NULL) site->allocate(size);
     return site != NULL;
   }
 
-  // Record memory deallocation. bucket_idx and pos_idx indicate where the allocation
+  // Record memory deallocation. marker indicates where the allocation
   // information was recorded.
-  static inline bool deallocation_at(size_t size, size_t bucket_idx, size_t pos_idx) {
-    MallocSite* site = malloc_site(bucket_idx, pos_idx);
+  static inline bool deallocation_at(size_t size, uint32_t marker) {
+    MallocSite* site = malloc_site(marker);
     if (site != NULL) {
       site->deallocate(size);
       return true;
@@ -173,8 +182,8 @@ class MallocSiteTable : AllStatic {
   // Delete a bucket linked list
   static void delete_linked_list(MallocSiteHashtableEntry* head);
 
-  static MallocSite* lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags);
-  static MallocSite* malloc_site(size_t bucket_idx, size_t pos_idx);
+  static MallocSite* lookup_or_add(const NativeCallStack& key, uint32_t* marker, MEMFLAGS flags);
+  static MallocSite* malloc_site(uint32_t marker);
   static bool walk(MallocSiteWalker* walker);
 
   static inline unsigned int hash_to_index(unsigned int hash) {
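The marker introduced here packs the old (bucket_idx, pos_idx) pair into one 32-bit word: bucket index in the high 16 bits, chain position in the low 16. A self-contained round-trip of that encoding, assuming the same shift-and-mask layout as build_marker above:

#include <cassert>
#include <cstdint>

static uint32_t build_marker(unsigned bucket_idx, unsigned pos_idx) {
  return (uint32_t)bucket_idx << 16 | pos_idx;
}
static uint16_t bucket_idx_from_marker(uint32_t m) { return m >> 16; }
static uint16_t pos_idx_from_marker(uint32_t m)    { return m & 0xFFFF; }

int main() {
  const uint32_t m = build_marker(3, 5);    // bucket 3, chain position 5
  assert(m == 0x00030005);                  // 0x0003 in the high half, 0x0005 in the low
  assert(bucket_idx_from_marker(m) == 3);
  assert(pos_idx_from_marker(m) == 5);
  return 0;
}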
71 changes: 33 additions & 38 deletions src/hotspot/share/services/mallocTracker.cpp
@@ -26,7 +26,6 @@
 #include "runtime/os.hpp"
 #include "services/mallocSiteTable.hpp"
 #include "services/mallocTracker.hpp"
-#include "services/mallocTracker.inline.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/ostream.hpp"
@@ -112,20 +111,6 @@ void MallocHeader::mark_block_as_dead() {
   set_footer(_footer_canary_dead_mark);
 }
 
-void MallocHeader::release() {
-  assert(MemTracker::enabled(), "Sanity");
-
-  check_block_integrity();
-
-  MallocMemorySummary::record_free(size(), flags());
-  MallocMemorySummary::record_free_malloc_header(sizeof(MallocHeader));
-  if (MemTracker::tracking_level() == NMT_detail) {
-    MallocSiteTable::deallocation_at(size(), _bucket_idx, _pos_idx);
-  }
-
-  mark_block_as_dead();
-}
-
 void MallocHeader::print_block_on_error(outputStream* st, address bad_address) const {
   assert(bad_address >= (address)this, "sanity");
 
@@ -219,13 +204,8 @@ void MallocHeader::check_block_integrity() const {
 #undef PREFIX
 }
 
-bool MallocHeader::record_malloc_site(const NativeCallStack& stack, size_t size,
-                                      size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags) const {
-  return MallocSiteTable::allocation_at(stack, size, bucket_idx, pos_idx, flags);
-}
-
 bool MallocHeader::get_stack(NativeCallStack& stack) const {
-  return MallocSiteTable::access_stack(stack, _bucket_idx, _pos_idx);
+  return MallocSiteTable::access_stack(stack, _mst_marker);
 }
 
 bool MallocTracker::initialize(NMT_TrackingLevel level) {
@@ -241,38 +221,53 @@ bool MallocTracker::initialize(NMT_TrackingLevel level) {
 
 // Record a malloc memory allocation
 void* MallocTracker::record_malloc(void* malloc_base, size_t size, MEMFLAGS flags,
-  const NativeCallStack& stack, NMT_TrackingLevel level) {
-  assert(level != NMT_off, "precondition");
-  void* memblock;      // the address for user data
-  MallocHeader* header = NULL;
-
-  if (malloc_base == NULL) {
-    return NULL;
+  const NativeCallStack& stack)
+{
+  assert(MemTracker::enabled(), "precondition");
+  assert(malloc_base != NULL, "precondition");
+
+  MallocMemorySummary::record_malloc(size, flags);
+  MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
+  uint32_t mst_marker = 0;
+  if (MemTracker::tracking_level() == NMT_detail) {
+    MallocSiteTable::allocation_at(stack, size, &mst_marker, flags);
   }
 
   // Uses placement global new operator to initialize malloc header
-
-  header = ::new (malloc_base)MallocHeader(size, flags, stack, level);
-  memblock = (void*)((char*)malloc_base + sizeof(MallocHeader));
+  MallocHeader* const header = ::new (malloc_base)MallocHeader(size, flags, stack, mst_marker);
+  void* const memblock = (void*)((char*)malloc_base + sizeof(MallocHeader));
 
   // The alignment check: 8 bytes alignment for 32 bit systems.
   //                      16 bytes alignment for 64-bit systems.
   assert(((size_t)memblock & (sizeof(size_t) * 2 - 1)) == 0, "Alignment check");
 
 #ifdef ASSERT
-  if (level > NMT_off) {
-    // Read back
-    assert(get_size(memblock) == size,   "Wrong size");
-    assert(get_flags(memblock) == flags, "Wrong flags");
+  // Read back
+  {
+    MallocHeader* const header2 = malloc_header(memblock);
+    assert(header2->size() == size, "Wrong size");
+    assert(header2->flags() == flags, "Wrong flags");
+    header2->check_block_integrity();
   }
 #endif
 
   return memblock;
 }
 
 void* MallocTracker::record_free(void* memblock) {
-  assert(MemTracker::tracking_level() != NMT_off && memblock != NULL, "precondition");
-  MallocHeader* header = malloc_header(memblock);
-  header->release();
+  assert(MemTracker::enabled(), "Sanity");
+  assert(memblock != NULL, "precondition");
+
+  MallocHeader* const header = malloc_header(memblock);
+  header->check_block_integrity();
+
+  MallocMemorySummary::record_free(header->size(), header->flags());
+  MallocMemorySummary::record_free_malloc_header(sizeof(MallocHeader));
+  if (MemTracker::tracking_level() == NMT_detail) {
+    MallocSiteTable::deallocation_at(header->size(), header->mst_marker());
+  }
+
+  header->mark_block_as_dead();
+
  return (void*)header;
 }
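record_malloc constructs the MallocHeader with placement new in the first bytes of the raw block and returns the address just past it; record_free recovers the header with the inverse arithmetic before de-accounting. A standalone sketch of that placement-new trick — ToySiteHeader, toy_record_malloc and toy_record_free are invented stand-ins:

#include <cstdint>
#include <new>  // placement new

// Invented stand-in for MallocHeader.
struct ToySiteHeader {
  uint64_t size;
  uint32_t mst_marker;  // which site-table slot this allocation was recorded in
  uint32_t canary;
};

void* toy_record_malloc(void* malloc_base, uint64_t size, uint32_t mst_marker) {
  // Construct the header in place at the start of the raw block...
  ToySiteHeader* const header = ::new (malloc_base) ToySiteHeader{size, mst_marker, 0xE1A0};
  // ...and hand the caller the memory just behind it.
  return reinterpret_cast<char*>(header) + sizeof(ToySiteHeader);
}

void* toy_record_free(void* memblock) {
  // Step back over the header to recover the outer pointer for ::free().
  return static_cast<char*>(memblock) - sizeof(ToySiteHeader);
}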