Skip to content
Permalink
Browse files
8277990: NMT: Remove NMT shutdown capability
Reviewed-by: stuefe, shade
  • Loading branch information
zhengyu123 committed Dec 7, 2021
1 parent 7217cb7 commit 5a036ace01ebedd88fb3d9100e3a0c19871677ac
Showing 20 changed files with 118 additions and 433 deletions.
@@ -1028,7 +1028,7 @@ static char* mmap_create_shared(size_t size) {
//
static void unmap_shared(char* addr, size_t bytes) {
int res;
if (MemTracker::tracking_level() > NMT_minimal) {
if (MemTracker::enabled()) {
// Note: Tracker contains a ThreadCritical.
Tracker tkr(Tracker::release);
res = ::munmap(addr, bytes);
@@ -1834,7 +1834,7 @@ void PerfMemory::detach(char* addr, size_t bytes) {
return;
}

if (MemTracker::tracking_level() > NMT_minimal) {
if (MemTracker::enabled()) {
// it does not go through os api, the operation has to record from here
Tracker tkr(Tracker::release);
remove_file_mapping(addr);
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -282,7 +282,7 @@ void ZPhysicalMemoryManager::nmt_commit(uintptr_t offset, size_t size) const {
}

void ZPhysicalMemoryManager::nmt_uncommit(uintptr_t offset, size_t size) const {
if (MemTracker::tracking_level() > NMT_minimal) {
if (MemTracker::enabled()) {
const uintptr_t addr = ZAddress::marked0(offset);
Tracker tracker(Tracker::uncommit);
tracker.record((address)addr, size);
@@ -703,37 +703,6 @@ WB_ENTRY(void, WB_NMTReleaseMemory(JNIEnv* env, jobject o, jlong addr, jlong siz
os::release_memory((char *)(uintptr_t)addr, size);
WB_END

WB_ENTRY(jboolean, WB_NMTChangeTrackingLevel(JNIEnv* env))
// Test that we can downgrade NMT levels but not upgrade them.
if (MemTracker::tracking_level() == NMT_off) {
MemTracker::transition_to(NMT_off);
return MemTracker::tracking_level() == NMT_off;
} else {
assert(MemTracker::tracking_level() == NMT_detail, "Should start out as detail tracking");
MemTracker::transition_to(NMT_summary);
assert(MemTracker::tracking_level() == NMT_summary, "Should be summary now");

// Can't go to detail once NMT is set to summary.
MemTracker::transition_to(NMT_detail);
assert(MemTracker::tracking_level() == NMT_summary, "Should still be summary now");

// Shutdown sets tracking level to minimal.
MemTracker::shutdown();
assert(MemTracker::tracking_level() == NMT_minimal, "Should be minimal now");

// Once the tracking level is minimal, we cannot increase to summary.
// The code ignores this request instead of asserting because if the malloc site
// table overflows in another thread, it tries to change the code to summary.
MemTracker::transition_to(NMT_summary);
assert(MemTracker::tracking_level() == NMT_minimal, "Should still be minimal now");

// Really can never go up to detail, verify that the code would never do this.
MemTracker::transition_to(NMT_detail);
assert(MemTracker::tracking_level() == NMT_minimal, "Should still be minimal now");
return MemTracker::tracking_level() == NMT_minimal;
}
WB_END

WB_ENTRY(jint, WB_NMTGetHashSize(JNIEnv* env, jobject o))
int hash_size = MallocSiteTable::hash_buckets();
assert(hash_size > 0, "NMT hash_size should be > 0");
@@ -2567,7 +2536,6 @@ static JNINativeMethod methods[] = {
{CC"NMTCommitMemory", CC"(JJ)V", (void*)&WB_NMTCommitMemory },
{CC"NMTUncommitMemory", CC"(JJ)V", (void*)&WB_NMTUncommitMemory },
{CC"NMTReleaseMemory", CC"(JJ)V", (void*)&WB_NMTReleaseMemory },
{CC"NMTChangeTrackingLevel", CC"()Z", (void*)&WB_NMTChangeTrackingLevel},
{CC"NMTGetHashSize", CC"()I", (void*)&WB_NMTGetHashSize },
{CC"NMTNewArena", CC"(J)J", (void*)&WB_NMTNewArena },
{CC"NMTFreeArena", CC"(J)V", (void*)&WB_NMTFreeArena },
@@ -1786,7 +1786,7 @@ void os::commit_memory_or_exit(char* addr, size_t size, size_t alignment_hint,

bool os::uncommit_memory(char* addr, size_t bytes, bool executable) {
bool res;
if (MemTracker::tracking_level() > NMT_minimal) {
if (MemTracker::enabled()) {
Tracker tkr(Tracker::uncommit);
res = pd_uncommit_memory(addr, bytes, executable);
if (res) {
@@ -1800,7 +1800,7 @@ bool os::uncommit_memory(char* addr, size_t bytes, bool executable) {

bool os::release_memory(char* addr, size_t bytes) {
bool res;
if (MemTracker::tracking_level() > NMT_minimal) {
if (MemTracker::enabled()) {
// Note: Tracker contains a ThreadCritical.
Tracker tkr(Tracker::release);
res = pd_release_memory(addr, bytes);
@@ -1869,7 +1869,7 @@ char* os::remap_memory(int fd, const char* file_name, size_t file_offset,

bool os::unmap_memory(char *addr, size_t bytes) {
bool result;
if (MemTracker::tracking_level() > NMT_minimal) {
if (MemTracker::enabled()) {
Tracker tkr(Tracker::release);
result = pd_unmap_memory(addr, bytes);
if (result) {
@@ -1905,7 +1905,7 @@ char* os::reserve_memory_special(size_t size, size_t alignment, size_t page_size

bool os::release_memory_special(char* addr, size_t bytes) {
bool res;
if (MemTracker::tracking_level() > NMT_minimal) {
if (MemTracker::enabled()) {
// Note: Tracker contains a ThreadCritical.
Tracker tkr(Tracker::release);
res = pd_release_memory_special(addr, bytes);
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,12 +33,6 @@ MallocSiteHashtableEntry* MallocSiteTable::_table[MallocSiteTable::table_size];
const NativeCallStack* MallocSiteTable::_hash_entry_allocation_stack = NULL;
const MallocSiteHashtableEntry* MallocSiteTable::_hash_entry_allocation_site = NULL;

// concurrent access counter
volatile int MallocSiteTable::_access_count = 0;

// Tracking hashtable contention
NOT_PRODUCT(int MallocSiteTable::_peak_count = 0;)

/*
* Initialize malloc site table.
* Hashtable entry is malloc'd, so it can cause infinite recursion.
@@ -202,122 +196,81 @@ void MallocSiteTable::delete_linked_list(MallocSiteHashtableEntry* head) {
}
}

void MallocSiteTable::shutdown() {
AccessLock locker(&_access_count);
locker.exclusiveLock();
reset();
}

// Walk all malloc sites in the table under a shared (reader) lock.
// Returns false if the shared lock cannot be acquired (an exclusive locker
// is active), in which case the walker is not invoked at all.
bool MallocSiteTable::walk_malloc_site(MallocSiteWalker* walker) {
  // Fixed typo in the assert message ("NuLL" -> "NULL").
  assert(walker != NULL, "NULL walker");
  AccessLock locker(&_access_count);
  if (locker.sharedLock()) {
    // Debug builds track the highest observed reader contention.
    NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
    return walk(walker);
  }
  return false;
}


// Acquire the access lock exclusively. The shared counter is biased by
// _MAGIC_ to block out new shared lockers, then we spin until all existing
// readers have exited. May only be called once per AccessLock instance.
void MallocSiteTable::AccessLock::exclusiveLock() {
  int target;
  int val;

  assert(_lock_state != ExclusiveLock, "Can only call once");
  assert(*_lock >= 0, "Can not content exclusive lock");

  // make counter negative to block out shared locks
  do {
    val = *_lock;
    target = _MAGIC_ + *_lock;
  } while (Atomic::cmpxchg(_lock, val, target) != val);

  // wait for all readers to exit
  while (*_lock != _MAGIC_) {
#ifdef _WINDOWS
    os::naked_short_sleep(1);
#else
    os::naked_yield();
#endif
  }
  _lock_state = ExclusiveLock;
  // NOTE(review): the captured source carried a stray "return walk(walker);"
  // here — ill-formed in a void function and clearly a diff-render artifact
  // (it belongs to walk_malloc_site); it has been removed.
}

// Print tuning statistics for the malloc site hashtable: total/empty entry
// counts, the hash (chain length) distribution, and the captured call stack
// depth distribution.
// NOTE(review): the captured source interleaved the pre- and post-change
// versions of this function (duplicated declarations, stray statements);
// this is the reconstructed coherent lock-free version built from the
// added lines of the diff.
void MallocSiteTable::print_tuning_statistics(outputStream* st) {
  // Total number of allocation sites, include empty sites
  int total_entries = 0;
  // Number of allocation sites that have all memory freed
  int empty_entries = 0;
  // Number of captured call stack distribution
  int stack_depth_distribution[NMT_TrackingStackDepth + 1] = { 0 };
  // Chain lengths
  int lengths[table_size] = { 0 };

  // Single pass over all buckets: count entries, empty entries, per-bucket
  // chain length, and the call stack depth histogram.
  for (int i = 0; i < table_size; i ++) {
    int this_chain_length = 0;
    const MallocSiteHashtableEntry* head = _table[i];
    while (head != NULL) {
      total_entries ++;
      this_chain_length ++;
      if (head->size() == 0) {
        empty_entries ++;
      }
      const int callstack_depth = head->peek()->call_stack()->frames();
      assert(callstack_depth >= 0 && callstack_depth <= NMT_TrackingStackDepth,
             "Sanity (%d)", callstack_depth);
      stack_depth_distribution[callstack_depth] ++;
      head = head->next();
    }
    lengths[i] = this_chain_length;
  }

  st->print_cr("Malloc allocation site table:");
  st->print_cr("\tTotal entries: %d", total_entries);
  // Guard against division by zero when the table is empty.
  st->print_cr("\tEmpty entries: %d (%2.2f%%)", empty_entries,
               total_entries == 0 ? 0.0f : ((float)empty_entries * 100) / total_entries);
  st->cr();

  // We report the hash distribution (chain length distribution) of the n shortest chains
  // - under the assumption that this usually contains all lengths. Reporting threshold
  // is 20, and the expected avg chain length is 5..6 (see table size).
  static const int chain_length_threshold = 20;
  int chain_length_distribution[chain_length_threshold] = { 0 };
  int over_threshold = 0;
  int longest_chain_length = 0;
  for (int i = 0; i < table_size; i ++) {
    if (lengths[i] >= chain_length_threshold) {
      over_threshold ++;
    } else {
      chain_length_distribution[lengths[i]] ++;
    }
    longest_chain_length = MAX2(longest_chain_length, lengths[i]);
  }

  st->print_cr("Hash distribution:");
  if (chain_length_distribution[0] == 0) {
    st->print_cr("no empty buckets.");
  } else {
    st->print_cr("%d buckets are empty.", chain_length_distribution[0]);
  }
  for (int len = 1; len < MIN2(longest_chain_length + 1, chain_length_threshold); len ++) {
    st->print_cr("%2d %s: %d.", len, (len == 1 ? " entry" : "entries"), chain_length_distribution[len]);
  }
  if (longest_chain_length >= chain_length_threshold) {
    st->print_cr(">=%2d entries: %d.", chain_length_threshold, over_threshold);
  }
  st->print_cr("most entries: %d.", longest_chain_length);
  st->cr();

  st->print_cr("Call stack depth distribution:");
  for (int i = 0; i <= NMT_TrackingStackDepth; i ++) {
    st->print_cr("\t%d: %d", i, stack_depth_distribution[i]);
  }
  st->cr();
}

bool MallocSiteHashtableEntry::atomic_insert(MallocSiteHashtableEntry* entry) {
return Atomic::replace_if_null(&_next, entry);

1 comment on commit 5a036ac

@openjdk-notifier
Copy link

@openjdk-notifier openjdk-notifier bot commented on 5a036ac Dec 7, 2021

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please sign in to comment.