Skip to content
Permalink
Browse files
8219586: CodeHeap State Analytics processes dead nmethods
Reviewed-by: thartmann, eosterlund
  • Loading branch information
RealLucy committed Sep 24, 2020
1 parent 154b8cf commit 4440bda3253c5b552f2c45383d13489dba14b637
@@ -99,6 +99,7 @@ CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& la
// probably wrong for tiered
assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
S390_ONLY(_ctable_offset = 0;) // avoid uninitialized fields
}

CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) :
@@ -128,6 +129,7 @@ CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& la
// probably wrong for tiered
assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
S390_ONLY(_ctable_offset = 0;) // avoid uninitialized fields
}


Large diffs are not rendered by default.

@@ -47,6 +47,8 @@ class CodeHeapState : public CHeapObj<mtCode> {
// The nMethod_* values correspond to the CompiledMethod enum values.
// We can't use the CompiledMethod values 1:1 because we depend on noType == 0.
nMethod_inconstruction, // under construction. Very soon, the type will transition to "in_use".
// can't be observed while holding Compile_lock and CodeCache_lock simultaneously.
// left in here for completeness (and to document we spent a thought).
nMethod_inuse, // executable. This is the "normal" state for a nmethod.
nMethod_notused, // assumed inactive, marked not entrant. Could be revived if necessary.
nMethod_notentrant, // no new activations allowed, marked for deoptimization. Old activations may still exist.
@@ -95,7 +97,9 @@ class CodeHeapState : public CHeapObj<mtCode> {
static void print_line_delim(outputStream* out, bufferedStream *sst, char* low_bound, unsigned int ix, unsigned int gpl);
static void print_line_delim(outputStream* out, outputStream *sst, char* low_bound, unsigned int ix, unsigned int gpl);
static blobType get_cbType(CodeBlob* cb);
static bool blob_access_is_safe(CodeBlob* this_blob, CodeBlob* prev_blob);
static bool blob_access_is_safe(CodeBlob* this_blob);
static bool nmethod_access_is_safe(nmethod* nm);
static bool holding_required_locks();

public:
static void discard(outputStream* out, CodeHeap* heap);
@@ -164,12 +168,16 @@ struct FreeBlk : public CHeapObj<mtCode> {
// know about those largest blocks.
// All TopSizeBlks of a heap segment are stored in the related TopSizeArray.
struct TopSizeBlk : public CHeapObj<mtCode> {
HeapBlock* start; // address of block
HeapBlock* start; // address of block
const char* blob_name; // name of blob (mostly: name_and_sig of nmethod)
unsigned int len; // length of block, in _segment_size units. Will never overflow int.

unsigned int index; // ordering index, 0 is largest block
// contains array index of next smaller block
// -1 indicates end of list

unsigned int nm_size; // nmethod total size (if nmethod, 0 otherwise)
int temperature; // nmethod temperature (if nmethod, 0 otherwise)
CompLevel level; // optimization level (see globalDefinitions.hpp)
u2 compiler; // compiler which generated this blob
u2 type; // blob type
@@ -216,7 +224,6 @@ struct CodeHeapStat {
unsigned int nBlocks_t2;
unsigned int nBlocks_alive;
unsigned int nBlocks_dead;
unsigned int nBlocks_inconstr;
unsigned int nBlocks_unloaded;
unsigned int nBlocks_stub;
// FreeBlk data
@@ -69,6 +69,13 @@ CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType ty
}

void CompiledMethod::init_defaults() {
{ // avoid uninitialized fields, even for short time periods
_is_far_code = false;
_scopes_data_begin = NULL;
_deopt_handler_begin = NULL;
_deopt_mh_handler_begin = NULL;
_exception_cache = NULL;
}
_has_unsafe_access = 0;
_has_method_handle_invokes = 0;
_lazy_critical_native = 0;
@@ -692,21 +699,6 @@ bool CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool cl
return true;
}

// Iterating over all nmethods, e.g. with the help of CodeCache::nmethods_do(fun) was found
// to not be inherently safe. There is a chance that fields are seen which are not properly
// initialized. This happens despite the fact that nmethods_do() asserts the CodeCache_lock
// to be held.
// To bundle knowledge about necessary checks in one place, this function was introduced.
// It is not claimed that these checks are sufficient, but they were found to be necessary.
//
// Returns true only if every probe below passes; callers treat a false result as
// "do not touch this nmethod". Note the checks are best-effort heuristics, not a
// guarantee of consistency (see header comment above).
bool CompiledMethod::nmethod_access_is_safe(nmethod* nm) {
// The ternary guards the nm->method() dereference; the pointer it yields may still
// point at uninitialized/invalid memory (i.e. be non-NULL but unusable).
Method* method = (nm == NULL) ? NULL : nm->method(); // nm->method() may be uninitialized, i.e. != NULL, but invalid
// Order matters: short-circuit evaluation ensures the NULL checks run before the
// state queries (is_zombie/is_not_installed), and the is_readable_pointer() probes
// run last — they presumably guard against reading unmapped/freed memory when
// method, its constant pool, or its signature Symbol are stale. TODO(review):
// confirm os::is_readable_pointer semantics cover the torn-initialization window.
return (nm != NULL) && (method != NULL) && (method->signature() != NULL) &&
!nm->is_zombie() && !nm->is_not_installed() &&
os::is_readable_pointer(method) &&
os::is_readable_pointer(method->constants()) &&
os::is_readable_pointer(method->signature());
}

address CompiledMethod::continuation_for_implicit_exception(address pc, bool for_div0_check) {
// Exception happened outside inline-cache check code => we are inside
// an active nmethod => use cpc to determine a return address
@@ -254,8 +254,6 @@ class CompiledMethod : public CodeBlob {
return _mark_for_deoptimization_status != deoptimize_noupdate;
}

static bool nmethod_access_is_safe(nmethod* nm);

// tells whether frames described by this nmethod can be deoptimized
// note: native wrappers cannot be deoptimized.
bool can_be_deoptimized() const { return is_java_method(); }
@@ -2764,42 +2764,59 @@ void CompileBroker::print_heapinfo(outputStream* out, const char* function, size
}

// We hold the CodeHeapStateAnalytics_lock all the time, from here until we leave this function.
// That prevents another thread from destroying our view on the CodeHeap.
// That prevents other threads from destroying (making inconsistent) our view on the CodeHeap.
// When we request individual parts of the analysis via the jcmd interface, it is possible
// that in between another thread (another jcmd user or the vm running into CodeCache OOM)
// updated the aggregated data. That's a tolerable tradeoff because we can't hold a lock
// across user interaction.
// Acquire this lock before acquiring the CodeCache_lock.
// CodeHeapStateAnalytics_lock could be held by a concurrent thread for a long time,
// leading to an unnecessarily long hold time of the CodeCache_lock.
// updated the aggregated data. We will then see a modified, but again consistent, view
// on the CodeHeap. That's a tolerable tradeoff we have to accept because we can't hold
// a lock across user interaction.

// We should definitely acquire this lock before acquiring Compile_lock and CodeCache_lock.
// CodeHeapStateAnalytics_lock may be held by a concurrent thread for a long time,
// leading to an unnecessarily long hold time of the other locks we acquired before.
ts.update(); // record starting point
MutexLocker mu1(CodeHeapStateAnalytics_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu0(CodeHeapStateAnalytics_lock, Mutex::_safepoint_check_flag);
out->print_cr("\n__ CodeHeapStateAnalytics lock wait took %10.3f seconds _________\n", ts.seconds());

// If we serve an "allFun" call, it is beneficial to hold the CodeCache_lock
// for the entire duration of aggregation and printing. That makes sure
// we see a consistent picture and do not run into issues caused by
// the CodeHeap being altered concurrently.
Mutex* global_lock = allFun ? CodeCache_lock : NULL;
Mutex* function_lock = allFun ? NULL : CodeCache_lock;
// Holding the CodeCache_lock protects from concurrent alterations of the CodeCache.
// Unfortunately, such protection is not sufficient:
// When a new nmethod is created via ciEnv::register_method(), the
// Compile_lock is taken first. After some initializations,
// nmethod::new_nmethod() takes over, grabbing the CodeCache_lock
// immediately (after finalizing the oop references). To lock out concurrent
// modifiers, we have to grab both locks as well in the described sequence.
//
// If we serve an "allFun" call, it is beneficial to hold CodeCache_lock and Compile_lock
// for the entire duration of aggregation and printing. That makes sure we see
// a consistent picture and do not run into issues caused by concurrent alterations.
bool should_take_Compile_lock = !SafepointSynchronize::is_at_safepoint() &&
!Compile_lock->owned_by_self();
bool should_take_CodeCache_lock = !SafepointSynchronize::is_at_safepoint() &&
!CodeCache_lock->owned_by_self();
Mutex* global_lock_1 = allFun ? (should_take_Compile_lock ? Compile_lock : NULL) : NULL;
Monitor* global_lock_2 = allFun ? (should_take_CodeCache_lock ? CodeCache_lock : NULL) : NULL;
Mutex* function_lock_1 = allFun ? NULL : (should_take_Compile_lock ? Compile_lock : NULL);
Monitor* function_lock_2 = allFun ? NULL : (should_take_CodeCache_lock ? CodeCache_lock : NULL);
ts_global.update(); // record starting point
MutexLocker mu2(global_lock, Mutex::_no_safepoint_check_flag);
if (global_lock != NULL) {
out->print_cr("\n__ CodeCache (global) lock wait took %10.3f seconds _________\n", ts_global.seconds());
MutexLocker mu1(global_lock_1, Mutex::_safepoint_check_flag);
MutexLocker mu2(global_lock_2, Mutex::_no_safepoint_check_flag);
if ((global_lock_1 != NULL) || (global_lock_2 != NULL)) {
out->print_cr("\n__ Compile & CodeCache (global) lock wait took %10.3f seconds _________\n", ts_global.seconds());
ts_global.update(); // record starting point
}

if (aggregate) {
ts.update(); // record starting point
MutexLocker mu3(function_lock, Mutex::_no_safepoint_check_flag);
if (function_lock != NULL) {
out->print_cr("\n__ CodeCache (function) lock wait took %10.3f seconds _________\n", ts.seconds());
MutexLocker mu11(function_lock_1, Mutex::_safepoint_check_flag);
MutexLocker mu22(function_lock_2, Mutex::_no_safepoint_check_flag);
if ((function_lock_1 != NULL) || (function_lock_2 != NULL)) {
out->print_cr("\n__ Compile & CodeCache (function) lock wait took %10.3f seconds _________\n", ts.seconds());
}

ts.update(); // record starting point
CodeCache::aggregate(out, granularity);
if (function_lock != NULL) {
out->print_cr("\n__ CodeCache (function) lock hold took %10.3f seconds _________\n", ts.seconds());
if ((function_lock_1 != NULL) || (function_lock_2 != NULL)) {
out->print_cr("\n__ Compile & CodeCache (function) lock hold took %10.3f seconds _________\n", ts.seconds());
}
}

@@ -2809,15 +2826,18 @@ void CompileBroker::print_heapinfo(outputStream* out, const char* function, size
if (methodSpace) CodeCache::print_space(out);
if (methodAge) CodeCache::print_age(out);
if (methodNames) {
// print_names() has shown to be sensitive to concurrent CodeHeap modifications.
// Therefore, request the CodeCache_lock before calling...
MutexLocker mu3(function_lock, Mutex::_no_safepoint_check_flag);
CodeCache::print_names(out);
if (allFun) {
// print_names() can only be used safely if the locks have been continuously held
// since aggregation begin. That is true only for function "all".
CodeCache::print_names(out);
} else {
out->print_cr("\nCodeHeapStateAnalytics: Function 'MethodNames' is only available as part of function 'all'");
}
}
if (discard) CodeCache::discard(out);

if (global_lock != NULL) {
out->print_cr("\n__ CodeCache (global) lock hold took %10.3f seconds _________\n", ts_global.seconds());
if ((global_lock_1 != NULL) || (global_lock_2 != NULL)) {
out->print_cr("\n__ Compile & CodeCache (global) lock hold took %10.3f seconds _________\n", ts_global.seconds());
}
out->print_cr("\n__ CodeHeapStateAnalytics total duration %10.3f seconds _________\n", ts_total.seconds());
}
@@ -205,6 +205,7 @@ bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_s
assert(rs.size() >= committed_size, "reserved < committed");
assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
assert(is_power_of_2(segment_size), "segment_size must be a power of 2");
assert_locked_or_safepoint(CodeCache_lock);

_segment_size = segment_size;
_log2_segment_size = exact_log2(segment_size);
@@ -253,6 +254,8 @@ bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_s


bool CodeHeap::expand_by(size_t size) {
assert_locked_or_safepoint(CodeCache_lock);

// expand _memory space
size_t dm = align_to_page_size(_memory.committed_size() + size) - _memory.committed_size();
if (dm > 0) {
@@ -283,6 +286,7 @@ bool CodeHeap::expand_by(size_t size) {
void* CodeHeap::allocate(size_t instance_size) {
size_t number_of_segments = size_to_segments(instance_size + header_size());
assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");
assert_locked_or_safepoint(CodeCache_lock);

// First check if we can satisfy request from freelist
NOT_PRODUCT(verify());
@@ -347,6 +351,8 @@ HeapBlock* CodeHeap::split_block(HeapBlock *b, size_t split_at) {

void CodeHeap::deallocate_tail(void* p, size_t used_size) {
assert(p == find_start(p), "illegal deallocation");
assert_locked_or_safepoint(CodeCache_lock);

// Find start of HeapBlock
HeapBlock* b = (((HeapBlock *)p) - 1);
assert(b->allocated_space() == p, "sanity check");
@@ -363,6 +369,8 @@ void CodeHeap::deallocate_tail(void* p, size_t used_size) {

void CodeHeap::deallocate(void* p) {
assert(p == find_start(p), "illegal deallocation");
assert_locked_or_safepoint(CodeCache_lock);

// Find start of HeapBlock
HeapBlock* b = (((HeapBlock *)p) - 1);
assert(b->allocated_space() == p, "sanity check");
@@ -790,6 +798,7 @@ void CodeHeap::print() {

void CodeHeap::verify() {
if (VerifyCodeCache) {
assert_locked_or_safepoint(CodeCache_lock);
size_t len = 0;
int count = 0;
for(FreeBlock* b = _freelist; b != NULL; b = b->link()) {
@@ -332,7 +332,7 @@ void mutex_init() {
def(UnsafeJlong_lock , PaddedMutex , special, false, _safepoint_check_never);
#endif

def(CodeHeapStateAnalytics_lock , PaddedMutex , leaf, true, _safepoint_check_never);
def(CodeHeapStateAnalytics_lock , PaddedMutex , nonleaf+6, false, _safepoint_check_always);
def(NMethodSweeperStats_lock , PaddedMutex , special, true, _safepoint_check_never);
def(ThreadsSMRDelete_lock , PaddedMonitor, special, true, _safepoint_check_never);
def(ThreadIdTableCreate_lock , PaddedMutex , leaf, false, _safepoint_check_always);

1 comment on commit 4440bda

@bridgekeeper

This comment has been minimized.

Copy link

@bridgekeeper bridgekeeper bot commented on 4440bda Sep 24, 2020

Please sign in to comment.