8244660: Code cache sweeper heuristics is broken
Reviewed-by: thartmann, rehn
Nils Eliasson committed Jun 3, 2020
1 parent: 06b49fa · commit: 99d6bea20d27fed0f29dfc7ac6e76376ccfbc311
@@ -484,7 +484,7 @@ CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
*/
CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_type) {
// Possibly wakes up the sweeper thread.
NMethodSweeper::notify(code_blob_type);
NMethodSweeper::report_allocation(code_blob_type);
assert_locked_or_safepoint(CodeCache_lock);
assert(size > 0, "Code cache allocation request must be > 0 but is %d", size);
if (size <= 0) {
@@ -885,6 +885,9 @@ JavaThread* CompileBroker::make_thread(jobject thread_handle, CompileQueue* queu


void CompileBroker::init_compiler_sweeper_threads() {
NMethodSweeper::set_sweep_threshold_bytes(static_cast<size_t>(SweeperThreshold * ReservedCodeCacheSize / 100.0));
log_info(codecache, sweep)("Sweeper threshold: " SIZE_FORMAT " bytes", NMethodSweeper::sweep_threshold_bytes());

// Ensure any exceptions lead to vm_exit_during_initialization.
EXCEPTION_MARK;
#if !defined(ZERO)
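
The threshold is computed once, when the sweeper thread is initialized. A quick worked example under the common defaults (assumed here: ReservedCodeCacheSize = 240 MB, i.e. 251658240 bytes, with SweeperThreshold at its default of 0.5):

    SweeperThreshold * ReservedCodeCacheSize / 100.0
      = 0.5 * 251658240 / 100.0
      = 1258291.2  ->  1258291 bytes (~1.2 MB) after the size_t cast

This is the same ~1.2 MB figure the ergonomic cap in the next hunk is calibrated against.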
@@ -484,6 +484,13 @@ void CompilerConfig::ergo_initialize() {
}
}

if (FLAG_IS_DEFAULT(SweeperThreshold)) {
if ((SweeperThreshold * ReservedCodeCacheSize / 100) > (1.2 * M)) {
// Cap default SweeperThreshold value to an equivalent of 1.2 Mb
FLAG_SET_ERGO(SweeperThreshold, (1.2 * M * 100) / ReservedCodeCacheSize);
}
}

if (UseOnStackReplacement && !UseLoopCounter) {
warning("On-stack-replacement requires loop counters; enabling loop counters");
FLAG_SET_DEFAULT(UseLoopCounter, true);
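
For reserved cache sizes above the 240 MB default, an uncapped 0.5% would let the byte threshold grow with the cache, so the ergonomics above scale the percentage down to hold it at roughly 1.2 MB. A worked example (assuming ReservedCodeCacheSize = 1 GB and SweeperThreshold left at its default):

    0.5 * 1073741824 / 100 = 5368709.12 bytes (~5.1 MB) > 1.2 * M
    => FLAG_SET_ERGO(SweeperThreshold, (1.2 * M * 100) / 1073741824)
       i.e. 125829120 / 1073741824 ≈ 0.117 percent

Because the cap only applies while the flag is at its default, an explicit setting such as -XX:SweeperThreshold=2.0 (a hypothetical tuning, shown only to illustrate the units) is left untouched.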
@@ -890,6 +890,7 @@
<Event name="CodeSweeperConfiguration" category="Java Virtual Machine, Code Sweeper" label="Code Sweeper Configuration" thread="false" period="endChunk" startTime="false">
<Field type="boolean" name="sweeperEnabled" label="Code Sweeper Enabled" />
<Field type="boolean" name="flushingEnabled" label="Code Cache Flushing Enabled" />
<Field type="ulong" contentType="bytes" name="sweepThreshold" label="Sweep Threshold" />
</Event>

<Event name="IntFlag" category="Java Virtual Machine, Flag" period="endChunk" label="Int Flag">
@@ -617,6 +617,7 @@ TRACE_REQUEST_FUNC(CodeSweeperConfiguration) {
EventCodeSweeperConfiguration event;
event.set_sweeperEnabled(MethodFlushing);
event.set_flushingEnabled(UseCodeCacheFlushing);
event.set_sweepThreshold(NMethodSweeper::sweep_threshold_bytes());
event.commit();
}
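
One practical consequence: the configured threshold now shows up in flight recordings via the event above. With the stock jfr tool, an invocation along the lines of "jfr print --events CodeSweeperConfiguration recording.jfr" should print it alongside the existing sweeperEnabled/flushingEnabled fields (exact filter syntax may vary by JDK release; treat this invocation as a sketch).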

@@ -1692,6 +1692,11 @@ const size_t minimumSymbolTableSize = 1024;
product(bool, UseCodeCacheFlushing, true, \
"Remove cold/old nmethods from the code cache") \
\
product(double, SweeperThreshold, 0.5, \
"Threshold controlling when code cache sweeper is invoked." \
"Value is percentage of ReservedCodeCacheSize.") \
range(0.0, 100.0) \
\
product(uintx, StartAggressiveSweepingAt, 10, \
"Start aggressive sweeping if X[%] of the code cache is free." \
"Segmented code cache: X[%] of the non-profiled heap." \
@@ -64,7 +64,7 @@ class Mutex : public CHeapObj<mtSynchronizer> {
event,
access = event + 1,
tty = access + 2,
special = tty + 2,
special = tty + 3,
suspend_resume = special + 1,
oopstorage = suspend_resume + 2,
leaf = oopstorage + 2,
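
As I read the rank arithmetic, the bump from tty + 2 to tty + 3 exists to make room for the new CodeSweeper_lock, which is defined at special - 2 further down; HotSpot's deadlock-avoidance rule only permits acquiring a mutex of strictly lower rank than any lock already held, so a rank below special also allows taking the sweeper lock while CodeCache_lock (rank special) is held:

    before: special = tty + 2  =>  special - 2 = tty      (would alias the tty rank)
    after:  special = tty + 3  =>  special - 2 = tty + 1  (its own slot, still < special)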
@@ -61,6 +61,7 @@ Mutex* SymbolArena_lock = NULL;
Monitor* StringDedupQueue_lock = NULL;
Mutex* StringDedupTable_lock = NULL;
Monitor* CodeCache_lock = NULL;
Monitor* CodeSweeper_lock = NULL;
Mutex* MethodData_lock = NULL;
Mutex* TouchedMethodLog_lock = NULL;
Mutex* RetData_lock = NULL;
@@ -232,8 +233,9 @@ void mutex_init() {
def(StringDedupQueue_lock , PaddedMonitor, leaf, true, _safepoint_check_never);
def(StringDedupTable_lock , PaddedMutex , leaf + 1, true, _safepoint_check_never);
}
def(ParGCRareEvent_lock , PaddedMutex , leaf , true, _safepoint_check_always);
def(ParGCRareEvent_lock , PaddedMutex , leaf, true, _safepoint_check_always);
def(CodeCache_lock , PaddedMonitor, special, true, _safepoint_check_never);
def(CodeSweeper_lock , PaddedMonitor, special-2, true, _safepoint_check_never);
def(RawMonitor_lock , PaddedMutex , special, true, _safepoint_check_never);
def(OopMapCacheAlloc_lock , PaddedMutex , leaf, true, _safepoint_check_always); // used for oop_map_cache allocation.

@@ -54,6 +54,7 @@ extern Mutex* SymbolArena_lock; // a lock on the symbol table a
extern Monitor* StringDedupQueue_lock; // a lock on the string deduplication queue
extern Mutex* StringDedupTable_lock; // a lock on the string deduplication table
extern Monitor* CodeCache_lock; // a lock on the CodeCache, rank is special
extern Monitor* CodeSweeper_lock; // a lock used by the sweeper only for wait notify
extern Mutex* MethodData_lock; // a lock on installation of method data
extern Mutex* TouchedMethodLog_lock; // a lock on allocation of LogExecutedMethods info
extern Mutex* RetData_lock; // a lock on installation of RetData inside method data
@@ -109,13 +109,12 @@ void NMethodSweeper::init_sweeper_log() {
CompiledMethodIterator NMethodSweeper::_current(CompiledMethodIterator::all_blobs); // Current compiled method
long NMethodSweeper::_traversals = 0; // Stack scan count, also sweep ID.
long NMethodSweeper::_total_nof_code_cache_sweeps = 0; // Total number of full sweeps of the code cache
long NMethodSweeper::_time_counter = 0; // Virtual time used to periodically invoke sweeper
long NMethodSweeper::_last_sweep = 0; // Value of _time_counter when the last sweep happened
int NMethodSweeper::_seen = 0; // Nof. nmethod we have currently processed in current pass of CodeCache
size_t NMethodSweeper::_sweep_threshold_bytes = 0; // Threshold for when to sweep. Updated after ergonomics

volatile bool NMethodSweeper::_should_sweep = false;// Indicates if we should invoke the sweeper
volatile bool NMethodSweeper::_force_sweep = false;// Indicates if we should force a sweep
volatile int NMethodSweeper::_bytes_changed = 0; // Counts the total nmethod size if the nmethod changed from:
volatile bool NMethodSweeper::_should_sweep = false;// Indicates if a normal sweep will be done
volatile bool NMethodSweeper::_force_sweep = false;// Indicates if a forced sweep will be done
volatile size_t NMethodSweeper::_bytes_changed = 0; // Counts the total nmethod size if the nmethod changed from:
// 1) alive -> not_entrant
// 2) not_entrant -> zombie
int NMethodSweeper::_hotness_counter_reset_val = 0;
@@ -188,9 +187,6 @@ CodeBlobClosure* NMethodSweeper::prepare_mark_active_nmethods() {
return NULL;
}

// Increase time so that we can estimate when to invoke the sweeper again.
_time_counter++;

// Check for restart
assert(_current.method() == NULL, "should only happen between sweeper cycles");
assert(wait_for_stack_scanning(), "should only happen between sweeper cycles");
@@ -217,9 +213,6 @@ CodeBlobClosure* NMethodSweeper::prepare_reset_hotness_counters() {
return NULL;
}

// Increase time so that we can estimate when to invoke the sweeper again.
_time_counter++;

// Check for restart
if (_current.method() != NULL) {
if (_current.method()->is_nmethod()) {
@@ -258,42 +251,47 @@ void NMethodSweeper::sweeper_loop() {
while (true) {
{
ThreadBlockInVM tbivm(JavaThread::current());
MonitorLocker waiter(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MonitorLocker waiter(CodeSweeper_lock, Mutex::_no_safepoint_check_flag);
const long wait_time = 60*60*24 * 1000;
timeout = waiter.wait(wait_time);
}
if (!timeout) {
possibly_sweep();
if (!timeout && (_should_sweep || _force_sweep)) {
sweep();
}
}
}
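
The loop now sleeps on the dedicated CodeSweeper_lock and only sweeps when one of the two request flags is set, instead of re-running heuristics on every wakeup. A minimal standalone sketch of the same wait/notify shape, using std::condition_variable in place of HotSpot's MonitorLocker (every name here is illustrative, not a HotSpot API):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    std::mutex              sweeper_mutex;   // stands in for CodeSweeper_lock
    std::condition_variable sweeper_cv;
    bool should_sweep = false;               // set by the report_* paths
    bool force_sweep  = false;               // set by the forced-sweep handshake

    void sweeper_loop_sketch() {
      while (true) {
        bool do_sweep;
        {
          std::unique_lock<std::mutex> lk(sweeper_mutex);
          // Wake on notify, or after a long fallback timeout (24 h in the real code).
          sweeper_cv.wait_for(lk, std::chrono::hours(24),
                              [] { return should_sweep || force_sweep; });
          do_sweep = should_sweep || force_sweep;
        }
        if (do_sweep) {
          // sweep();  // the real work runs outside the monitor
        }
      }
    }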

/**
* Wakes up the sweeper thread to possibly sweep.
* Wakes up the sweeper thread to sweep if code cache space runs low
*/
void NMethodSweeper::notify(int code_blob_type) {
void NMethodSweeper::report_allocation(int code_blob_type) {
if (should_start_aggressive_sweep(code_blob_type)) {
MonitorLocker waiter(CodeSweeper_lock, Mutex::_no_safepoint_check_flag);
_should_sweep = true;
CodeSweeper_lock->notify();
}
}

bool NMethodSweeper::should_start_aggressive_sweep(int code_blob_type) {
// Makes sure that we do not invoke the sweeper too often during startup.
double start_threshold = 100.0 / (double)StartAggressiveSweepingAt;
double aggressive_sweep_threshold = MIN2(start_threshold, 1.1);
if (CodeCache::reverse_free_ratio(code_blob_type) >= aggressive_sweep_threshold) {
assert_locked_or_safepoint(CodeCache_lock);
CodeCache_lock->notify();
}
return (CodeCache::reverse_free_ratio(code_blob_type) >= aggressive_sweep_threshold);
}
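
Plugging in the default StartAggressiveSweepingAt = 10 shows what the clamp does (CodeCache::reverse_free_ratio is, roughly, the code heap's max capacity divided by its free space):

    start_threshold            = 100.0 / 10      = 10.0
    aggressive_sweep_threshold = MIN2(10.0, 1.1) = 1.1

With defaults the MIN2 clamp dominates, so the sweeper is signalled once the ratio reaches 1.1, i.e. once roughly 9% of the heap is in use; its main effect is keeping the sweeper quiet right at startup, while the ratio is still close to 1.0.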

/**
* Wakes up the sweeper thread and forces a sweep. Blocks until it finished.
*/
void NMethodSweeper::force_sweep() {
ThreadBlockInVM tbivm(JavaThread::current());
MonitorLocker waiter(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MonitorLocker waiter(CodeSweeper_lock, Mutex::_no_safepoint_check_flag);
// Request forced sweep
_force_sweep = true;
while (_force_sweep) {
// Notify sweeper that we want to force a sweep and wait for completion.
// In case a sweep currently takes place we timeout and try again because
// we want to enforce a full sweep.
CodeCache_lock->notify();
CodeSweeper_lock->notify();
waiter.wait(1000);
}
}
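
Continuing the sketch from sweeper_loop above (same illustrative sweeper_mutex / sweeper_cv / force_sweep globals), the blocking handshake boils down to: set the flag, poke the sweeper, and re-notify every second until the sweeper clears the flag, which covers the case where a sweep already in flight consumed the first notify:

    void request_forced_sweep_sketch() {
      std::unique_lock<std::mutex> lk(sweeper_mutex);
      force_sweep = true;
      while (force_sweep) {
        sweeper_cv.notify_all();                          // (re)request a full sweep
        sweeper_cv.wait_for(lk, std::chrono::seconds(1)); // re-check once a second
      }
    }

    // Sweeper side, once sweep_code_cache() has finished:
    //   lock sweeper_mutex; force_sweep = false; sweeper_cv.notify_all();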
@@ -314,87 +312,28 @@ void NMethodSweeper::handle_safepoint_request() {
}
}

/**
* This function invokes the sweeper if at least one of the three conditions is met:
* (1) The code cache is getting full
* (2) There are sufficient state changes in/since the last sweep.
* (3) We have not been sweeping for 'some time'
*/
void NMethodSweeper::possibly_sweep() {
void NMethodSweeper::sweep() {
assert(_should_sweep || _force_sweep, "must have been set");
assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
// If there was no state change while nmethod sweeping, 'should_sweep' will be false.
// This is one of the two places where should_sweep can be set to true. The general
// idea is as follows: If there is enough free space in the code cache, there is no
// need to invoke the sweeper. The following formula (which determines whether to invoke
// the sweeper or not) depends on the assumption that for larger ReservedCodeCacheSizes
// we need less frequent sweeps than for smaller ReservedCodecCacheSizes. Furthermore,
// the formula considers how much space in the code cache is currently used. Here are
// some examples that will (hopefully) help in understanding.
//
// Small ReservedCodeCacheSizes: (e.g., < 16M) We invoke the sweeper every time, since
// the result of the division is 0. This
// keeps the used code cache size small
// (important for embedded Java)
// Large ReservedCodeCacheSize : (e.g., 256M + code cache is 10% full). The formula
// computes: (256 / 16) - 1 = 15
// As a result, we invoke the sweeper after
// 15 invocations of 'mark_active_nmethods.
// Large ReservedCodeCacheSize: (e.g., 256M + code Cache is 90% full). The formula
// computes: (256 / 16) - 10 = 6.
if (!_should_sweep) {
const int time_since_last_sweep = _time_counter - _last_sweep;
// ReservedCodeCacheSize has an 'unsigned' type. We need a 'signed' type for max_wait_time,
// since 'time_since_last_sweep' can be larger than 'max_wait_time'. If that happens using
// an unsigned type would cause an underflow (wait_until_next_sweep becomes a large positive
// value) that disables the intended periodic sweeps.
const int max_wait_time = ReservedCodeCacheSize / (16 * M);
double wait_until_next_sweep = max_wait_time - time_since_last_sweep -
MAX2(CodeCache::reverse_free_ratio(CodeBlobType::MethodProfiled),
CodeCache::reverse_free_ratio(CodeBlobType::MethodNonProfiled));
assert(wait_until_next_sweep <= (double)max_wait_time, "Calculation of code cache sweeper interval is incorrect");

if ((wait_until_next_sweep <= 0.0) || !CompileBroker::should_compile_new_jobs()) {
_should_sweep = true;
}
Atomic::store(&_bytes_changed, static_cast<size_t>(0)); // reset regardless of sleep reason
if (_should_sweep) {
MutexLocker mu(CodeSweeper_lock, Mutex::_no_safepoint_check_flag);
_should_sweep = false;
}

// Remember if this was a forced sweep
bool forced = _force_sweep;
do_stack_scanning();

// Force stack scanning if there is only 10% free space in the code cache.
// We force stack scanning only if the non-profiled code heap gets full, since critical
// allocations go to the non-profiled heap and we must be make sure that there is
// enough space.
double free_percent = 1 / CodeCache::reverse_free_ratio(CodeBlobType::MethodNonProfiled) * 100;
if (free_percent <= StartAggressiveSweepingAt || forced || _should_sweep) {
do_stack_scanning();
}

if (_should_sweep || forced) {
init_sweeper_log();
sweep_code_cache();
}
init_sweeper_log();
sweep_code_cache();

// We are done with sweeping the code cache once.
_total_nof_code_cache_sweeps++;
_last_sweep = _time_counter;
// Reset flag; temporarily disables sweeper
_should_sweep = false;
// If there was enough state change, 'possibly_enable_sweeper()'
// sets '_should_sweep' to true
possibly_enable_sweeper();
// Reset _bytes_changed only if there was enough state change. _bytes_changed
// can further increase by calls to 'report_state_change'.
if (_should_sweep) {
_bytes_changed = 0;
}

if (forced) {
if (_force_sweep) {
// Notify requester that forced sweep finished
assert(_force_sweep, "Should be a forced sweep");
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
MutexLocker mu(CodeSweeper_lock, Mutex::_no_safepoint_check_flag);
_force_sweep = false;
CodeCache_lock->notify();
CodeSweeper_lock->notify();
}
}
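
Two ordering details in the rewritten sweep() are easy to miss: the byte counter is zeroed before the sweep starts, so a report_state_change() racing with the sweep is credited toward the next cycle rather than dropped, and _should_sweep is cleared under CodeSweeper_lock, serializing it against the report paths that set it. A compressed sketch of that prologue, reusing the globals from the sweeper_loop sketch plus an atomic counter (std::atomic standing in for HotSpot's Atomic):

    #include <atomic>
    #include <cstddef>

    std::atomic<std::size_t> bytes_changed{0};   // mirrors _bytes_changed

    void sweep_prologue_sketch() {
      // 1. Zero the counter first: state changes reported while this sweep
      //    runs accumulate toward the next sweep instead of being lost.
      bytes_changed.store(0);
      // 2. Clear the request flag under the sweeper lock, so clearing cannot
      //    interleave badly with a concurrent report that sets it.
      std::lock_guard<std::mutex> lk(sweeper_mutex);
      should_sweep = false;
    }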

@@ -537,28 +476,16 @@ void NMethodSweeper::sweep_code_cache() {
}
}

/**
* This function updates the sweeper statistics that keep track of nmethods
* state changes. If there is 'enough' state change, the sweeper is invoked
* as soon as possible. There can be data races on _bytes_changed. The data
* races are benign, since it does not matter if we loose a couple of bytes.
* In the worst case we call the sweeper a little later. Also, we are guaranteed
* to invoke the sweeper if the code cache gets full.
*/
// This function updates the sweeper statistics that keep track of nmethods
// state changes. If there is 'enough' state change, the sweeper is invoked
// as soon as possible. Also, we are guaranteed to invoke the sweeper if
// the code cache gets full.
void NMethodSweeper::report_state_change(nmethod* nm) {
_bytes_changed += nm->total_size();
possibly_enable_sweeper();
}

/**
* Function determines if there was 'enough' state change in the code cache to invoke
* the sweeper again. Currently, we determine 'enough' as more than 1% state change in
* the code cache since the last sweep.
*/
void NMethodSweeper::possibly_enable_sweeper() {
double percent_changed = ((double)_bytes_changed / (double)ReservedCodeCacheSize) * 100;
if (percent_changed > 1.0) {
Atomic::add(&_bytes_changed, (size_t)nm->total_size());
if (Atomic::load(&_bytes_changed) > _sweep_threshold_bytes) {
MutexLocker mu(CodeSweeper_lock, Mutex::_no_safepoint_check_flag);
_should_sweep = true;
CodeSweeper_lock->notify(); // Wake up sweeper.
}
}
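
Finally, the accumulate-and-notify pattern at the heart of the new heuristic, reduced to its essentials (continuing the same sketch; threshold_bytes plays the role of _sweep_threshold_bytes, using the ~1.2 MB default worked out earlier):

    const std::size_t threshold_bytes = 1258291;   // 0.5% of a 240 MB cache

    void report_state_change_sketch(std::size_t nmethod_total_size) {
      // Racy, lock-free accumulation is acceptable: a lost update at worst
      // delays the wakeup, and allocation pressure still forces a sweep.
      if (bytes_changed.fetch_add(nmethod_total_size) + nmethod_total_size
          > threshold_bytes) {
        std::lock_guard<std::mutex> lk(sweeper_mutex);
        should_sweep = true;
        sweeper_cv.notify_all();                   // wake sweeper_loop_sketch()
      }
    }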
