Skip to content
This repository has been archived by the owner. It is now read-only.
Permalink
Browse files
8248401: Refactor/unify RMI gc support functionality
Move the timestamp of the most recent whole-heap liveness analysis into CollectedHeap, removing the duplicates in all collectors

Reviewed-by: kbarrett, ayang, stefank
  • Loading branch information
Thomas Schatzl committed Aug 7, 2020
1 parent ea873c5 commit e05a51a0091b335c7369746153cd0d3b4600264b
Show file tree
Hide file tree
Showing 25 changed files with 56 additions and 166 deletions.
@@ -129,11 +129,6 @@ class EpsilonHeap : public CollectedHeap {
virtual void prepare_for_verify() {}
virtual void verify(VerifyOption option) {}

virtual jlong millis_since_last_gc() {
  // Epsilon never actually collects, so report the elapsed time since
  // VM start instead of a time since a (nonexistent) GC.
  jlong uptime_ms = os::elapsed_counter() / NANOSECS_PER_MILLISEC;
  return uptime_ms;
}

MemRegion reserved_region() const { return _reserved; }
bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }

@@ -1417,6 +1417,7 @@ G1CollectedHeap::G1CollectedHeap() :
_young_gen_sampling_thread(NULL),
_workers(NULL),
_card_table(NULL),
_collection_pause_end(Ticks::now()),
_soft_ref_policy(),
_old_set("Old Region Set", new OldRegionSetChecker()),
_archive_set("Archive Region Set", new ArchiveRegionSetChecker()),
@@ -1966,7 +1967,8 @@ void G1CollectedHeap::increment_old_marking_cycles_started() {
_old_marking_cycles_started++;
}

void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent,
bool whole_heap_examined) {
MonitorLocker ml(G1OldGCCount_lock, Mutex::_no_safepoint_check_flag);

// We assume that if concurrent == true, then the caller is a
@@ -1998,6 +2000,10 @@ void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
_old_marking_cycles_started, _old_marking_cycles_completed);

_old_marking_cycles_completed += 1;
if (whole_heap_examined) {
// Signal that we have completed a visit to all live objects.
record_whole_heap_examined_timestamp();
}

// We need to clear the "in_progress" flag in the CM thread before
// we wake up any waiters (especially when ExplicitInvokesConcurrent
@@ -2366,19 +2372,6 @@ size_t G1CollectedHeap::max_reserved_capacity() const {
return _hrm->max_length() * HeapRegion::GrainBytes;
}

jlong G1CollectedHeap::millis_since_last_gc() {
  // See the notes in GenCollectedHeap::millis_since_last_gc()
  // for more information about the implementation.
  jlong now_ms = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong elapsed_ms = now_ms - _policy->collection_pause_end_millis();
  // Guard against the clock reading earlier than the recorded pause end.
  if (elapsed_ms >= 0) {
    return elapsed_ms;
  }
  log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT
                  ". returning zero instead.", elapsed_ms);
  return 0;
}

void G1CollectedHeap::deduplicate_string(oop str) {
assert(java_lang_String::is_instance(str), "invariant");

@@ -2641,7 +2634,7 @@ void G1CollectedHeap::gc_epilogue(bool full) {
// Update common counters.
if (full) {
// Update the number of full collections that have been completed.
increment_old_marking_cycles_completed(false /* concurrent */);
increment_old_marking_cycles_completed(false /* concurrent */, true /* liveness_completed */);
}

// We are at the end of the GC. Total collections has already been increased.
@@ -2665,6 +2658,8 @@ void G1CollectedHeap::gc_epilogue(bool full) {

// Print NUMA statistics.
_numa->print_statistics();

_collection_pause_end = Ticks::now();
}

void G1CollectedHeap::verify_numa_regions(const char* desc) {
@@ -159,6 +159,8 @@ class G1CollectedHeap : public CollectedHeap {
WorkGang* _workers;
G1CardTable* _card_table;

Ticks _collection_pause_end;

SoftRefPolicy _soft_ref_policy;

static size_t _humongous_object_threshold_in_words;
@@ -644,7 +646,10 @@ class G1CollectedHeap : public CollectedHeap {
// the G1OldGCCount_lock in case a Java thread is waiting for a full
// GC to happen (e.g., it called System.gc() with
// +ExplicitGCInvokesConcurrent).
void increment_old_marking_cycles_completed(bool concurrent);
// whole_heap_examined should indicate that during that old marking
// cycle the whole heap has been examined for live objects (as opposed
// to only parts, or aborted before completion).
void increment_old_marking_cycles_completed(bool concurrent, bool whole_heap_examined);

uint old_marking_cycles_completed() {
return _old_marking_cycles_completed;
@@ -1288,8 +1293,7 @@ class G1CollectedHeap : public CollectedHeap {
// Return the size of reserved memory. Returns different value than max_capacity() when AllocateOldGenAt is used.
virtual size_t max_reserved_capacity() const;

virtual jlong millis_since_last_gc();

Tickspan time_since_last_collection() const { return Ticks::now() - _collection_pause_end; }

// Convenience function to be used in situations where the heap type can be
// asserted to be this type.
@@ -268,7 +268,8 @@ void G1ConcurrentMarkThread::run_service() {
// called System.gc() with +ExplicitGCInvokesConcurrent).
{
SuspendibleThreadSetJoiner sts_join;
g1h->increment_old_marking_cycles_completed(true /* concurrent */);
g1h->increment_old_marking_cycles_completed(true /* concurrent */,
!_cm->has_aborted() /* liveness_completed */);

_cm->concurrent_cycle_end();
ConcurrentGCBreakpoints::notify_active_to_idle();
@@ -60,7 +60,6 @@ G1Policy::G1Policy(STWGCTimer* gc_timer) :
_ihop_control(create_ihop_control(&_predictor)),
_policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
_full_collection_start_sec(0.0),
_collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC),
_young_list_target_length(0),
_young_list_fixed_length(0),
_young_list_max_length(0),
@@ -648,8 +647,6 @@ void G1Policy::record_collection_pause_end(double pause_time_ms) {

record_pause(this_pause, end_time_sec - pause_time_ms / 1000.0, end_time_sec);

_collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;

if (is_concurrent_start_pause(this_pause)) {
record_concurrent_mark_init_end(0.0);
} else {
@@ -74,8 +74,6 @@ class G1Policy: public CHeapObj<mtGC> {

double _full_collection_start_sec;

jlong _collection_pause_end_millis;

uint _young_list_target_length;
uint _young_list_fixed_length;

@@ -260,8 +258,6 @@ class G1Policy: public CHeapObj<mtGC> {
// percentage of the current heap capacity.
double reclaimable_bytes_percent(size_t reclaimable_bytes) const;

jlong collection_pause_end_millis() { return _collection_pause_end_millis; }

private:
void clear_collection_set_candidates();
// Sets up marking if proper conditions are met.
@@ -56,14 +56,15 @@ void G1YoungRemSetSamplingThread::sleep_before_next_cycle() {
}

bool G1YoungRemSetSamplingThread::should_start_periodic_gc() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
// If we are currently in a concurrent mark we are going to uncommit memory soon.
if (G1CollectedHeap::heap()->concurrent_mark()->cm_thread()->during_cycle()) {
if (g1h->concurrent_mark()->cm_thread()->during_cycle()) {
log_debug(gc, periodic)("Concurrent cycle in progress. Skipping.");
return false;
}

// Check if enough time has passed since the last GC.
uintx time_since_last_gc = (uintx)Universe::heap()->millis_since_last_gc();
uintx time_since_last_gc = (uintx)g1h->time_since_last_collection().milliseconds();
if ((time_since_last_gc < G1PeriodicGCInterval)) {
log_debug(gc, periodic)("Last GC occurred " UINTX_FORMAT "ms before which is below threshold " UINTX_FORMAT "ms. Skipping.",
time_since_last_gc, G1PeriodicGCInterval);
@@ -559,10 +559,6 @@ bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
return block_start(addr) == addr;
}

// Time (ms) since the last full collection. Delegates to PSParallelCompact,
// which records the timestamp at the end of each full compaction — full
// compactions visit the whole heap, which is what this query is about.
jlong ParallelScavengeHeap::millis_since_last_gc() {
return PSParallelCompact::millis_since_last_gc();
}

void ParallelScavengeHeap::prepare_for_verify() {
ensure_parsability(false); // no need to retire TLABs for verification
}
@@ -213,8 +213,6 @@ class ParallelScavengeHeap : public CollectedHeap {
HeapWord* block_start(const void* addr) const;
bool block_is_obj(const HeapWord* addr) const;

jlong millis_since_last_gc();

void prepare_for_verify();
PSHeapSummary create_ps_heap_summary();
virtual void print_on(outputStream* st) const;
@@ -843,7 +843,6 @@ ParallelOldTracer PSParallelCompact::_gc_tracer;
elapsedTimer PSParallelCompact::_accumulated_time;
unsigned int PSParallelCompact::_total_invocations = 0;
unsigned int PSParallelCompact::_maximum_compaction_gc_num = 0;
jlong PSParallelCompact::_time_of_last_gc = 0;
CollectorCounters* PSParallelCompact::_counters = NULL;
ParMarkBitMap PSParallelCompact::_mark_bitmap;
ParallelCompactData PSParallelCompact::_summary_data;
@@ -1070,8 +1069,8 @@ void PSParallelCompact::post_compact()
heap->gen_mangle_unused_area();
}

// Update time of last GC
reset_millis_since_last_gc();
// Signal that we have completed a visit to all live objects.
Universe::heap()->record_whole_heap_examined_timestamp();
}

HeapWord*
@@ -3192,25 +3191,6 @@ void PSParallelCompact::fill_blocks(size_t region_idx)
}
}

// Returns the elapsed time in ms since the last full compaction,
// clamped at zero in case of an apparent time warp.
jlong PSParallelCompact::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  const jlong now_ms = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  const jlong elapsed_ms = now_ms - _time_of_last_gc;
  // XXX See note in genCollectedHeap::millis_since_last_gc().
  if (elapsed_ms >= 0) {
    return elapsed_ms;
  }
  NOT_PRODUCT(log_warning(gc)("time warp: " JLONG_FORMAT, elapsed_ms);)
  return 0;
}

// Records "now" as the time of the last full gc; the baseline for
// millis_since_last_gc().
void PSParallelCompact::reset_millis_since_last_gc() {
// We need a monotonically non-decreasing time in ms but
// os::javaTimeMillis() does not guarantee monotonicity.
_time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
}

ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full()
{
if (source() != copy_destination()) {
@@ -1009,7 +1009,6 @@ class PSParallelCompact : AllStatic {
static elapsedTimer _accumulated_time;
static unsigned int _total_invocations;
static unsigned int _maximum_compaction_gc_num;
static jlong _time_of_last_gc; // ms
static CollectorCounters* _counters;
static ParMarkBitMap _mark_bitmap;
static ParallelCompactData _summary_data;
@@ -1123,9 +1122,6 @@ class PSParallelCompact : AllStatic {
static void enqueue_dense_prefix_tasks(TaskQueue& task_queue,
uint parallel_gc_threads);

// Reset time since last full gc
static void reset_millis_since_last_gc();

#ifndef PRODUCT
// Print generic summary data
static void print_generic_summary_data(ParallelCompactData& summary_data,
@@ -1249,9 +1245,6 @@ class PSParallelCompact : AllStatic {
// Return the SpaceId for the given address.
static SpaceId space_id(HeapWord* addr);

// Time since last full gc (in milliseconds).
static jlong millis_since_last_gc();

static void print_on_error(outputStream* st);

#ifndef PRODUCT
@@ -680,12 +680,6 @@ void DefNewGeneration::collect(bool full,
from()->set_concurrent_iteration_safe_limit(from()->top());
to()->set_concurrent_iteration_safe_limit(to()->top());

// We need to use a monotonically non-decreasing time in ms
// or we will see time-warp warnings and os::javaTimeMillis()
// does not guarantee monotonicity.
jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
update_time_of_last_gc(now);

heap->trace_heap_after_gc(&gc_tracer);

_gc_timer->register_gc_end();
@@ -137,13 +137,8 @@ void GenMarkSweep::invoke_at_safepoint(ReferenceProcessor* rp, bool clear_all_so
// input to soft ref clearing policy at the next gc.
Universe::update_heap_info_at_gc();

// Update time of last gc for all generations we collected
// (which currently is all the generations in the heap).
// We need to use a monotonically non-decreasing time in ms
// or we will see time-warp warnings and os::javaTimeMillis()
// does not guarantee monotonicity.
jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
gch->update_time_of_last_gc(now);
// Signal that we have completed a visit to all live objects.
Universe::heap()->record_whole_heap_examined_timestamp();

gch->trace_heap_after_gc(_gc_tracer);
}
@@ -191,6 +191,7 @@ bool CollectedHeap::is_oop(oop object) const {

CollectedHeap::CollectedHeap() :
_is_gc_active(false),
_last_whole_heap_examined_time_ns(os::javaTimeNanos()),
_total_collections(0),
_total_full_collections(0),
_gc_cause(GCCause::_no_gc),
@@ -488,6 +489,14 @@ void CollectedHeap::resize_all_tlabs() {
}
}

// Elapsed time (ms) since a GC last examined the whole heap for live
// objects (see record_whole_heap_examined_timestamp()).
jlong CollectedHeap::millis_since_last_whole_heap_examined() {
  jlong elapsed_ns = os::javaTimeNanos() - _last_whole_heap_examined_time_ns;
  return elapsed_ns / NANOSECS_PER_MILLISEC;
}

// Called by a collector when it has completed a visit to all live objects
// in the heap; stores a nanosecond timestamp that
// millis_since_last_whole_heap_examined() reports against.
// os::javaTimeNanos() is used so the value is monotonically non-decreasing
// (avoids time-warp issues with wall-clock time).
void CollectedHeap::record_whole_heap_examined_timestamp() {
_last_whole_heap_examined_time_ns = os::javaTimeNanos();
}

void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) {
assert(timer != NULL, "timer is null");
if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) {
@@ -112,6 +112,12 @@ class CollectedHeap : public CHeapObj<mtInternal> {
// Used for filler objects (static, but initialized in ctor).
static size_t _filler_array_max_size;

// Last time the whole heap has been examined in support of RMI
// MaxObjectInspectionAge.
// This timestamp must be monotonically non-decreasing to avoid
// time-warp warnings.
jlong _last_whole_heap_examined_time_ns;

unsigned int _total_collections; // ... started
unsigned int _total_full_collections; // ... started
NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
@@ -404,15 +410,18 @@ class CollectedHeap : public CHeapObj<mtInternal> {
// Keep alive an object that was loaded with AS_NO_KEEPALIVE.
virtual void keep_alive(oop obj) {}

// Returns the longest time (in ms) that has elapsed since the last
// time that any part of the heap was examined by a garbage collection.
virtual jlong millis_since_last_gc() = 0;

// Perform any cleanup actions necessary before allowing a verification.
virtual void prepare_for_verify() = 0;

// Generate any dumps preceding or following a full gc
// Returns the longest time (in ms) that has elapsed since the last
// time that the whole heap has been examined by a garbage collection.
jlong millis_since_last_whole_heap_examined();
// GC should call this when the next whole heap analysis has completed to
// satisfy above requirement.
void record_whole_heap_examined_timestamp();

private:
// Generate any dumps preceding or following a full gc
void full_gc_dump(GCTimer* timer, bool before);

virtual void initialize_serviceability() = 0;
@@ -1354,37 +1354,3 @@ oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
}
return oop(result);
}

// Helper closure for GenCollectedHeap::millis_since_last_gc(): iterated
// over all generations, it keeps the minimum (i.e. oldest) per-generation
// time_of_last_gc() value, seeded with 'now'.
class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
jlong _time; // in ms; running minimum of the generations' last-gc times
jlong _now; // in ms; passed to each generation's time_of_last_gc()

public:
GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }

// The oldest last-gc time seen so far (== 'now' if no generation visited).
jlong time() { return _time; }

void do_generation(Generation* gen) {
_time = MIN2(_time, gen->time_of_last_gc(_now));
}
};

// Elapsed time (ms) since the oldest of the generations' last collections,
// clamped at zero if the clock appears to have warped.
jlong GenCollectedHeap::millis_since_last_gc() {
  // javaTimeNanos() is guaranteed to be monotonically non-decreasing
  // provided the underlying platform provides such a time source
  // (and it is bug free). So we still have to guard against getting
  // back a time later than 'now'.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  // Iterate over generations, collecting the oldest time that any
  // generation was last collected.
  GenTimeOfLastGCClosure tolgc_cl(now);
  generation_iterate(&tolgc_cl, false);

  jlong elapsed_ms = now - tolgc_cl.time();
  if (elapsed_ms >= 0) {
    return elapsed_ms;
  }
  log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT
                  ". returning zero instead.", elapsed_ms);
  return 0;
}

0 comments on commit e05a51a

Please sign in to comment.