Commit c93204c

Authored and committed by Thomas Schatzl

8269914: Factor out heap printing for G1 young and full gc
8270018: Add scoped object for g1 young gc JFR notification

Reviewed-by: ayang, iwalulya, kbarrett

1 parent: dfd6b2b

7 files changed: +137 -92 lines changed

src/hotspot/share/gc/g1/g1Allocator.cpp

Lines changed: 6 additions & 6 deletions

@@ -91,7 +91,7 @@ bool G1Allocator::is_retained_old_region(HeapRegion* hr) {
   return _retained_old_gc_alloc_region == hr;
 }
 
-void G1Allocator::reuse_retained_old_region(G1EvacuationInfo& evacuation_info,
+void G1Allocator::reuse_retained_old_region(G1EvacuationInfo* evacuation_info,
                                             OldGCAllocRegion* old,
                                             HeapRegion** retained_old) {
   HeapRegion* retained_region = *retained_old;
@@ -120,11 +120,11 @@ void G1Allocator::reuse_retained_old_region(G1EvacuationInfo& evacuation_info,
     _g1h->old_set_remove(retained_region);
     old->set(retained_region);
     _g1h->hr_printer()->reuse(retained_region);
-    evacuation_info.set_alloc_regions_used_before(retained_region->used());
+    evacuation_info->set_alloc_regions_used_before(retained_region->used());
   }
 }
 
-void G1Allocator::init_gc_alloc_regions(G1EvacuationInfo& evacuation_info) {
+void G1Allocator::init_gc_alloc_regions(G1EvacuationInfo* evacuation_info) {
   assert_at_safepoint_on_vm_thread();
 
   _survivor_is_full = false;
@@ -140,14 +140,14 @@ void G1Allocator::init_gc_alloc_regions(G1EvacuationInfo& evacuation_info) {
                            &_retained_old_gc_alloc_region);
 }
 
-void G1Allocator::release_gc_alloc_regions(G1EvacuationInfo& evacuation_info) {
+void G1Allocator::release_gc_alloc_regions(G1EvacuationInfo* evacuation_info) {
   uint survivor_region_count = 0;
   for (uint node_index = 0; node_index < _num_alloc_regions; node_index++) {
     survivor_region_count += survivor_gc_alloc_region(node_index)->count();
     survivor_gc_alloc_region(node_index)->release();
   }
-  evacuation_info.set_allocation_regions(survivor_region_count +
-                                         old_gc_alloc_region()->count());
+  evacuation_info->set_allocation_regions(survivor_region_count +
+                                          old_gc_alloc_region()->count());
 
   // If we have an old GC alloc region to release, we'll save it in
   // _retained_old_gc_alloc_region. If we don't

src/hotspot/share/gc/g1/g1Allocator.hpp

Lines changed: 3 additions & 3 deletions

@@ -68,7 +68,7 @@ class G1Allocator : public CHeapObj<mtGC> {
   void set_survivor_full();
   void set_old_full();
 
-  void reuse_retained_old_region(G1EvacuationInfo& evacuation_info,
+  void reuse_retained_old_region(G1EvacuationInfo* evacuation_info,
                                  OldGCAllocRegion* old,
                                  HeapRegion** retained);
 
@@ -105,8 +105,8 @@ class G1Allocator : public CHeapObj<mtGC> {
   void init_mutator_alloc_regions();
   void release_mutator_alloc_regions();
 
-  void init_gc_alloc_regions(G1EvacuationInfo& evacuation_info);
-  void release_gc_alloc_regions(G1EvacuationInfo& evacuation_info);
+  void init_gc_alloc_regions(G1EvacuationInfo* evacuation_info);
+  void release_gc_alloc_regions(G1EvacuationInfo* evacuation_info);
   void abandon_gc_alloc_regions();
   bool is_retained_old_region(HeapRegion* hr);
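Note on the G1Allocator changes above: the G1EvacuationInfo parameter moves from a reference to a pointer so callers can simply forward the pointer handed out by the new G1YoungGCJFRTracerMark::evacuation_info() accessor (see g1CollectedHeap.cpp below). A minimal standalone sketch of what the change means at a call site, using a hypothetical EvacInfo stand-in rather than the real HotSpot class:

    // Hypothetical stand-in type, only to illustrate the
    // reference-to-pointer parameter change in this commit.
    #include <cstddef>
    #include <cstdio>

    struct EvacInfo {
      size_t allocation_regions = 0;
      void set_allocation_regions(size_t n) { allocation_regions = n; }
    };

    // Before: the callee takes a reference and uses '.' for member access.
    void release_regions_by_ref(EvacInfo& info, size_t count) {
      info.set_allocation_regions(count);
    }

    // After: the callee takes a pointer and uses '->'; the caller passes
    // '&info' or forwards an existing pointer, which makes the
    // out-parameter visible at the call site.
    void release_regions_by_ptr(EvacInfo* info, size_t count) {
      info->set_allocation_regions(count);
    }

    int main() {
      EvacInfo info;
      release_regions_by_ref(info, 3);   // old style
      release_regions_by_ptr(&info, 3);  // new style
      std::printf("allocation regions: %zu\n", info.allocation_regions);
      return 0;
    }

Behaviour is unchanged; only the call-site spelling and the member access operator differ.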

src/hotspot/share/gc/g1/g1CollectedHeap.cpp

Lines changed: 77 additions & 51 deletions

@@ -980,7 +980,11 @@ class PostCompactionPrinterClosure: public HeapRegionClosure {
     : _hr_printer(hr_printer) { }
 };
 
-void G1CollectedHeap::print_hrm_post_compaction() {
+void G1CollectedHeap::print_heap_after_full_collection() {
+  // Post collection region logging.
+  // We should do this after we potentially resize the heap so
+  // that all the COMMIT / UNCOMMIT events are generated before
+  // the compaction events.
   if (_hr_printer.is_active()) {
     PostCompactionPrinterClosure cl(hr_printer());
     heap_region_iterate(&cl);
@@ -1092,17 +1096,6 @@ void G1CollectedHeap::verify_after_full_collection() {
   _ref_processor_cm->verify_no_references_recorded();
 }
 
-void G1CollectedHeap::print_heap_after_full_collection(G1HeapTransition* heap_transition) {
-  // Post collection logging.
-  // We should do this after we potentially resize the heap so
-  // that all the COMMIT / UNCOMMIT events are generated before
-  // the compaction events.
-  print_hrm_post_compaction();
-  heap_transition->print();
-  print_heap_after_gc();
-  print_heap_regions();
-}
-
 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
                                          bool clear_all_soft_refs,
                                          bool do_maximum_compaction) {
@@ -2572,9 +2565,6 @@ void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
 void G1CollectedHeap::gc_prologue(bool full) {
   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
 
-  // This summary needs to be printed before incrementing total collections.
-  rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());
-
   // Update common counters.
   increment_total_collections(full /* full gc */);
   if (full || collector_state()->in_concurrent_start_gc()) {
@@ -2607,9 +2597,6 @@ void G1CollectedHeap::gc_epilogue(bool full) {
     increment_old_marking_cycles_completed(false /* concurrent */, true /* liveness_completed */);
   }
 
-  // We are at the end of the GC. Total collections has already been increased.
-  rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1);
-
 #if COMPILER2_OR_JVMCI
   assert(DerivedPointerTable::is_empty(), "derived pointer present");
 #endif
@@ -2622,9 +2609,6 @@ void G1CollectedHeap::gc_epilogue(bool full) {
   // policy with the new heap occupancy
   Universe::heap()->update_capacity_and_used_at_gc();
 
-  // Print NUMA statistics.
-  _numa->print_statistics();
-
   _collection_pause_end = Ticks::now();
 
   _free_card_set_memory_task->notify_new_stats(&_young_gen_card_set_stats,
@@ -2813,10 +2797,10 @@ void G1CollectedHeap::start_new_collection_set() {
   phase_times()->record_start_new_cset_time_ms((os::elapsedTime() - start) * 1000.0);
 }
 
-void G1CollectedHeap::calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms) {
+void G1CollectedHeap::calculate_collection_set(G1EvacuationInfo* evacuation_info, double target_pause_time_ms) {
 
   _collection_set.finalize_initial_collection_set(target_pause_time_ms, &_survivor);
-  evacuation_info.set_collectionset_regions(collection_set()->region_length() +
+  evacuation_info->set_collectionset_regions(collection_set()->region_length() +
                                             collection_set()->optional_region_length());
 
   _cm->verify_no_collection_set_oops();
@@ -2946,6 +2930,64 @@ class G1YoungGCTraceTime {
   }
 };
 
+G1HeapPrinterMark::G1HeapPrinterMark(G1CollectedHeap* g1h) : _g1h(g1h), _heap_transition(g1h) {
+  // This summary needs to be printed before incrementing total collections.
+  _g1h->rem_set()->print_periodic_summary_info("Before GC RS summary", _g1h->total_collections());
+  _g1h->print_heap_before_gc();
+  _g1h->print_heap_regions();
+}
+
+G1HeapPrinterMark::~G1HeapPrinterMark() {
+  _g1h->policy()->print_age_table();
+  _g1h->rem_set()->print_coarsen_stats();
+  // We are at the end of the GC. Total collections has already been increased.
+  _g1h->rem_set()->print_periodic_summary_info("After GC RS summary", _g1h->total_collections() - 1);
+
+  _heap_transition.print();
+  _g1h->print_heap_regions();
+  _g1h->print_heap_after_gc();
+  // Print NUMA statistics.
+  _g1h->numa()->print_statistics();
+}
+
+G1JFRTracerMark::G1JFRTracerMark(STWGCTimer* timer, GCTracer* tracer) :
+  _timer(timer), _tracer(tracer) {
+
+  _timer->register_gc_start();
+  _tracer->report_gc_start(G1CollectedHeap::heap()->gc_cause(), _timer->gc_start());
+  G1CollectedHeap::heap()->trace_heap_before_gc(_tracer);
+}
+
+G1JFRTracerMark::~G1JFRTracerMark() {
+  G1CollectedHeap::heap()->trace_heap_after_gc(_tracer);
+  _timer->register_gc_end();
+  _tracer->report_gc_end(_timer->gc_end(), _timer->time_partitions());
+}
+
+class G1YoungGCJFRTracerMark : public G1JFRTracerMark {
+  G1EvacuationInfo _evacuation_info;
+
+  G1NewTracer* tracer() const { return (G1NewTracer*)_tracer; }
+
+public:
+
+  G1EvacuationInfo* evacuation_info() { return &_evacuation_info; }
+
+  G1YoungGCJFRTracerMark(STWGCTimer* gc_timer_stw, G1NewTracer* gc_tracer_stw, GCCause::Cause cause) :
+    G1JFRTracerMark(gc_timer_stw, gc_tracer_stw), _evacuation_info() { }
+
+  void report_pause_type(G1GCPauseType type) {
+    tracer()->report_young_gc_pause(type);
+  }
+
+  ~G1YoungGCJFRTracerMark() {
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+    tracer()->report_evacuation_info(&_evacuation_info);
+    tracer()->report_tenuring_threshold(g1h->policy()->tenuring_threshold());
+  }
+};
+
 void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_pause_time_ms) {
   GCIdMark gc_id_mark;
 
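The additions above introduce two scoped (RAII) helpers. G1HeapPrinterMark gathers the "before GC" and "after GC" logging that used to be spread over gc_prologue(), gc_epilogue() and the pause code (remembered-set summaries, heap and region printing, age table, coarsen and NUMA statistics): the constructor prints the "before" half, the destructor the "after" half. G1JFRTracerMark brackets a pause with the JFR timer and tracer start/end notifications, and G1YoungGCJFRTracerMark additionally owns the G1EvacuationInfo and reports it when the pause scope ends. A minimal standalone sketch of the pattern, using simplified stand-in classes rather than the real HotSpot types:

    #include <cstdio>

    // Stand-in for G1HeapPrinterMark: constructor does the "before GC"
    // printing, destructor the "after GC" printing.
    class HeapPrinterMark {
    public:
      HeapPrinterMark()  { std::puts("print heap state before GC"); }
      ~HeapPrinterMark() { std::puts("print heap transition and state after GC"); }
    };

    // Stand-in for G1JFRTracerMark: constructor reports the GC start
    // event, destructor the GC end event.
    class JFRTracerMark {
    public:
      JFRTracerMark()  { std::puts("register GC start, report JFR start event"); }
      ~JFRTracerMark() { std::puts("report JFR end event, register GC end"); }
    };

    void do_collection_pause() {
      JFRTracerMark jtm;    // constructed first, destroyed last
      HeapPrinterMark hpm;  // nested inside the JFR start/end events
      std::puts("... evacuate the collection set ...");
      // Leaving the scope runs ~HeapPrinterMark, then ~JFRTracerMark,
      // no matter how the function exits.
    }

    int main() {
      do_collection_pause();
      return 0;
    }

The benefit over the previous explicit calls is that the "after" half can no longer be skipped or reordered by hand: it always runs when the scope closes, in reverse construction order.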
@@ -2954,14 +2996,8 @@ void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_pause_time_ms) {
 
   policy()->note_gc_start();
 
-  gc_tracer_report_gc_start();
-
   wait_for_root_region_scanning();
 
-  print_heap_before_gc();
-  print_heap_regions();
-  trace_heap_before_gc(_gc_tracer_stw);
-
   // We should not be doing concurrent start unless the concurrent mark thread is running
   if (!_cm_thread->should_terminate()) {
     // This call will decide whether this pause is a concurrent start
@@ -2984,8 +3020,6 @@ void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_pause_time_ms) {
 
   // Inner scope for scope based logging, timers, and stats collection
   {
-    G1EvacuationInfo evacuation_info;
-
     GCTraceCPUTime tcpu;
 
     G1YoungGCTraceTime tm(gc_cause());
@@ -2996,12 +3030,14 @@ void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_pause_time_ms) {
     active_workers = workers()->update_active_workers(active_workers);
     log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
 
+    // JFR
+    G1YoungGCJFRTracerMark jtm(_gc_timer_stw, _gc_tracer_stw, gc_cause());
     // JStat/MXBeans
     G1MonitoringScope ms(g1mm(),
                          false /* full_gc */,
                          collector_state()->in_mixed_phase() /* all_memory_pools_affected */);
 
-    G1HeapTransition heap_transition(this);
+    G1HeapPrinterMark hpm(this);
 
     {
       IsGCActiveMark x;
@@ -3025,15 +3061,15 @@ void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_pause_time_ms) {
      // of the collection set!).
      _allocator->release_mutator_alloc_regions();
 
-      calculate_collection_set(evacuation_info, target_pause_time_ms);
+      calculate_collection_set(jtm.evacuation_info(), target_pause_time_ms);
 
      G1RedirtyCardsQueueSet rdcqs(G1BarrierSet::dirty_card_queue_set().allocator());
      G1ParScanThreadStateSet per_thread_states(this,
                                                &rdcqs,
                                                workers()->active_workers(),
                                                collection_set()->young_region_length(),
                                                collection_set()->optional_region_length());
-      pre_evacuate_collection_set(evacuation_info, &per_thread_states);
+      pre_evacuate_collection_set(jtm.evacuation_info(), &per_thread_states);
 
      bool may_do_optional_evacuation = _collection_set.optional_region_length() != 0;
      // Actually do the work...
@@ -3042,7 +3078,7 @@ void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_pause_time_ms) {
      if (may_do_optional_evacuation) {
        evacuate_optional_collection_set(&per_thread_states);
      }
-      post_evacuate_collection_set(evacuation_info, &rdcqs, &per_thread_states);
+      post_evacuate_collection_set(jtm.evacuation_info(), &rdcqs, &per_thread_states);
 
      start_new_collection_set();
 
@@ -3061,7 +3097,7 @@ void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_pause_time_ms) {
 
      // Need to report the collection pause now since record_collection_pause_end()
      // modifies it to the next state.
-      _gc_tracer_stw->report_young_gc_pause(collector_state()->young_gc_pause_type(concurrent_operation_is_full_mark));
+      jtm.report_pause_type(collector_state()->young_gc_pause_type(concurrent_operation_is_full_mark));
 
      double sample_end_time_sec = os::elapsedTime();
      double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
@@ -3074,16 +3110,9 @@ void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_pause_time_ms) {
    }
 
    policy()->print_phases();
-    heap_transition.print();
 
    TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
    TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
-
-    print_heap_after_gc();
-    print_heap_regions();
-    trace_heap_after_gc(_gc_tracer_stw);
-
-    gc_tracer_report_gc_end(concurrent_operation_is_full_mark, evacuation_info);
  }
  // It should now be safe to tell the concurrent mark thread to start
  // without its logging output interfering with the logging output
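Note on the pause hunks above: the explicit heap_transition.print(), print_heap_after_gc(), print_heap_regions(), trace_heap_after_gc() and gc_tracer_report_gc_end() calls removed at the end of do_collection_pause_at_safepoint_helper() are now issued by the destructors of hpm and jtm when the inner scope closes. Destructors run in reverse construction order, so the heap printing (hpm) happens before the JFR end events (jtm), and both still complete before the concurrent mark thread is signalled after the scope.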
@@ -3514,7 +3543,7 @@ class G1PrepareEvacuationTask : public AbstractGangTask {
   }
 };
 
-void G1CollectedHeap::pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
+void G1CollectedHeap::pre_evacuate_collection_set(G1EvacuationInfo* evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
   _bytes_used_during_gc = 0;
 
   _expand_heap_after_alloc_failure = true;
@@ -3782,7 +3811,7 @@ void G1CollectedHeap::evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states) {
   _collection_set.abandon_optional_collection_set(per_thread_states);
 }
 
-void G1CollectedHeap::post_evacuate_collection_set(G1EvacuationInfo& evacuation_info,
+void G1CollectedHeap::post_evacuate_collection_set(G1EvacuationInfo* evacuation_info,
                                                    G1RedirtyCardsQueueSet* rdcqs,
                                                    G1ParScanThreadStateSet* per_thread_states) {
   G1GCPhaseTimes* p = phase_times();
@@ -3803,19 +3832,16 @@ void G1CollectedHeap::post_evacuate_collection_set(G1EvacuationInfo& evacuation_info,
 
   post_evacuate_cleanup_1(per_thread_states, rdcqs);
 
-  post_evacuate_cleanup_2(&_preserved_marks_set, rdcqs, &evacuation_info, per_thread_states->surviving_young_words());
+  post_evacuate_cleanup_2(&_preserved_marks_set, rdcqs, evacuation_info, per_thread_states->surviving_young_words());
 
   assert_used_and_recalculate_used_equal(this);
 
   rebuild_free_region_list();
 
   record_obj_copy_mem_stats();
 
-  evacuation_info.set_collectionset_used_before(collection_set()->bytes_used_before());
-  evacuation_info.set_bytes_used(_bytes_used_during_gc);
-
-  policy()->print_age_table();
-  rem_set()->print_coarsen_stats();
+  evacuation_info->set_collectionset_used_before(collection_set()->bytes_used_before());
+  evacuation_info->set_bytes_used(_bytes_used_during_gc);
 }
 
 void G1CollectedHeap::record_obj_copy_mem_stats() {
