
8270009: Factor out and shuffle methods in G1CollectedHeap::do_collection_pause_at_safepoint_helper #4744

Closed
@@ -2570,24 +2570,6 @@ void G1CollectedHeap::gc_prologue(bool full) {
if (full || collector_state()->in_concurrent_start_gc()) {
increment_old_marking_cycles_started();
}

- // Fill TLAB's and such
- {
- Ticks start = Ticks::now();
- ensure_parsability(true);
- Tickspan dt = Ticks::now() - start;
- phase_times()->record_prepare_tlab_time_ms(dt.seconds() * MILLIUNITS);
- }

- if (!full) {
- // Flush dirty card queues to qset, so later phases don't need to account
- // for partially filled per-thread queues and such. Not needed for full
- // collections, which ignore those logs.
- Ticks start = Ticks::now();
- G1BarrierSet::dirty_card_queue_set().concatenate_logs();
- Tickspan dt = Ticks::now() - start;
- phase_times()->record_concatenate_dirty_card_logs_time_ms(dt.seconds() * MILLIUNITS);
- }
}

void G1CollectedHeap::gc_epilogue(bool full) {
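For readers unfamiliar with the idiom being moved out of gc_prologue() here: each sub-phase is bracketed by a pair of tick samples and reported in milliseconds. A minimal standalone sketch of the same pattern in standard C++ (report_ms and the sub-phase body are hypothetical stand-ins for phase_times()->record_*_time_ms() and the real work):

#include <chrono>
#include <cstdio>

// Hypothetical stand-in for phase_times()->record_prepare_tlab_time_ms(...).
static void report_ms(const char* phase, double ms) {
  std::printf("%s: %.3f ms\n", phase, ms);
}

static void timed_sub_phase() {
  auto start = std::chrono::steady_clock::now();
  // ... sub-phase work goes here, e.g. retiring all TLABs ...
  std::chrono::duration<double, std::milli> dt =
      std::chrono::steady_clock::now() - start;
  report_ms("prepare_tlab", dt.count());
}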
@@ -2601,10 +2583,6 @@ void G1CollectedHeap::gc_epilogue(bool full) {
assert(DerivedPointerTable::is_empty(), "derived pointer present");
#endif

- double start = os::elapsedTime();
- resize_all_tlabs();
- phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);

// We have just completed a GC. Update the soft reference
// policy with the new heap occupancy
Universe::heap()->update_capacity_and_used_at_gc();
@@ -2798,6 +2776,9 @@ void G1CollectedHeap::start_new_collection_set() {
}

void G1CollectedHeap::calculate_collection_set(G1EvacuationInfo* evacuation_info, double target_pause_time_ms) {
+ // Forget the current allocation region (we might even choose it to be part
+ // of the collection set!) before finalizing the collection set.
+ _allocator->release_mutator_alloc_regions();

_collection_set.finalize_initial_collection_set(target_pause_time_ms, &_survivor);
evacuation_info->set_collectionset_regions(collection_set()->region_length() +
@@ -2988,16 +2969,7 @@ class G1YoungGCJFRTracerMark : public G1JFRTracerMark {
}
};

- void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_pause_time_ms) {
- GCIdMark gc_id_mark;

- SvcGCMarker sgcm(SvcGCMarker::MINOR);
- ResourceMark rm;

- policy()->note_gc_start();

- wait_for_root_region_scanning();

+ bool G1CollectedHeap::determine_start_concurrent_mark_gc(){
// We should not be doing concurrent start unless the concurrent mark thread is running
if (!_cm_thread->should_terminate()) {
// This call will decide whether this pause is a concurrent start
@@ -3012,24 +2984,66 @@ void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_paus
// We also do not allow mixed GCs during marking.
assert(!collector_state()->mark_or_rebuild_in_progress() || collector_state()->in_young_only_phase(), "sanity");

+ return collector_state()->in_concurrent_start_gc();
+ }

+ void G1CollectedHeap::set_young_collection_default_active_worker_threads(){
+ uint active_workers = WorkerPolicy::calc_active_workers(workers()->total_workers(),
+ workers()->active_workers(),
+ Threads::number_of_non_daemon_threads());
+ active_workers = workers()->update_active_workers(active_workers);
+ log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
+ }

+ void G1CollectedHeap::prepare_tlabs_for_mutator() {
+ Ticks start = Ticks::now();

+ _survivor_evac_stats.adjust_desired_plab_sz();
+ _old_evac_stats.adjust_desired_plab_sz();

+ allocate_dummy_regions();

+ _allocator->init_mutator_alloc_regions();

+ resize_all_tlabs();

+ phase_times()->record_resize_tlab_time_ms((Ticks::now() - start).seconds() * 1000.0);
+ }

+ void G1CollectedHeap::retire_tlabs() {
+ ensure_parsability(true);
+ }

+ void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_pause_time_ms) {
+ ResourceMark rm;

+ IsGCActiveMark active_gc_mark;

Review comment:
Moving the IsGCActiveMark here expands the scope of is_gc_active() being true to cover areas it didn't used to. I think this is okay, but I didn't check all callers carefully.

Reply from @tschatzl (author), Jul 12, 2021:

None of the code before that (verification, timing setup, heap printing) should be dependent on that. Actually I was considering moving the mark right after the guarantee in G1CollectedHeap::do_collection_pause_at_safepoint, but then I reconsidered due to the GCLocker check there.
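To illustrate the point under discussion: IsGCActiveMark is an RAII-style guard, so moving its declaration to the top of the helper widens the region of code during which is_gc_active() reads true. A minimal sketch of that pattern (all names here are hypothetical; the real HotSpot class is more involved):

#include <cassert>

static bool g_gc_active = false;  // stand-in for the heap's "GC active" flag

class IsGCActiveMarkSketch {
public:
  IsGCActiveMarkSketch()  { assert(!g_gc_active); g_gc_active = true; }
  ~IsGCActiveMarkSketch() { assert(g_gc_active);  g_gc_active = false; }
};

static void pause_helper_sketch() {
  IsGCActiveMarkSketch active_gc_mark;  // flag is true from here on
  // ... verification, timing setup, heap printing, actual collection ...
}                                       // flag drops back to false here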

+ GCIdMark gc_id_mark;
+ SvcGCMarker sgcm(SvcGCMarker::MINOR);

+ GCTraceCPUTime tcpu;

// Record whether this pause may need to trigger a concurrent operation. Later,
// when we signal the G1ConcurrentMarkThread, the collector state has already
// been reset for the next pause.
- bool should_start_concurrent_mark_operation = collector_state()->in_concurrent_start_gc();
+ bool should_start_concurrent_mark_operation = determine_start_concurrent_mark_gc();
bool concurrent_operation_is_full_mark = false;

- // Inner scope for scope based logging, timers, and stats collection
+ // Verification may use the gang workers, so they must be set up before.
+ // Individual parallel phases may override this.
+ set_young_collection_default_active_worker_threads();

{
- GCTraceCPUTime tcpu;
+ // Do timing/tracing/statistics/pre- and post-logging/verification work not
+ // directly related to the collection. They should not be accounted for in
+ // collection work timing.

+ // The G1YoungGCTraceTime message depends on collector state, so must come after
+ // determining collector state.
G1YoungGCTraceTime tm(gc_cause());

- uint active_workers = WorkerPolicy::calc_active_workers(workers()->total_workers(),
- workers()->active_workers(),
- Threads::number_of_non_daemon_threads());
- active_workers = workers()->update_active_workers(active_workers);
- log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());

+ // Young GC internal timing
+ policy()->note_gc_start();
+ // JFR
G1YoungGCJFRTracerMark jtm(_gc_timer_stw, _gc_tracer_stw, gc_cause());
+ // JStat/MXBeans
@@ -3039,75 +3053,53 @@ void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_paus

G1HeapPrinterMark hpm(this);

- {
- IsGCActiveMark x;
- gc_prologue(false);

- G1HeapVerifier::G1VerifyType verify_type = young_collection_verify_type();
- verify_before_young_collection(verify_type);
- {
- // The elapsed time induced by the start time below deliberately elides
- // the possible verification above.
- double sample_start_time_sec = os::elapsedTime();

- // Please see comment in g1CollectedHeap.hpp and
- // G1CollectedHeap::ref_processing_init() to see how
- // reference processing currently works in G1.
- _ref_processor_stw->start_discovery(false /* always_clear */);

- policy()->record_collection_pause_start(sample_start_time_sec);

- // Forget the current allocation region (we might even choose it to be part
- // of the collection set!).
- _allocator->release_mutator_alloc_regions();

- calculate_collection_set(jtm.evacuation_info(), target_pause_time_ms);

- G1RedirtyCardsQueueSet rdcqs(G1BarrierSet::dirty_card_queue_set().allocator());
- G1ParScanThreadStateSet per_thread_states(this,
- &rdcqs,
- workers()->active_workers(),
- collection_set()->young_region_length(),
- collection_set()->optional_region_length());
- pre_evacuate_collection_set(jtm.evacuation_info(), &per_thread_states);

- bool may_do_optional_evacuation = _collection_set.optional_region_length() != 0;
- // Actually do the work...
- evacuate_initial_collection_set(&per_thread_states, may_do_optional_evacuation);

- if (may_do_optional_evacuation) {
- evacuate_optional_collection_set(&per_thread_states);
- }
- post_evacuate_collection_set(jtm.evacuation_info(), &rdcqs, &per_thread_states);

- start_new_collection_set();

- _survivor_evac_stats.adjust_desired_plab_sz();
- _old_evac_stats.adjust_desired_plab_sz();
+ // Wait for root region scan here to make sure that it is done before any
+ // use of the STW work gang to maximize cpu use (i.e. all cores are available
+ // just to do that).
+ wait_for_root_region_scanning();

- allocate_dummy_regions();
+ G1HeapVerifier::G1VerifyType verify_type = young_collection_verify_type();
+ verify_before_young_collection(verify_type);
+ {
+ // Actual collection work starts and is executed (only) in this scope.

- _allocator->init_mutator_alloc_regions();
+ // The elapsed time induced by the start time below deliberately elides
+ // the possible verification above.
+ double sample_start_time_sec = os::elapsedTime();
+ policy()->record_collection_pause_start(sample_start_time_sec);

- expand_heap_after_young_collection();
+ calculate_collection_set(jtm.evacuation_info(), target_pause_time_ms);

- // Refine the type of a concurrent mark operation now that we did the
- // evacuation, eventually aborting it.
- concurrent_operation_is_full_mark = policy()->concurrent_operation_is_full_mark("Revise IHOP");
+ G1RedirtyCardsQueueSet rdcqs(G1BarrierSet::dirty_card_queue_set().allocator());
+ G1ParScanThreadStateSet per_thread_states(this,
+ &rdcqs,
+ workers()->active_workers(),
+ collection_set()->young_region_length(),
+ collection_set()->optional_region_length());
+ pre_evacuate_collection_set(jtm.evacuation_info(), &per_thread_states);

- // Need to report the collection pause now since record_collection_pause_end()
- // modifies it to the next state.
- jtm.report_pause_type(collector_state()->young_gc_pause_type(concurrent_operation_is_full_mark));
+ bool may_do_optional_evacuation = _collection_set.optional_region_length() != 0;
+ // Actually do the work...
+ evacuate_initial_collection_set(&per_thread_states, may_do_optional_evacuation);

- double sample_end_time_sec = os::elapsedTime();
- double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
- policy()->record_collection_pause_end(pause_time_ms, concurrent_operation_is_full_mark);
+ if (may_do_optional_evacuation) {
+ evacuate_optional_collection_set(&per_thread_states);
+ }
+ post_evacuate_collection_set(jtm.evacuation_info(), &rdcqs, &per_thread_states);

+ // Refine the type of a concurrent mark operation now that we did the
+ // evacuation, eventually aborting it.
+ concurrent_operation_is_full_mark = policy()->concurrent_operation_is_full_mark("Revise IHOP");

- verify_after_young_collection(verify_type);
+ // Need to report the collection pause now since record_collection_pause_end()
+ // modifies it to the next state.
+ jtm.report_pause_type(collector_state()->young_gc_pause_type(concurrent_operation_is_full_mark));

- gc_epilogue(false);
+ double sample_end_time_sec = os::elapsedTime();
+ double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
+ policy()->record_collection_pause_end(pause_time_ms, concurrent_operation_is_full_mark);
}
+ verify_after_young_collection(verify_type);

policy()->print_phases();

@@ -3117,7 +3109,6 @@ void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_paus
// It should now be safe to tell the concurrent mark thread to start
// without its logging output interfering with the logging output
// that came from the pause.

if (should_start_concurrent_mark_operation) {
// CAUTION: after the start_concurrent_cycle() call below, the concurrent marking
// thread(s) could be running concurrently with us. Make sure that anything
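The CAUTION above describes a classic handoff hazard: once the concurrent mark thread is signalled, it races with everything that follows, so the pause must finish its logging and state updates first. A minimal sketch of the shape of that handoff in standard C++ (all names hypothetical; G1 uses its own monitor machinery, not std::condition_variable):

#include <condition_variable>
#include <mutex>

std::mutex              mark_mutex;
std::condition_variable mark_cv;
bool                    mark_requested = false;

// Runs in the concurrent mark thread.
void concurrent_mark_thread_loop() {
  std::unique_lock<std::mutex> lock(mark_mutex);
  mark_cv.wait(lock, [] { return mark_requested; });
  // ... marking proceeds here, concurrently with the mutator ...
}

// Runs at the end of the pause.
void finish_pause(bool should_start_concurrent_mark_operation) {
  // ... all pause logging must be complete before this point ...
  if (should_start_concurrent_mark_operation) {
    {
      std::lock_guard<std::mutex> lock(mark_mutex);
      mark_requested = true;
    }
    mark_cv.notify_one();  // from here on, the marker may be running
  }
}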
@@ -3544,11 +3535,33 @@ class G1PrepareEvacuationTask : public AbstractGangTask {
};

void G1CollectedHeap::pre_evacuate_collection_set(G1EvacuationInfo* evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
+ // Please see comment in g1CollectedHeap.hpp and
+ // G1CollectedHeap::ref_processing_init() to see how
+ // reference processing currently works in G1.
+ _ref_processor_stw->start_discovery(false /* always_clear */);

_bytes_used_during_gc = 0;

_expand_heap_after_alloc_failure = true;
Atomic::store(&_num_regions_failed_evacuation, 0u);

+ gc_prologue(false);

+ {
+ Ticks start = Ticks::now();
+ retire_tlabs();
+ phase_times()->record_prepare_tlab_time_ms((Ticks::now() - start).seconds() * 1000.0);
+ }

+ {
+ // Flush dirty card queues to qset, so later phases don't need to account
+ // for partially filled per-thread queues and such.
+ Ticks start = Ticks::now();
+ G1BarrierSet::dirty_card_queue_set().concatenate_logs();
+ Tickspan dt = Ticks::now() - start;
+ phase_times()->record_concatenate_dirty_card_logs_time_ms(dt.seconds() * MILLIUNITS);
+ }

_regions_failed_evacuation.clear();

// Disable the hot card cache.
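As background for concatenate_logs(): the point is to fold every partially filled per-thread buffer into one global queue so later phases only have one place to look. A toy sketch of that operation with plain containers and a mutex (the real G1DirtyCardQueueSet is lock-free and card-table based; this only shows the shape):

#include <deque>
#include <mutex>
#include <utility>
#include <vector>

struct CardBuffer {
  std::vector<const void*> cards;  // dirtied card addresses, schematically
};

std::mutex             qset_mutex;
std::deque<CardBuffer> global_queue;  // the shared "qset"

// Append one thread's partially filled buffer to the global queue and
// hand the thread a fresh, empty buffer.
void concatenate_log(CardBuffer& thread_buffer) {
  if (thread_buffer.cards.empty()) return;  // nothing to flush
  std::lock_guard<std::mutex> lock(qset_mutex);
  global_queue.push_back(std::move(thread_buffer));
  thread_buffer = CardBuffer{};
}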
@@ -3842,6 +3855,14 @@ void G1CollectedHeap::post_evacuate_collection_set(G1EvacuationInfo* evacuation_

evacuation_info->set_collectionset_used_before(collection_set()->bytes_used_before());
evacuation_info->set_bytes_used(_bytes_used_during_gc);

+ start_new_collection_set();

+ prepare_tlabs_for_mutator();

+ gc_epilogue(false);

+ expand_heap_after_young_collection();
}

void G1CollectedHeap::record_obj_copy_mem_stats() {
@@ -800,6 +800,14 @@ class G1CollectedHeap : public CollectedHeap {
// of the incremental collection pause, executed by the vm thread.
void do_collection_pause_at_safepoint_helper(double target_pause_time_ms);

+ void set_young_collection_default_active_worker_threads();

+ bool determine_start_concurrent_mark_gc();

+ void prepare_tlabs_for_mutator();

+ void retire_tlabs();

G1HeapVerifier::G1VerifyType young_collection_verify_type() const;
void verify_before_young_collection(G1HeapVerifier::G1VerifyType type);
void verify_after_young_collection(G1HeapVerifier::G1VerifyType type);
@@ -1451,7 +1459,6 @@ class G1CollectedHeap : public CollectedHeap {
void print_regions_on(outputStream* st) const;

public:

virtual void print_on(outputStream* st) const;
virtual void print_extended_on(outputStream* st) const;
virtual void print_on_error(outputStream* st) const;
@@ -174,6 +174,7 @@ void G1FullCollector::prepare_collection() {
_heap->verify_before_full_collection(scope()->is_explicit_gc());

_heap->gc_prologue(true);
+ _heap->retire_tlabs();
_heap->prepare_heap_for_full_collection();

PrepareRegionsClosure cl(this);
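For context on retire_tlabs() / ensure_parsability(true), now called in the full-GC prologue as well: before a collection can walk the heap linearly, the unused tail of every thread-local allocation buffer must be plugged so it parses as allocated space. A conceptual sketch with a toy bump allocator (HotSpot fills the gap with real filler objects such as int arrays; this only shows the shape):

#include <cstddef>
#include <cstdint>
#include <vector>

struct Tlab {
  std::vector<uint8_t> buf;  // the buffer carved out of the heap
  size_t top = 0;            // bump pointer: everything below is allocated
};

constexpr uint8_t FILLER = 0xFF;  // stand-in for a real filler object

// "Retire" the TLAB: plug the unallocated tail so that a linear walk of
// the region never encounters raw, unparsable free space.
void retire_tlab(Tlab& tlab) {
  for (size_t i = tlab.top; i < tlab.buf.size(); i++) {
    tlab.buf[i] = FILLER;
  }
  tlab.top = tlab.buf.size();  // buffer is now fully accounted for
}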
@@ -213,12 +214,12 @@ void G1FullCollector::complete_collection() {

_heap->prepare_heap_for_mutators();

+ _heap->resize_all_tlabs();

_heap->policy()->record_full_collection_end();
_heap->gc_epilogue(true);

_heap->verify_after_full_collection();

_heap->print_heap_after_full_collection();
}

void G1FullCollector::before_marking_update_attribute_table(HeapRegion* hr) {