8297639: Remove preventive GCs in G1
Reviewed-by: ayang, iwalulya
Thomas Schatzl committed Dec 14, 2022
Parent: 7241e35 · Commit: 8ff2928
Showing 16 changed files with 26 additions and 179 deletions.
9 changes: 4 additions & 5 deletions src/hotspot/share/gc/g1/g1AllocRegion.hpp
@@ -88,6 +88,10 @@ class G1AllocRegion : public CHeapObj<mtGC> {
// to allocate a new region even if the max has been reached.
HeapWord* new_alloc_region_and_allocate(size_t word_size, bool force);

// Perform an allocation out of a new allocation region, retiring the current one.
inline HeapWord* attempt_allocation_using_new_region(size_t min_word_size,
size_t desired_word_size,
size_t* actual_word_size);
protected:
// The memory node index this allocation region belongs to.
uint _node_index;
@@ -172,11 +176,6 @@ class G1AllocRegion : public CHeapObj<mtGC> {
size_t desired_word_size,
size_t* actual_word_size);

// Perform an allocation out of a new allocation region, retiring the current one.
inline HeapWord* attempt_allocation_using_new_region(size_t min_word_size,
size_t desired_word_size,
size_t* actual_word_size);

// Should be called to allocate a new region even if the max of this
// type of regions has been reached. Should only be called if other
// allocation attempts have failed and we are not holding a valid
4 changes: 2 additions & 2 deletions src/hotspot/share/gc/g1/g1Allocator.cpp
@@ -254,8 +254,8 @@ HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size,
// the memory may have been used up as the threads waited to acquire the lock.
if (!survivor_is_full()) {
result = survivor_gc_alloc_region(node_index)->attempt_allocation_locked(min_word_size,
desired_word_size,
actual_word_size);
desired_word_size,
actual_word_size);
if (result == NULL) {
set_survivor_full();
}
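Note on this hunk: survivor_attempt_allocation re-checks for free space after taking the heap lock, because the memory may have been used up while the thread waited to acquire it. A minimal standalone sketch of that retry-under-lock pattern follows; the mutex, struct, and function names are illustrative stand-ins, not the actual HotSpot declarations.

    #include <cstddef>
    #include <mutex>

    // Illustrative stand-in for the survivor alloc region (not HotSpot code).
    struct RegionSketch {
      void* attempt_allocation_locked(size_t /*min*/, size_t /*desired*/,
                                      size_t* /*actual*/) {
        return nullptr;  // stubbed for the sketch
      }
    };

    void* survivor_attempt_allocation_sketch(RegionSketch& region,
                                             std::mutex& heap_lock,
                                             bool& survivor_full,
                                             size_t min_word_size,
                                             size_t desired_word_size,
                                             size_t* actual_word_size) {
      std::lock_guard<std::mutex> guard(heap_lock);
      // Re-check under the lock: another thread may have used up the space
      // while this one waited.
      if (survivor_full) {
        return nullptr;
      }
      void* result = region.attempt_allocation_locked(min_word_size,
                                                      desired_word_size,
                                                      actual_word_size);
      if (result == nullptr) {
        survivor_full = true;  // remember the failure to avoid futile retries
      }
      return result;
    }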
4 changes: 0 additions & 4 deletions src/hotspot/share/gc/g1/g1Allocator.hpp
@@ -117,10 +117,6 @@ class G1Allocator : public CHeapObj<mtGC> {
size_t desired_word_size,
size_t* actual_word_size);

// Attempt allocation, retiring the current region and allocating a new one. It is
// assumed that attempt_allocation() has been tried and failed already first.
inline HeapWord* attempt_allocation_using_new_region(size_t word_size);

// This is to be called when holding an appropriate lock. It first tries in the
// current allocation region, and then attempts an allocation using a new region.
inline HeapWord* attempt_allocation_locked(size_t word_size);
10 changes: 0 additions & 10 deletions src/hotspot/share/gc/g1/g1Allocator.inline.hpp
@@ -62,16 +62,6 @@ inline HeapWord* G1Allocator::attempt_allocation(size_t min_word_size,
return mutator_alloc_region(node_index)->attempt_allocation(min_word_size, desired_word_size, actual_word_size);
}

inline HeapWord* G1Allocator::attempt_allocation_using_new_region(size_t word_size) {
uint node_index = current_node_index();
size_t temp;
HeapWord* result = mutator_alloc_region(node_index)->attempt_allocation_using_new_region(word_size, word_size, &temp);
assert(result != NULL || mutator_alloc_region(node_index)->get() == NULL,
"Must not have a mutator alloc region if there is no memory, but is " PTR_FORMAT,
p2i(mutator_alloc_region(node_index)->get()));
return result;
}

inline HeapWord* G1Allocator::attempt_allocation_locked(size_t word_size) {
uint node_index = current_node_index();
HeapWord* result = mutator_alloc_region(node_index)->attempt_allocation_locked(word_size);
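With G1Allocator::attempt_allocation_using_new_region removed, the mutator slow path reaches new-region allocation only through attempt_allocation_locked on the underlying alloc region. A rough sketch of that control flow, assuming (consistently with the declarations kept in g1AllocRegion.hpp) that the locked variant first retries the current region and then retires it for a fresh one; all names and types here are simplified stand-ins:

    #include <cstddef>

    using HeapWord = void;  // stand-in for HotSpot's HeapWord

    struct AllocRegionSketch {
      // Stubbed primitives mirroring the declarations kept in g1AllocRegion.hpp.
      HeapWord* attempt_allocation(size_t, size_t, size_t*) { return nullptr; }
      HeapWord* attempt_allocation_using_new_region(size_t, size_t, size_t*) {
        return nullptr;
      }

      HeapWord* attempt_allocation_locked(size_t min_word_size,
                                          size_t desired_word_size,
                                          size_t* actual_word_size) {
        // First retry within the currently active region...
        HeapWord* result = attempt_allocation(min_word_size, desired_word_size,
                                              actual_word_size);
        if (result != nullptr) {
          return result;
        }
        // ...then retire it and allocate out of a new region.
        return attempt_allocation_using_new_region(min_word_size,
                                                   desired_word_size,
                                                   actual_word_size);
      }
    };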
56 changes: 18 additions & 38 deletions src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -398,40 +398,28 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
HeapWord* result = NULL;
for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
bool should_try_gc;
bool preventive_collection_required = false;
uint gc_count_before;

{
MutexLocker x(Heap_lock);

// Now that we have the lock, we first retry the allocation in case another
// thread changed the region while we were waiting to acquire the lock.
size_t actual_size;
result = _allocator->attempt_allocation(word_size, word_size, &actual_size);
result = _allocator->attempt_allocation_locked(word_size);
if (result != NULL) {
return result;
}

preventive_collection_required = policy()->preventive_collection_required(1);
if (!preventive_collection_required) {
// We've already attempted a lock-free allocation above, so we don't want to
// do it again. Let's jump straight to replacing the active region.
result = _allocator->attempt_allocation_using_new_region(word_size);
// If the GCLocker is active and we are bound for a GC, try expanding young gen.
// This is different to when only GCLocker::needs_gc() is set: try to avoid
// waiting because the GCLocker is active to not wait too long.
if (GCLocker::is_active_and_needs_gc() && policy()->can_expand_young_list()) {
// No need for an ergo message here, can_expand_young_list() does this when
// it returns true.
result = _allocator->attempt_allocation_force(word_size);
if (result != NULL) {
return result;
}

// If the GCLocker is active and we are bound for a GC, try expanding young gen.
// This is different to when only GCLocker::needs_gc() is set: try to avoid
// waiting because the GCLocker is active to not wait too long.
if (GCLocker::is_active_and_needs_gc() && policy()->can_expand_young_list()) {
// No need for an ergo message here, can_expand_young_list() does this when
// it returns true.
result = _allocator->attempt_allocation_force(word_size);
if (result != NULL) {
return result;
}
}
}

// Only try a GC if the GCLocker does not signal the need for a GC. Wait until
@@ -443,10 +431,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
}

if (should_try_gc) {
GCCause::Cause gc_cause = preventive_collection_required ? GCCause::_g1_preventive_collection
: GCCause::_g1_inc_collection_pause;
bool succeeded;
result = do_collection_pause(word_size, gc_count_before, &succeeded, gc_cause);
result = do_collection_pause(word_size, gc_count_before, &succeeded, GCCause::_g1_inc_collection_pause);
if (result != NULL) {
assert(succeeded, "only way to get back a non-NULL result");
log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
@@ -860,25 +846,21 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
HeapWord* result = NULL;
for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
bool should_try_gc;
bool preventive_collection_required = false;
uint gc_count_before;


{
MutexLocker x(Heap_lock);

size_t size_in_regions = humongous_obj_size_in_regions(word_size);
preventive_collection_required = policy()->preventive_collection_required((uint)size_in_regions);
if (!preventive_collection_required) {
// Given that humongous objects are not allocated in young
// regions, we'll first try to do the allocation without doing a
// collection hoping that there's enough space in the heap.
result = humongous_obj_allocate(word_size);
if (result != NULL) {
policy()->old_gen_alloc_tracker()->
add_allocated_humongous_bytes_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
return result;
}
// Given that humongous objects are not allocated in young
// regions, we'll first try to do the allocation without doing a
// collection hoping that there's enough space in the heap.
result = humongous_obj_allocate(word_size);
if (result != NULL) {
policy()->old_gen_alloc_tracker()->
add_allocated_humongous_bytes_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
return result;
}

// Only try a GC if the GCLocker does not signal the need for a GC. Wait until
@@ -890,10 +872,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
}

if (should_try_gc) {
GCCause::Cause gc_cause = preventive_collection_required ? GCCause::_g1_preventive_collection
: GCCause::_g1_humongous_allocation;
bool succeeded;
result = do_collection_pause(word_size, gc_count_before, &succeeded, gc_cause);
result = do_collection_pause(word_size, gc_count_before, &succeeded, GCCause::_g1_humongous_allocation);
if (result != NULL) {
assert(succeeded, "only way to get back a non-NULL result");
log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
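With the preventive_collection_required() branches gone, both allocation slow paths collapse to the same shape: retry under the Heap_lock, optionally force-expand the young generation when the GCLocker is about to trigger a GC, then schedule a pause with the ordinary cause. A condensed sketch of attempt_allocation_slow after this change; locking, logging, and retry-count bookkeeping are elided, and the helper names are placeholders stubbed only so the sketch compiles:

    #include <cstddef>

    using HeapWord = void;  // stand-in for HotSpot's HeapWord

    // Placeholder stubs standing in for G1CollectedHeap/G1Allocator calls.
    inline HeapWord* allocator_attempt_allocation_locked(size_t) { return nullptr; }
    inline HeapWord* allocator_attempt_allocation_force(size_t) { return nullptr; }
    inline bool gclocker_is_active_and_needs_gc() { return false; }
    inline bool policy_can_expand_young_list() { return false; }
    inline HeapWord* do_collection_pause_sketch(size_t) { return nullptr; }

    HeapWord* attempt_allocation_slow_sketch(size_t word_size) {
      for (;;) {
        // Retry under the heap lock: another thread may have installed a new
        // mutator region while this one waited.
        HeapWord* result = allocator_attempt_allocation_locked(word_size);
        if (result != nullptr) return result;

        // If the GCLocker is active and a GC is pending, try expanding the
        // young generation rather than stalling behind the GCLocker.
        if (gclocker_is_active_and_needs_gc() && policy_can_expand_young_list()) {
          result = allocator_attempt_allocation_force(word_size);
          if (result != nullptr) return result;
        }

        // Otherwise schedule an incremental collection pause
        // (GCCause::_g1_inc_collection_pause) and loop.
        result = do_collection_pause_sketch(word_size);
        if (result != nullptr) return result;
      }
    }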
4 changes: 0 additions & 4 deletions src/hotspot/share/gc/g1/g1CollectionSet.cpp
@@ -100,10 +100,6 @@ void G1CollectionSet::clear_candidates() {
_candidates = NULL;
}

bool G1CollectionSet::has_candidates() {
return _candidates != NULL && !_candidates->is_empty();
}

// Add the heap region at the head of the non-incremental collection set
void G1CollectionSet::add_old_region(HeapRegion* hr) {
assert_at_safepoint_on_vm_thread();
1 change: 0 additions & 1 deletion src/hotspot/share/gc/g1/g1CollectionSet.hpp
@@ -223,7 +223,6 @@ class G1CollectionSet {
void initialize(uint max_region_length);

void clear_candidates();
bool has_candidates();

void set_candidates(G1CollectionSetCandidates* candidates) {
assert(_candidates == NULL, "Trying to replace collection set candidates.");
87 changes: 0 additions & 87 deletions src/hotspot/share/gc/g1/g1Policy.cpp
@@ -71,8 +71,6 @@ G1Policy::G1Policy(STWGCTimer* gc_timer) :
_reserve_regions(0),
_young_gen_sizer(),
_free_regions_at_end_of_collection(0),
_predicted_surviving_bytes_from_survivor(0),
_predicted_surviving_bytes_from_old(0),
_rs_length(0),
_pending_cards_at_gc_start(0),
_concurrent_start_to_mixed(),
@@ -583,7 +581,6 @@ void G1Policy::record_full_collection_end() {
// also call this on any additional surv rate groups

_free_regions_at_end_of_collection = _g1h->num_free_regions();
update_survival_estimates_for_next_collection();
_survivor_surv_rate_group->reset();
update_young_length_bounds();

@@ -898,8 +895,6 @@ void G1Policy::record_young_collection_end(bool concurrent_operation_is_full_mar

_free_regions_at_end_of_collection = _g1h->num_free_regions();

update_survival_estimates_for_next_collection();

// Do not update dynamic IHOP due to G1 periodic collection as it is highly likely
// that in this case we are not running in a "normal" operating mode.
if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
@@ -1578,88 +1573,6 @@ void G1Policy::calculate_optional_collection_set_regions(G1CollectionSetCandidat
num_optional_regions, max_optional_regions, total_prediction_ms);
}

// Number of regions required to store the given number of bytes, taking
// into account the target amount of wasted space in PLABs.
static size_t get_num_regions_adjust_for_plab_waste(size_t byte_count) {
size_t byte_count_adjusted = byte_count * (size_t)(100 + TargetPLABWastePct) / 100.0;

// Round up the region count
return (byte_count_adjusted + HeapRegion::GrainBytes - 1) / HeapRegion::GrainBytes;
}

bool G1Policy::preventive_collection_required(uint alloc_region_count) {
if (!G1UsePreventiveGC || !Universe::is_fully_initialized()) {
// Don't attempt any preventive GC's if the feature is disabled,
// or before initialization is complete.
return false;
}

if (_g1h->young_regions_count() == 0 && !_collection_set->has_candidates()) {
return false;
}

uint eden_count = _g1h->eden_regions_count();
size_t const eden_surv_bytes_pred = _eden_surv_rate_group->accum_surv_rate_pred(eden_count) * HeapRegion::GrainBytes;
size_t const total_young_predicted_surviving_bytes = eden_surv_bytes_pred + _predicted_surviving_bytes_from_survivor;

uint required_regions = (uint)(get_num_regions_adjust_for_plab_waste(total_young_predicted_surviving_bytes) +
get_num_regions_adjust_for_plab_waste(_predicted_surviving_bytes_from_old));

if (required_regions > _g1h->num_free_or_available_regions() - alloc_region_count) {
log_debug(gc, ergo, cset)("Preventive GC, insufficient free or available regions. "
"Predicted need %u. Curr Eden %u (Pred %u). Curr Survivor %u (Pred %u). Curr Old %u (Pred %u) Free or Avail %u (Free %u) Alloc %u",
required_regions,
eden_count,
(uint)get_num_regions_adjust_for_plab_waste(eden_surv_bytes_pred),
_g1h->survivor_regions_count(),
(uint)get_num_regions_adjust_for_plab_waste(_predicted_surviving_bytes_from_survivor),
_g1h->old_regions_count(),
(uint)get_num_regions_adjust_for_plab_waste(_predicted_surviving_bytes_from_old),
_g1h->num_free_or_available_regions(),
_g1h->num_free_regions(),
alloc_region_count);

return true;
}

return false;
}

void G1Policy::update_survival_estimates_for_next_collection() {
// Predict the number of bytes of surviving objects from survivor and old
// regions and update the associated members.

// Survivor regions
size_t survivor_bytes = 0;
const GrowableArray<HeapRegion*>* survivor_regions = _g1h->survivor()->regions();
for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
it != survivor_regions->end();
++it) {
survivor_bytes += predict_bytes_to_copy(*it);
}

_predicted_surviving_bytes_from_survivor = survivor_bytes;

// Old regions
if (!_collection_set->has_candidates()) {
_predicted_surviving_bytes_from_old = 0;
return;
}

// Use the minimum old gen collection set as conservative estimate for the number
// of regions to take for this calculation.
G1CollectionSetCandidates *candidates = _collection_set->candidates();
uint iterate_count = MIN2(candidates->num_remaining(), calc_min_old_cset_length(candidates));
uint current_index = candidates->cur_idx();
size_t old_bytes = 0;
for (uint i = 0; i < iterate_count; i++) {
HeapRegion *region = candidates->at(current_index + i);
old_bytes += predict_bytes_to_copy(region);
}

_predicted_surviving_bytes_from_old = old_bytes;
}

void G1Policy::transfer_survivors_to_cset(const G1SurvivorRegions* survivors) {
start_adding_survivor_regions();

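For reference, the removed heuristic asked whether the predicted surviving bytes from eden, survivor, and the minimum old collection set, inflated by TargetPLABWastePct, would still fit in the free regions. Below is a self-contained restatement of the deleted get_num_regions_adjust_for_plab_waste arithmetic with a worked example; the 10% waste target and 4 MB region size are assumed illustrative values (and plain integer arithmetic is used for simplicity), not the exact HotSpot computation:

    #include <cstddef>
    #include <iostream>

    // Assumed illustrative constants; the real TargetPLABWastePct and
    // HeapRegion::GrainBytes depend on VM flags and heap size.
    constexpr size_t kTargetPLABWastePct = 10;
    constexpr size_t kGrainBytes = 4u * 1024 * 1024;  // 4 MB regions

    size_t num_regions_adjusted_for_plab_waste(size_t byte_count) {
      // Inflate the predicted surviving bytes by the PLAB waste target...
      size_t adjusted = byte_count * (100 + kTargetPLABWastePct) / 100;
      // ...and round up to whole regions.
      return (adjusted + kGrainBytes - 1) / kGrainBytes;
    }

    int main() {
      // Worked example: 30 MB of predicted survivors -> 33 MB after the 10%
      // adjustment -> ceil(33 MB / 4 MB) = 9 regions. The removed
      // preventive_collection_required() compared such sums against
      // num_free_or_available_regions() minus the regions being allocated.
      std::cout << num_regions_adjusted_for_plab_waste(30u * 1024 * 1024) << "\n";
      return 0;
    }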
10 changes: 0 additions & 10 deletions src/hotspot/share/gc/g1/g1Policy.hpp
@@ -101,11 +101,6 @@ class G1Policy: public CHeapObj<mtGC> {

uint _free_regions_at_end_of_collection;

// These values are predictions of how much we think will survive in each
// section of the heap.
size_t _predicted_surviving_bytes_from_survivor;
size_t _predicted_surviving_bytes_from_old;

size_t _rs_length;

size_t _pending_cards_at_gc_start;
@@ -360,11 +355,6 @@
double time_remaining_ms,
uint& num_optional_regions);

// Returns whether a collection should be done proactively, taking into
// account the current number of free regions and the expected survival
// rates in each section of the heap.
bool preventive_collection_required(uint region_count);

private:

// Predict the number of bytes of surviving objects from survivor and old
7 changes: 1 addition & 6 deletions src/hotspot/share/gc/g1/g1VMOperations.cpp
@@ -123,15 +123,10 @@ VM_G1CollectForAllocation::VM_G1CollectForAllocation(size_t word_size,
VM_CollectForAllocation(word_size, gc_count_before, gc_cause),
_gc_succeeded(false) {}

bool VM_G1CollectForAllocation::should_try_allocation_before_gc() {
// Don't allocate before a preventive GC.
return _gc_cause != GCCause::_g1_preventive_collection;
}

void VM_G1CollectForAllocation::doit() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();

if (should_try_allocation_before_gc() && _word_size > 0) {
if (_word_size > 0) {
// An allocation has been requested. So, try to do that first.
_result = g1h->attempt_allocation_at_safepoint(_word_size,
false /* expect_null_cur_alloc_region */);
3 changes: 0 additions & 3 deletions src/hotspot/share/gc/g1/g1VMOperations.hpp
@@ -77,9 +77,6 @@ class VM_G1CollectForAllocation : public VM_CollectForAllocation {
virtual VMOp_Type type() const { return VMOp_G1CollectForAllocation; }
virtual void doit();
bool gc_succeeded() const { return _gc_succeeded; }

private:
bool should_try_allocation_before_gc();
};

// Concurrent G1 stop-the-world operations such as remark and cleanup.
3 changes: 0 additions & 3 deletions src/hotspot/share/gc/shared/gcCause.cpp
@@ -99,9 +99,6 @@ const char* GCCause::to_string(GCCause::Cause cause) {
case _g1_periodic_collection:
return "G1 Periodic Collection";

case _g1_preventive_collection:
return "G1 Preventive Collection";

case _dcmd_gc_run:
return "Diagnostic Command";

1 change: 0 additions & 1 deletion src/hotspot/share/gc/shared/gcCause.hpp
@@ -74,7 +74,6 @@ class GCCause : public AllStatic {
_g1_compaction_pause,
_g1_humongous_allocation,
_g1_periodic_collection,
_g1_preventive_collection,

_dcmd_gc_run,

1 change: 0 additions & 1 deletion test/hotspot/jtreg/gc/g1/TestEvacuationFailure.java
@@ -50,7 +50,6 @@ public static void main(String[] args) throws Exception {
"-XX:G1EvacuationFailureALotCount=100",
"-XX:G1EvacuationFailureALotInterval=1",
"-XX:+UnlockDiagnosticVMOptions",
"-XX:-G1UsePreventiveGC",
"-Xlog:gc",
GCTestWithEvacuationFailure.class.getName());

2 changes: 0 additions & 2 deletions test/hotspot/jtreg/gc/g1/TestGCLogMessages.java
@@ -278,7 +278,6 @@ private void testWithEvacuationFailureLogs() throws Exception {
"-XX:G1EvacuationFailureALotCount=100",
"-XX:G1EvacuationFailureALotInterval=1",
"-XX:+UnlockDiagnosticVMOptions",
"-XX:-G1UsePreventiveGC",
"-Xlog:gc+phases=debug",
GCTestWithEvacuationFailure.class.getName());

@@ -291,7 +290,6 @@ private void testWithEvacuationFailureLogs() throws Exception {
"-Xmn16M",
"-Xms32M",
"-XX:+UnlockDiagnosticVMOptions",
"-XX:-G1UsePreventiveGC",
"-Xlog:gc+phases=trace",
GCTestWithEvacuationFailure.class.getName());


3 comments on commit 8ff2928

@JesperIRL
Member
/tag jdk-21+2

@openjdk openjdk bot commented on 8ff2928 Dec 15, 2022
@JesperIRL The tag jdk-21+2 was successfully created.
