8318706: Implement JEP 423: Region Pinning for G1
Reviewed-by: ayang, iwalulya, sjohanss
Thomas Schatzl committed Nov 29, 2023
1 parent e44d4b2 commit 38cfb22
Showing 59 changed files with 1,388 additions and 673 deletions.
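At a high level, this change removes G1's reliance on the GCLocker for JNI critical regions: instead of blocking garbage collection while any thread holds a critical section, G1 now pins the individual heap regions containing the affected objects and leaves their contents in place during evacuation. The pin and unpin entry points are driven by the existing JNI critical-array functions. The following is a minimal sketch of native code that would exercise this path; it uses only the standard JNI API, and the function name and surrounding setup are illustrative:

```cpp
#include <jni.h>

// Computes a checksum over a Java byte[] inside a JNI critical section.
// With JEP 423, entering the critical section pins the G1 region containing
// `array` (via CollectedHeap::pin_object) instead of inhibiting GC globally;
// leaving it unpins the region (CollectedHeap::unpin_object).
jlong checksum_critical(JNIEnv* env, jbyteArray array) {
  const jsize len = env->GetArrayLength(array);  // query length before entering

  jboolean is_copy;
  jbyte* data = static_cast<jbyte*>(env->GetPrimitiveArrayCritical(array, &is_copy));
  if (data == nullptr) {
    return 0;  // out of memory; a pending exception is set
  }

  jlong sum = 0;
  for (jsize i = 0; i < len; i++) {
    sum += data[i];
  }

  // JNI_ABORT: the buffer was only read, so no copy-back is needed.
  env->ReleasePrimitiveArrayCritical(array, data, JNI_ABORT);
  return sum;
}
```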
184 changes: 40 additions & 144 deletions src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -78,7 +78,6 @@
#include "gc/shared/gcBehaviours.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
@@ -411,13 +410,11 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
// We should only get here after the first-level allocation attempt
// (attempt_allocation()) failed to allocate.

// We will loop until a) we manage to successfully perform the
// allocation or b) we successfully schedule a collection which
// fails to perform the allocation. b) is the only case when we'll
// return null.
// We will loop until a) we manage to successfully perform the allocation or b)
// successfully schedule a collection which fails to perform the allocation.
// Case b) is the only case when we'll return null.
HeapWord* result = nullptr;
for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
bool should_try_gc;
for (uint try_count = 1; /* we'll return */; try_count++) {
uint gc_count_before;

{
@@ -430,67 +427,26 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
return result;
}

// If the GCLocker is active and we are bound for a GC, try expanding young gen.
// This is different to when only GCLocker::needs_gc() is set: try to avoid
// waiting because the GCLocker is active to not wait too long.
if (GCLocker::is_active_and_needs_gc() && policy()->can_expand_young_list()) {
// No need for an ergo message here, can_expand_young_list() does this when
// it returns true.
result = _allocator->attempt_allocation_force(word_size);
if (result != nullptr) {
return result;
}
}

// Only try a GC if the GCLocker does not signal the need for a GC. Wait until
// the GCLocker initiated GC has been performed and then retry. This includes
// the case when the GC Locker is not active but has not been performed.
should_try_gc = !GCLocker::needs_gc();
// Read the GC count while still holding the Heap_lock.
gc_count_before = total_collections();
}

if (should_try_gc) {
bool succeeded;
result = do_collection_pause(word_size, gc_count_before, &succeeded, GCCause::_g1_inc_collection_pause);
if (result != nullptr) {
assert(succeeded, "only way to get back a non-null result");
log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
Thread::current()->name(), p2i(result));
return result;
}

if (succeeded) {
// We successfully scheduled a collection which failed to allocate. No
// point in trying to allocate further. We'll just return null.
log_trace(gc, alloc)("%s: Successfully scheduled collection failing to allocate "
SIZE_FORMAT " words", Thread::current()->name(), word_size);
return nullptr;
}
log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT " words",
Thread::current()->name(), word_size);
} else {
// Failed to schedule a collection.
if (gclocker_retry_count > GCLockerRetryAllocationCount) {
log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating "
SIZE_FORMAT " words", Thread::current()->name(), word_size);
return nullptr;
}
log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name());
// The GCLocker is either active or the GCLocker initiated
// GC has not yet been performed. Stall until it is and
// then retry the allocation.
GCLocker::stall_until_clear();
gclocker_retry_count += 1;
bool succeeded;
result = do_collection_pause(word_size, gc_count_before, &succeeded, GCCause::_g1_inc_collection_pause);
if (succeeded) {
log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
Thread::current()->name(), p2i(result));
return result;
}

// We can reach here if we were unsuccessful in scheduling a
// collection (because another thread beat us to it) or if we were
// stalled due to the GC locker. In either case we should retry the
// allocation attempt in case another thread successfully
// performed a collection and reclaimed enough space. We do the
// first attempt (without holding the Heap_lock) here and the
// follow-on attempt will be at the start of the next loop
log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT " words",
Thread::current()->name(), word_size);

// We can reach here if we were unsuccessful in scheduling a collection (because
// another thread beat us to it). In this case immediately retry the allocation
// attempt because another thread successfully performed a collection and possibly
// reclaimed enough space. The first attempt (without holding the Heap_lock) is
// here and the follow-on attempt will be at the start of the next loop
// iteration (after taking the Heap_lock).
size_t dummy = 0;
result = _allocator->attempt_allocation(word_size, word_size, &dummy);
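The rewritten slow path above hinges on sampling total_collections() while still holding the Heap_lock: the collection pause later compares the sampled count against the current one and declines to run if another thread's collection already happened in between, in which case the caller simply retries the allocation. A self-contained sketch of that pattern (illustrative names and types, not HotSpot's; in HotSpot the check happens inside the VM operation at a safepoint):

```cpp
#include <cstdint>

// Sketch of the "read GC count under the lock, retry if superseded" shape
// used by attempt_allocation_slow().
struct Heap {
  uint32_t total_collections = 0;  // bumped at the start of every pause

  // Returns true if this request's pause actually ran; false means another
  // thread's collection slipped in after gc_count_before was sampled, so the
  // caller should retry its allocation instead of forcing another GC.
  bool schedule_pause(uint32_t gc_count_before) {
    if (total_collections != gc_count_before) {
      return false;  // superseded: someone else already collected
    }
    // ... perform the pause and the allocation attempt ...
    total_collections++;
    return true;
  }
};
```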
@@ -673,13 +629,11 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
collect(GCCause::_g1_humongous_allocation);
}

// We will loop until a) we manage to successfully perform the
// allocation or b) we successfully schedule a collection which
// fails to perform the allocation. b) is the only case when we'll
// return null.
// We will loop until a) we manage to successfully perform the allocation or b)
// successfully schedule a collection which fails to perform the allocation.
// Case b) is the only case when we'll return null.
HeapWord* result = nullptr;
for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
bool should_try_gc;
for (uint try_count = 1; /* we'll return */; try_count++) {
uint gc_count_before;


@@ -697,64 +651,35 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
return result;
}

// Only try a GC if the GCLocker does not signal the need for a GC. Wait until
// the GCLocker initiated GC has been performed and then retry. This includes
// the case when the GC Locker is not active but has not been performed.
should_try_gc = !GCLocker::needs_gc();
// Read the GC count while still holding the Heap_lock.
gc_count_before = total_collections();
}

if (should_try_gc) {
bool succeeded;
result = do_collection_pause(word_size, gc_count_before, &succeeded, GCCause::_g1_humongous_allocation);
bool succeeded;
result = do_collection_pause(word_size, gc_count_before, &succeeded, GCCause::_g1_humongous_allocation);
if (succeeded) {
log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
Thread::current()->name(), p2i(result));
if (result != nullptr) {
assert(succeeded, "only way to get back a non-null result");
log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
Thread::current()->name(), p2i(result));
size_t size_in_regions = humongous_obj_size_in_regions(word_size);
policy()->old_gen_alloc_tracker()->
record_collection_pause_humongous_allocation(size_in_regions * HeapRegion::GrainBytes);
return result;
}

if (succeeded) {
// We successfully scheduled a collection which failed to allocate. No
// point in trying to allocate further. We'll just return null.
log_trace(gc, alloc)("%s: Successfully scheduled collection failing to allocate "
SIZE_FORMAT " words", Thread::current()->name(), word_size);
return nullptr;
}
log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT "",
Thread::current()->name(), word_size);
} else {
// Failed to schedule a collection.
if (gclocker_retry_count > GCLockerRetryAllocationCount) {
log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating "
SIZE_FORMAT " words", Thread::current()->name(), word_size);
return nullptr;
}
log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name());
// The GCLocker is either active or the GCLocker initiated
// GC has not yet been performed. Stall until it is and
// then retry the allocation.
GCLocker::stall_until_clear();
gclocker_retry_count += 1;
return result;
}

log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT "",
Thread::current()->name(), word_size);

// We can reach here if we were unsuccessful in scheduling a
// collection (because another thread beat us to it) or if we were
// stalled due to the GC locker. In either case we should retry the
// allocation attempt in case another thread successfully
// performed a collection and reclaimed enough space.
// We can reach here if we were unsuccessful in scheduling a collection (because
// another thread beat us to it).
// Humongous object allocation always needs a lock, so we wait for the retry
// in the next iteration of the loop, unlike for the regular allocation case.
// Give a warning if we seem to be looping forever.

if ((QueuedAllocationWarningCount > 0) &&
(try_count % QueuedAllocationWarningCount == 0)) {
log_warning(gc, alloc)("%s: Retried allocation %u times for " SIZE_FORMAT " words",
log_warning(gc, alloc)("%s: Retried allocation %u times for %zu words",
Thread::current()->name(), try_count, word_size);
}
}
@@ -908,11 +833,6 @@ bool G1CollectedHeap::do_full_collection(bool clear_all_soft_refs,
bool do_maximal_compaction) {
assert_at_safepoint_on_vm_thread();

if (GCLocker::check_active_before_gc()) {
// Full GC was not completed.
return false;
}

const bool do_clear_all_soft_refs = clear_all_soft_refs ||
soft_ref_policy()->should_clear_all_soft_refs();

@@ -1269,9 +1189,11 @@ G1CollectedHeap::G1CollectedHeap() :

_humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);

// Override the default _filler_array_max_size so that no humongous filler
// objects are created.
_filler_array_max_size = _humongous_object_threshold_in_words;
// Since filler arrays are never referenced, we can make them region sized.
// This simplifies filling up the region in case we have some potentially
// unreferenced (by Java code, but still in use by native code) pinned objects
// in there.
_filler_array_max_size = HeapRegion::GrainWords;

// Override the default _stack_chunk_max_size so that no humongous stack chunks are created
_stack_chunk_max_size = _humongous_object_threshold_in_words;
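The filler-array change above follows from pinning: a region kept in place because of pinned objects may contain arbitrarily large dead gaps, and raising the maximum filler-array size to a full region guarantees that any single gap can be plugged with one filler object. A sketch of that invariant (constants and names are illustrative; HotSpot's actual filling logic lives in CollectedHeap::fill_with_objects()):

```cpp
#include <cstddef>

// Counts how many filler objects are needed to plug a dead gap of
// `gap_words` words when a single filler may span at most
// `filler_max_words`.
size_t fillers_needed(size_t gap_words, size_t filler_max_words) {
  size_t count = 0;
  while (gap_words > 0) {
    size_t chunk = gap_words < filler_max_words ? gap_words : filler_max_words;
    gap_words -= chunk;
    count++;
  }
  return count;
}

// With filler_max_words equal to the region size in words, fillers_needed()
// returns at most 1 for any gap inside a single region, which is what the
// comment above means by making fillers "region sized".
```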
@@ -1904,12 +1826,6 @@ bool G1CollectedHeap::try_collect_concurrently(GCCause::Cause cause,
// Collection failed and should be retried.
assert(op.transient_failure(), "invariant");

if (GCLocker::is_active_and_needs_gc()) {
// If GCLocker is active, wait until clear before retrying.
LOG_COLLECT_CONCURRENTLY(cause, "gc-locker stall");
GCLocker::stall_until_clear();
}

LOG_COLLECT_CONCURRENTLY(cause, "retry");
}
}
@@ -1935,11 +1851,6 @@ bool G1CollectedHeap::try_collect_fullgc(GCCause::Cause cause,
return true;
}
}

if (GCLocker::is_active_and_needs_gc()) {
// If GCLocker is active, wait until clear before retrying.
GCLocker::stall_until_clear();
}
}
}

@@ -1949,11 +1860,6 @@ bool G1CollectedHeap::try_collect(GCCause::Cause cause,
return try_collect_concurrently(cause,
counters_before.total_collections(),
counters_before.old_marking_cycles_started());
} else if (GCLocker::should_discard(cause, counters_before.total_collections())) {
// Indicate failure to be consistent with VMOp failure due to
// another collection slipping in after our gc_count but before
// our request is processed.
return false;
} else if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {

@@ -2179,14 +2085,6 @@ bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
return false; // keep some compilers happy
}

void G1CollectedHeap::pin_object(JavaThread* thread, oop obj) {
GCLocker::lock_critical(thread);
}

void G1CollectedHeap::unpin_object(JavaThread* thread, oop obj) {
GCLocker::unlock_critical(thread);
}

void G1CollectedHeap::print_heap_regions() const {
LogTarget(Trace, gc, heap, region) lt;
if (lt.is_enabled()) {
@@ -2489,10 +2387,6 @@ bool G1CollectedHeap::do_collection_pause_at_safepoint() {
assert_at_safepoint_on_vm_thread();
guarantee(!is_gc_active(), "collection is not reentrant");

if (GCLocker::check_active_before_gc()) {
return false;
}

do_collection_pause_at_safepoint_helper();
return true;
}
@@ -2647,6 +2541,8 @@ void G1CollectedHeap::free_region(HeapRegion* hr, FreeRegionList* free_list) {
assert(!hr->is_free(), "the region should not be free");
assert(!hr->is_empty(), "the region should not be empty");
assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
assert(!hr->has_pinned_objects(),
"must not free a region which contains pinned objects");

// Reset region metadata to allow reuse.
hr->hr_clear(true /* clear_space */);
8 changes: 4 additions & 4 deletions src/hotspot/share/gc/g1/g1CollectedHeap.hpp
@@ -560,6 +560,9 @@ class G1CollectedHeap : public CollectedHeap {
return _monitoring_support;
}

void pin_object(JavaThread* thread, oop obj) override;
void unpin_object(JavaThread* thread, oop obj) override;

void resize_heap_if_necessary();

// Check if there is memory to uncommit and if so schedule a task to do it.
@@ -613,7 +616,7 @@ class G1CollectedHeap : public CollectedHeap {
// We register a region with the fast "in collection set" test. We
// simply set to true the array slot corresponding to this region.
void register_young_region_with_region_attr(HeapRegion* r) {
_region_attr.set_in_young(r->hrm_index());
_region_attr.set_in_young(r->hrm_index(), r->has_pinned_objects());
}
inline void register_new_survivor_region_with_region_attr(HeapRegion* r);
inline void register_region_with_region_attr(HeapRegion* r);
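The extra argument to set_in_young() above reflects that the per-region attribute table now carries a pinned flag alongside the region type, so evacuation can decide cheaply, per region, whether objects may be moved. A sketch of such a table (layout and names are illustrative, not G1HeapRegionAttr's actual encoding):

```cpp
#include <cstdint>
#include <vector>

// Illustrative per-region attribute entry with a pinned flag; G1's real
// G1HeapRegionAttr packs this information differently.
struct RegionAttr {
  int8_t type = 0;        // e.g. 0 = not in collection set, 1 = young, ...
  bool is_pinned = false; // region contains objects pinned via JNI critical sections
};

class RegionAttrTable {
  std::vector<RegionAttr> _attrs;

public:
  explicit RegionAttrTable(size_t max_regions) : _attrs(max_regions) {}

  void set_in_young(uint32_t index, bool is_pinned) {
    _attrs[index] = RegionAttr{1, is_pinned};
  }

  // Evacuation consults this: objects in a pinned region stay in place even
  // if the region is in the collection set.
  bool is_pinned(uint32_t index) const { return _attrs[index].is_pinned; }
};
```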
@@ -1292,9 +1295,6 @@ class G1CollectedHeap : public CollectedHeap {
G1HeapSummary create_g1_heap_summary();
G1EvacSummary create_g1_evac_summary(G1EvacStats* stats);

void pin_object(JavaThread* thread, oop obj) override;
void unpin_object(JavaThread* thread, oop obj) override;

// Printing
private:
void print_heap_regions() const;
20 changes: 20 additions & 0 deletions src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
@@ -209,6 +209,8 @@ G1HeapRegionAttr G1CollectedHeap::region_attr(uint idx) const {
}

void G1CollectedHeap::register_humongous_candidate_region_with_region_attr(uint index) {
assert(!region_at(index)->has_pinned_objects(), "must be");
assert(region_at(index)->rem_set()->is_complete(), "must be");
_region_attr.set_humongous_candidate(index);
}

@@ -218,9 +220,12 @@ void G1CollectedHeap::register_new_survivor_region_with_region_attr(HeapRegion*

void G1CollectedHeap::register_region_with_region_attr(HeapRegion* r) {
_region_attr.set_remset_is_tracked(r->hrm_index(), r->rem_set()->is_tracked());
_region_attr.set_is_pinned(r->hrm_index(), r->has_pinned_objects());
}

void G1CollectedHeap::register_old_region_with_region_attr(HeapRegion* r) {
assert(!r->has_pinned_objects(), "must be");
assert(r->rem_set()->is_complete(), "must be");
_region_attr.set_in_old(r->hrm_index(), r->rem_set()->is_tracked());
_rem_set->exclude_region_from_scan(r->hrm_index());
}
@@ -257,6 +262,21 @@ inline bool G1CollectedHeap::is_obj_dead(const oop obj, const HeapRegion* hr) co
}
}

inline void G1CollectedHeap::pin_object(JavaThread* thread, oop obj) {
assert(obj != nullptr, "obj must not be null");
assert(!is_gc_active(), "must not pin objects during a GC");
assert(obj->is_typeArray(), "must be typeArray");
HeapRegion *r = heap_region_containing(obj);
r->increment_pinned_object_count();
}

inline void G1CollectedHeap::unpin_object(JavaThread* thread, oop obj) {
assert(obj != nullptr, "obj must not be null");
assert(!is_gc_active(), "must not unpin objects during a GC");
HeapRegion *r = heap_region_containing(obj);
r->decrement_pinned_object_count();
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
assert(obj != nullptr, "precondition");

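The inline pin_object()/unpin_object() bodies above reduce pinning to a per-region counter: a region reports has_pinned_objects() while the count is non-zero, and the asserts added elsewhere in this commit (free_region(), register_old_region_with_region_attr()) guarantee such a region is never freed or its objects relocated. A self-contained sketch of that counter follows, using std::atomic with illustrative names; HotSpot itself uses its own Atomic wrapper rather than the standard library:

```cpp
#include <atomic>
#include <cstddef>

// Illustrative per-region pinned-object counter. Pin/unpin may race from
// many JNI threads, so the updates must be atomic; no ordering beyond the
// count itself is needed, hence relaxed memory order.
class Region {
  std::atomic<size_t> _pinned_object_count{0};

public:
  void increment_pinned_object_count() {
    _pinned_object_count.fetch_add(1, std::memory_order_relaxed);
  }

  void decrement_pinned_object_count() {
    _pinned_object_count.fetch_sub(1, std::memory_order_relaxed);
  }

  // True while at least one object in this region is pinned; such a region
  // must be skipped by evacuation and must not be freed.
  bool has_pinned_objects() const {
    return _pinned_object_count.load(std::memory_order_relaxed) > 0;
  }
};
```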
