Skip to content

Commit 38cfb22

Browse files
author
Thomas Schatzl
committed
8318706: Implement JEP 423: Region Pinning for G1
Reviewed-by: ayang, iwalulya, sjohanss
1 parent e44d4b2 commit 38cfb22

File tree

59 files changed

+1388
-673
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

59 files changed

+1388
-673
lines changed

src/hotspot/share/gc/g1/g1CollectedHeap.cpp

Lines changed: 40 additions & 144 deletions
Original file line numberDiff line numberDiff line change
@@ -78,7 +78,6 @@
7878
#include "gc/shared/gcBehaviours.hpp"
7979
#include "gc/shared/gcHeapSummary.hpp"
8080
#include "gc/shared/gcId.hpp"
81-
#include "gc/shared/gcLocker.inline.hpp"
8281
#include "gc/shared/gcTimer.hpp"
8382
#include "gc/shared/gcTraceTime.inline.hpp"
8483
#include "gc/shared/isGCActiveMark.hpp"
@@ -411,13 +410,11 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
411410
// We should only get here after the first-level allocation attempt
412411
// (attempt_allocation()) failed to allocate.
413412

414-
// We will loop until a) we manage to successfully perform the
415-
// allocation or b) we successfully schedule a collection which
416-
// fails to perform the allocation. b) is the only case when we'll
417-
// return null.
413+
// We will loop until a) we manage to successfully perform the allocation or b)
414+
// successfully schedule a collection which fails to perform the allocation.
415+
// Case b) is the only case when we'll return null.
418416
HeapWord* result = nullptr;
419-
for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
420-
bool should_try_gc;
417+
for (uint try_count = 1; /* we'll return */; try_count++) {
421418
uint gc_count_before;
422419

423420
{
@@ -430,67 +427,26 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
430427
return result;
431428
}
432429

433-
// If the GCLocker is active and we are bound for a GC, try expanding young gen.
434-
// This is different to when only GCLocker::needs_gc() is set: try to avoid
435-
// waiting because the GCLocker is active to not wait too long.
436-
if (GCLocker::is_active_and_needs_gc() && policy()->can_expand_young_list()) {
437-
// No need for an ergo message here, can_expand_young_list() does this when
438-
// it returns true.
439-
result = _allocator->attempt_allocation_force(word_size);
440-
if (result != nullptr) {
441-
return result;
442-
}
443-
}
444-
445-
// Only try a GC if the GCLocker does not signal the need for a GC. Wait until
446-
// the GCLocker initiated GC has been performed and then retry. This includes
447-
// the case when the GC Locker is not active but has not been performed.
448-
should_try_gc = !GCLocker::needs_gc();
449430
// Read the GC count while still holding the Heap_lock.
450431
gc_count_before = total_collections();
451432
}
452433

453-
if (should_try_gc) {
454-
bool succeeded;
455-
result = do_collection_pause(word_size, gc_count_before, &succeeded, GCCause::_g1_inc_collection_pause);
456-
if (result != nullptr) {
457-
assert(succeeded, "only way to get back a non-null result");
458-
log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
459-
Thread::current()->name(), p2i(result));
460-
return result;
461-
}
462-
463-
if (succeeded) {
464-
// We successfully scheduled a collection which failed to allocate. No
465-
// point in trying to allocate further. We'll just return null.
466-
log_trace(gc, alloc)("%s: Successfully scheduled collection failing to allocate "
467-
SIZE_FORMAT " words", Thread::current()->name(), word_size);
468-
return nullptr;
469-
}
470-
log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT " words",
471-
Thread::current()->name(), word_size);
472-
} else {
473-
// Failed to schedule a collection.
474-
if (gclocker_retry_count > GCLockerRetryAllocationCount) {
475-
log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating "
476-
SIZE_FORMAT " words", Thread::current()->name(), word_size);
477-
return nullptr;
478-
}
479-
log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name());
480-
// The GCLocker is either active or the GCLocker initiated
481-
// GC has not yet been performed. Stall until it is and
482-
// then retry the allocation.
483-
GCLocker::stall_until_clear();
484-
gclocker_retry_count += 1;
434+
bool succeeded;
435+
result = do_collection_pause(word_size, gc_count_before, &succeeded, GCCause::_g1_inc_collection_pause);
436+
if (succeeded) {
437+
log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
438+
Thread::current()->name(), p2i(result));
439+
return result;
485440
}
486441

487-
// We can reach here if we were unsuccessful in scheduling a
488-
// collection (because another thread beat us to it) or if we were
489-
// stalled due to the GC locker. In either case we should retry the
490-
// allocation attempt in case another thread successfully
491-
// performed a collection and reclaimed enough space. We do the
492-
// first attempt (without holding the Heap_lock) here and the
493-
// follow-on attempt will be at the start of the next loop
442+
log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT " words",
443+
Thread::current()->name(), word_size);
444+
445+
// We can reach here if we were unsuccessful in scheduling a collection (because
446+
// another thread beat us to it). In this case immediately retry the allocation
447+
// attempt because another thread successfully performed a collection and possibly
448+
// reclaimed enough space. The first attempt (without holding the Heap_lock) is
449+
// here and the follow-on attempt will be at the start of the next loop
494450
// iteration (after taking the Heap_lock).
495451
size_t dummy = 0;
496452
result = _allocator->attempt_allocation(word_size, word_size, &dummy);
@@ -673,13 +629,11 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
673629
collect(GCCause::_g1_humongous_allocation);
674630
}
675631

676-
// We will loop until a) we manage to successfully perform the
677-
// allocation or b) we successfully schedule a collection which
678-
// fails to perform the allocation. b) is the only case when we'll
679-
// return null.
632+
// We will loop until a) we manage to successfully perform the allocation or b)
633+
// successfully schedule a collection which fails to perform the allocation.
634+
// Case b) is the only case when we'll return null.
680635
HeapWord* result = nullptr;
681-
for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
682-
bool should_try_gc;
636+
for (uint try_count = 1; /* we'll return */; try_count++) {
683637
uint gc_count_before;
684638

685639

@@ -697,64 +651,35 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
697651
return result;
698652
}
699653

700-
// Only try a GC if the GCLocker does not signal the need for a GC. Wait until
701-
// the GCLocker initiated GC has been performed and then retry. This includes
702-
// the case when the GC Locker is not active but has not been performed.
703-
should_try_gc = !GCLocker::needs_gc();
704654
// Read the GC count while still holding the Heap_lock.
705655
gc_count_before = total_collections();
706656
}
707657

708-
if (should_try_gc) {
709-
bool succeeded;
710-
result = do_collection_pause(word_size, gc_count_before, &succeeded, GCCause::_g1_humongous_allocation);
658+
bool succeeded;
659+
result = do_collection_pause(word_size, gc_count_before, &succeeded, GCCause::_g1_humongous_allocation);
660+
if (succeeded) {
661+
log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
662+
Thread::current()->name(), p2i(result));
711663
if (result != nullptr) {
712-
assert(succeeded, "only way to get back a non-null result");
713-
log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
714-
Thread::current()->name(), p2i(result));
715664
size_t size_in_regions = humongous_obj_size_in_regions(word_size);
716665
policy()->old_gen_alloc_tracker()->
717666
record_collection_pause_humongous_allocation(size_in_regions * HeapRegion::GrainBytes);
718-
return result;
719-
}
720-
721-
if (succeeded) {
722-
// We successfully scheduled a collection which failed to allocate. No
723-
// point in trying to allocate further. We'll just return null.
724-
log_trace(gc, alloc)("%s: Successfully scheduled collection failing to allocate "
725-
SIZE_FORMAT " words", Thread::current()->name(), word_size);
726-
return nullptr;
727-
}
728-
log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT "",
729-
Thread::current()->name(), word_size);
730-
} else {
731-
// Failed to schedule a collection.
732-
if (gclocker_retry_count > GCLockerRetryAllocationCount) {
733-
log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating "
734-
SIZE_FORMAT " words", Thread::current()->name(), word_size);
735-
return nullptr;
736667
}
737-
log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name());
738-
// The GCLocker is either active or the GCLocker initiated
739-
// GC has not yet been performed. Stall until it is and
740-
// then retry the allocation.
741-
GCLocker::stall_until_clear();
742-
gclocker_retry_count += 1;
668+
return result;
743669
}
744670

671+
log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT "",
672+
Thread::current()->name(), word_size);
745673

746-
// We can reach here if we were unsuccessful in scheduling a
747-
// collection (because another thread beat us to it) or if we were
748-
// stalled due to the GC locker. In either case we should retry the
749-
// allocation attempt in case another thread successfully
750-
// performed a collection and reclaimed enough space.
674+
// We can reach here if we were unsuccessful in scheduling a collection (because
675+
// another thread beat us to it).
751676
// Humongous object allocation always needs a lock, so we wait for the retry
752677
// in the next iteration of the loop, unlike for the regular iteration case.
753678
// Give a warning if we seem to be looping forever.
754679

755680
if ((QueuedAllocationWarningCount > 0) &&
756681
(try_count % QueuedAllocationWarningCount == 0)) {
757-
log_warning(gc, alloc)("%s: Retried allocation %u times for " SIZE_FORMAT " words",
682+
log_warning(gc, alloc)("%s: Retried allocation %u times for %zu words",
758683
Thread::current()->name(), try_count, word_size);
759684
}
760685
}
@@ -908,11 +833,6 @@ bool G1CollectedHeap::do_full_collection(bool clear_all_soft_refs,
908833
bool do_maximal_compaction) {
909834
assert_at_safepoint_on_vm_thread();
910835

911-
if (GCLocker::check_active_before_gc()) {
912-
// Full GC was not completed.
913-
return false;
914-
}
915-
916836
const bool do_clear_all_soft_refs = clear_all_soft_refs ||
917837
soft_ref_policy()->should_clear_all_soft_refs();
918838

@@ -1269,9 +1189,11 @@ G1CollectedHeap::G1CollectedHeap() :
12691189

12701190
_humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
12711191

1272-
// Override the default _filler_array_max_size so that no humongous filler
1273-
// objects are created.
1274-
_filler_array_max_size = _humongous_object_threshold_in_words;
1192+
// Since filler arrays are never referenced, we can make them region sized.
1193+
// This simplifies filling up the region in case we have some potentially
1194+
// unreferenced (by Java code, but still in use by native code) pinned objects
1195+
// in there.
1196+
_filler_array_max_size = HeapRegion::GrainWords;
12751197

12761198
// Override the default _stack_chunk_max_size so that no humongous stack chunks are created
12771199
_stack_chunk_max_size = _humongous_object_threshold_in_words;
@@ -1904,12 +1826,6 @@ bool G1CollectedHeap::try_collect_concurrently(GCCause::Cause cause,
19041826
// Collection failed and should be retried.
19051827
assert(op.transient_failure(), "invariant");
19061828

1907-
if (GCLocker::is_active_and_needs_gc()) {
1908-
// If GCLocker is active, wait until clear before retrying.
1909-
LOG_COLLECT_CONCURRENTLY(cause, "gc-locker stall");
1910-
GCLocker::stall_until_clear();
1911-
}
1912-
19131829
LOG_COLLECT_CONCURRENTLY(cause, "retry");
19141830
}
19151831
}
@@ -1935,11 +1851,6 @@ bool G1CollectedHeap::try_collect_fullgc(GCCause::Cause cause,
19351851
return true;
19361852
}
19371853
}
1938-
1939-
if (GCLocker::is_active_and_needs_gc()) {
1940-
// If GCLocker is active, wait until clear before retrying.
1941-
GCLocker::stall_until_clear();
1942-
}
19431854
}
19441855
}
19451856

@@ -1949,11 +1860,6 @@ bool G1CollectedHeap::try_collect(GCCause::Cause cause,
19491860
return try_collect_concurrently(cause,
19501861
counters_before.total_collections(),
19511862
counters_before.old_marking_cycles_started());
1952-
} else if (GCLocker::should_discard(cause, counters_before.total_collections())) {
1953-
// Indicate failure to be consistent with VMOp failure due to
1954-
// another collection slipping in after our gc_count but before
1955-
// our request is processed.
1956-
return false;
19571863
} else if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
19581864
DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
19591865

@@ -2179,14 +2085,6 @@ bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
21792085
return false; // keep some compilers happy
21802086
}
21812087

2182-
void G1CollectedHeap::pin_object(JavaThread* thread, oop obj) {
2183-
GCLocker::lock_critical(thread);
2184-
}
2185-
2186-
void G1CollectedHeap::unpin_object(JavaThread* thread, oop obj) {
2187-
GCLocker::unlock_critical(thread);
2188-
}
2189-
21902088
void G1CollectedHeap::print_heap_regions() const {
21912089
LogTarget(Trace, gc, heap, region) lt;
21922090
if (lt.is_enabled()) {
@@ -2489,10 +2387,6 @@ bool G1CollectedHeap::do_collection_pause_at_safepoint() {
24892387
assert_at_safepoint_on_vm_thread();
24902388
guarantee(!is_gc_active(), "collection is not reentrant");
24912389

2492-
if (GCLocker::check_active_before_gc()) {
2493-
return false;
2494-
}
2495-
24962390
do_collection_pause_at_safepoint_helper();
24972391
return true;
24982392
}
@@ -2647,6 +2541,8 @@ void G1CollectedHeap::free_region(HeapRegion* hr, FreeRegionList* free_list) {
26472541
assert(!hr->is_free(), "the region should not be free");
26482542
assert(!hr->is_empty(), "the region should not be empty");
26492543
assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
2544+
assert(!hr->has_pinned_objects(),
2545+
"must not free a region which contains pinned objects");
26502546

26512547
// Reset region metadata to allow reuse.
26522548
hr->hr_clear(true /* clear_space */);

src/hotspot/share/gc/g1/g1CollectedHeap.hpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -560,6 +560,9 @@ class G1CollectedHeap : public CollectedHeap {
560560
return _monitoring_support;
561561
}
562562

563+
void pin_object(JavaThread* thread, oop obj) override;
564+
void unpin_object(JavaThread* thread, oop obj) override;
565+
563566
void resize_heap_if_necessary();
564567

565568
// Check if there is memory to uncommit and if so schedule a task to do it.
@@ -613,7 +616,7 @@ class G1CollectedHeap : public CollectedHeap {
613616
// We register a region with the fast "in collection set" test. We
614617
// simply set to true the array slot corresponding to this region.
615618
void register_young_region_with_region_attr(HeapRegion* r) {
616-
_region_attr.set_in_young(r->hrm_index());
619+
_region_attr.set_in_young(r->hrm_index(), r->has_pinned_objects());
617620
}
618621
inline void register_new_survivor_region_with_region_attr(HeapRegion* r);
619622
inline void register_region_with_region_attr(HeapRegion* r);
@@ -1292,9 +1295,6 @@ class G1CollectedHeap : public CollectedHeap {
12921295
G1HeapSummary create_g1_heap_summary();
12931296
G1EvacSummary create_g1_evac_summary(G1EvacStats* stats);
12941297

1295-
void pin_object(JavaThread* thread, oop obj) override;
1296-
void unpin_object(JavaThread* thread, oop obj) override;
1297-
12981298
// Printing
12991299
private:
13001300
void print_heap_regions() const;

src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -209,6 +209,8 @@ G1HeapRegionAttr G1CollectedHeap::region_attr(uint idx) const {
209209
}
210210

211211
void G1CollectedHeap::register_humongous_candidate_region_with_region_attr(uint index) {
212+
assert(!region_at(index)->has_pinned_objects(), "must be");
213+
assert(region_at(index)->rem_set()->is_complete(), "must be");
212214
_region_attr.set_humongous_candidate(index);
213215
}
214216

@@ -218,9 +220,12 @@ void G1CollectedHeap::register_new_survivor_region_with_region_attr(HeapRegion*
218220

219221
void G1CollectedHeap::register_region_with_region_attr(HeapRegion* r) {
220222
_region_attr.set_remset_is_tracked(r->hrm_index(), r->rem_set()->is_tracked());
223+
_region_attr.set_is_pinned(r->hrm_index(), r->has_pinned_objects());
221224
}
222225

223226
void G1CollectedHeap::register_old_region_with_region_attr(HeapRegion* r) {
227+
assert(!r->has_pinned_objects(), "must be");
228+
assert(r->rem_set()->is_complete(), "must be");
224229
_region_attr.set_in_old(r->hrm_index(), r->rem_set()->is_tracked());
225230
_rem_set->exclude_region_from_scan(r->hrm_index());
226231
}
@@ -257,6 +262,21 @@ inline bool G1CollectedHeap::is_obj_dead(const oop obj, const HeapRegion* hr) co
257262
}
258263
}
259264

265+
inline void G1CollectedHeap::pin_object(JavaThread* thread, oop obj) {
266+
assert(obj != nullptr, "obj must not be null");
267+
assert(!is_gc_active(), "must not pin objects during a GC");
268+
assert(obj->is_typeArray(), "must be typeArray");
269+
HeapRegion *r = heap_region_containing(obj);
270+
r->increment_pinned_object_count();
271+
}
272+
273+
inline void G1CollectedHeap::unpin_object(JavaThread* thread, oop obj) {
274+
assert(obj != nullptr, "obj must not be null");
275+
assert(!is_gc_active(), "must not unpin objects during a GC");
276+
HeapRegion *r = heap_region_containing(obj);
277+
r->decrement_pinned_object_count();
278+
}
279+
260280
inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
261281
assert(obj != nullptr, "precondition");
262282

0 commit comments

Comments
 (0)