8260044: Parallel GC: Concurrent allocation after heap expansion may cause unnecessary full gc

8260045: Parallel GC: Waiting on ExpandHeap_lock may cause "expansion storm"

Loop to retry allocation if expand succeeds.  Treat space available after obtaining expand lock as expand success.

Reviewed-by: tschatzl, iwalulya, sjohanss
Kim Barrett committed Feb 12, 2021
1 parent 92ff891 commit 6a84ec68c3e20ef971ab523e82e178b6ab4e371e
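
The shape of the fix is easier to see outside the diff. Below is a self-contained sketch, not HotSpot code: Space, try_allocate, and the std::atomic/std::mutex machinery are hypothetical stand-ins for MutableSpace, cas_allocate, and ExpandHeap_lock. It shows the pattern the commit adopts: lock-free bump allocation, with expansion double-checked under a lock, so that concurrent allocators neither pile up redundant expansions (an "expansion storm") nor fail into a full GC when another thread has already made room.

// Illustrative sketch only -- not HotSpot code. All names are stand-ins.
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <mutex>

class Space {
  static const size_t kReserved = 1 << 20; // total reserved bytes
  char* _mem;                              // backing storage
  std::atomic<char*> _top;                 // next free byte (bump pointer)
  std::atomic<char*> _end;                 // end of "committed" region
  std::mutex _lock;                        // plays the role of ExpandHeap_lock

  // Lock-free bump allocation; nullptr once committed space is exhausted.
  // (HotSpot orders the reads of top and end explicitly; this sketch just
  // leans on the default seq_cst atomics.)
  char* try_allocate(size_t bytes) {
    char* old_top = _top.load();
    while (static_cast<size_t>(_end.load() - old_top) >= bytes) {
      // On failure, compare_exchange_weak refreshes old_top and we retry.
      if (_top.compare_exchange_weak(old_top, old_top + bytes)) {
        return old_top;
      }
    }
    return nullptr;
  }

  // Grow the committed region; false when the reserve is exhausted.
  // Caller must hold _lock. (HotSpot would commit more virtual space here.)
  bool expand(size_t bytes) {
    size_t committed = static_cast<size_t>(_end.load() - _mem);
    size_t grow = std::min(bytes, kReserved - committed);
    if (grow == 0) return false;
    _end.store(_end.load() + grow);
    return true;
  }

  // True if retrying the allocation makes sense: either another thread
  // already made enough space available, or we expanded.
  bool expand_for_allocate(size_t bytes) {
    std::lock_guard<std::mutex> guard(_lock);
    // Recheck under the lock: if space is already sufficient, report
    // success without expanding again -- this is what prevents the storm.
    if (static_cast<size_t>(_end.load() - _top.load()) >= bytes) {
      return true;
    }
    return expand(bytes);
  }

public:
  Space() : _mem(new char[kReserved]), _top(_mem), _end(_mem) {}
  ~Space() { delete[] _mem; }

  char* allocate(size_t bytes) {
    char* result;
    do {
      result = try_allocate(bytes);
      // Retry the failed allocation only while expansion (ours or a
      // concurrent thread's) succeeds.
    } while (result == nullptr && expand_for_allocate(bytes));
    return result;
  }
};

The do/while mirrors the new PSOldGen::allocate in the final hunk below, and the recheck in expand_for_allocate mirrors needs_expand. The loop terminates: expand_for_allocate returns false only when, with the lock held, space is still insufficient and the reserve cannot grow, at which point no retry can succeed and the caller falls back to a collection.
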
src/hotspot/share/gc/parallel/mutableSpace.cpp
@@ -215,6 +215,15 @@ bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
   return Atomic::cmpxchg(top_addr(), expected_top, obj) == expected_top;
 }
 
+// Only used by oldgen allocation.
+bool MutableSpace::needs_expand(size_t word_size) const {
+  assert_lock_strong(ExpandHeap_lock);
+  // Holding the lock means end is stable. So while top may be advancing
+  // via concurrent allocations, there is no need to order the reads of top
+  // and end here, unlike in cas_allocate.
+  return pointer_delta(end(), top()) < word_size;
+}
+
 void MutableSpace::oop_iterate(OopIterateClosure* cl) {
   HeapWord* obj_addr = bottom();
   HeapWord* t = top();
src/hotspot/share/gc/parallel/mutableSpace.hpp
@@ -142,6 +142,11 @@ class MutableSpace: public CHeapObj<mtGC> {
   virtual HeapWord* cas_allocate(size_t word_size);
   // Optional deallocation. Used in NUMA-allocator.
   bool cas_deallocate(HeapWord *obj, size_t size);
+  // Return true if this space needs to be expanded in order to satisfy an
+  // allocation request of the indicated size. Concurrent allocations and
+  // resizes may change the result of a later call. Used by oldgen allocator.
+  // precondition: holding ExpandHeap_lock
+  bool needs_expand(size_t word_size) const;
 
   // Iteration.
   void oop_iterate(OopIterateClosure* cl);
src/hotspot/share/gc/parallel/psOldGen.cpp
@@ -178,19 +178,31 @@ void PSOldGen::object_iterate_block(ObjectClosure* cl, size_t block_index) {
   }
 }
 
-HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
-  expand(word_size*HeapWordSize);
+bool PSOldGen::expand_for_allocate(size_t word_size) {
+  assert(word_size > 0, "allocating zero words?");
+  bool result = true;
+  {
+    MutexLocker x(ExpandHeap_lock);
+    // Avoid "expand storms" by rechecking available space after obtaining
+    // the lock, because another thread may have already made sufficient
+    // space available. If insufficient space available, that will remain
+    // true until we expand, since we have the lock. Other threads may take
+    // the space we need before we can allocate it, regardless of whether we
+    // expand. That's okay, we'll just try expanding again.
+    if (object_space()->needs_expand(word_size)) {
+      result = expand(word_size*HeapWordSize);
+    }
+  }
   if (GCExpandToAllocateDelayMillis > 0) {
     os::naked_sleep(GCExpandToAllocateDelayMillis);
   }
-  return cas_allocate_noexpand(word_size);
+  return result;
 }
 
-void PSOldGen::expand(size_t bytes) {
-  if (bytes == 0) {
-    return;
-  }
-  MutexLocker x(ExpandHeap_lock);
+bool PSOldGen::expand(size_t bytes) {
+  assert_lock_strong(ExpandHeap_lock);
+  assert_locked_or_safepoint(Heap_lock);
+  assert(bytes > 0, "precondition");
   const size_t alignment = virtual_space()->alignment();
   size_t aligned_bytes = align_up(bytes, alignment);
   size_t aligned_expand_bytes = align_up(MinHeapDeltaBytes, alignment);
@@ -200,13 +212,11 @@ void PSOldGen::expand(size_t bytes) {
     // providing a page per lgroup. Alignment is larger or equal to the page size.
     aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
   }
-  if (aligned_bytes == 0){
-    // The alignment caused the number of bytes to wrap. An expand_by(0) will
-    // return true with the implication that an expansion was done when it
-    // was not. A call to expand implies a best effort to expand by "bytes"
-    // but not a guarantee. Align down to give a best effort. This is likely
-    // the most that the generation can expand since it has some capacity to
-    // start with.
+  if (aligned_bytes == 0) {
+    // The alignment caused the number of bytes to wrap. A call to expand
+    // implies a best effort to expand by "bytes" but not a guarantee. Align
+    // down to give a best effort. This is likely the most that the generation
+    // can expand since it has some capacity to start with.
     aligned_bytes = align_down(bytes, alignment);
   }

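To make the wraparound in that comment concrete, with hypothetical numbers (64-bit size_t, 2M alignment; any bytes within alignment-1 of SIZE_MAX behaves the same way), assuming the usual power-of-two align_up/align_down:

// alignment = 0x200000
// bytes     = SIZE_MAX - 0x1000               == 0xFFFFFFFFFFFFEFFF
// align_up(bytes, alignment):   bytes + alignment - 1 wraps past SIZE_MAX,
//                               so masking yields 0
// align_down(bytes, alignment)                == 0xFFFFFFFFFFE00000
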
@@ -224,14 +234,13 @@ void PSOldGen::expand(size_t bytes) {
   if (success && GCLocker::is_active_and_needs_gc()) {
     log_debug(gc)("Garbage collection disabled, expanded heap instead");
   }
+  return success;
 }
 
 bool PSOldGen::expand_by(size_t bytes) {
   assert_lock_strong(ExpandHeap_lock);
   assert_locked_or_safepoint(Heap_lock);
-  if (bytes == 0) {
-    return true; // That's what virtual_space()->expand_by(0) would return
-  }
+  assert(bytes > 0, "precondition");
   bool result = virtual_space()->expand_by(bytes);
   if (result) {
     if (ZapUnusedHeapArea) {
@@ -268,7 +277,7 @@ bool PSOldGen::expand_to_reserved() {
   assert_lock_strong(ExpandHeap_lock);
   assert_locked_or_safepoint(Heap_lock);
 
-  bool result = true;
+  bool result = false;
   const size_t remaining_bytes = virtual_space()->uncommitted_size();
   if (remaining_bytes > 0) {
     result = expand_by(remaining_bytes);
@@ -323,10 +332,10 @@ void PSOldGen::resize(size_t desired_free_space) {
   }
   if (new_size > current_size) {
     size_t change_bytes = new_size - current_size;
+    MutexLocker x(ExpandHeap_lock);
     expand(change_bytes);
   } else {
     size_t change_bytes = current_size - new_size;
-    // shrink doesn't grab this lock, expand does. Is that right?
     MutexLocker x(ExpandHeap_lock);
     shrink(change_bytes);
   }
src/hotspot/share/gc/parallel/psOldGen.hpp
@@ -79,8 +79,8 @@ class PSOldGen : public CHeapObj<mtGC> {
     return res;
   }
 
-  HeapWord* expand_and_cas_allocate(size_t word_size);
-  void expand(size_t bytes);
+  bool expand_for_allocate(size_t word_size);
+  bool expand(size_t bytes);
   bool expand_by(size_t bytes);
   bool expand_to_reserved();
@@ -135,8 +135,12 @@ class PSOldGen : public CHeapObj<mtGC> {
   void resize(size_t desired_free_space);
 
   HeapWord* allocate(size_t word_size) {
-    HeapWord* res = cas_allocate_noexpand(word_size);
-    return (res == NULL) ? expand_and_cas_allocate(word_size) : res;
+    HeapWord* res;
+    do {
+      res = cas_allocate_noexpand(word_size);
+      // Retry failed allocation if expand succeeds.
+    } while ((res == nullptr) && expand_for_allocate(word_size));
+    return res;
   }
 
   // Iteration.
