8252103: Parallel heap inspection for ParallelScavengeHeap
Reviewed-by: sjohanss, tschatzl
Lin Zang authored and kstefanj committed Nov 5, 2020
1 parent cdef186 commit a6ce6a5d808967348d04241b3f1f05779e7a823a
Showing 4 changed files with 111 additions and 0 deletions.
@@ -539,6 +539,71 @@ void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  old_gen()->object_iterate(cl);
}

// The HeapBlockClaimer is used during parallel iteration over the heap,
// allowing workers to claim heap areas ("blocks"), gaining exclusive rights to these.
// The eden and survivor spaces are treated as single blocks as it is hard to divide
// these spaces.
// The old space is divided into fixed-size blocks.
class HeapBlockClaimer : public StackObj {
  size_t _claimed_index;

public:
  static const size_t InvalidIndex = SIZE_MAX;
  static const size_t EdenIndex = 0;
  static const size_t SurvivorIndex = 1;
  static const size_t NumNonOldGenClaims = 2;
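  // Index 0 claims eden, index 1 claims both survivor spaces, and any index
  // i >= NumNonOldGenClaims maps to old-gen block i - NumNonOldGenClaims.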

  HeapBlockClaimer() : _claimed_index(EdenIndex) { }
  // Claim the block and get the block index.
  size_t claim_and_get_block() {
    size_t block_index;
    block_index = Atomic::fetch_and_add(&_claimed_index, 1u);

    PSOldGen* old_gen = ParallelScavengeHeap::heap()->old_gen();
    size_t num_claims = old_gen->num_iterable_blocks() + NumNonOldGenClaims;

    return block_index < num_claims ? block_index : InvalidIndex;
  }
};

void ParallelScavengeHeap::object_iterate_parallel(ObjectClosure* cl,
                                                   HeapBlockClaimer* claimer) {
  size_t block_index = claimer->claim_and_get_block();
  // Iterate until all blocks are claimed
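  // The shared claimer hands out indices in a fixed order: eden (0), the
  // survivor spaces (1), then one index per old-gen block, so at most one
  // worker ends up iterating each of the young-gen spaces.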
  if (block_index == HeapBlockClaimer::EdenIndex) {
    young_gen()->eden_space()->object_iterate(cl);
    block_index = claimer->claim_and_get_block();
  }
  if (block_index == HeapBlockClaimer::SurvivorIndex) {
    young_gen()->from_space()->object_iterate(cl);
    young_gen()->to_space()->object_iterate(cl);
    block_index = claimer->claim_and_get_block();
  }
  while (block_index != HeapBlockClaimer::InvalidIndex) {
    old_gen()->object_iterate_block(cl, block_index - HeapBlockClaimer::NumNonOldGenClaims);
    block_index = claimer->claim_and_get_block();
  }
}

class PSScavengeParallelObjectIterator : public ParallelObjectIterator {
private:
  ParallelScavengeHeap* _heap;
  HeapBlockClaimer _claimer;

public:
  PSScavengeParallelObjectIterator() :
      _heap(ParallelScavengeHeap::heap()),
      _claimer() {}

  virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
    _heap->object_iterate_parallel(cl, &_claimer);
  }
};

ParallelObjectIterator* ParallelScavengeHeap::parallel_object_iterator(uint thread_num) {
  return new PSScavengeParallelObjectIterator();
}
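
For context: thread_num is not used above because the shared HeapBlockClaimer adapts to however many workers actually call in. A caller such as the parallel heap-inspection code would typically wrap the returned iterator in a worker task and run it on a work gang. The sketch below is illustrative only; the task name and its wiring are assumptions, not part of this commit:

class ParObjectIterateTask : public AbstractGangTask {
  ParallelObjectIterator* _poi;
  ObjectClosure*          _cl;

public:
  ParObjectIterateTask(ParallelObjectIterator* poi, ObjectClosure* cl) :
      AbstractGangTask("Parallel object iteration"),
      _poi(poi), _cl(cl) {}

  virtual void work(uint worker_id) {
    // All workers share the same iterator and thus the same HeapBlockClaimer,
    // so the blocks they visit are disjoint.
    _poi->object_iterate(_cl, worker_id);
  }
};

A WorkGang could then execute such a task via run_task(), with thread_num typically chosen to match the number of gang workers.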

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
@@ -45,6 +45,7 @@

class AdjoiningGenerations;
class GCHeapSummary;
class HeapBlockClaimer;
class MemoryManager;
class MemoryPool;
class PSAdaptiveSizePolicy;
@@ -207,6 +208,8 @@ class ParallelScavengeHeap : public CollectedHeap {
  size_t unsafe_max_tlab_alloc(Thread* thr) const;

  void object_iterate(ObjectClosure* cl);
  void object_iterate_parallel(ObjectClosure* cl, HeapBlockClaimer* claimer);
  virtual ParallelObjectIterator* parallel_object_iterator(uint thread_num);

  HeapWord* block_start(const void* addr) const;
  bool block_is_obj(const HeapWord* addr) const;
@@ -173,6 +173,38 @@ HeapWord* PSOldGen::allocate(size_t word_size) {
  return res;
}

size_t PSOldGen::num_iterable_blocks() const {
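  // Round up so that a partially filled trailing block also gets a claim.
  // Illustrative example (numbers assumed): 2560 KB used with 1 MB blocks gives
  // (2621440 + 1048576 - 1) / 1048576 == 3 blocks.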
  return (object_space()->used_in_bytes() + IterateBlockSize - 1) / IterateBlockSize;
}

void PSOldGen::object_iterate_block(ObjectClosure* cl, size_t block_index) {
  size_t block_word_size = IterateBlockSize / HeapWordSize;
  assert((block_word_size % (ObjectStartArray::block_size)) == 0,
         "Block size not a multiple of start_array block");

  MutableSpace *space = object_space();

  HeapWord* begin = space->bottom() + block_index * block_word_size;
  HeapWord* end = MIN2(space->top(), begin + block_word_size);

  if (!start_array()->object_starts_in_range(begin, end)) {
    return;
  }

  // Get object starting at or reaching into this block.
  HeapWord* start = start_array()->object_start(begin);
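  // If that object starts before this block it reaches in from the previous
  // block; it is iterated by the worker that claimed that block, so skip past it.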
  if (start < begin) {
    start += oop(start)->size();
  }
  assert(start >= begin,
         "Object address" PTR_FORMAT " must be larger or equal to block address at " PTR_FORMAT,
         p2i(start), p2i(begin));
  // Iterate all objects until the end.
  for (HeapWord* p = start; p < end; p += oop(p)->size()) {
    cl->do_object(oop(p));
  }
}
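
To make the block arithmetic above concrete (illustrative numbers, not from the change itself): with IterateBlockSize = 1 MB and HeapWordSize = 8 on a 64-bit VM, block_word_size is 131072 words, so block_index 2 spans [bottom() + 262144, bottom() + 393216) in words, with end clipped to top() for the last, partially filled block.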

HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
  expand(word_size*HeapWordSize);
  if (GCExpandToAllocateDelayMillis > 0) {
@@ -52,6 +52,9 @@ class PSOldGen : public CHeapObj<mtGC> {
  const size_t _min_gen_size;
  const size_t _max_gen_size;

  // Block size for parallel iteration
  static const size_t IterateBlockSize = 1024 * 1024;

#ifdef ASSERT
  void assert_block_in_covered_region(MemRegion new_memregion) {
    // Explicitly capture current covered_region in a local
@@ -163,6 +166,14 @@ class PSOldGen : public CHeapObj<mtGC> {
  void oop_iterate(OopIterateClosure* cl) { object_space()->oop_iterate(cl); }
  void object_iterate(ObjectClosure* cl) { object_space()->object_iterate(cl); }

  // Number of blocks to be iterated over in the used part of old gen.
  size_t num_iterable_blocks() const;
  // Iterate the objects starting in block block_index within [bottom, top) of the
  // old gen. The object just reaching into this block is not iterated over.
  // A block is an evenly sized non-overlapping part of the old gen of
  // IterateBlockSize bytes.
  void object_iterate_block(ObjectClosure* cl, size_t block_index);

  // Debugging - do not use for time critical operations
  void print() const;
  virtual void print_on(outputStream* st) const;
