Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

8252103: Parallel heap inspection for ParallelScavengeHeap #25

Closed
wants to merge 15 commits into from
Closed
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
@@ -540,44 +540,47 @@ void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
}

// The HeapBlockClaimer is used during parallel iteration over the heap,
// allowing workers to claim heap areas ("blocks"), gaining exclusive rights to these.
// The eden and survivor spaces are treated as single blocks as it is hard to divide
// these spaces.
// The old space is divided into fixed-size blocks.
class HeapBlockClaimer : public StackObj {
  // Next claim index; advanced atomically by competing workers.
  size_t _claimed_index;

public:
  // Claims 0 and 1 are reserved for the young generation spaces;
  // claims >= NumNonOldGenClaims map to old generation blocks.
  static const size_t EdenIndex = 0;
  static const size_t SurvivorIndex = 1;
  // Use max_size_t as the invalid claim index.
  static const size_t InvalidIndex = (size_t)-1;
  static const size_t NumNonOldGenClaims = 2;

  HeapBlockClaimer() : _claimed_index(EdenIndex) { }

  // Atomically claim the next block and return its index, or
  // InvalidIndex when every block has already been claimed.
  size_t claim_and_get_block() {
    size_t block_index = Atomic::fetch_and_add(&_claimed_index, (size_t)1);

    PSOldGen* old_gen = ParallelScavengeHeap::heap()->old_gen();
    size_t num_claims = old_gen->num_iterable_blocks() + NumNonOldGenClaims;

    return block_index < num_claims ? block_index : InvalidIndex;
  }
};

// Apply |cl| to every object in the blocks this worker manages to claim.
// Eden and the survivor spaces are each iterated as a single claim; all
// remaining claims are fixed-size blocks of the old generation.
void ParallelScavengeHeap::object_iterate_parallel(ObjectClosure* cl,
                                                   HeapBlockClaimer* claimer) {
  size_t block_index = claimer->claim_and_get_block();
  // Iterate until all blocks are claimed.
  while (block_index != HeapBlockClaimer::InvalidIndex) {
    if (block_index == HeapBlockClaimer::EdenIndex) {
      young_gen()->eden_space()->object_iterate(cl);
    } else if (block_index == HeapBlockClaimer::SurvivorIndex) {
      // Both survivor spaces are covered by the single survivor claim.
      young_gen()->from_space()->object_iterate(cl);
      young_gen()->to_space()->object_iterate(cl);
    } else {
      // Old-gen claims are offset by the number of non-old-gen claims.
      old_gen()->object_iterate_block(cl, block_index - HeapBlockClaimer::NumNonOldGenClaims);
    }
    block_index = claimer->claim_and_get_block();
  }
}

@@ -173,48 +173,35 @@ HeapWord* PSOldGen::allocate(size_t word_size) {
return res;
}

/*
* Divide space into blocks, processes block begins at
* bottom + block_index * (IterateBlockSize / HeapWordSize).
* NOTE:
* - The initial block start address may not be a valid
* object address, _start_array is used to correct it.
*
* - The end address is not necessary to be object address.
*
* - If there is an object that crosses blocks, it is
* processed by the worker that owns the block within
* which the object starts.
*
*/
void PSOldGen::block_iterate(ObjectClosure* cl, size_t block_index) {
MutableSpace *space = object_space();
HeapWord* bottom = space->bottom();
HeapWord* top = space->top();
size_t block_word_size = IterateBlockSize / HeapWordSize;
HeapWord* begin = bottom + block_index * block_word_size;
size_t PSOldGen::num_iterable_blocks() const {
return (object_space()->used_in_bytes() + IterateBlockSize - 1) / IterateBlockSize;
}

void PSOldGen::object_iterate_block(ObjectClosure* cl, size_t block_index) {
size_t block_word_size = IterateBlockSize / HeapWordSize;
assert((block_word_size % (ObjectStartArray::block_size)) == 0,
"BLOCK SIZE not a multiple of start_array block");

// iterate objects in block.
HeapWord* end = MIN2(top, begin + block_word_size);
// Only iterate if there are objects between begin and end.
if (start_array()->object_starts_in_range(begin, end)) {
// Process objects in the range, start from finding object at the begining
// address. Note that object_start() can return the last object in previous
// block, and that object is processed by other worker scanning that block.
// So here only focus on objects that fall into the current block.
HeapWord* start = start_array()->object_start(begin);
if (start < begin) {
start += oop(start)->size();
}
assert(begin <= start,
"object address " PTR_FORMAT " must be larger or equal to block address at " PTR_FORMAT,
p2i(start), p2i(begin));
for (HeapWord* p = start; p < end; p += oop(p)->size()) {
cl->do_object(oop(p));
}
"Block size not a multiple of start_array block");

MutableSpace *space = object_space();

HeapWord* begin = space->bottom() + block_index * block_word_size;
HeapWord* end = MIN2(space->top(), begin + block_word_size);

if (!start_array()->object_starts_in_range(begin, end)) {
return;
}

// Get object starting at or reaching into this block.
HeapWord* start = start_array()->object_start(begin);
if (start < begin) {
start += oop(start)->size();
}
assert(start >= begin,
"Object address" PTR_FORMAT " must be larger or equal to block address at " PTR_FORMAT,
p2i(start), p2i(begin));
// Iterate all objects until the end.
for (HeapWord* p = start; p < end; p += oop(p)->size()) {
cl->do_object(oop(p));
}
}

@@ -165,11 +165,14 @@ class PSOldGen : public CHeapObj<mtGC> {
// Iteration.
void oop_iterate(OopIterateClosure* cl) { object_space()->oop_iterate(cl); }
void object_iterate(ObjectClosure* cl) { object_space()->object_iterate(cl); }

// Number of blocks to be iterated over in the used part of old gen.
size_t num_iterable_blocks() const;
// Iterate the objects starting in block block_index within [bottom, top) of the
// old gen. The object just reaching into this block is not iterated over.
// A block is an evenly sized non-overlapping part of the old gen of
// IterateBlockSize bytes.
void object_iterate_block(ObjectClosure* cl, size_t block_index);

// Debugging - do not use for time critical operations
void print() const;