8260643: Remove parallel version handling in CardTableRS::younger_refs_in_space_iterate()

Reviewed-by: ayang, sjohanss
Thomas Schatzl committed Feb 2, 2021
1 parent ddd2951 commit 288a4fed3f495e5bb83839564eda7d0c78751d21
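
The change itself is mechanical: the serial collector always passed n_threads == 0 (see the SerialHeap hunk below), so the parallel branch was dead code and the remaining serial body can sit behind an early-return guard. A rough, self-contained sketch of that pattern, with simplified stand-in names rather than the HotSpot sources:

// Illustrative only: simplified stand-ins, not the HotSpot code.
struct MemRegion {
  bool empty;
  bool is_empty() const { return empty; }
};

void scan_cards_serially(MemRegion /*mr*/) { /* serial card scan */ }

// Before the change: the n_threads > 0 arm was never taken by the serial collector.
void iterate_before(MemRegion mr, unsigned n_threads) {
  if (!mr.is_empty()) {
    if (n_threads > 0) {
      // parallel work -- dead code here, the serial GC always passed 0
    } else {
      scan_cards_serially(mr);
    }
  }
}

// After the change: only the serial path remains, behind an early-return guard.
void iterate_after(MemRegion mr) {
  if (mr.is_empty()) {
    return;
  }
  scan_cards_serially(mr);
}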
@@ -98,5 +98,5 @@ void SerialHeap::young_process_roots(OopIterateClosure* root_closure,
                 cld_closure, cld_closure, &mark_code_closure);

   rem_set()->at_younger_refs_iterate();
-  old_gen()->younger_refs_iterate(old_gen_closure, 0);
+  old_gen()->younger_refs_iterate(old_gen_closure);
 }
@@ -307,13 +307,13 @@ void CardGeneration::space_iterate(SpaceClosure* blk,
   blk->do_space(space());
 }

-void CardGeneration::younger_refs_iterate(OopIterateClosure* blk, uint n_threads) {
+void CardGeneration::younger_refs_iterate(OopIterateClosure* blk) {
   // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
   // "sp" that point into younger generations.
   // The iteration is only over objects allocated at the start of the
   // iterations; objects allocated as a result of applying the closure are
   // not included.

   HeapWord* gen_boundary = reserved().start();
-  _rs->younger_refs_in_space_iterate(space(), gen_boundary, blk, n_threads);
+  _rs->younger_refs_in_space_iterate(space(), gen_boundary, blk);
 }
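
The comment in the hunk above describes a snapshot-at-the-start contract: only objects already allocated when the scan begins are visited, so objects allocated (promoted) while the closure runs are skipped. A minimal sketch of that idea, using a generic container and invented names rather than the HotSpot data structures:

#include <cstddef>
#include <vector>

struct Obj { /* payload omitted */ };

// Visit only the objects present when the scan started; anything appended to
// 'space' by the visitor itself (e.g. a newly promoted object) is not revisited.
void iterate_refs_at_start(std::vector<Obj*>& space,
                           void (*do_oop)(std::vector<Obj*>& space, Obj*)) {
  const std::size_t top_at_start = space.size();  // the saved-marks snapshot
  for (std::size_t i = 0; i < top_at_start; ++i) {
    do_oop(space, space[i]);  // may append new objects; they stay unvisited
  }
}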
@@ -89,7 +89,7 @@ class CardGeneration: public Generation {

   void space_iterate(SpaceClosure* blk, bool usedOnly = false);

-  void younger_refs_iterate(OopIterateClosure* blk, uint n_threads);
+  void younger_refs_iterate(OopIterateClosure* blk);

   bool is_in(const void* p) const;

@@ -79,51 +79,6 @@ void CardTableRS::at_younger_refs_iterate() {
 }

-inline bool ClearNoncleanCardWrapper::clear_card(CardValue* entry) {
-  if (_is_par) {
-    return clear_card_parallel(entry);
-  } else {
-    return clear_card_serial(entry);
-  }
-}
-
-inline bool ClearNoncleanCardWrapper::clear_card_parallel(CardValue* entry) {
-  while (true) {
-    // In the parallel case, we may have to do this several times.
-    CardValue entry_val = *entry;
-    assert(entry_val != CardTableRS::clean_card_val(),
-           "We shouldn't be looking at clean cards, and this should "
-           "be the only place they get cleaned.");
-    if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
-        || _ct->is_prev_youngergen_card_val(entry_val)) {
-      CardValue res =
-        Atomic::cmpxchg(entry, entry_val, CardTableRS::clean_card_val());
-      if (res == entry_val) {
-        break;
-      } else {
-        assert(res == CardTableRS::cur_youngergen_and_prev_nonclean_card,
-               "The CAS above should only fail if another thread did "
-               "a GC write barrier.");
-      }
-    } else if (entry_val ==
-               CardTableRS::cur_youngergen_and_prev_nonclean_card) {
-      // Parallelism shouldn't matter in this case. Only the thread
-      // assigned to scan the card should change this value.
-      *entry = _ct->cur_youngergen_card_val();
-      break;
-    } else {
-      assert(entry_val == _ct->cur_youngergen_card_val(),
-             "Should be the only possibility.");
-      // In this case, the card was clean before, and become
-      // cur_youngergen only because of processing of a promoted object.
-      // We don't have to look at the card.
-      return false;
-    }
-  }
-  return true;
-}
-
-
 inline bool ClearNoncleanCardWrapper::clear_card_serial(CardValue* entry) {
   CardValue entry_val = *entry;
   assert(entry_val != CardTableRS::clean_card_val(),
          "We shouldn't be looking at clean cards, and this should "
@@ -135,8 +90,8 @@ inline bool ClearNoncleanCardWrapper::clear_card_serial(CardValue* entry) {
 }

 ClearNoncleanCardWrapper::ClearNoncleanCardWrapper(
-  DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct, bool is_par) :
-    _dirty_card_closure(dirty_card_closure), _ct(ct), _is_par(is_par) {
+  DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct) :
+    _dirty_card_closure(dirty_card_closure), _ct(ct) {
 }

 bool ClearNoncleanCardWrapper::is_word_aligned(CardTable::CardValue* entry) {
@@ -203,12 +158,11 @@ void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {

 void CardTableRS::younger_refs_in_space_iterate(Space* sp,
                                                 HeapWord* gen_boundary,
-                                                OopIterateClosure* cl,
-                                                uint n_threads) {
+                                                OopIterateClosure* cl) {
   verify_used_region_at_save_marks(sp);

   const MemRegion urasm = sp->used_region_at_save_marks();
-  non_clean_card_iterate_possibly_parallel(sp, gen_boundary, urasm, cl, this, n_threads);
+  non_clean_card_iterate(sp, gen_boundary, urasm, cl, this);
 }

 #ifdef ASSERT
@@ -580,35 +534,21 @@ bool CardTableRS::card_may_have_been_dirty(CardValue cv) {
          CardTableRS::youngergen_may_have_been_dirty(cv));
 }

-void CardTableRS::non_clean_card_iterate_possibly_parallel(
-  Space* sp,
-  HeapWord* gen_boundary,
-  MemRegion mr,
-  OopIterateClosure* cl,
-  CardTableRS* ct,
-  uint n_threads)
+void CardTableRS::non_clean_card_iterate(Space* sp,
+                                         HeapWord* gen_boundary,
+                                         MemRegion mr,
+                                         OopIterateClosure* cl,
+                                         CardTableRS* ct)
 {
-  if (!mr.is_empty()) {
-    if (n_threads > 0) {
-      non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
-    } else {
-      // clear_cl finds contiguous dirty ranges of cards to process and clear.
-
-      // This is the single-threaded version used by DefNew.
-      const bool parallel = false;
-
-      DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), gen_boundary, parallel);
-      ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);
-
-      clear_cl.do_MemRegion(mr);
-    }
-  }
+  if (mr.is_empty()) {
+    return;
+  }
+  // clear_cl finds contiguous dirty ranges of cards to process and clear.
+
+  DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), gen_boundary);
+  ClearNoncleanCardWrapper clear_cl(dcto_cl, ct);
+
+  clear_cl.do_MemRegion(mr);
 }
-
-void CardTableRS::non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
-                                                       OopIterateClosure* cl, CardTableRS* ct,
-                                                       uint n_threads) {
-  fatal("Parallel gc not supported here.");
-}

 bool CardTableRS::is_in_young(oop obj) const {
@@ -88,7 +88,7 @@ class CardTableRS: public CardTable {
   CardTableRS(MemRegion whole_heap, bool scanned_concurrently);
   ~CardTableRS();

-  void younger_refs_in_space_iterate(Space* sp, HeapWord* gen_boundary, OopIterateClosure* cl, uint n_threads);
+  void younger_refs_in_space_iterate(Space* sp, HeapWord* gen_boundary, OopIterateClosure* cl);

   virtual void verify_used_region_at_save_marks(Space* sp) const NOT_DEBUG_RETURN;

@@ -147,15 +147,11 @@ class CardTableRS: public CardTable {
   // Iterate over the portion of the card-table which covers the given
   // region mr in the given space and apply cl to any dirty sub-regions
   // of mr. Clears the dirty cards as they are processed.
-  void non_clean_card_iterate_possibly_parallel(Space* sp, HeapWord* gen_boundary,
-                                                MemRegion mr, OopIterateClosure* cl,
-                                                CardTableRS* ct, uint n_threads);
-
-  // Work method used to implement non_clean_card_iterate_possibly_parallel()
-  // above in the parallel case.
-  virtual void non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
-                                                    OopIterateClosure* cl, CardTableRS* ct,
-                                                    uint n_threads);
+  void non_clean_card_iterate(Space* sp,
+                              HeapWord* gen_boundary,
+                              MemRegion mr,
+                              OopIterateClosure* cl,
+                              CardTableRS* ct);

   // This is an array, one element per covered region of the card table.
   // Each entry is itself an array, with one element per chunk in the
@@ -175,7 +171,6 @@ class CardTableRS: public CardTable {
 class ClearNoncleanCardWrapper: public MemRegionClosure {
   DirtyCardToOopClosure* _dirty_card_closure;
   CardTableRS* _ct;
-  bool _is_par;

 public:

@@ -191,7 +186,7 @@ class ClearNoncleanCardWrapper: public MemRegionClosure {
   bool is_word_aligned(CardValue* entry);

 public:
-  ClearNoncleanCardWrapper(DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct, bool is_par);
+  ClearNoncleanCardWrapper(DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct);
   void do_MemRegion(MemRegion mr);
 };

@@ -163,8 +163,7 @@ void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

 DirtyCardToOopClosure* Space::new_dcto_cl(OopIterateClosure* cl,
                                           CardTable::PrecisionStyle precision,
-                                          HeapWord* boundary,
-                                          bool parallel) {
+                                          HeapWord* boundary) {
   return new DirtyCardToOopClosure(this, cl, precision, boundary);
 }

@@ -243,8 +242,7 @@ ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 DirtyCardToOopClosure*
 ContiguousSpace::new_dcto_cl(OopIterateClosure* cl,
                              CardTable::PrecisionStyle precision,
-                             HeapWord* boundary,
-                             bool parallel) {
+                             HeapWord* boundary) {
   return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
 }

@@ -173,8 +173,7 @@ class Space: public CHeapObj<mtGC> {
   // operate. ResourceArea allocated.
   virtual DirtyCardToOopClosure* new_dcto_cl(OopIterateClosure* cl,
                                              CardTable::PrecisionStyle precision,
-                                             HeapWord* boundary,
-                                             bool parallel);
+                                             HeapWord* boundary);

   // If "p" is in the space, returns the address of the start of the
   // "block" that contains "p". We say "block" instead of "object" since
@@ -588,8 +587,7 @@ class ContiguousSpace: public CompactibleSpace {
   // Override.
   DirtyCardToOopClosure* new_dcto_cl(OopIterateClosure* cl,
                                      CardTable::PrecisionStyle precision,
-                                     HeapWord* boundary,
-                                     bool parallel);
+                                     HeapWord* boundary);

   // Apply "blk->do_oop" to the addresses of all reference fields in objects
   // starting with the _saved_mark_word, which was noted during a generation's
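
The space.hpp and space.cpp hunks above drop the unused bool parallel parameter from Space::new_dcto_cl and from the ContiguousSpace override in the same patch; the base declaration and every override have to move together. A generic sketch of that constraint, with simplified stand-in types (the override keyword is used here only for illustration and need not match the actual declarations):

// Illustrative only: a virtual factory method losing a parameter, with its
// override updated in the same change. Names are stand-ins for new_dcto_cl.
class Closure {};

class BaseSpace {
 public:
  virtual ~BaseSpace() = default;
  // Previously: new_dcto_cl(int precision, void* boundary, bool parallel)
  virtual Closure* new_dcto_cl(int /*precision*/, void* /*boundary*/) {
    return new Closure();
  }
};

class ContigSpace : public BaseSpace {
 public:
  // 'override' makes the compiler reject this method if its signature still
  // carried the dropped parameter after the base class changed.
  Closure* new_dcto_cl(int /*precision*/, void* /*boundary*/) override {
    return new Closure();
  }
};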
