Skip to content
Permalink
Browse files
8275416: G1: remove unnecessary make_referent_alive in precleaning phase
Reviewed-by: tschatzl, sjohanss
  • Loading branch information
albertnetymk committed Oct 22, 2021
1 parent dd622e5 commit 4e647aa584cf12dae76e81e203ad4f1ebc08c1a2
Showing 3 changed files with 16 additions and 40 deletions.
@@ -1692,9 +1692,7 @@ void G1ConcurrentMark::preclean() {

SuspendibleThreadSetJoiner joiner;

G1CMKeepAliveAndDrainClosure keep_alive(this, task(0), true /* is_serial */);
BarrierEnqueueDiscoveredFieldClosure enqueue;
G1CMDrainMarkingStackClosure drain_mark_stack(this, task(0), true /* is_serial */);

set_concurrency_and_phase(1, true);

@@ -1704,9 +1702,7 @@ void G1ConcurrentMark::preclean() {
// Precleaning is single threaded. Temporarily disable MT discovery.
ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
rp->preclean_discovered_references(rp->is_alive_non_header(),
&keep_alive,
&enqueue,
&drain_mark_stack,
&yield_cl,
_gc_timer_cm);
}
@@ -1065,9 +1065,7 @@ bool ReferenceProcessor::has_discovered_references() {
}

void ReferenceProcessor::preclean_discovered_references(BoolObjectClosure* is_alive,
OopClosure* keep_alive,
EnqueueDiscoveredFieldClosure* enqueue,
VoidClosure* complete_gc,
YieldClosure* yield,
GCTimer* gc_timer) {
// These lists can be handled here in any order and, indeed, concurrently.
@@ -1081,7 +1079,7 @@ void ReferenceProcessor::preclean_discovered_references(BoolObjectClosure* is_al
return;
}
if (preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
keep_alive, enqueue, complete_gc, yield)) {
enqueue, yield)) {
log_reflist("SoftRef abort: ", _discoveredSoftRefs, _max_num_queues);
return;
}
@@ -1098,7 +1096,7 @@ void ReferenceProcessor::preclean_discovered_references(BoolObjectClosure* is_al
return;
}
if (preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
keep_alive, enqueue, complete_gc, yield)) {
enqueue, yield)) {
log_reflist("WeakRef abort: ", _discoveredWeakRefs, _max_num_queues);
return;
}
@@ -1115,7 +1113,7 @@ void ReferenceProcessor::preclean_discovered_references(BoolObjectClosure* is_al
return;
}
if (preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
keep_alive, enqueue, complete_gc, yield)) {
enqueue, yield)) {
log_reflist("FinalRef abort: ", _discoveredFinalRefs, _max_num_queues);
return;
}
@@ -1132,7 +1130,7 @@ void ReferenceProcessor::preclean_discovered_references(BoolObjectClosure* is_al
return;
}
if (preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
keep_alive, enqueue, complete_gc, yield)) {
enqueue, yield)) {
log_reflist("PhantomRef abort: ", _discoveredPhantomRefs, _max_num_queues);
return;
}
@@ -1141,22 +1139,11 @@ void ReferenceProcessor::preclean_discovered_references(BoolObjectClosure* is_al
}
}

// Walk the given discovered ref list, and remove all reference objects whose
// referents are still alive or NULL. NOTE: When we are precleaning the
// ref lists, we do not disable refs discovery to honor the correct semantics of
java.lang.Reference. Therefore, as we iterate over the discovered list (DL)
and drop elements from it, new refs can still be discovered and appended
to the DL. Because precleaning is implemented single-threaded today, for
// each per-thread DL, the insertion of refs (calling `complete_gc`) happens
// after the iteration. The clear separation means no special synchronization
// is needed.
bool ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list,
BoolObjectClosure* is_alive,
OopClosure* keep_alive,
EnqueueDiscoveredFieldClosure* enqueue,
VoidClosure* complete_gc,
YieldClosure* yield) {
DiscoveredListIterator iter(refs_list, keep_alive, is_alive, enqueue);
DiscoveredListIterator iter(refs_list, nullptr /* keep_alive */, is_alive, enqueue);
while (iter.has_next()) {
if (yield->should_return_fine_grain()) {
return true;
@@ -1168,17 +1155,12 @@ bool ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_lis
iter.move_to_next();
} else if (iter.is_referent_alive()) {
log_preclean_ref(iter, "reachable");
// Remove Reference object from list
iter.remove();
// Keep alive its cohort.
iter.make_referent_alive();
iter.move_to_next();
} else {
iter.next();
}
}
// Close the reachable set
complete_gc->do_void();

if (iter.processed() > 0) {
log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT " Refs in discovered list " INTPTR_FORMAT,
@@ -304,19 +304,19 @@ class ReferenceProcessor : public ReferenceDiscoverer {
setup_policy(always_clear);
}

// "Preclean" all the discovered reference lists by removing references that
// are active (e.g. due to the mutator calling enqueue()) or with NULL or
// strongly reachable referents.
// The first argument is a predicate on an oop that indicates
// its (strong) reachability and the fourth is a closure that
// may be used to incrementalize or abort the precleaning process.
// The caller is responsible for taking care of potential
// interference with concurrent operations on these lists
// (or predicates involved) by other threads.
// "Preclean" all the discovered reference lists by removing references whose
// referents are NULL or strongly reachable (`is_alive` returns true).
// Note: when a referent is strongly reachable, we assume it's already marked
// through, so this method doesn't perform (and doesn't need to) any marking
// work at all. Currently, this assumption holds because G1 uses SATB and the
// marking status of an object is *not* updated when `Reference.get()` is
// called.
// `yield` is a closure that may be used to incrementalize or abort the
// precleaning process. The caller is responsible for taking care of
// potential interference with concurrent operations on these lists (or
// predicates involved) by other threads.
void preclean_discovered_references(BoolObjectClosure* is_alive,
OopClosure* keep_alive,
EnqueueDiscoveredFieldClosure* enqueue,
VoidClosure* complete_gc,
YieldClosure* yield,
GCTimer* gc_timer);

@@ -331,9 +331,7 @@ class ReferenceProcessor : public ReferenceDiscoverer {
// Returns whether the operation should be aborted.
bool preclean_discovered_reflist(DiscoveredList& refs_list,
BoolObjectClosure* is_alive,
OopClosure* keep_alive,
EnqueueDiscoveredFieldClosure* enqueue,
VoidClosure* complete_gc,
YieldClosure* yield);

// round-robin mod _num_queues (not: _not_ mod _max_num_queues)

1 comment on commit 4e647aa

@openjdk-notifier
Copy link

@openjdk-notifier openjdk-notifier bot commented on 4e647aa Oct 22, 2021

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please sign in to comment.