8274054: Add custom enqueue calls during reference processing
Reviewed-by: ayang, kbarrett, sjohanss
Thomas Schatzl committed Sep 22, 2021
1 parent c77ebe8 commit 51085b5
Showing 9 changed files with 81 additions and 31 deletions.
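For orientation before the file-by-file diff: the patch adds an EnqueueDiscoveredFieldClosure callback and threads it through STW reference processing, so each collector controls how the discovered field of a j.l.ref.Reference is written. The following condensed view is assembled from the referenceProcessor.hpp/.cpp hunks below; it is a summary, not a verbatim excerpt.

// New callback interface (referenceProcessor.hpp): the GC decides how the
// discovered field of a j.l.ref.Reference is stored during reference processing.
class EnqueueDiscoveredFieldClosure {
public:
  virtual void enqueue(oop reference, oop value) = 0;
};

// Default implementation (referenceProcessor.cpp): store through the regular heap barrier.
class BarrierEnqueueDiscoveredFieldClosure : public EnqueueDiscoveredFieldClosure {
public:
  void enqueue(oop reference, oop value) override {
    HeapAccess<AS_NO_KEEPALIVE>::oop_store_at(reference,
                                              java_lang_ref_Reference::discovered_offset(),
                                              value);
  }
};

// Each collector's RefProcProxyTask now stack-allocates the closure and passes it down:
//   BarrierEnqueueDiscoveredFieldClosure enqueue;
//   _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, &complete_gc);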
5 changes: 4 additions & 1 deletion src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
@@ -1581,8 +1581,9 @@ class G1CMRefProcProxyTask : public RefProcProxyTask {
G1CMIsAliveClosure is_alive(&_g1h);
uint index = (_tm == RefProcThreadModel::Single) ? 0 : worker_id;
G1CMKeepAliveAndDrainClosure keep_alive(&_cm, _cm.task(index), _tm == RefProcThreadModel::Single);
BarrierEnqueueDiscoveredFieldClosure enqueue;
G1CMDrainMarkingStackClosure complete_gc(&_cm, _cm.task(index), _tm == RefProcThreadModel::Single);
_rp_task->rp_work(worker_id, &is_alive, &keep_alive, &complete_gc);
_rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, &complete_gc);
}

void prepare_run_task_hook() override {
@@ -1695,6 +1696,7 @@ void G1ConcurrentMark::preclean() {
SuspendibleThreadSetJoiner joiner;

G1CMKeepAliveAndDrainClosure keep_alive(this, task(0), true /* is_serial */);
BarrierEnqueueDiscoveredFieldClosure enqueue;
G1CMDrainMarkingStackClosure drain_mark_stack(this, task(0), true /* is_serial */);

set_concurrency_and_phase(1, true);
@@ -1706,6 +1708,7 @@ void G1ConcurrentMark::preclean() {
ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
rp->preclean_discovered_references(rp->is_alive_non_header(),
&keep_alive,
&enqueue,
&drain_mark_stack,
&yield_cl,
_gc_timer_cm);
3 changes: 2 additions & 1 deletion src/hotspot/share/gc/g1/g1FullCollector.cpp
@@ -248,8 +248,9 @@ class G1FullGCRefProcProxyTask : public RefProcProxyTask {
G1IsAliveClosure is_alive(&_collector);
uint index = (_tm == RefProcThreadModel::Single) ? 0 : worker_id;
G1FullKeepAliveClosure keep_alive(_collector.marker(index));
BarrierEnqueueDiscoveredFieldClosure enqueue;
G1FollowStackClosure* complete_gc = _collector.marker(index)->stack_closure();
_rp_task->rp_work(worker_id, &is_alive, &keep_alive, complete_gc);
_rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, complete_gc);
}
};

3 changes: 2 additions & 1 deletion src/hotspot/share/gc/g1/g1YoungCollector.cpp
@@ -912,8 +912,9 @@ class G1STWRefProcProxyTask : public RefProcProxyTask {

G1STWIsAliveClosure is_alive(&_g1h);
G1CopyingKeepAliveClosure keep_alive(&_g1h, pss);
BarrierEnqueueDiscoveredFieldClosure enqueue;
G1ParEvacuateFollowersClosure complete_gc(&_g1h, pss, &_task_queues, _tm == RefProcThreadModel::Single ? nullptr : &_terminator, G1GCPhaseTimes::ObjCopy);
_rp_task->rp_work(worker_id, &is_alive, &keep_alive, &complete_gc);
_rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, &complete_gc);

// We have completed copying any necessary live referent objects.
assert(pss->queue_is_empty(), "both queue and overflow should be empty");
3 changes: 2 additions & 1 deletion src/hotspot/share/gc/parallel/psParallelCompact.cpp
@@ -2067,8 +2067,9 @@ class ParallelCompactRefProcProxyTask : public RefProcProxyTask {
assert(worker_id < _max_workers, "sanity");
ParCompactionManager* cm = (_tm == RefProcThreadModel::Single) ? ParCompactionManager::get_vmthread_cm() : ParCompactionManager::gc_thread_compaction_manager(worker_id);
PCMarkAndPushClosure keep_alive(cm);
BarrierEnqueueDiscoveredFieldClosure enqueue;
ParCompactionManager::FollowStackClosure complete_gc(cm, (_tm == RefProcThreadModel::Single) ? nullptr : &_terminator, worker_id);
_rp_task->rp_work(worker_id, PSParallelCompact::is_alive_closure(), &keep_alive, &complete_gc);
_rp_task->rp_work(worker_id, PSParallelCompact::is_alive_closure(), &keep_alive, &enqueue, &complete_gc);
}

void prepare_run_task_hook() override {
5 changes: 3 additions & 2 deletions src/hotspot/share/gc/parallel/psScavenge.cpp
@@ -209,9 +209,10 @@ class ParallelScavengeRefProcProxyTask : public RefProcProxyTask {
assert(worker_id < _max_workers, "sanity");
PSPromotionManager* promotion_manager = (_tm == RefProcThreadModel::Single) ? PSPromotionManager::vm_thread_promotion_manager() : PSPromotionManager::gc_thread_promotion_manager(worker_id);
PSIsAliveClosure is_alive;
PSKeepAliveClosure keep_alive(promotion_manager);;
PSKeepAliveClosure keep_alive(promotion_manager);
BarrierEnqueueDiscoveredFieldClosure enqueue;
PSEvacuateFollowersClosure complete_gc(promotion_manager, (_marks_oops_alive && _tm == RefProcThreadModel::Multi) ? &_terminator : nullptr, worker_id);;
_rp_task->rp_work(worker_id, &is_alive, &keep_alive, &complete_gc);
_rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, &complete_gc);
}

void prepare_run_task_hook() override {
3 changes: 2 additions & 1 deletion src/hotspot/share/gc/serial/serialGcRefProcProxyTask.hpp
@@ -41,7 +41,8 @@ class SerialGCRefProcProxyTask : public RefProcProxyTask {

void work(uint worker_id) override {
assert(worker_id < _max_workers, "sanity");
_rp_task->rp_work(worker_id, &_is_alive, &_keep_alive, &_complete_gc);
BarrierEnqueueDiscoveredFieldClosure enqueue;
_rp_task->rp_work(worker_id, &_is_alive, &_keep_alive, &enqueue, &_complete_gc);
}
};

51 changes: 32 additions & 19 deletions src/hotspot/share/gc/shared/referenceProcessor.cpp
@@ -218,6 +218,12 @@ ReferenceProcessorStats ReferenceProcessor::process_discovered_references(RefPro
return stats;
}

void BarrierEnqueueDiscoveredFieldClosure::enqueue(oop reference, oop value) {
HeapAccess<AS_NO_KEEPALIVE>::oop_store_at(reference,
java_lang_ref_Reference::discovered_offset(),
value);
}

void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
_current_discovered_addr = java_lang_ref_Reference::discovered_addr_raw(_current_discovered);
oop discovered = java_lang_ref_Reference::discovered(_current_discovered);
@@ -271,18 +277,16 @@ void DiscoveredListIterator::clear_referent() {
}

void DiscoveredListIterator::enqueue() {
HeapAccess<AS_NO_KEEPALIVE>::oop_store_at(_current_discovered,
java_lang_ref_Reference::discovered_offset(),
_next_discovered);
_enqueue->enqueue(_current_discovered, _next_discovered);
}

void DiscoveredListIterator::complete_enqueue() {
if (_prev_discovered != NULL) {
if (_prev_discovered != nullptr) {
// This is the last object.
// Swap refs_list into pending list and set obj's
// discovered to what we read from the pending list.
oop old = Universe::swap_reference_pending_list(_refs_list.head());
HeapAccess<AS_NO_KEEPALIVE>::oop_store_at(_prev_discovered, java_lang_ref_Reference::discovered_offset(), old);
_enqueue->enqueue(_prev_discovered, old);
}
}

@@ -307,8 +311,9 @@ inline void log_enqueued_ref(const DiscoveredListIterator& iter, const char* rea
size_t ReferenceProcessor::process_discovered_list_work(DiscoveredList& refs_list,
BoolObjectClosure* is_alive,
OopClosure* keep_alive,
EnqueueDiscoveredFieldClosure* enqueue,
bool do_enqueue_and_clear) {
DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
DiscoveredListIterator iter(refs_list, keep_alive, is_alive, enqueue);
while (iter.has_next()) {
iter.load_ptrs(DEBUG_ONLY(discovery_is_concurrent() /* allow_null_referent */));
if (iter.referent() == NULL) {
@@ -350,8 +355,9 @@ size_t ReferenceProcessor::process_discovered_list_work(DiscoveredList& refs_
}

size_t ReferenceProcessor::process_final_keep_alive_work(DiscoveredList& refs_list,
OopClosure* keep_alive) {
DiscoveredListIterator iter(refs_list, keep_alive, NULL);
OopClosure* keep_alive,
EnqueueDiscoveredFieldClosure* enqueue) {
DiscoveredListIterator iter(refs_list, keep_alive, NULL, enqueue);
while (iter.has_next()) {
iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
// keep the referent and followers around
@@ -421,7 +427,8 @@ size_t ReferenceProcessor::total_reference_count(ReferenceType type) const {
void RefProcTask::process_discovered_list(uint worker_id,
ReferenceType ref_type,
BoolObjectClosure* is_alive,
OopClosure* keep_alive) {
OopClosure* keep_alive,
EnqueueDiscoveredFieldClosure* enqueue) {
ReferenceProcessor::RefProcSubPhases subphase;
DiscoveredList* dl;
switch (ref_type) {
@@ -453,6 +460,7 @@ void RefProcTask::process_discovered_list(uint worker_id,
size_t const removed = _ref_processor.process_discovered_list_work(dl[worker_id],
is_alive,
keep_alive,
enqueue,
do_enqueue_and_clear);
_phase_times->add_ref_cleared(ref_type, removed);
}
@@ -468,14 +476,15 @@ class RefProcSoftWeakFinalPhaseTask: public RefProcTask {
void rp_work(uint worker_id,
BoolObjectClosure* is_alive,
OopClosure* keep_alive,
EnqueueDiscoveredFieldClosure* enqueue,
VoidClosure* complete_gc) override {
RefProcWorkerTimeTracker t(_phase_times->soft_weak_final_refs_phase_worker_time_sec(), tracker_id(worker_id));

process_discovered_list(worker_id, REF_SOFT, is_alive, keep_alive);
process_discovered_list(worker_id, REF_SOFT, is_alive, keep_alive, enqueue);

process_discovered_list(worker_id, REF_WEAK, is_alive, keep_alive);
process_discovered_list(worker_id, REF_WEAK, is_alive, keep_alive, enqueue);

process_discovered_list(worker_id, REF_FINAL, is_alive, keep_alive);
process_discovered_list(worker_id, REF_FINAL, is_alive, keep_alive, enqueue);

// Close the reachable set; needed for collectors which keep_alive_closure do
// not immediately complete their work.
@@ -493,9 +502,10 @@ class RefProcKeepAliveFinalPhaseTask: public RefProcTask {
void rp_work(uint worker_id,
BoolObjectClosure* is_alive,
OopClosure* keep_alive,
EnqueueDiscoveredFieldClosure* enqueue,
VoidClosure* complete_gc) override {
RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::KeepAliveFinalRefsSubPhase, _phase_times, tracker_id(worker_id));
_ref_processor.process_final_keep_alive_work(_ref_processor._discoveredFinalRefs[worker_id], keep_alive);
_ref_processor.process_final_keep_alive_work(_ref_processor._discoveredFinalRefs[worker_id], keep_alive, enqueue);
// Close the reachable set
complete_gc->do_void();
}
@@ -511,8 +521,9 @@ class RefProcPhantomPhaseTask: public RefProcTask {
void rp_work(uint worker_id,
BoolObjectClosure* is_alive,
OopClosure* keep_alive,
EnqueueDiscoveredFieldClosure* enqueue,
VoidClosure* complete_gc) override {
process_discovered_list(worker_id, REF_PHANTOM, is_alive, keep_alive);
process_discovered_list(worker_id, REF_PHANTOM, is_alive, keep_alive, enqueue);

// Close the reachable set; needed for collectors which keep_alive_closure do
// not immediately complete their work.
@@ -1039,6 +1050,7 @@ bool ReferenceProcessor::has_discovered_references() {

void ReferenceProcessor::preclean_discovered_references(BoolObjectClosure* is_alive,
OopClosure* keep_alive,
EnqueueDiscoveredFieldClosure* enqueue,
VoidClosure* complete_gc,
YieldClosure* yield,
GCTimer* gc_timer) {
@@ -1053,7 +1065,7 @@ void ReferenceProcessor::preclean_discovered_references(BoolObjectClosure* is_al
return;
}
if (preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
keep_alive, complete_gc, yield)) {
keep_alive, enqueue, complete_gc, yield)) {
log_reflist("SoftRef abort: ", _discoveredSoftRefs, _max_num_queues);
return;
}
@@ -1070,7 +1082,7 @@ void ReferenceProcessor::preclean_discovered_references(BoolObjectClosure* is_al
return;
}
if (preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
keep_alive, complete_gc, yield)) {
keep_alive, enqueue, complete_gc, yield)) {
log_reflist("WeakRef abort: ", _discoveredWeakRefs, _max_num_queues);
return;
}
@@ -1087,7 +1099,7 @@ void ReferenceProcessor::preclean_discovered_references(BoolObjectClosure* is_al
return;
}
if (preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
keep_alive, complete_gc, yield)) {
keep_alive, enqueue, complete_gc, yield)) {
log_reflist("FinalRef abort: ", _discoveredFinalRefs, _max_num_queues);
return;
}
@@ -1104,7 +1116,7 @@ void ReferenceProcessor::preclean_discovered_references(BoolObjectClosure* is_al
return;
}
if (preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
keep_alive, complete_gc, yield)) {
keep_alive, enqueue, complete_gc, yield)) {
log_reflist("PhantomRef abort: ", _discoveredPhantomRefs, _max_num_queues);
return;
}
@@ -1124,9 +1136,10 @@ void ReferenceProcessor::preclean_discovered_references(BoolObjectClosure* is_al
bool ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list,
BoolObjectClosure* is_alive,
OopClosure* keep_alive,
EnqueueDiscoveredFieldClosure* enqueue,
VoidClosure* complete_gc,
YieldClosure* yield) {
DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
DiscoveredListIterator iter(refs_list, keep_alive, is_alive, enqueue);
while (iter.has_next()) {
if (yield->should_return_fine_grain()) {
return true;
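To make the list splicing in DiscoveredListIterator::enqueue() and complete_enqueue() above easier to follow, here is a small standalone model of the pending-list hand-over. It is plain C++ for illustration only, not HotSpot code; the names Reference, pending_list_head and splice_onto_pending_list are invented stand-ins for oop, Universe::swap_reference_pending_list() and the iterator logic.

#include <cstdio>

// Simplified model: each Reference links to the next one through its
// 'discovered' field, mirroring java.lang.ref.Reference.
struct Reference {
  const char* name;
  Reference*  discovered;   // next reference in the discovered/pending chain
};

// Global pending-list head, standing in for Universe::swap_reference_pending_list().
static Reference* pending_list_head = nullptr;

static Reference* swap_reference_pending_list(Reference* new_head) {
  Reference* old = pending_list_head;
  pending_list_head = new_head;
  return old;
}

// Splice a discovered list [head .. tail] onto the pending list the way
// complete_enqueue() does: swap the list head into the pending list and point
// the last surviving reference's 'discovered' field at the old pending head
// (the counterpart of _enqueue->enqueue(_prev_discovered, old)).
static void splice_onto_pending_list(Reference* head, Reference* tail) {
  if (tail == nullptr) {
    return;  // nothing was kept on the discovered list
  }
  Reference* old = swap_reference_pending_list(head);
  tail->discovered = old;
}

int main() {
  Reference a{"a", nullptr}, b{"b", nullptr}, c{"c", nullptr};
  // During iteration, enqueue() links each kept reference to its successor.
  a.discovered = &b;
  b.discovered = &c;
  splice_onto_pending_list(&a, &c);
  for (Reference* r = pending_list_head; r != nullptr; r = r->discovered) {
    printf("%s\n", r->name);  // prints a, b, c
  }
  return 0;
}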
35 changes: 31 additions & 4 deletions src/hotspot/share/gc/shared/referenceProcessor.hpp
@@ -38,6 +38,26 @@ class ReferenceProcessorPhaseTimes;
class RefProcTask;
class RefProcProxyTask;

// Provides a callback to the garbage collector to set the given value to the
// discovered field of the j.l.ref.Reference instance. This is called during STW
// reference processing when iterating over the discovered lists for all
// discovered references.
// Typically garbage collectors may just call the barrier, but for some garbage
// collectors the barrier environment (e.g. card table) may not be set up correctly
// at the point of invocation.
class EnqueueDiscoveredFieldClosure {
public:
// For the given j.l.ref.Reference reference, set the discovered field to value.
virtual void enqueue(oop reference, oop value) = 0;
};

// EnqueueDiscoveredFieldClosure that executes the default barrier on the discovered
// field of the j.l.ref.Reference reference with the given value.
class BarrierEnqueueDiscoveredFieldClosure : public EnqueueDiscoveredFieldClosure {
public:
void enqueue(oop reference, oop value) override;
};

// List of discovered references.
class DiscoveredList {
public:
@@ -66,7 +86,6 @@ class DiscoveredList {

// Iterator for the list of discovered references.
class DiscoveredListIterator {
private:
DiscoveredList& _refs_list;
HeapWord* _prev_discovered_addr;
oop _prev_discovered;
@@ -78,6 +97,7 @@ class DiscoveredListIterator {

OopClosure* _keep_alive;
BoolObjectClosure* _is_alive;
EnqueueDiscoveredFieldClosure* _enqueue;

DEBUG_ONLY(
oop _first_seen; // cyclic linked list check
@@ -89,7 +109,8 @@
public:
inline DiscoveredListIterator(DiscoveredList& refs_list,
OopClosure* keep_alive,
BoolObjectClosure* is_alive);
BoolObjectClosure* is_alive,
EnqueueDiscoveredFieldClosure* enqueue);

// End Of List.
inline bool has_next() const { return _current_discovered != NULL; }
@@ -255,12 +276,14 @@ class ReferenceProcessor : public ReferenceDiscoverer {
size_t process_discovered_list_work(DiscoveredList& refs_list,
BoolObjectClosure* is_alive,
OopClosure* keep_alive,
EnqueueDiscoveredFieldClosure* enqueue,
bool do_enqueue_and_clear);

// Keep alive followers of referents for FinalReferences. Must only be called for
// those.
size_t process_final_keep_alive_work(DiscoveredList& refs_list,
OopClosure* keep_alive);
OopClosure* keep_alive,
EnqueueDiscoveredFieldClosure* enqueue);


void setup_policy(bool always_clear) {
@@ -291,6 +314,7 @@ class ReferenceProcessor : public ReferenceDiscoverer {
// (or predicates involved) by other threads.
void preclean_discovered_references(BoolObjectClosure* is_alive,
OopClosure* keep_alive,
EnqueueDiscoveredFieldClosure* enqueue,
VoidClosure* complete_gc,
YieldClosure* yield,
GCTimer* gc_timer);
@@ -307,6 +331,7 @@ class ReferenceProcessor : public ReferenceDiscoverer {
bool preclean_discovered_reflist(DiscoveredList& refs_list,
BoolObjectClosure* is_alive,
OopClosure* keep_alive,
EnqueueDiscoveredFieldClosure* enqueue,
VoidClosure* complete_gc,
YieldClosure* yield);

@@ -542,7 +567,8 @@ class RefProcTask : StackObj {
void process_discovered_list(uint worker_id,
ReferenceType ref_type,
BoolObjectClosure* is_alive,
OopClosure* keep_alive);
OopClosure* keep_alive,
EnqueueDiscoveredFieldClosure* enqueue);
public:
RefProcTask(ReferenceProcessor& ref_processor,
ReferenceProcessorPhaseTimes* phase_times)
@@ -552,6 +578,7 @@ class RefProcTask : StackObj {
virtual void rp_work(uint worker_id,
BoolObjectClosure* is_alive,
OopClosure* keep_alive,
EnqueueDiscoveredFieldClosure* enqueue,
VoidClosure* complete_gc) = 0;
};

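The comment on EnqueueDiscoveredFieldClosure above notes that for some collectors the barrier environment (e.g. the card table) may not be set up correctly when the discovered field is written. As a purely hypothetical illustration (not part of this change), such a collector could supply a raw, barrier-free store instead of BarrierEnqueueDiscoveredFieldClosure. The include paths and the availability of RawAccess<>::oop_store_at here are assumptions, not taken from the patch:

#include "classfile/javaClasses.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "oops/access.inline.hpp"

// Hypothetical example: write reference.discovered without running GC barriers;
// the collector would then be responsible for any remembered-set work itself.
class RawEnqueueDiscoveredFieldClosure : public EnqueueDiscoveredFieldClosure {
public:
  void enqueue(oop reference, oop value) override {
    RawAccess<>::oop_store_at(reference,   // assumed raw-store API, for illustration
                              java_lang_ref_Reference::discovered_offset(),
                              value);
  }
};

Such a closure would be stack-allocated in the collector's RefProcProxyTask::work() and handed to rp_work() in place of the default closure, exactly as the proxy tasks earlier in this diff do with BarrierEnqueueDiscoveredFieldClosure.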
4 changes: 3 additions & 1 deletion src/hotspot/share/gc/shared/referenceProcessor.inline.hpp
@@ -60,7 +60,8 @@ void DiscoveredList::clear() {

DiscoveredListIterator::DiscoveredListIterator(DiscoveredList& refs_list,
OopClosure* keep_alive,
BoolObjectClosure* is_alive):
BoolObjectClosure* is_alive,
EnqueueDiscoveredFieldClosure* enqueue):
_refs_list(refs_list),
_prev_discovered_addr(refs_list.adr_head()),
_prev_discovered(NULL),
@@ -70,6 +71,7 @@ DiscoveredListIterator::DiscoveredListIterator(DiscoveredList& refs_list,
_referent(NULL),
_keep_alive(keep_alive),
_is_alive(is_alive),
_enqueue(enqueue),
#ifdef ASSERT
_first_seen(refs_list.head()),
#endif

1 comment on commit 51085b5

@openjdk-notifier
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please sign in to comment.