8298191: Enhance object reclamation process
Reviewed-by: johnc, rkennke
AntonKozlov authored and RealCLanger committed Apr 12, 2023
Parent: 13b5fe6 · Commit: dfded6d
Showing 12 changed files with 121 additions and 36 deletions.
28 changes: 27 additions & 1 deletion src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -3201,6 +3201,31 @@ class G1CopyingKeepAliveClosure: public OopClosure {
   }
 };

+// Special closure for enqueuing discovered fields: during enqueue the card table
+// may not be in shape to properly handle normal barrier calls (e.g. card marks
+// in regions that failed evacuation, scribbling of various values by card table
+// scan code). Additionally the regular barrier enqueues into the "global"
+// DCQS, but during GC we need these to-be-refined entries in the GC local queue
+// so that after clearing the card table, the redirty cards phase will properly
+// mark all dirty cards to be picked up by refinement.
+class G1EnqueueDiscoveredFieldClosure : public EnqueueDiscoveredFieldClosure {
+  G1CollectedHeap* _g1h;
+  G1ParScanThreadState* _pss;
+
+public:
+  G1EnqueueDiscoveredFieldClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) : _g1h(g1h), _pss(pss) { }
+
+  virtual void enqueue(HeapWord* discovered_field_addr, oop value) {
+    assert(_g1h->is_in(discovered_field_addr), PTR_FORMAT " is not in heap ", p2i(discovered_field_addr));
+    // Store the value first, whatever it is.
+    RawAccess<>::oop_store(discovered_field_addr, value);
+    if (value == NULL) {
+      return;
+    }
+    _pss->write_ref_field_post(discovered_field_addr, value);
+  }
+};
+
 // Serial drain queue closure. Called as the 'complete_gc'
 // closure for each discovered list in some of the
 // reference processing phases.
@@ -3245,7 +3270,8 @@ class G1STWRefProcProxyTask : public RefProcProxyTask {
     G1STWIsAliveClosure is_alive(&_g1h);
     G1CopyingKeepAliveClosure keep_alive(&_g1h, _pss.state_for_worker(index));
     G1ParEvacuateFollowersClosure complete_gc(&_g1h, _pss.state_for_worker(index), &_task_queues, _tm == RefProcThreadModel::Single ? nullptr : &_terminator, G1GCPhaseTimes::ObjCopy);
-    _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &complete_gc);
+    G1EnqueueDiscoveredFieldClosure enqueue(&_g1h, _pss.state_for_worker(index));
+    _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, &complete_gc);
   }

   void prepare_run_task_hook() override {
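Note on the shape of this change: the reference processor no longer hard-codes how a discovered field is written back; each collector injects an EnqueueDiscoveredFieldClosure. The following stand-alone sketch illustrates only the pattern, not HotSpot code; HeapWord and oop here are simplified stand-ins, and the printed messages are placeholders for the real barrier work.

// Illustrative sketch of the closure-injection pattern in this commit.
// Not HotSpot code: HeapWord/oop are simplified stand-ins.
#include <cstdio>

using HeapWord = void;
using oop      = void*;

struct EnqueueDiscoveredFieldClosure {            // mirrors the new interface
  virtual void enqueue(HeapWord* field, oop value) = 0;
  virtual ~EnqueueDiscoveredFieldClosure() = default;
};

struct BarrierEnqueue : EnqueueDiscoveredFieldClosure {    // default variant
  void enqueue(HeapWord* field, oop value) override {
    *static_cast<oop*>(field) = value;    // store through the normal barrier
  }
};

struct GCLocalEnqueue : EnqueueDiscoveredFieldClosure {    // G1 STW variant
  void enqueue(HeapWord* field, oop value) override {
    *static_cast<oop*>(field) = value;    // raw store first, whatever value is
    if (value == nullptr) return;         // NULL stores need no card
    std::puts("card recorded in GC-local queue");  // post-barrier stand-in
  }
};

// mirrors rp_work: the phase receives the closure instead of choosing one
void rp_work(EnqueueDiscoveredFieldClosure* enqueue, oop* field, oop value) {
  enqueue->enqueue(field, value);
}

int main() {
  oop slot = nullptr;
  int referent = 0;
  BarrierEnqueue barrier;
  GCLocalEnqueue gc_local;
  rp_work(&barrier, &slot, &referent);    // e.g. concurrent mark, full GC
  rp_work(&gc_local, &slot, &referent);   // e.g. G1 evacuation pause
}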
3 changes: 2 additions & 1 deletion src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
@@ -1478,8 +1478,9 @@ class G1CMRefProcProxyTask : public RefProcProxyTask {
     G1CMIsAliveClosure is_alive(&_g1h);
     uint index = (_tm == RefProcThreadModel::Single) ? 0 : worker_id;
     G1CMKeepAliveAndDrainClosure keep_alive(&_cm, _cm.task(index), _tm == RefProcThreadModel::Single);
+    BarrierEnqueueDiscoveredFieldClosure enqueue;
     G1CMDrainMarkingStackClosure complete_gc(&_cm, _cm.task(index), _tm == RefProcThreadModel::Single);
-    _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &complete_gc);
+    _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, &complete_gc);
   }

   void prepare_run_task_hook() override {
3 changes: 2 additions & 1 deletion src/hotspot/share/gc/g1/g1FullCollector.cpp
@@ -259,8 +259,9 @@ class G1FullGCRefProcProxyTask : public RefProcProxyTask {
     G1IsAliveClosure is_alive(&_collector);
     uint index = (_tm == RefProcThreadModel::Single) ? 0 : worker_id;
     G1FullKeepAliveClosure keep_alive(_collector.marker(index));
+    BarrierEnqueueDiscoveredFieldClosure enqueue;
     G1FollowStackClosure* complete_gc = _collector.marker(index)->stack_closure();
-    _rp_task->rp_work(worker_id, &is_alive, &keep_alive, complete_gc);
+    _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, complete_gc);
   }
 };

9 changes: 1 addition & 8 deletions src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
@@ -203,14 +203,7 @@ void G1ParScanThreadState::do_oop_evac(T* p) {
   }
   RawAccess<IS_NOT_NULL>::oop_store(p, obj);

-  assert(obj != NULL, "Must be");
-  if (HeapRegion::is_in_same_region(p, obj)) {
-    return;
-  }
-  HeapRegion* from = _g1h->heap_region_containing(p);
-  if (!from->is_young()) {
-    enqueue_card_if_tracked(_g1h->region_attr(obj), p, obj);
-  }
+  write_ref_field_post(p, obj);
 }

 MAYBE_INLINE_EVACUATION
6 changes: 6 additions & 0 deletions src/hotspot/share/gc/g1/g1ParScanThreadState.hpp
@@ -128,6 +128,12 @@ class G1ParScanThreadState : public CHeapObj<mtGC> {

   void push_on_queue(ScannerTask task);

+  // Apply the post barrier to the given reference field. Enqueues the card of p
+  // if the barrier does not filter out the reference for some reason (e.g.
+  // p and q are in the same region, p is in survivor, p is in collection set)
+  // To be called during GC if nothing particular about p and obj are known.
+  template <class T> void write_ref_field_post(T* p, oop obj);
+
   template <class T> void enqueue_card_if_tracked(G1HeapRegionAttr region_attr, T* p, oop o) {
     assert(!HeapRegion::is_in_same_region(p, o), "Should have filtered out cross-region references already.");
     assert(!_g1h->heap_region_containing(p)->is_young(), "Should have filtered out from-young references already.");
12 changes: 12 additions & 0 deletions src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp
@@ -95,4 +95,16 @@ G1OopStarChunkedList* G1ParScanThreadState::oops_into_optional_region(const Heap
   return &_oops_into_optional_regions[hr->index_in_opt_cset()];
 }

+template <class T> void G1ParScanThreadState::write_ref_field_post(T* p, oop obj) {
+  assert(obj != NULL, "Must be");
+  if (HeapRegion::is_in_same_region(p, obj)) {
+    return;
+  }
+  HeapRegion* from = _g1h->heap_region_containing(p);
+  if (!from->is_young()) {
+    enqueue_card_if_tracked(_g1h->region_attr(obj), p, obj);
+  }
+}
+
+
 #endif // SHARE_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP
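With the filtering logic extracted into write_ref_field_post, both stores in this commit funnel through the same helper. For clarity, the two call sites, quoted from the hunks above:

// In do_oop_evac (g1ParScanThreadState.cpp):
RawAccess<IS_NOT_NULL>::oop_store(p, obj);
write_ref_field_post(p, obj);

// In G1EnqueueDiscoveredFieldClosure::enqueue (g1CollectedHeap.cpp):
RawAccess<>::oop_store(discovered_field_addr, value);
if (value == NULL) {
  return;
}
_pss->write_ref_field_post(discovered_field_addr, value);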
3 changes: 2 additions & 1 deletion src/hotspot/share/gc/parallel/psParallelCompact.cpp
@@ -2067,8 +2067,9 @@ class ParallelCompactRefProcProxyTask : public RefProcProxyTask {
     assert(worker_id < _max_workers, "sanity");
     ParCompactionManager* cm = (_tm == RefProcThreadModel::Single) ? ParCompactionManager::get_vmthread_cm() : ParCompactionManager::gc_thread_compaction_manager(worker_id);
     PCMarkAndPushClosure keep_alive(cm);
+    BarrierEnqueueDiscoveredFieldClosure enqueue;
     ParCompactionManager::FollowStackClosure complete_gc(cm, (_tm == RefProcThreadModel::Single) ? nullptr : &_terminator, worker_id);
-    _rp_task->rp_work(worker_id, PSParallelCompact::is_alive_closure(), &keep_alive, &complete_gc);
+    _rp_task->rp_work(worker_id, PSParallelCompact::is_alive_closure(), &keep_alive, &enqueue, &complete_gc);
   }

   void prepare_run_task_hook() override {
5 changes: 3 additions & 2 deletions src/hotspot/share/gc/parallel/psScavenge.cpp
@@ -210,9 +210,10 @@ class ParallelScavengeRefProcProxyTask : public RefProcProxyTask {
     assert(worker_id < _max_workers, "sanity");
     PSPromotionManager* promotion_manager = (_tm == RefProcThreadModel::Single) ? PSPromotionManager::vm_thread_promotion_manager() : PSPromotionManager::gc_thread_promotion_manager(worker_id);
     PSIsAliveClosure is_alive;
-    PSKeepAliveClosure keep_alive(promotion_manager);;
+    PSKeepAliveClosure keep_alive(promotion_manager);
+    BarrierEnqueueDiscoveredFieldClosure enqueue;
     PSEvacuateFollowersClosure complete_gc(promotion_manager, (_marks_oops_alive && _tm == RefProcThreadModel::Multi) ? &_terminator : nullptr, worker_id);;
-    _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &complete_gc);
+    _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, &complete_gc);
   }

   void prepare_run_task_hook() override {
3 changes: 2 additions & 1 deletion src/hotspot/share/gc/serial/serialGcRefProcProxyTask.hpp
@@ -41,7 +41,8 @@ class SerialGCRefProcProxyTask : public RefProcProxyTask {

   void work(uint worker_id) override {
     assert(worker_id < _max_workers, "sanity");
-    _rp_task->rp_work(worker_id, &_is_alive, &_keep_alive, &_complete_gc);
+    BarrierEnqueueDiscoveredFieldClosure enqueue;
+    _rp_task->rp_work(worker_id, &_is_alive, &_keep_alive, &enqueue, &_complete_gc);
   }
 };

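The serial, parallel, and concurrent/full-GC proxy tasks above all receive the same mechanical plumbing: construct the default barrier closure on the stack and thread it through rp_work. The exact closure arguments differ per collector, but the pattern is, e.g.:

BarrierEnqueueDiscoveredFieldClosure enqueue;
_rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, &complete_gc);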
46 changes: 30 additions & 16 deletions src/hotspot/share/gc/shared/referenceProcessor.cpp
@@ -245,6 +245,12 @@ ReferenceProcessorStats ReferenceProcessor::process_discovered_references(RefPro
   return stats;
 }

+void BarrierEnqueueDiscoveredFieldClosure::enqueue(HeapWord* discovered_field_addr, oop value) {
+  assert(Universe::heap()->is_in(discovered_field_addr), PTR_FORMAT " not in heap", p2i(discovered_field_addr));
+  HeapAccess<AS_NO_KEEPALIVE>::oop_store(discovered_field_addr,
+                                         value);
+}
+
 void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
   _current_discovered_addr = java_lang_ref_Reference::discovered_addr_raw(_current_discovered);
   oop discovered = java_lang_ref_Reference::discovered(_current_discovered);
@@ -304,12 +310,12 @@ void DiscoveredListIterator::enqueue() {
 }

 void DiscoveredListIterator::complete_enqueue() {
-  if (_prev_discovered != NULL) {
+  if (_prev_discovered != nullptr) {
     // This is the last object.
     // Swap refs_list into pending list and set obj's
     // discovered to what we read from the pending list.
     oop old = Universe::swap_reference_pending_list(_refs_list.head());
-    HeapAccess<AS_NO_KEEPALIVE>::oop_store_at(_prev_discovered, java_lang_ref_Reference::discovered_offset(), old);
+    _enqueue->enqueue(java_lang_ref_Reference::discovered_addr_raw(_prev_discovered), old);
   }
 }

@@ -337,7 +343,7 @@ size_t ReferenceProcessor::process_soft_ref_reconsider_work(DiscoveredList& r
                                                             OopClosure* keep_alive,
                                                             VoidClosure* complete_gc) {
   assert(policy != NULL, "Must have a non-NULL policy");
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, NULL /* enqueue */);
   // Decide which softly reachable refs should be kept alive.
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
@@ -365,8 +371,9 @@ size_t ReferenceProcessor::process_soft_ref_reconsider_work(DiscoveredList& r
 size_t ReferenceProcessor::process_soft_weak_final_refs_work(DiscoveredList& refs_list,
                                                              BoolObjectClosure* is_alive,
                                                              OopClosure* keep_alive,
+                                                             EnqueueDiscoveredFieldClosure* enqueue,
                                                              bool do_enqueue_and_clear) {
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, enqueue);
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
     if (iter.referent() == NULL) {
@@ -409,8 +416,9 @@ size_t ReferenceProcessor::process_soft_weak_final_refs_work(DiscoveredList&

 size_t ReferenceProcessor::process_final_keep_alive_work(DiscoveredList& refs_list,
                                                          OopClosure* keep_alive,
-                                                         VoidClosure* complete_gc) {
-  DiscoveredListIterator iter(refs_list, keep_alive, NULL);
+                                                         VoidClosure* complete_gc,
+                                                         EnqueueDiscoveredFieldClosure* enqueue) {
+  DiscoveredListIterator iter(refs_list, keep_alive, NULL, enqueue);
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
     // keep the referent and followers around
@@ -436,8 +444,9 @@ size_t ReferenceProcessor::process_final_keep_alive_work(DiscoveredList& refs_li
 size_t ReferenceProcessor::process_phantom_refs_work(DiscoveredList& refs_list,
                                                      BoolObjectClosure* is_alive,
                                                      OopClosure* keep_alive,
-                                                     VoidClosure* complete_gc) {
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+                                                     VoidClosure* complete_gc,
+                                                     EnqueueDiscoveredFieldClosure* enqueue) {
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, enqueue);
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));

@@ -509,8 +518,6 @@ size_t ReferenceProcessor::total_reference_count(ReferenceType type) const {
   return total_count(list);
 }

-
-
 class RefProcPhase1Task : public RefProcTask {
 public:
   RefProcPhase1Task(ReferenceProcessor& ref_processor,
@@ -523,6 +530,7 @@ class RefProcPhase1Task : public RefProcTask {
   void rp_work(uint worker_id,
                BoolObjectClosure* is_alive,
                OopClosure* keep_alive,
+               EnqueueDiscoveredFieldClosure* enqueue,
                VoidClosure* complete_gc) override {
     ResourceMark rm;
     RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::SoftRefSubPhase1, _phase_times, worker_id);
@@ -543,11 +551,13 @@ class RefProcPhase2Task: public RefProcTask {
                   DiscoveredList list[],
                   BoolObjectClosure* is_alive,
                   OopClosure* keep_alive,
+                  EnqueueDiscoveredFieldClosure* enqueue,
                   bool do_enqueue_and_clear,
                   ReferenceType ref_type) {
     size_t const removed = _ref_processor.process_soft_weak_final_refs_work(list[worker_id],
                                                                             is_alive,
                                                                             keep_alive,
+                                                                            enqueue,
                                                                             do_enqueue_and_clear);
     _phase_times->add_ref_cleared(ref_type, removed);
   }
@@ -561,20 +571,21 @@ class RefProcPhase2Task: public RefProcTask {
   void rp_work(uint worker_id,
                BoolObjectClosure* is_alive,
                OopClosure* keep_alive,
+               EnqueueDiscoveredFieldClosure* enqueue,
                VoidClosure* complete_gc) override {
     ResourceMark rm;
     RefProcWorkerTimeTracker t(_phase_times->phase2_worker_time_sec(), worker_id);
     {
       RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::SoftRefSubPhase2, _phase_times, worker_id);
-      run_phase2(worker_id, _ref_processor._discoveredSoftRefs, is_alive, keep_alive, true /* do_enqueue_and_clear */, REF_SOFT);
+      run_phase2(worker_id, _ref_processor._discoveredSoftRefs, is_alive, keep_alive, enqueue, true /* do_enqueue_and_clear */, REF_SOFT);
     }
     {
       RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::WeakRefSubPhase2, _phase_times, worker_id);
-      run_phase2(worker_id, _ref_processor._discoveredWeakRefs, is_alive, keep_alive, true /* do_enqueue_and_clear */, REF_WEAK);
+      run_phase2(worker_id, _ref_processor._discoveredWeakRefs, is_alive, keep_alive, enqueue, true /* do_enqueue_and_clear */, REF_WEAK);
     }
     {
       RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::FinalRefSubPhase2, _phase_times, worker_id);
-      run_phase2(worker_id, _ref_processor._discoveredFinalRefs, is_alive, keep_alive, false /* do_enqueue_and_clear */, REF_FINAL);
+      run_phase2(worker_id, _ref_processor._discoveredFinalRefs, is_alive, keep_alive, enqueue, false /* do_enqueue_and_clear */, REF_FINAL);
     }
     // Close the reachable set; needed for collectors which keep_alive_closure do
     // not immediately complete their work.
@@ -592,10 +603,11 @@ class RefProcPhase3Task: public RefProcTask {
   void rp_work(uint worker_id,
                BoolObjectClosure* is_alive,
                OopClosure* keep_alive,
+               EnqueueDiscoveredFieldClosure* enqueue,
                VoidClosure* complete_gc) override {
     ResourceMark rm;
     RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::FinalRefSubPhase3, _phase_times, worker_id);
-    _ref_processor.process_final_keep_alive_work(_ref_processor._discoveredFinalRefs[worker_id], keep_alive, complete_gc);
+    _ref_processor.process_final_keep_alive_work(_ref_processor._discoveredFinalRefs[worker_id], keep_alive, complete_gc, enqueue);
   }
 };

@@ -609,13 +621,15 @@ class RefProcPhase4Task: public RefProcTask {
   void rp_work(uint worker_id,
                BoolObjectClosure* is_alive,
                OopClosure* keep_alive,
+               EnqueueDiscoveredFieldClosure* enqueue,
                VoidClosure* complete_gc) override {
     ResourceMark rm;
     RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::PhantomRefSubPhase4, _phase_times, worker_id);
     size_t const removed = _ref_processor.process_phantom_refs_work(_ref_processor._discoveredPhantomRefs[worker_id],
                                                                     is_alive,
                                                                     keep_alive,
-                                                                    complete_gc);
+                                                                    complete_gc,
+                                                                    enqueue);
     _phase_times->add_ref_cleared(REF_PHANTOM, removed);
   }
 };
@@ -1259,7 +1273,7 @@ bool ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_lis
                                                      OopClosure* keep_alive,
                                                      VoidClosure* complete_gc,
                                                      YieldClosure* yield) {
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, NULL /* enqueue */);
   while (iter.has_next()) {
     if (yield->should_return_fine_grain()) {
       return true;
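The iterator-side wiring lives in referenceProcessor.hpp, which is not part of this excerpt. A sketch of the assumed shape, inferred from the uses above (the _enqueue member and a fourth constructor argument; names and layout are assumptions, not quoted code). As the NULL /* enqueue */ arguments indicate, phases that never reach complete_enqueue (soft-ref reconsideration, precleaning) can safely pass NULL.

// Assumed sketch only; referenceProcessor.hpp is not shown in this excerpt.
class DiscoveredListIterator {
  // existing members elided
  EnqueueDiscoveredFieldClosure* const _enqueue;  // NULL for phases that never
                                                  // enqueue (reconsider, preclean)
public:
  DiscoveredListIterator(DiscoveredList&                refs_list,
                         OopClosure*                    keep_alive,
                         BoolObjectClosure*             is_alive,
                         EnqueueDiscoveredFieldClosure* enqueue);
};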
