Skip to content

Commit 1769c48

Browse files
committed
8255471: ZGC: Rework root iterators and closures
Reviewed-by: eosterlund, pliden
1 parent b028074 commit 1769c48

17 files changed

+398
-352
lines changed

src/hotspot/share/gc/z/zBarrierSetNMethod.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ bool ZBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
5555

5656
// Heal oops and disarm
5757
ZNMethodOopClosure cl;
58-
ZNMethod::nmethod_oops_do(nm, &cl);
58+
ZNMethod::nmethod_oops_do_inner(nm, &cl);
5959
disarm(nm);
6060

6161
return true;

src/hotspot/share/gc/z/zHeapIterator.cpp

Lines changed: 86 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -23,12 +23,14 @@
2323

2424
#include "precompiled.hpp"
2525
#include "classfile/classLoaderData.hpp"
26+
#include "gc/shared/barrierSetNMethod.hpp"
2627
#include "gc/shared/taskqueue.inline.hpp"
2728
#include "gc/z/zAddress.inline.hpp"
2829
#include "gc/z/zGlobals.hpp"
2930
#include "gc/z/zGranuleMap.inline.hpp"
3031
#include "gc/z/zHeapIterator.hpp"
3132
#include "gc/z/zLock.inline.hpp"
33+
#include "gc/z/zNMethod.hpp"
3234
#include "gc/z/zOop.inline.hpp"
3335
#include "memory/iterator.inline.hpp"
3436
#include "utilities/bitMap.inline.hpp"
@@ -92,8 +94,8 @@ class ZHeapIteratorContext {
9294
}
9395
};
9496

95-
template <bool Concurrent, bool Weak>
96-
class ZHeapIteratorRootOopClosure : public ZRootsIteratorClosure {
97+
template <bool Weak>
98+
class ZHeapIteratorRootOopClosure : public OopClosure {
9799
private:
98100
const ZHeapIteratorContext& _context;
99101

@@ -102,11 +104,7 @@ class ZHeapIteratorRootOopClosure : public ZRootsIteratorClosure {
102104
return NativeAccess<AS_NO_KEEPALIVE | ON_PHANTOM_OOP_REF>::oop_load(p);
103105
}
104106

105-
if (Concurrent) {
106-
return NativeAccess<AS_NO_KEEPALIVE>::oop_load(p);
107-
}
108-
109-
return RawAccess<>::oop_load(p);
107+
return NativeAccess<AS_NO_KEEPALIVE>::oop_load(p);
110108
}
111109

112110
public:
@@ -121,22 +119,6 @@ class ZHeapIteratorRootOopClosure : public ZRootsIteratorClosure {
121119
virtual void do_oop(narrowOop* p) {
122120
ShouldNotReachHere();
123121
}
124-
125-
virtual void do_thread(Thread* thread) {
126-
CodeBlobToOopClosure code_cl(this, false /* fix_oop_relocations */);
127-
thread->oops_do(this, &code_cl);
128-
}
129-
130-
virtual ZNMethodEntry nmethod_entry() const {
131-
if (ClassUnloading) {
132-
// All encountered nmethods should have been "entered" during stack walking
133-
return ZNMethodEntry::VerifyDisarmed;
134-
} else {
135-
// All nmethods are considered roots and will be visited.
136-
// Make sure that the unvisited gets fixed and disarmed before proceeding.
137-
return ZNMethodEntry::PreBarrier;
138-
}
139-
}
140122
};
141123

142124
template <bool VisitReferents>
@@ -180,7 +162,7 @@ ZHeapIterator::ZHeapIterator(uint nworkers, bool visit_weaks) :
180162
_bitmaps_lock(),
181163
_queues(nworkers),
182164
_array_queues(nworkers),
183-
_concurrent_roots(),
165+
_concurrent_roots(ClassLoaderData::_claim_other),
184166
_weak_roots(),
185167
_concurrent_weak_roots(),
186168
_terminator(nworkers, &_queues) {
@@ -255,10 +237,83 @@ bool ZHeapIterator::mark_object(oop obj) {
255237
return bitmap->try_set_bit(index);
256238
}
257239

258-
template <bool Concurrent, bool Weak, typename RootsIterator>
259-
void ZHeapIterator::push_roots(const ZHeapIteratorContext& context, RootsIterator& iter) {
260-
ZHeapIteratorRootOopClosure<Concurrent, Weak> cl(context);
261-
iter.oops_do(&cl);
240+
typedef ClaimingCLDToOopClosure<ClassLoaderData::_claim_other> ZHeapIteratorCLDCLosure;
241+
242+
class ZHeapIteratorNMethodClosure : public NMethodClosure {
243+
private:
244+
OopClosure* const _cl;
245+
BarrierSetNMethod* const _bs_nm;
246+
247+
public:
248+
ZHeapIteratorNMethodClosure(OopClosure* cl) :
249+
_cl(cl),
250+
_bs_nm(BarrierSet::barrier_set()->barrier_set_nmethod()) {}
251+
252+
virtual void do_nmethod(nmethod* nm) {
253+
assert(!ClassUnloading, "Only used if class unloading is turned off");
254+
255+
// ClassUnloading is turned off, all nmethods are considered strong,
256+
// not only those on the call stacks. The heap iteration might happen
257+
// before the concurrent processing of the code cache, make sure that
258+
// all nmethods have been processed before visiting the oops.
259+
_bs_nm->nmethod_entry_barrier(nm);
260+
261+
ZNMethod::nmethod_oops_do(nm, _cl);
262+
}
263+
};
264+
265+
class ZHeapIteratorThreadClosure : public ThreadClosure {
266+
private:
267+
OopClosure* const _cl;
268+
269+
class NMethodVisitor : public CodeBlobToOopClosure {
270+
public:
271+
NMethodVisitor(OopClosure* cl) :
272+
CodeBlobToOopClosure(cl, false /* fix_oop_relocations */) {}
273+
274+
void do_code_blob(CodeBlob* cb) {
275+
assert(!cb->is_nmethod() || !ZNMethod::is_armed(cb->as_nmethod()),
276+
"NMethods on stack should have been fixed and disarmed");
277+
278+
CodeBlobToOopClosure::do_code_blob(cb);
279+
}
280+
};
281+
282+
public:
283+
ZHeapIteratorThreadClosure(OopClosure* cl) : _cl(cl) {}
284+
285+
void do_thread(Thread* thread) {
286+
NMethodVisitor code_cl(_cl);
287+
thread->oops_do(_cl, &code_cl);
288+
}
289+
};
290+
291+
void ZHeapIterator::push_strong_roots(const ZHeapIteratorContext& context) {
292+
ZHeapIteratorRootOopClosure<false /* Weak */> cl(context);
293+
ZHeapIteratorCLDCLosure cld_cl(&cl);
294+
ZHeapIteratorNMethodClosure nm_cl(&cl);
295+
ZHeapIteratorThreadClosure thread_cl(&cl);
296+
297+
_concurrent_roots.apply(&cl,
298+
&cld_cl,
299+
&thread_cl,
300+
&nm_cl);
301+
}
302+
303+
void ZHeapIterator::push_weak_roots(const ZHeapIteratorContext& context) {
304+
ZHeapIteratorRootOopClosure<true /* Weak */> cl(context);
305+
_concurrent_weak_roots.apply(&cl);
306+
307+
AlwaysTrueClosure is_alive;
308+
_weak_roots.apply(&is_alive, &cl);
309+
}
310+
311+
template <bool VisitWeaks>
312+
void ZHeapIterator::push_roots(const ZHeapIteratorContext& context) {
313+
push_strong_roots(context);
314+
if (VisitWeaks) {
315+
push_weak_roots(context);
316+
}
262317
}
263318

264319
template <bool VisitReferents>
@@ -343,14 +398,9 @@ void ZHeapIterator::drain_and_steal(const ZHeapIteratorContext& context, ObjectC
343398
}
344399

345400
template <bool VisitWeaks>
346-
void ZHeapIterator::object_iterate_inner(const ZHeapIteratorContext& context, ObjectClosure* cl) {
347-
push_roots<true /* Concurrent */, false /* Weak */>(context, _concurrent_roots);
348-
if (VisitWeaks) {
349-
push_roots<false /* Concurrent */, true /* Weak */>(context, _weak_roots);
350-
push_roots<true /* Concurrent */, true /* Weak */>(context, _concurrent_weak_roots);
351-
}
352-
353-
drain_and_steal<VisitWeaks>(context, cl);
401+
void ZHeapIterator::object_iterate_inner(const ZHeapIteratorContext& context, ObjectClosure* object_cl) {
402+
push_roots<VisitWeaks>(context);
403+
drain_and_steal<VisitWeaks>(context, object_cl);
354404
}
355405

356406
void ZHeapIterator::object_iterate(ObjectClosure* cl, uint worker_id) {

src/hotspot/share/gc/z/zHeapIterator.hpp

Lines changed: 15 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -46,23 +46,26 @@ class ZHeapIterator : public ParallelObjectIterator {
4646
friend class ZHeapIteratorContext;
4747

4848
private:
49-
const bool _visit_weaks;
50-
ZStatTimerDisable _timer_disable;
51-
ZHeapIteratorBitMaps _bitmaps;
52-
ZLock _bitmaps_lock;
53-
ZHeapIteratorQueues _queues;
54-
ZHeapIteratorArrayQueues _array_queues;
55-
ZConcurrentRootsIteratorClaimOther _concurrent_roots;
56-
ZWeakRootsIterator _weak_roots;
57-
ZConcurrentWeakRootsIterator _concurrent_weak_roots;
58-
TaskTerminator _terminator;
49+
const bool _visit_weaks;
50+
ZStatTimerDisable _timer_disable;
51+
ZHeapIteratorBitMaps _bitmaps;
52+
ZLock _bitmaps_lock;
53+
ZHeapIteratorQueues _queues;
54+
ZHeapIteratorArrayQueues _array_queues;
55+
ZConcurrentRootsIterator _concurrent_roots;
56+
ZWeakRootsIterator _weak_roots;
57+
ZConcurrentWeakRootsIterator _concurrent_weak_roots;
58+
TaskTerminator _terminator;
5959

6060
ZHeapIteratorBitMap* object_bitmap(oop obj);
6161

6262
bool mark_object(oop obj);
6363

64-
template <bool Concurrent, bool Weak, typename RootsIterator>
65-
void push_roots(const ZHeapIteratorContext& context, RootsIterator& iter);
64+
void push_strong_roots(const ZHeapIteratorContext& context);
65+
void push_weak_roots(const ZHeapIteratorContext& context);
66+
67+
template <bool VisitWeaks>
68+
void push_roots(const ZHeapIteratorContext& context);
6669

6770
template <bool VisitReferents>
6871
void follow_object(const ZHeapIteratorContext& context, oop obj);

src/hotspot/share/gc/z/zMark.cpp

Lines changed: 58 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -23,12 +23,15 @@
2323

2424
#include "precompiled.hpp"
2525
#include "classfile/classLoaderDataGraph.hpp"
26+
#include "code/nmethod.hpp"
2627
#include "gc/shared/suspendibleThreadSet.hpp"
2728
#include "gc/z/zBarrier.inline.hpp"
29+
#include "gc/z/zLock.inline.hpp"
2830
#include "gc/z/zMark.inline.hpp"
2931
#include "gc/z/zMarkCache.inline.hpp"
3032
#include "gc/z/zMarkStack.inline.hpp"
3133
#include "gc/z/zMarkTerminate.inline.hpp"
34+
#include "gc/z/zNMethod.hpp"
3235
#include "gc/z/zOopClosures.inline.hpp"
3336
#include "gc/z/zPage.hpp"
3437
#include "gc/z/zPageTable.inline.hpp"
@@ -572,50 +575,79 @@ void ZMark::work(uint64_t timeout_in_micros) {
572575
stacks->free(&_allocator);
573576
}
574577

575-
class ZMarkConcurrentRootsIteratorClosure : public ZRootsIteratorClosure {
576-
public:
577-
ZMarkConcurrentRootsIteratorClosure() {
578-
ZThreadLocalAllocBuffer::reset_statistics();
578+
class ZMarkOopClosure : public OopClosure {
579+
virtual void do_oop(oop* p) {
580+
ZBarrier::mark_barrier_on_oop_field(p, false /* finalizable */);
579581
}
580582

581-
~ZMarkConcurrentRootsIteratorClosure() {
582-
ZThreadLocalAllocBuffer::publish_statistics();
583+
virtual void do_oop(narrowOop* p) {
584+
ShouldNotReachHere();
583585
}
586+
};
584587

585-
virtual ZNMethodEntry nmethod_entry() const {
586-
// Only apply closure to armed nmethods, and then disarm them.
587-
return ZNMethodEntry::Disarm;
588-
}
588+
class ZMarkThreadClosure : public ThreadClosure {
589+
private:
590+
OopClosure* const _cl;
589591

592+
public:
593+
ZMarkThreadClosure(OopClosure* cl) :
594+
_cl(cl) {
595+
ZThreadLocalAllocBuffer::reset_statistics();
596+
}
597+
~ZMarkThreadClosure() {
598+
ZThreadLocalAllocBuffer::publish_statistics();
599+
}
590600
virtual void do_thread(Thread* thread) {
591601
JavaThread* const jt = thread->as_Java_thread();
592-
StackWatermarkSet::finish_processing(jt, this, StackWatermarkKind::gc);
602+
StackWatermarkSet::finish_processing(jt, _cl, StackWatermarkKind::gc);
593603
ZThreadLocalAllocBuffer::update_stats(jt);
594604
}
605+
};
595606

596-
virtual void do_oop(oop* p) {
597-
ZBarrier::mark_barrier_on_oop_field(p, false /* finalizable */);
598-
}
607+
class ZMarkNMethodClosure : public NMethodClosure {
608+
private:
609+
OopClosure* const _cl;
599610

600-
virtual void do_oop(narrowOop* p) {
601-
ShouldNotReachHere();
611+
public:
612+
ZMarkNMethodClosure(OopClosure* cl) :
613+
_cl(cl) {}
614+
615+
virtual void do_nmethod(nmethod* nm) {
616+
ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
617+
if (!nm->is_alive()) {
618+
return;
619+
}
620+
621+
if (ZNMethod::is_armed(nm)) {
622+
ZNMethod::nmethod_oops_do_inner(nm, _cl);
623+
ZNMethod::disarm(nm);
624+
}
602625
}
603626
};
604627

628+
typedef ClaimingCLDToOopClosure<ClassLoaderData::_claim_strong> ZMarkCLDClosure;
629+
605630
class ZMarkConcurrentRootsTask : public ZTask {
606631
private:
607-
ZMark* const _mark;
608-
SuspendibleThreadSetJoiner _sts_joiner;
609-
ZConcurrentRootsIteratorClaimStrong _roots;
610-
ZMarkConcurrentRootsIteratorClosure _cl;
632+
ZMark* const _mark;
633+
SuspendibleThreadSetJoiner _sts_joiner;
634+
ZConcurrentRootsIterator _roots;
635+
636+
ZMarkOopClosure _cl;
637+
ZMarkCLDClosure _cld_cl;
638+
ZMarkThreadClosure _thread_cl;
639+
ZMarkNMethodClosure _nm_cl;
611640

612641
public:
613642
ZMarkConcurrentRootsTask(ZMark* mark) :
614643
ZTask("ZMarkConcurrentRootsTask"),
615644
_mark(mark),
616645
_sts_joiner(),
617-
_roots(),
618-
_cl() {
646+
_roots(ClassLoaderData::_claim_strong),
647+
_cl(),
648+
_cld_cl(&_cl),
649+
_thread_cl(&_cl),
650+
_nm_cl(&_cl) {
619651
ClassLoaderDataGraph_lock->lock();
620652
}
621653

@@ -624,7 +656,10 @@ class ZMarkConcurrentRootsTask : public ZTask {
624656
}
625657

626658
virtual void work() {
627-
_roots.oops_do(&_cl);
659+
_roots.apply(&_cl,
660+
&_cld_cl,
661+
&_thread_cl,
662+
&_nm_cl);
628663

629664
// Flush and free worker stacks. Needed here since
630665
// the set of workers executing during root scanning

0 commit comments

Comments (0)