8255471: ZGC: Rework root iterators and closures
Reviewed-by: eosterlund, pliden
stefank committed Nov 2, 2020
1 parent b028074 commit 1769c48
Showing 17 changed files with 398 additions and 352 deletions.
2 changes: 1 addition & 1 deletion src/hotspot/share/gc/z/zBarrierSetNMethod.cpp
@@ -55,7 +55,7 @@ bool ZBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
 
   // Heal oops and disarm
   ZNMethodOopClosure cl;
-  ZNMethod::nmethod_oops_do(nm, &cl);
+  ZNMethod::nmethod_oops_do_inner(nm, &cl);
   disarm(nm);
 
   return true;
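For readers unfamiliar with nmethod entry barriers: the function above heals the oops embedded in an nmethod and then disarms it, so subsequent calls skip the barrier until the GC re-arms it. Below is a minimal, self-contained C++ sketch of that heal-then-disarm idiom; all names (Object, Method, heal, entry_barrier) are hypothetical stand-ins, not HotSpot code.

// A minimal model of an nmethod entry barrier, with hypothetical
// stand-in types -- not HotSpot code. An "armed" method may contain
// stale embedded pointers; the barrier heals every slot and then
// disarms, so later calls skip straight through until the GC re-arms.
#include <atomic>
#include <cstdint>
#include <vector>

struct Object {
  std::uint64_t address_bits;  // stand-in for a colored pointer
};

struct Method {
  std::atomic<bool>    armed{true};
  std::vector<Object*> oop_slots;  // stand-in for oops embedded in code
};

// Stand-in for the GC load barrier: return the current pointer for a
// possibly stale one. Here it is simply the identity function.
static Object* heal(Object* obj) {
  return obj;
}

static bool entry_barrier(Method* m) {
  if (!m->armed.load(std::memory_order_acquire)) {
    return true;  // fast path: already healed this cycle
  }
  for (Object*& slot : m->oop_slots) {
    slot = heal(slot);  // heal oops...
  }
  m->armed.store(false, std::memory_order_release);  // ...and disarm
  return true;
}

int main() {
  Object o{0};
  Method m;
  m.oop_slots.push_back(&o);
  entry_barrier(&m);  // slow path: heals all slots, then disarms
  entry_barrier(&m);  // fast path: no work
}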
122 changes: 86 additions & 36 deletions src/hotspot/share/gc/z/zHeapIterator.cpp
@@ -23,12 +23,14 @@
 
 #include "precompiled.hpp"
 #include "classfile/classLoaderData.hpp"
+#include "gc/shared/barrierSetNMethod.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "gc/z/zAddress.inline.hpp"
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zGranuleMap.inline.hpp"
 #include "gc/z/zHeapIterator.hpp"
 #include "gc/z/zLock.inline.hpp"
+#include "gc/z/zNMethod.hpp"
 #include "gc/z/zOop.inline.hpp"
 #include "memory/iterator.inline.hpp"
 #include "utilities/bitMap.inline.hpp"
@@ -92,8 +94,8 @@ class ZHeapIteratorContext {
   }
 };
 
-template <bool Concurrent, bool Weak>
-class ZHeapIteratorRootOopClosure : public ZRootsIteratorClosure {
+template <bool Weak>
+class ZHeapIteratorRootOopClosure : public OopClosure {
 private:
   const ZHeapIteratorContext& _context;

@@ -102,11 +104,7 @@ class ZHeapIteratorRootOopClosure : public ZRootsIteratorClosure {
       return NativeAccess<AS_NO_KEEPALIVE | ON_PHANTOM_OOP_REF>::oop_load(p);
     }
 
-    if (Concurrent) {
-      return NativeAccess<AS_NO_KEEPALIVE>::oop_load(p);
-    }
-
-    return RawAccess<>::oop_load(p);
+    return NativeAccess<AS_NO_KEEPALIVE>::oop_load(p);
   }
 
 public:
@@ -121,22 +119,6 @@ class ZHeapIteratorRootOopClosure : public ZRootsIteratorClosure {
   virtual void do_oop(narrowOop* p) {
     ShouldNotReachHere();
   }
-
-  virtual void do_thread(Thread* thread) {
-    CodeBlobToOopClosure code_cl(this, false /* fix_oop_relocations */);
-    thread->oops_do(this, &code_cl);
-  }
-
-  virtual ZNMethodEntry nmethod_entry() const {
-    if (ClassUnloading) {
-      // All encountered nmethods should have been "entered" during stack walking
-      return ZNMethodEntry::VerifyDisarmed;
-    } else {
-      // All nmethods are considered roots and will be visited.
-      // Make sure that the unvisited gets fixed and disarmed before proceeding.
-      return ZNMethodEntry::PreBarrier;
-    }
-  }
 };
 
 template <bool VisitReferents>
@@ -180,7 +162,7 @@ ZHeapIterator::ZHeapIterator(uint nworkers, bool visit_weaks) :
     _bitmaps_lock(),
     _queues(nworkers),
     _array_queues(nworkers),
-    _concurrent_roots(),
+    _concurrent_roots(ClassLoaderData::_claim_other),
     _weak_roots(),
     _concurrent_weak_roots(),
     _terminator(nworkers, &_queues) {
@@ -255,10 +237,83 @@ bool ZHeapIterator::mark_object(oop obj) {
   return bitmap->try_set_bit(index);
 }
 
-template <bool Concurrent, bool Weak, typename RootsIterator>
-void ZHeapIterator::push_roots(const ZHeapIteratorContext& context, RootsIterator& iter) {
-  ZHeapIteratorRootOopClosure<Concurrent, Weak> cl(context);
-  iter.oops_do(&cl);
+typedef ClaimingCLDToOopClosure<ClassLoaderData::_claim_other> ZHeapIteratorCLDCLosure;
+
+class ZHeapIteratorNMethodClosure : public NMethodClosure {
+private:
+  OopClosure* const        _cl;
+  BarrierSetNMethod* const _bs_nm;
+
+public:
+  ZHeapIteratorNMethodClosure(OopClosure* cl) :
+      _cl(cl),
+      _bs_nm(BarrierSet::barrier_set()->barrier_set_nmethod()) {}
+
+  virtual void do_nmethod(nmethod* nm) {
+    assert(!ClassUnloading, "Only used if class unloading is turned off");
+
+    // ClassUnloading is turned off, so all nmethods are considered strong,
+    // not only those on the call stacks. The heap iteration might happen
+    // before the concurrent processing of the code cache. Make sure that
+    // all nmethods have been processed before visiting the oops.
+    _bs_nm->nmethod_entry_barrier(nm);
+
+    ZNMethod::nmethod_oops_do(nm, _cl);
+  }
+};
+
+class ZHeapIteratorThreadClosure : public ThreadClosure {
+private:
+  OopClosure* const _cl;
+
+  class NMethodVisitor : public CodeBlobToOopClosure {
+  public:
+    NMethodVisitor(OopClosure* cl) :
+        CodeBlobToOopClosure(cl, false /* fix_oop_relocations */) {}
+
+    void do_code_blob(CodeBlob* cb) {
+      assert(!cb->is_nmethod() || !ZNMethod::is_armed(cb->as_nmethod()),
+             "NMethods on stack should have been fixed and disarmed");
+
+      CodeBlobToOopClosure::do_code_blob(cb);
+    }
+  };
+
+public:
+  ZHeapIteratorThreadClosure(OopClosure* cl) : _cl(cl) {}
+
+  void do_thread(Thread* thread) {
+    NMethodVisitor code_cl(_cl);
+    thread->oops_do(_cl, &code_cl);
+  }
+};
+
+void ZHeapIterator::push_strong_roots(const ZHeapIteratorContext& context) {
+  ZHeapIteratorRootOopClosure<false /* Weak */> cl(context);
+  ZHeapIteratorCLDCLosure cld_cl(&cl);
+  ZHeapIteratorNMethodClosure nm_cl(&cl);
+  ZHeapIteratorThreadClosure thread_cl(&cl);
+
+  _concurrent_roots.apply(&cl,
+                          &cld_cl,
+                          &thread_cl,
+                          &nm_cl);
+}
+
+void ZHeapIterator::push_weak_roots(const ZHeapIteratorContext& context) {
+  ZHeapIteratorRootOopClosure<true /* Weak */> cl(context);
+  _concurrent_weak_roots.apply(&cl);
+
+  AlwaysTrueClosure is_alive;
+  _weak_roots.apply(&is_alive, &cl);
+}
+
+template <bool VisitWeaks>
+void ZHeapIterator::push_roots(const ZHeapIteratorContext& context) {
+  push_strong_roots(context);
+  if (VisitWeaks) {
+    push_weak_roots(context);
+  }
 }
 
 template <bool VisitReferents>
@@ -343,14 +398,9 @@ void ZHeapIterator::drain_and_steal(const ZHeapIteratorContext& context, ObjectC
 }
 
 template <bool VisitWeaks>
-void ZHeapIterator::object_iterate_inner(const ZHeapIteratorContext& context, ObjectClosure* cl) {
-  push_roots<true /* Concurrent */, false /* Weak */>(context, _concurrent_roots);
-  if (VisitWeaks) {
-    push_roots<false /* Concurrent */, true /* Weak */>(context, _weak_roots);
-    push_roots<true /* Concurrent */, true /* Weak */>(context, _concurrent_weak_roots);
-  }
-
-  drain_and_steal<VisitWeaks>(context, cl);
+void ZHeapIterator::object_iterate_inner(const ZHeapIteratorContext& context, ObjectClosure* object_cl) {
+  push_roots<VisitWeaks>(context);
+  drain_and_steal<VisitWeaks>(context, object_cl);
 }
 
 void ZHeapIterator::object_iterate(ObjectClosure* cl, uint worker_id) {
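The new zHeapIterator.cpp code shows the shape of the whole rework: a single ZHeapIteratorRootOopClosure visits oops, and thin CLD, thread, and nmethod wrappers route each root category into it via _concurrent_roots.apply(). Below is a minimal, self-contained model of that fan-out pattern; all types (Oop, Thread, NMethod, RootsIterator, MarkClosure) are simplified stand-ins, not the HotSpot classes.

// Self-contained model of the closure decomposition this commit
// introduces: instead of one closure class that also knows how to walk
// threads and nmethods, one oop-visiting closure is wrapped by
// dedicated per-root-type closures and handed to apply().
#include <vector>

struct Oop { int value; };
struct Thread  { std::vector<Oop*> stack_roots; };
struct NMethod { std::vector<Oop*> embedded_oops; };

struct OopClosure {
  virtual void do_oop(Oop** p) = 0;
  virtual ~OopClosure() = default;
};

// Per-root-type wrappers that all funnel into the same OopClosure.
struct ThreadClosure {
  OopClosure* const _cl;
  explicit ThreadClosure(OopClosure* cl) : _cl(cl) {}
  void do_thread(Thread* t) {
    for (Oop*& p : t->stack_roots) _cl->do_oop(&p);
  }
};

struct NMethodClosure {
  OopClosure* const _cl;
  explicit NMethodClosure(OopClosure* cl) : _cl(cl) {}
  void do_nmethod(NMethod* nm) {
    for (Oop*& p : nm->embedded_oops) _cl->do_oop(&p);
  }
};

// The roots iterator no longer interprets roots itself; it just routes
// each root category to the matching closure, as in _roots.apply(...).
struct RootsIterator {
  std::vector<Thread*>  threads;
  std::vector<NMethod*> nmethods;
  void apply(ThreadClosure* tc, NMethodClosure* nc) {
    for (Thread* t : threads)   tc->do_thread(t);
    for (NMethod* n : nmethods) nc->do_nmethod(n);
  }
};

struct MarkClosure : OopClosure {
  void do_oop(Oop** p) override { if (*p) (*p)->value++; }  // model "mark"
};

int main() {
  Oop a{0}, b{0};
  Thread t{{&a}};
  NMethod n{{&b}};
  RootsIterator roots{{&t}, {&n}};

  MarkClosure cl;
  ThreadClosure tc(&cl);
  NMethodClosure nc(&cl);
  roots.apply(&tc, &nc);  // one oop closure, fanned out per root type
}

The payoff mirrored from the commit: the oop-visiting logic exists exactly once, and each root category only decides how to reach its oop slots.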
27 changes: 15 additions & 12 deletions src/hotspot/share/gc/z/zHeapIterator.hpp
@@ -46,23 +46,26 @@ class ZHeapIterator : public ParallelObjectIterator {
   friend class ZHeapIteratorContext;
 
 private:
-  const bool                         _visit_weaks;
-  ZStatTimerDisable                  _timer_disable;
-  ZHeapIteratorBitMaps               _bitmaps;
-  ZLock                              _bitmaps_lock;
-  ZHeapIteratorQueues                _queues;
-  ZHeapIteratorArrayQueues           _array_queues;
-  ZConcurrentRootsIteratorClaimOther _concurrent_roots;
-  ZWeakRootsIterator                 _weak_roots;
-  ZConcurrentWeakRootsIterator       _concurrent_weak_roots;
-  TaskTerminator                     _terminator;
+  const bool                   _visit_weaks;
+  ZStatTimerDisable            _timer_disable;
+  ZHeapIteratorBitMaps         _bitmaps;
+  ZLock                        _bitmaps_lock;
+  ZHeapIteratorQueues          _queues;
+  ZHeapIteratorArrayQueues     _array_queues;
+  ZConcurrentRootsIterator     _concurrent_roots;
+  ZWeakRootsIterator           _weak_roots;
+  ZConcurrentWeakRootsIterator _concurrent_weak_roots;
+  TaskTerminator               _terminator;
 
   ZHeapIteratorBitMap* object_bitmap(oop obj);
 
   bool mark_object(oop obj);
 
-  template <bool Concurrent, bool Weak, typename RootsIterator>
-  void push_roots(const ZHeapIteratorContext& context, RootsIterator& iter);
+  void push_strong_roots(const ZHeapIteratorContext& context);
+  void push_weak_roots(const ZHeapIteratorContext& context);
+
+  template <bool VisitWeaks>
+  void push_roots(const ZHeapIteratorContext& context);
 
   template <bool VisitReferents>
   void follow_object(const ZHeapIteratorContext& context, oop obj);
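Note how push_roots() is now parameterized only on VisitWeaks, with the strong/weak split moved into plain member functions. A small sketch of this compile-time dispatch pattern, with placeholder bodies standing in for the real root pushing:

// Minimal sketch of template-bool dispatch: the weak-root branch is
// resolved per instantiation, so each variant carries no runtime flag.
#include <cstdio>

static void push_strong_roots() { std::puts("strong roots"); }
static void push_weak_roots()   { std::puts("weak roots"); }

template <bool VisitWeaks>
void push_roots() {
  push_strong_roots();
  if (VisitWeaks) {  // constant-folded in each instantiation
    push_weak_roots();
  }
}

int main() {
  push_roots<false>();  // object iteration without weak roots
  push_roots<true>();   // object iteration including weak roots
}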
81 changes: 58 additions & 23 deletions src/hotspot/share/gc/z/zMark.cpp
@@ -23,12 +23,15 @@
 
 #include "precompiled.hpp"
 #include "classfile/classLoaderDataGraph.hpp"
+#include "code/nmethod.hpp"
 #include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/z/zBarrier.inline.hpp"
+#include "gc/z/zLock.inline.hpp"
 #include "gc/z/zMark.inline.hpp"
 #include "gc/z/zMarkCache.inline.hpp"
 #include "gc/z/zMarkStack.inline.hpp"
 #include "gc/z/zMarkTerminate.inline.hpp"
+#include "gc/z/zNMethod.hpp"
 #include "gc/z/zOopClosures.inline.hpp"
 #include "gc/z/zPage.hpp"
 #include "gc/z/zPageTable.inline.hpp"
@@ -572,50 +575,79 @@ void ZMark::work(uint64_t timeout_in_micros) {
   stacks->free(&_allocator);
 }
 
-class ZMarkConcurrentRootsIteratorClosure : public ZRootsIteratorClosure {
-public:
-  ZMarkConcurrentRootsIteratorClosure() {
-    ZThreadLocalAllocBuffer::reset_statistics();
-  }
-
-  ~ZMarkConcurrentRootsIteratorClosure() {
-    ZThreadLocalAllocBuffer::publish_statistics();
-  }
-
-  virtual ZNMethodEntry nmethod_entry() const {
-    // Only apply closure to armed nmethods, and then disarm them.
-    return ZNMethodEntry::Disarm;
-  }
-
-  virtual void do_thread(Thread* thread) {
-    JavaThread* const jt = thread->as_Java_thread();
-    StackWatermarkSet::finish_processing(jt, this, StackWatermarkKind::gc);
-    ZThreadLocalAllocBuffer::update_stats(jt);
-  }
-
-  virtual void do_oop(oop* p) {
-    ZBarrier::mark_barrier_on_oop_field(p, false /* finalizable */);
-  }
-
-  virtual void do_oop(narrowOop* p) {
-    ShouldNotReachHere();
-  }
-};
+class ZMarkOopClosure : public OopClosure {
+  virtual void do_oop(oop* p) {
+    ZBarrier::mark_barrier_on_oop_field(p, false /* finalizable */);
+  }
+
+  virtual void do_oop(narrowOop* p) {
+    ShouldNotReachHere();
+  }
+};
+
+class ZMarkThreadClosure : public ThreadClosure {
+private:
+  OopClosure* const _cl;
+
+public:
+  ZMarkThreadClosure(OopClosure* cl) :
+      _cl(cl) {
+    ZThreadLocalAllocBuffer::reset_statistics();
+  }
+
+  ~ZMarkThreadClosure() {
+    ZThreadLocalAllocBuffer::publish_statistics();
+  }
+
+  virtual void do_thread(Thread* thread) {
+    JavaThread* const jt = thread->as_Java_thread();
+    StackWatermarkSet::finish_processing(jt, _cl, StackWatermarkKind::gc);
+    ZThreadLocalAllocBuffer::update_stats(jt);
+  }
+};
+
+class ZMarkNMethodClosure : public NMethodClosure {
+private:
+  OopClosure* const _cl;
+
+public:
+  ZMarkNMethodClosure(OopClosure* cl) :
+      _cl(cl) {}
+
+  virtual void do_nmethod(nmethod* nm) {
+    ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
+    if (!nm->is_alive()) {
+      return;
+    }
+
+    if (ZNMethod::is_armed(nm)) {
+      ZNMethod::nmethod_oops_do_inner(nm, _cl);
+      ZNMethod::disarm(nm);
+    }
+  }
+};
+
+typedef ClaimingCLDToOopClosure<ClassLoaderData::_claim_strong> ZMarkCLDClosure;
 
 class ZMarkConcurrentRootsTask : public ZTask {
 private:
-  ZMark* const                        _mark;
-  SuspendibleThreadSetJoiner          _sts_joiner;
-  ZConcurrentRootsIteratorClaimStrong _roots;
-  ZMarkConcurrentRootsIteratorClosure _cl;
+  ZMark* const               _mark;
+  SuspendibleThreadSetJoiner _sts_joiner;
+  ZConcurrentRootsIterator   _roots;
+
+  ZMarkOopClosure            _cl;
+  ZMarkCLDClosure            _cld_cl;
+  ZMarkThreadClosure         _thread_cl;
+  ZMarkNMethodClosure        _nm_cl;
 
 public:
   ZMarkConcurrentRootsTask(ZMark* mark) :
       ZTask("ZMarkConcurrentRootsTask"),
       _mark(mark),
       _sts_joiner(),
-      _roots(),
-      _cl() {
+      _roots(ClassLoaderData::_claim_strong),
+      _cl(),
+      _cld_cl(&_cl),
+      _thread_cl(&_cl),
+      _nm_cl(&_cl) {
     ClassLoaderDataGraph_lock->lock();
  }

@@ -624,7 +656,10 @@ class ZMarkConcurrentRootsTask : public ZTask {
   }
 
   virtual void work() {
-    _roots.oops_do(&_cl);
+    _roots.apply(&_cl,
+                 &_cld_cl,
+                 &_thread_cl,
+                 &_nm_cl);
 
     // Flush and free worker stacks. Needed here since
     // the set of workers executing during root scanning
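ZMarkNMethodClosure above encodes a visit-once-per-cycle protocol: take the per-nmethod lock, skip dead nmethods, and process only armed ones, disarming afterwards so concurrent marking and the entry barriers never apply the mark closure to the same nmethod twice in a cycle. A self-contained model of that protocol follows; std::mutex stands in for the ZReentrantLock, and all types are simplified stand-ins rather than HotSpot code.

// Model of the visit-once idiom in ZMarkNMethodClosure::do_nmethod.
#include <mutex>
#include <vector>

struct Oop { int marked = 0; };

struct NMethod {
  std::mutex        lock;   // stand-in for ZNMethod::lock_for_nmethod()
  bool              alive = true;
  bool              armed = true;
  std::vector<Oop*> oops;
};

static void visit(NMethod* nm) {
  std::lock_guard<std::mutex> locker(nm->lock);
  if (!nm->alive) {
    return;               // unloaded concurrently; nothing to do
  }
  if (nm->armed) {
    for (Oop* p : nm->oops) {
      p->marked = 1;      // model of applying the mark closure
    }
    nm->armed = false;    // disarm: later visitors take the fast path
  }
}

int main() {
  Oop o;
  NMethod nm;
  nm.oops.push_back(&o);
  visit(&nm);  // marks the embedded oops and disarms
  visit(&nm);  // no-op: already disarmed this cycle
}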
