diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp
index f3ce9418d35de..58522871338b7 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp
@@ -67,8 +67,7 @@ inline oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj, T* load
 
   oop fwd = resolve_forwarded_not_null_mutator(obj);
   if (obj == fwd) {
-    assert(_heap->is_evacuation_in_progress(),
-           "evac should be in progress");
+    assert(_heap->is_evacuation_in_progress(), "evac should be in progress");
     Thread* const t = Thread::current();
     ShenandoahEvacOOMScope scope(t);
     fwd = _heap->evacuate_object(obj, t);
@@ -86,8 +85,8 @@ inline oop ShenandoahBarrierSet::load_reference_barrier(oop obj) {
   if (!ShenandoahLoadRefBarrier) {
     return obj;
   }
-  if (_heap->has_forwarded_objects() &&
-      _heap->in_collection_set(obj)) { // Subsumes null-check
+  if (_heap->has_forwarded_objects() && _heap->in_collection_set(obj)) {
+    // Subsumes null-check
     assert(obj != nullptr, "cset check must have subsumed null-check");
     oop fwd = resolve_forwarded_not_null(obj);
     if (obj == fwd && _heap->is_evacuation_in_progress()) {
@@ -381,7 +380,7 @@ void ShenandoahBarrierSet::arraycopy_work(T* src, size_t count) {
   // this barrier will be called with ENQUEUE=true and HAS_FWD=false, even though the young generation
   // may have forwarded objects. In this case, the `arraycopy_work` is first called with HAS_FWD=true and
   // ENQUEUE=false.
-  assert(HAS_FWD == _heap->has_forwarded_objects() || (_heap->gc_state() & ShenandoahHeap::OLD_MARKING) != 0,
+  assert(HAS_FWD == _heap->has_forwarded_objects() || _heap->is_concurrent_old_mark_in_progress(),
          "Forwarded object status is sane");
   // This function cannot be called to handle marking and evacuation at the same time (they operate on
   // different sides of the copy).
@@ -418,7 +417,7 @@ void ShenandoahBarrierSet::arraycopy_barrier(T* src, T* dst, size_t count) {
     return;
   }
 
-  int gc_state = _heap->gc_state();
+  char gc_state = ShenandoahThreadLocalData::gc_state(Thread::current());
   if ((gc_state & ShenandoahHeap::EVACUATION) != 0) {
     arraycopy_evacuation(src, count);
   } else if ((gc_state & ShenandoahHeap::UPDATEREFS) != 0) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetClone.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetClone.inline.hpp
index 3b7bf9864deb0..8b83cc6b32cf9 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetClone.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetClone.inline.hpp
@@ -95,8 +95,7 @@ void ShenandoahBarrierSet::clone_barrier(oop obj) {
   assert(ShenandoahCloneBarrier, "only get here with clone barriers enabled");
   shenandoah_assert_correct(nullptr, obj);
 
-  int gc_state = _heap->gc_state();
-  if ((gc_state & ShenandoahHeap::EVACUATION) != 0) {
+  if (_heap->is_evacuation_in_progress()) {
     clone_evacuation(obj);
   } else {
     clone_update(obj);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
index b1c2e72ef82b6..bd703cdb96f00 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
@@ -192,8 +192,14 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
     return false;
   }
 
+  // Evacuation is complete, retire gc labs
+  heap->concurrent_prepare_for_update_refs();
+
   // Perform update-refs phase.
-  vmop_entry_init_updaterefs();
+  if (ShenandoahVerify || ShenandoahPacing) {
+    vmop_entry_init_updaterefs();
+  }
+
   entry_updaterefs();
   if (check_cancellation_and_abort(ShenandoahDegenPoint::_degenerated_updaterefs)) {
     return false;
@@ -748,10 +754,6 @@ void ShenandoahConcurrentGC::op_final_mark() {
           heap->verifier()->verify_after_concmark();
         }
       }
-
-      if (VerifyAfterGC) {
-        Universe::verify();
-      }
     }
   }
 }
@@ -920,8 +922,8 @@ class ShenandoahConcurrentWeakRootsEvacUpdateTask : public WorkerTask {
    }
 
    // If we are going to perform concurrent class unloading later on, we need to
-   // cleanup the weak oops in CLD and determinate nmethod's unloading state, so that we
-   // can cleanup immediate garbage sooner.
+   // clean up the weak oops in CLD and determine nmethod's unloading state, so that we
+   // can clean up immediate garbage sooner.
    if (ShenandoahHeap::heap()->unload_classes()) {
      // Applies ShenandoahIsCLDAlive closure to CLDs, native barrier will either null the
      // CLD's holder or evacuate it.
@@ -947,21 +949,10 @@ void ShenandoahConcurrentGC::op_weak_roots() {
   ShenandoahHeap* const heap = ShenandoahHeap::heap();
   assert(heap->is_concurrent_weak_root_in_progress(), "Only during this phase");
   // Concurrent weak root processing
-  {
-    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
-    ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
-    ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
-    heap->workers()->run_task(&task);
-  }
-
-  // Perform handshake to flush out dead oops
-  {
-    ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_rendezvous);
-    heap->rendezvous_threads("Shenandoah Concurrent Weak Roots");
-  }
-  // We can only toggle concurrent_weak_root_in_progress flag
-  // at a safepoint, so that mutators see a consistent
-  // value. The flag will be cleared at the next safepoint.
+  ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
+  ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
+  ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
+  heap->workers()->run_task(&task);
 }
 
 void ShenandoahConcurrentGC::op_class_unloading() {
@@ -1058,10 +1049,6 @@ void ShenandoahConcurrentGC::op_evacuate() {
 
 void ShenandoahConcurrentGC::op_init_updaterefs() {
   ShenandoahHeap* const heap = ShenandoahHeap::heap();
-  heap->set_evacuation_in_progress(false);
-  heap->set_concurrent_weak_root_in_progress(false);
-  heap->prepare_update_heap_references(true /*concurrent*/);
-  heap->set_update_refs_in_progress(true);
   if (ShenandoahVerify) {
     heap->verifier()->verify_before_updaterefs();
   }
@@ -1178,6 +1165,10 @@ void ShenandoahConcurrentGC::op_final_roots() {
       ShenandoahGenerationalHeap::heap()->update_region_ages(_generation->complete_marking_context());
     }
   }
+
+  if (VerifyAfterGC) {
+    Universe::verify();
+  }
 }
 
 void ShenandoahConcurrentGC::op_cleanup_complete() {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
index 89a04d23cec92..ec0bb5afbc6fb 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
@@ -998,6 +998,8 @@ HeapWord* ShenandoahFreeSet::allocate_aligned_plab(size_t size, ShenandoahAllocR
 HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
   assert (has_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());
   if (_heap->is_concurrent_weak_root_in_progress() && r->is_trash()) {
+    // We cannot use this region for allocation when weak roots are in progress because the collector may need
+    // to reference unmarked oops during concurrent class unloading.
     return nullptr;
   }
   HeapWord* result = nullptr;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
index d75a9234a63d2..018540b33b70b 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -102,6 +102,7 @@
 #include "runtime/orderAccess.hpp"
 #include "runtime/safepointMechanism.hpp"
 #include "runtime/stackWatermarkSet.hpp"
+#include "runtime/threads.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/events.hpp"
@@ -644,7 +645,6 @@ class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
 public:
   void do_thread(Thread* thread) {
     assert(thread != nullptr, "Sanity");
-    assert(thread->is_Worker_thread(), "Only worker thread expected");
     ShenandoahThreadLocalData::initialize_gclab(thread);
   }
 };
@@ -663,6 +663,9 @@ void ShenandoahHeap::post_initialize() {
   // gclab can not be initialized early during VM startup, as it can not determinate its max_size.
   // Now, we will let WorkerThreads to initialize gclab when new worker is created.
   _workers->set_initialize_gclab();
+
+  // Note that the safepoint workers may require gclabs if the threads are used to create a heap dump
+  // during a concurrent evacuation phase.
   if (_safepoint_workers != nullptr) {
     _safepoint_workers->threads_do(&init_gclabs);
     _safepoint_workers->set_initialize_gclab();
@@ -1190,11 +1193,86 @@ class ShenandoahEvacuationTask : public WorkerTask {
   }
 };
 
+class ShenandoahRetireGCLABClosure : public ThreadClosure {
+private:
+  bool const _resize;
+public:
+  explicit ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
+  void do_thread(Thread* thread) override {
+    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
+    assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
+    gclab->retire();
+    if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
+      ShenandoahThreadLocalData::set_gclab_size(thread, 0);
+    }
+
+    if (ShenandoahHeap::heap()->mode()->is_generational()) {
+      PLAB* plab = ShenandoahThreadLocalData::plab(thread);
+      assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
+
+      // There are two reasons to retire all plabs between old-gen evacuation passes.
+      // 1. We need to make the plab memory parsable by remembered-set scanning.
+      // 2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region
+      ShenandoahGenerationalHeap::heap()->retire_plab(plab, thread);
+      if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
+        ShenandoahThreadLocalData::set_plab_size(thread, 0);
+      }
+    }
+  }
+};
+
+class ShenandoahGCStatePropagator : public ThreadClosure {
+public:
+  explicit ShenandoahGCStatePropagator(char gc_state) : _gc_state(gc_state) {}
+
+  void do_thread(Thread* thread) override {
+    ShenandoahThreadLocalData::set_gc_state(thread, _gc_state);
+  }
+private:
+  char _gc_state;
+};
+
+class ShenandoahPrepareForUpdateRefs : public HandshakeClosure {
+public:
+  explicit ShenandoahPrepareForUpdateRefs(char gc_state) :
+    HandshakeClosure("Shenandoah Prepare for Update Refs"),
+    _retire(ResizeTLAB), _propagator(gc_state) {}
+
+  void do_thread(Thread* thread) override {
+    _propagator.do_thread(thread);
+    if (ShenandoahThreadLocalData::gclab(thread) != nullptr) {
+      _retire.do_thread(thread);
+    }
+  }
+private:
+  ShenandoahRetireGCLABClosure _retire;
+  ShenandoahGCStatePropagator _propagator;
+};
+
 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
   ShenandoahEvacuationTask task(this, _collection_set, concurrent);
   workers()->run_task(&task);
 }
 
+void ShenandoahHeap::concurrent_prepare_for_update_refs() {
+  // It's possible that evacuation succeeded, but we could still be cancelled when we get here.
+  // A cancellation at this point means the degenerated cycle must resume from update-refs.
+  set_gc_state_concurrent(EVACUATION, false);
+  set_gc_state_concurrent(WEAK_ROOTS, false);
+  set_gc_state_concurrent(UPDATEREFS, true);
+
+  // This will propagate the gc state and retire gclabs and plabs for threads that require it.
+  ShenandoahPrepareForUpdateRefs prepare_for_update_refs(_gc_state.raw_value());
+
+  // The handshake won't touch worker threads (or control thread, or VM thread), so do those separately.
+  Threads::non_java_threads_do(&prepare_for_update_refs);
+
+  // Now retire gclabs and plabs and propagate gc_state for mutator threads
+  Handshake::execute(&prepare_for_update_refs);
+
+  _update_refs_iterator.reset();
+}
+
 oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
   assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
   if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
@@ -1349,34 +1427,6 @@ class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
   }
 };
 
-class ShenandoahRetireGCLABClosure : public ThreadClosure {
-private:
-  bool const _resize;
-public:
-  ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
-  void do_thread(Thread* thread) {
-    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
-    assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
-    gclab->retire();
-    if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
-      ShenandoahThreadLocalData::set_gclab_size(thread, 0);
-    }
-
-    if (ShenandoahHeap::heap()->mode()->is_generational()) {
-      PLAB* plab = ShenandoahThreadLocalData::plab(thread);
-      assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
-
-      // There are two reasons to retire all plabs between old-gen evacuation passes.
-      // 1. We need to make the plab memory parsable by remembered-set scanning.
-      // 2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region
-      ShenandoahGenerationalHeap::heap()->retire_plab(plab, thread);
-      if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
-        ShenandoahThreadLocalData::set_plab_size(thread, 0);
-      }
-    }
-  }
-};
-
 void ShenandoahHeap::labs_make_parsable() {
   assert(UseTLAB, "Only call with UseTLAB");
 
@@ -1389,6 +1439,10 @@ void ShenandoahHeap::labs_make_parsable() {
   }
 
   workers()->threads_do(&cl);
+
+  if (safepoint_workers() != nullptr) {
+    safepoint_workers()->threads_do(&cl);
+  }
 }
 
 void ShenandoahHeap::tlabs_retire(bool resize) {
@@ -1424,6 +1478,7 @@ void ShenandoahHeap::gclabs_retire(bool resize) {
   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
     cl.do_thread(t);
   }
+
   workers()->threads_do(&cl);
 
   if (safepoint_workers() != nullptr) {
@@ -1933,23 +1988,25 @@ void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
   _update_refs_iterator.reset();
 }
 
-void ShenandoahHeap::propagate_gc_state_to_java_threads() {
+void ShenandoahHeap::propagate_gc_state_to_all_threads() {
   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
   if (_gc_state_changed) {
+    ShenandoahGCStatePropagator propagator(_gc_state.raw_value());
+    Threads::threads_do(&propagator);
     _gc_state_changed = false;
-    char state = gc_state();
-    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
-      ShenandoahThreadLocalData::set_gc_state(t, state);
-    }
   }
 }
 
-void ShenandoahHeap::set_gc_state(uint mask, bool value) {
+void ShenandoahHeap::set_gc_state_at_safepoint(uint mask, bool value) {
   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
   _gc_state.set_cond(mask, value);
   _gc_state_changed = true;
 }
 
+void ShenandoahHeap::set_gc_state_concurrent(uint mask, bool value) {
+  _gc_state.set_cond(mask, value);
+}
+
 void ShenandoahHeap::set_concurrent_young_mark_in_progress(bool in_progress) {
   uint mask;
   assert(!has_forwarded_objects(), "Young marking is not concurrent with evacuation");
@@ -1961,7 +2018,7 @@ void ShenandoahHeap::set_concurrent_young_mark_in_progress(bool in_progress) {
   } else {
     mask = MARKING | YOUNG_MARKING;
   }
-  set_gc_state(mask, in_progress);
+  set_gc_state_at_safepoint(mask, in_progress);
   manage_satb_barrier(in_progress);
 }
 
@@ -1977,9 +2034,9 @@ void ShenandoahHeap::set_concurrent_old_mark_in_progress(bool in_progress) {
   if (!in_progress && is_concurrent_young_mark_in_progress()) {
     // If young-marking is in progress when we turn off OLD_MARKING, leave MARKING (and YOUNG_MARKING) on
     assert(_gc_state.is_set(MARKING), "concurrent_young_marking_in_progress implies MARKING");
-    set_gc_state(OLD_MARKING, in_progress);
+    set_gc_state_at_safepoint(OLD_MARKING, in_progress);
   } else {
-    set_gc_state(MARKING | OLD_MARKING, in_progress);
+    set_gc_state_at_safepoint(MARKING | OLD_MARKING, in_progress);
   }
   manage_satb_barrier(in_progress);
 }
@@ -2006,7 +2063,7 @@ void ShenandoahHeap::manage_satb_barrier(bool active) {
 
 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
-  set_gc_state(EVACUATION, in_progress);
+  set_gc_state_at_safepoint(EVACUATION, in_progress);
 }
 
 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
@@ -2018,7 +2075,7 @@ void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
 }
 
 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
-  set_gc_state(WEAK_ROOTS, cond);
+  set_gc_state_at_safepoint(WEAK_ROOTS, cond);
 }
 
 GCTracer* ShenandoahHeap::tracer() {
@@ -2165,7 +2222,7 @@ void ShenandoahHeap::parallel_cleaning(bool full_gc) {
 }
 
 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
-  set_gc_state(HAS_FORWARDED, cond);
+  set_gc_state_at_safepoint(HAS_FORWARDED, cond);
 }
 
 void ShenandoahHeap::set_unload_classes(bool uc) {
@@ -2205,7 +2262,7 @@ void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
 }
 
 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
-  set_gc_state(UPDATEREFS, in_progress);
+  set_gc_state_at_safepoint(UPDATEREFS, in_progress);
 }
 
 void ShenandoahHeap::register_nmethod(nmethod* nm) {
@@ -2610,6 +2667,14 @@ char ShenandoahHeap::gc_state() const {
   return _gc_state.raw_value();
 }
 
+bool ShenandoahHeap::is_gc_state(GCState state) const {
+  // If the global gc state has been changed, but hasn't yet been propagated to all threads, then
+  // the global gc state is the correct value. Once the gc state has been synchronized with all threads,
+  // _gc_state_changed will be toggled to false and we need to use the thread local state.
+  return _gc_state_changed ? _gc_state.is_set(state) : ShenandoahThreadLocalData::is_gc_state(state);
+}
+
+
 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
 #ifdef ASSERT
   assert(_liveness_cache != nullptr, "sanity");
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
index 5beced0544cf6..508f993b3003f 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
@@ -363,18 +363,35 @@ class ShenandoahHeap : public CollectedHeap {
 
   size_t _gc_no_progress_count;
 
-  // This updates the singlular, global gc state. This must happen on a safepoint.
-  void set_gc_state(uint mask, bool value);
+  // This updates the singular, global gc state. This call must happen on a safepoint.
+  void set_gc_state_at_safepoint(uint mask, bool value);
+
+  // This also updates the global gc state, but does not need to be called on a safepoint.
+  // Critically, this method will _not_ flag that the global gc state has changed and threads
+  // will continue to use their thread local copy. This is expected to be used in conjunction
+  // with a handshake operation to propagate the new gc state.
+  void set_gc_state_concurrent(uint mask, bool value);
 
 public:
+  // This returns the raw value of the singular, global gc state.
   char gc_state() const;
 
-  // This copies the global gc state into a thread local variable for java threads.
-  // It is primarily intended to support quick access at barriers.
-  void propagate_gc_state_to_java_threads();
+  // Compares the given state against either the global gc state, or the thread local state.
+  // The global gc state may change on a safepoint and is the correct value to use until
+  // the global gc state has been propagated to all threads (after which, this method will
+  // compare against the thread local state). The thread local gc state may also be changed
+  // by a handshake operation, in which case, this function continues using the updated thread
+  // local value.
+  bool is_gc_state(GCState state) const;
+
+  // This copies the global gc state into a thread local variable for all threads.
+  // The thread local gc state is primarily intended to support quick access at barriers.
+  // All threads are updated because in some cases the control thread or the vm thread may
+  // need to execute the load reference barrier.
+  void propagate_gc_state_to_all_threads();
 
   // This is public to support assertions that the state hasn't been changed off of
-  // a safepoint and that any changes were propagated to java threads after the safepoint.
+  // a safepoint and that any changes were propagated to threads after the safepoint.
   bool has_gc_state_changed() const { return _gc_state_changed; }
 
   // Returns true if allocations have occurred in new regions or if regions have been
@@ -394,9 +411,7 @@ class ShenandoahHeap : public CollectedHeap {
   void set_concurrent_strong_root_in_progress(bool cond);
   void set_concurrent_weak_root_in_progress(bool cond);
 
-  inline bool is_stable() const;
   inline bool is_idle() const;
-  inline bool is_concurrent_mark_in_progress() const;
   inline bool is_concurrent_young_mark_in_progress() const;
   inline bool is_concurrent_old_mark_in_progress() const;
 
@@ -464,6 +479,10 @@ class ShenandoahHeap : public CollectedHeap {
   void do_class_unloading();
   // Reference updating
   void prepare_update_heap_references(bool concurrent);
+
+  // Retires LABs used for evacuation
+  void concurrent_prepare_for_update_refs();
+
   virtual void update_heap_references(bool concurrent);
   // Final update region states
   void update_heap_region_states(bool concurrent);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
index 6a38266489e03..f879188431b5e 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
@@ -64,10 +64,6 @@ inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
   return _heap->get_region(new_index - 1);
 }
 
-inline bool ShenandoahHeap::has_forwarded_objects() const {
-  return _gc_state.is_set(HAS_FORWARDED);
-}
-
 inline WorkerThreads* ShenandoahHeap::workers() const {
   return _workers;
 }
@@ -450,28 +446,36 @@ inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
   return collection_set()->is_in_loc(p);
 }
 
-inline bool ShenandoahHeap::is_stable() const {
-  return _gc_state.is_clear();
+inline bool ShenandoahHeap::is_idle() const {
+  return _gc_state_changed ? _gc_state.is_clear() : ShenandoahThreadLocalData::gc_state(Thread::current()) == 0;
 }
 
-inline bool ShenandoahHeap::is_idle() const {
-  return _gc_state.is_unset(MARKING | EVACUATION | UPDATEREFS);
+inline bool ShenandoahHeap::has_forwarded_objects() const {
+  return is_gc_state(HAS_FORWARDED);
 }
 
 inline bool ShenandoahHeap::is_concurrent_mark_in_progress() const {
-  return _gc_state.is_set(MARKING);
+  return is_gc_state(MARKING);
 }
 
 inline bool ShenandoahHeap::is_concurrent_young_mark_in_progress() const {
-  return _gc_state.is_set(YOUNG_MARKING);
+  return is_gc_state(YOUNG_MARKING);
 }
 
 inline bool ShenandoahHeap::is_concurrent_old_mark_in_progress() const {
-  return _gc_state.is_set(OLD_MARKING);
+  return is_gc_state(OLD_MARKING);
 }
 
 inline bool ShenandoahHeap::is_evacuation_in_progress() const {
-  return _gc_state.is_set(EVACUATION);
+  return is_gc_state(EVACUATION);
+}
+
+inline bool ShenandoahHeap::is_update_refs_in_progress() const {
+  return is_gc_state(UPDATEREFS);
+}
+
+inline bool ShenandoahHeap::is_concurrent_weak_root_in_progress() const {
+  return is_gc_state(WEAK_ROOTS);
 }
 
 inline bool ShenandoahHeap::is_degenerated_gc_in_progress() const {
@@ -486,10 +490,6 @@ inline bool ShenandoahHeap::is_full_gc_move_in_progress() const {
   return _full_gc_move_in_progress.is_set();
 }
 
-inline bool ShenandoahHeap::is_update_refs_in_progress() const {
-  return _gc_state.is_set(UPDATEREFS);
-}
-
 inline bool ShenandoahHeap::is_stw_gc_in_progress() const {
   return is_full_gc_in_progress() || is_degenerated_gc_in_progress();
 }
@@ -498,10 +498,6 @@ inline bool ShenandoahHeap::is_concurrent_strong_root_in_progress() const {
   return _concurrent_strong_root_in_progress.is_set();
 }
 
-inline bool ShenandoahHeap::is_concurrent_weak_root_in_progress() const {
-  return _gc_state.is_set(WEAK_ROOTS);
-}
-
 template <class T>
 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
   marked_object_iterate(region, cl, region->top());
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp
index 73250cecd6fe5..f04aa7ce8ed1f 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp
@@ -147,10 +147,10 @@ static int encode_phase(ShenandoahHeap* heap) {
   if (heap->is_update_refs_in_progress() || heap->is_full_gc_move_in_progress()) {
     return 3;
   }
-  if (heap->is_concurrent_mark_in_progress() || heap->is_full_gc_in_progress()) {
+  if (heap->is_concurrent_mark_in_progress() || heap->is_concurrent_weak_root_in_progress() || heap->is_full_gc_in_progress()) {
     return 1;
   }
-  assert(heap->is_idle(), "What is it doing?");
+  assert(heap->is_idle(), "Unexpected gc_state: %d", heap->gc_state());
   return 0;
 }
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.hpp b/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.hpp
index c226b1ad2545e..9e1777dd82c4f 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.hpp
@@ -111,12 +111,18 @@ class ShenandoahThreadLocalData {
   }
 
   static char gc_state(Thread* thread) {
-    assert(thread->is_Java_thread(), "GC state is only synchronized to java threads");
     return data(thread)->_gc_state;
   }
 
+  static bool is_gc_state(Thread* thread, ShenandoahHeap::GCState state) {
+    return (gc_state(thread) & state) != 0;
+  }
+
+  static bool is_gc_state(ShenandoahHeap::GCState state) {
+    return is_gc_state(Thread::current(), state);
+  }
+
   static void initialize_gclab(Thread* thread) {
-    assert (thread->is_Java_thread() || thread->is_Worker_thread(), "Only Java and GC worker threads are allowed to get GCLABs");
     assert(data(thread)->_gclab == nullptr, "Only initialize once");
     data(thread)->_gclab = new PLAB(PLAB::min_size());
     data(thread)->_gclab_size = 0;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp
index da32601eed7f0..67813be36eeb3 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp
@@ -82,47 +82,47 @@ void VM_ShenandoahInitMark::doit() {
   ShenandoahGCPauseMark mark(_gc_id, "Init Mark", SvcGCMarker::CONCURRENT);
   set_active_generation();
   _gc->entry_init_mark();
-  ShenandoahHeap::heap()->propagate_gc_state_to_java_threads();
+  ShenandoahHeap::heap()->propagate_gc_state_to_all_threads();
 }
 
 void VM_ShenandoahFinalMarkStartEvac::doit() {
   ShenandoahGCPauseMark mark(_gc_id, "Final Mark", SvcGCMarker::CONCURRENT);
   set_active_generation();
   _gc->entry_final_mark();
-  ShenandoahHeap::heap()->propagate_gc_state_to_java_threads();
+  ShenandoahHeap::heap()->propagate_gc_state_to_all_threads();
 }
 
 void VM_ShenandoahFullGC::doit() {
   ShenandoahGCPauseMark mark(_gc_id, "Full GC", SvcGCMarker::FULL);
   set_active_generation();
   _full_gc->entry_full(_gc_cause);
-  ShenandoahHeap::heap()->propagate_gc_state_to_java_threads();
+  ShenandoahHeap::heap()->propagate_gc_state_to_all_threads();
 }
 
 void VM_ShenandoahDegeneratedGC::doit() {
   ShenandoahGCPauseMark mark(_gc_id, "Degenerated GC", SvcGCMarker::CONCURRENT);
   set_active_generation();
   _gc->entry_degenerated();
-  ShenandoahHeap::heap()->propagate_gc_state_to_java_threads();
+  ShenandoahHeap::heap()->propagate_gc_state_to_all_threads();
 }
 
 void VM_ShenandoahInitUpdateRefs::doit() {
   ShenandoahGCPauseMark mark(_gc_id, "Init Update Refs", SvcGCMarker::CONCURRENT);
   set_active_generation();
   _gc->entry_init_updaterefs();
-  ShenandoahHeap::heap()->propagate_gc_state_to_java_threads();
+  ShenandoahHeap::heap()->propagate_gc_state_to_all_threads();
 }
 
 void VM_ShenandoahFinalUpdateRefs::doit() {
   ShenandoahGCPauseMark mark(_gc_id, "Final Update Refs", SvcGCMarker::CONCURRENT);
   set_active_generation();
   _gc->entry_final_updaterefs();
-  ShenandoahHeap::heap()->propagate_gc_state_to_java_threads();
+  ShenandoahHeap::heap()->propagate_gc_state_to_all_threads();
 }
 
 void VM_ShenandoahFinalRoots::doit() {
   ShenandoahGCPauseMark mark(_gc_id, "Final Roots", SvcGCMarker::CONCURRENT);
   set_active_generation();
   _gc->entry_final_roots();
-  ShenandoahHeap::heap()->propagate_gc_state_to_java_threads();
+  ShenandoahHeap::heap()->propagate_gc_state_to_all_threads();
 }
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
index bde8638140b52..a85bbba7a1a6c 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
@@ -796,7 +796,7 @@ void ShenandoahVerifier::verify_at_safepoint(const char* label,
   guarantee(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "only when nothing else happens");
   guarantee(ShenandoahVerify, "only when enabled, and bitmap is initialized in ShenandoahHeap::initialize");
 
-  ShenandoahHeap::heap()->propagate_gc_state_to_java_threads();
+  ShenandoahHeap::heap()->propagate_gc_state_to_all_threads();
 
   // Avoid side-effect of changing workers' active thread count, but bypass concurrent/parallel protocol check
   ShenandoahPushWorkerScope verify_worker_scope(_heap->workers(), _heap->max_workers(), false /*bypass check*/);
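
The sketch below is not part of the patch; it is a minimal, standalone C++ illustration of the state-distribution pattern the change relies on, and every name in it (g_gc_state, t_gc_state, set_state_at_safepoint, and so on) is hypothetical. The idea it mirrors: a singular global gc-state word, a per-thread cached copy used on the barrier fast path, and a resolution rule that trusts the global value while a safepoint change is still pending and the thread-local copy otherwise.

// Minimal sketch (not HotSpot code; all names hypothetical) of the gc-state caching pattern.
#include <atomic>
#include <cstdio>

enum GCState : char { HAS_FORWARDED = 1, EVACUATION = 2, UPDATEREFS = 4, WEAK_ROOTS = 8 };

static std::atomic<char> g_gc_state{0};           // singular, global state word
static std::atomic<bool> g_state_changed{false};  // set when changed at a "safepoint", pending propagation
static thread_local char t_gc_state = 0;          // per-thread cached copy (fast barrier path)

// Analogous to set_gc_state_at_safepoint(): flip bits and flag that propagation is pending.
static void set_state_at_safepoint(char mask, bool value) {
  char s = g_gc_state.load();
  g_gc_state.store(value ? char(s | mask) : char(s & ~mask));
  g_state_changed.store(true);
}

// Analogous to the propagation/handshake step: refresh the calling thread's cached copy.
static void propagate_to_current_thread() {
  t_gc_state = g_gc_state.load();
}

// Analogous to is_gc_state(): trust the global value while a change is pending, else the cached copy.
static bool is_state(GCState state) {
  char s = g_state_changed.load() ? g_gc_state.load() : t_gc_state;
  return (s & state) != 0;
}

int main() {
  set_state_at_safepoint(EVACUATION, true);
  std::printf("evacuation (propagation pending): %d\n", is_state(EVACUATION)); // 1, read from global
  propagate_to_current_thread();
  g_state_changed.store(false);
  std::printf("evacuation (after propagation):   %d\n", is_state(EVACUATION)); // 1, read from cache
  return 0;
}

The same resolution rule is what lets concurrent_prepare_for_update_refs() avoid a safepoint in the patch: flipping the global bits without flagging a pending change leaves every thread on its cached copy until the handshake rewrites that copy.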