diff --git a/src/hotspot/share/gc/shared/gcVMOperations.cpp b/src/hotspot/share/gc/shared/gcVMOperations.cpp
index d4a6360557835..4cf1a4ccbafd0 100644
--- a/src/hotspot/share/gc/shared/gcVMOperations.cpp
+++ b/src/hotspot/share/gc/shared/gcVMOperations.cpp
@@ -130,9 +130,9 @@ bool VM_GC_Operation::doit_prologue() {
 }
 
 void VM_GC_Operation::doit_epilogue() {
-  // Clean up old interpreter OopMap entries that were replaced
-  // during the GC thread root traversal.
-  OopMapCache::cleanup_old_entries();
+  // GC thread root traversal likely used OopMapCache a lot, which
+  // might have created lots of old entries. Trigger the cleanup now.
+  OopMapCache::trigger_cleanup();
   if (Universe::has_reference_pending_list()) {
     Heap_lock->notify_all();
   }
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp
index eeeb1dcad195a..af221550c69ab 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp
@@ -42,6 +42,9 @@ bool VM_ShenandoahOperation::doit_prologue() {
 
 void VM_ShenandoahOperation::doit_epilogue() {
   assert(!ShenandoahHeap::heap()->has_gc_state_changed(), "GC State was not synchronized to java threads.");
+  // GC thread root traversal likely used OopMapCache a lot, which
+  // might have created lots of old entries. Trigger the cleanup now.
+  OopMapCache::trigger_cleanup();
 }
 
 bool VM_ShenandoahReferenceOperation::doit_prologue() {
@@ -52,7 +55,6 @@ bool VM_ShenandoahReferenceOperation::doit_prologue() {
 void VM_ShenandoahReferenceOperation::doit_epilogue() {
   VM_ShenandoahOperation::doit_epilogue();
-  OopMapCache::cleanup_old_entries();
   if (Universe::has_reference_pending_list()) {
     Heap_lock->notify_all();
   }
 }
diff --git a/src/hotspot/share/gc/x/xDriver.cpp b/src/hotspot/share/gc/x/xDriver.cpp
index b78e155d3a726..c477f4a135c32 100644
--- a/src/hotspot/share/gc/x/xDriver.cpp
+++ b/src/hotspot/share/gc/x/xDriver.cpp
@@ -35,6 +35,7 @@
 #include "gc/x/xServiceability.hpp"
 #include "gc/x/xStat.hpp"
 #include "gc/x/xVerify.hpp"
+#include "interpreter/oopMapCache.hpp"
 #include "logging/log.hpp"
 #include "memory/universe.hpp"
 #include "runtime/threads.hpp"
@@ -130,6 +131,10 @@ class VM_XOperation : public VM_Operation {
 
   virtual void doit_epilogue() {
     Heap_lock->unlock();
+
+    // GC thread root traversal likely used OopMapCache a lot, which
+    // might have created lots of old entries. Trigger the cleanup now.
+    OopMapCache::trigger_cleanup();
   }
 
   bool gc_locked() const {
diff --git a/src/hotspot/share/gc/z/zGeneration.cpp b/src/hotspot/share/gc/z/zGeneration.cpp
index 049edc802cc98..5c3afa9db8cc6 100644
--- a/src/hotspot/share/gc/z/zGeneration.cpp
+++ b/src/hotspot/share/gc/z/zGeneration.cpp
@@ -52,6 +52,7 @@
 #include "gc/z/zUncoloredRoot.inline.hpp"
 #include "gc/z/zVerify.hpp"
 #include "gc/z/zWorkers.hpp"
+#include "interpreter/oopMapCache.hpp"
 #include "logging/log.hpp"
 #include "memory/universe.hpp"
 #include "prims/jvmtiTagMap.hpp"
@@ -452,6 +453,10 @@ class VM_ZOperation : public VM_Operation {
 
   virtual void doit_epilogue() {
     Heap_lock->unlock();
+
+    // GC thread root traversal likely used OopMapCache a lot, which
+    // might have created lots of old entries. Trigger the cleanup now.
+    OopMapCache::trigger_cleanup();
   }
 
   bool success() const {
diff --git a/src/hotspot/share/interpreter/oopMapCache.cpp b/src/hotspot/share/interpreter/oopMapCache.cpp
index 3c2f1dd3e35b1..cae0efae9b26d 100644
--- a/src/hotspot/share/interpreter/oopMapCache.cpp
+++ b/src/hotspot/share/interpreter/oopMapCache.cpp
@@ -35,6 +35,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/signature.hpp"
+#include "utilities/globalCounter.inline.hpp"
 
 class OopMapCacheEntry: private InterpreterOopMap {
   friend class InterpreterOopMap;
@@ -494,15 +495,11 @@ void OopMapCache::flush_obsolete_entries() {
   }
 }
 
-// Called by GC for thread root scan during a safepoint only. The other interpreted frame oopmaps
-// are generated locally and not cached.
+// Lookup or compute/cache the entry.
 void OopMapCache::lookup(const methodHandle& method,
                         int bci,
                         InterpreterOopMap* entry_for) {
-  assert(SafepointSynchronize::is_at_safepoint(), "called by GC in a safepoint");
   int probe = hash_value_for(method, bci);
-  int i;
-  OopMapCacheEntry* entry = nullptr;
 
   if (log_is_enabled(Debug, interpreter, oopmap)) {
     static int count = 0;
@@ -512,14 +509,18 @@ void OopMapCache::lookup(const methodHandle& method,
                          method()->name_and_sig_as_C_string(), probe);
   }
 
-  // Search hashtable for match
-  for(i = 0; i < _probe_depth; i++) {
-    entry = entry_at(probe + i);
-    if (entry != nullptr && !entry->is_empty() && entry->match(method, bci)) {
-      entry_for->resource_copy(entry);
-      assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
-      log_debug(interpreter, oopmap)("- found at hash %d", probe + i);
-      return;
+  // Search hashtable for match.
+  // Need a critical section to avoid race against concurrent reclamation.
+  {
+    GlobalCounter::CriticalSection cs(Thread::current());
+    for (int i = 0; i < _probe_depth; i++) {
+      OopMapCacheEntry* entry = entry_at(probe + i);
+      if (entry != nullptr && !entry->is_empty() && entry->match(method, bci)) {
+        entry_for->resource_copy(entry);
+        assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
+        log_debug(interpreter, oopmap)("- found at hash %d", probe + i);
+        return;
+      }
     }
   }
@@ -541,8 +542,8 @@ void OopMapCache::lookup(const methodHandle& method,
   }
 
   // First search for an empty slot
-  for(i = 0; i < _probe_depth; i++) {
-    entry = entry_at(probe + i);
+  for (int i = 0; i < _probe_depth; i++) {
+    OopMapCacheEntry* entry = entry_at(probe + i);
     if (entry == nullptr) {
       if (put_at(probe + i, tmp, nullptr)) {
         assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
@@ -557,6 +558,10 @@ void OopMapCache::lookup(const methodHandle& method,
   // where the first entry in the collision array is replaced with the new one.
   OopMapCacheEntry* old = entry_at(probe + 0);
   if (put_at(probe + 0, tmp, old)) {
+    // Cannot deallocate old entry on the spot: it can still be used by readers
+    // that got a reference to it before we were able to replace it in the map.
+    // Instead of synchronizing on GlobalCounter here and incurring heavy thread
+    // walk, we do this cleanup out of band.
     enqueue_for_cleanup(old);
   } else {
     OopMapCacheEntry::deallocate(tmp);
@@ -567,13 +572,14 @@ void OopMapCache::lookup(const methodHandle& method,
 }
 
 void OopMapCache::enqueue_for_cleanup(OopMapCacheEntry* entry) {
-  bool success = false;
-  OopMapCacheEntry* head;
-  do {
-    head = _old_entries;
+  while (true) {
+    OopMapCacheEntry* head = Atomic::load(&_old_entries);
     entry->_next = head;
-    success = Atomic::cmpxchg(&_old_entries, head, entry) == head;
-  } while (!success);
+    if (Atomic::cmpxchg(&_old_entries, head, entry) == head) {
+      // Enqueued successfully.
+      break;
+    }
+  }
 
   if (log_is_enabled(Debug, interpreter, oopmap)) {
     ResourceMark rm;
@@ -582,11 +588,28 @@ void OopMapCache::enqueue_for_cleanup(OopMapCacheEntry* entry) {
   }
 }
 
-// This is called after GC threads are done and nothing is accessing the old_entries
-// list, so no synchronization needed.
-void OopMapCache::cleanup_old_entries() {
-  OopMapCacheEntry* entry = _old_entries;
-  _old_entries = nullptr;
+bool OopMapCache::has_cleanup_work() {
+  return Atomic::load(&_old_entries) != nullptr;
+}
+
+void OopMapCache::trigger_cleanup() {
+  if (has_cleanup_work()) {
+    MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
+    Service_lock->notify_all();
+  }
+}
+
+void OopMapCache::cleanup() {
+  OopMapCacheEntry* entry = Atomic::xchg(&_old_entries, (OopMapCacheEntry*)nullptr);
+  if (entry == nullptr) {
+    // No work.
+    return;
+  }
+
+  // About to delete the entries that might still be accessed by other threads
+  // on lookup path. Need to sync up with them before proceeding.
+  GlobalCounter::write_synchronize();
+
   while (entry != nullptr) {
     if (log_is_enabled(Debug, interpreter, oopmap)) {
       ResourceMark rm;
diff --git a/src/hotspot/share/interpreter/oopMapCache.hpp b/src/hotspot/share/interpreter/oopMapCache.hpp
index 7378a987cc69b..3c124631377ef 100644
--- a/src/hotspot/share/interpreter/oopMapCache.hpp
+++ b/src/hotspot/share/interpreter/oopMapCache.hpp
@@ -179,7 +179,15 @@ class OopMapCache : public CHeapObj {
 
   // Compute an oop map without updating the cache or grabbing any locks (for debugging)
   static void compute_one_oop_map(const methodHandle& method, int bci, InterpreterOopMap* entry);
-  static void cleanup_old_entries();
+
+  // Check if we need to clean up old entries
+  static bool has_cleanup_work();
+
+  // Request cleanup if work is needed
+  static void trigger_cleanup();
+
+  // Clean up the old entries
+  static void cleanup();
 };
 
 #endif // SHARE_INTERPRETER_OOPMAPCACHE_HPP
diff --git a/src/hotspot/share/oops/method.cpp b/src/hotspot/share/oops/method.cpp
index 929c8b87d39d5..4be0cf78bdcc8 100644
--- a/src/hotspot/share/oops/method.cpp
+++ b/src/hotspot/share/oops/method.cpp
@@ -310,16 +310,13 @@ int Method::fast_exception_handler_bci_for(const methodHandle& mh, Klass* ex_kla
 
 void Method::mask_for(int bci, InterpreterOopMap* mask) {
   methodHandle h_this(Thread::current(), this);
-  // Only GC uses the OopMapCache during thread stack root scanning
-  // any other uses generate an oopmap but do not save it in the cache.
-  if (Universe::heap()->is_stw_gc_active()) {
-    method_holder()->mask_for(h_this, bci, mask);
-  } else {
-    OopMapCache::compute_one_oop_map(h_this, bci, mask);
-  }
-  return;
+  mask_for(h_this, bci, mask);
 }
 
+void Method::mask_for(const methodHandle& this_mh, int bci, InterpreterOopMap* mask) {
+  assert(this_mh() == this, "Sanity");
+  method_holder()->mask_for(this_mh, bci, mask);
+}
 
 int Method::bci_from(address bcp) const {
   if (is_native() && bcp == 0) {
diff --git a/src/hotspot/share/oops/method.hpp b/src/hotspot/share/oops/method.hpp
index 319a8ff155780..905c53a4ea38b 100644
--- a/src/hotspot/share/oops/method.hpp
+++ b/src/hotspot/share/oops/method.hpp
@@ -446,8 +446,10 @@ class Method : public Metadata {
   address signature_handler() const           { return *(signature_handler_addr()); }
   void set_signature_handler(address handler);
 
-  // Interpreter oopmap support
+  // Interpreter oopmap support.
+  // If handle is already available, call with it for better performance.
   void mask_for(int bci, InterpreterOopMap* mask);
+  void mask_for(const methodHandle& this_mh, int bci, InterpreterOopMap* mask);
 
   // operations on invocation counter
   void print_invocation_count(outputStream* st);
diff --git a/src/hotspot/share/runtime/frame.cpp b/src/hotspot/share/runtime/frame.cpp
index 275f095af37dd..8f5d2ad4acbf1 100644
--- a/src/hotspot/share/runtime/frame.cpp
+++ b/src/hotspot/share/runtime/frame.cpp
@@ -950,7 +950,7 @@ void frame::oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool quer
     ResourceMark rm(thread);
     InterpreterOopMap mask;
     if (query_oop_map_cache) {
-      m->mask_for(bci, &mask);
+      m->mask_for(m, bci, &mask);
     } else {
       OopMapCache::compute_one_oop_map(m, bci, &mask);
     }
diff --git a/src/hotspot/share/runtime/serviceThread.cpp b/src/hotspot/share/runtime/serviceThread.cpp
index a5082fad47937..f02e5062e672e 100644
--- a/src/hotspot/share/runtime/serviceThread.cpp
+++ b/src/hotspot/share/runtime/serviceThread.cpp
@@ -33,6 +33,7 @@
 #include "gc/shared/oopStorage.hpp"
 #include "gc/shared/oopStorageSet.hpp"
+#include "interpreter/oopMapCache.hpp"
 #include "memory/universe.hpp"
 #include "oops/oopHandle.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
@@ -95,6 +96,7 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
     bool oop_handles_to_release = false;
     bool cldg_cleanup_work = false;
     bool jvmti_tagmap_work = false;
+    bool oopmap_cache_work = false;
     {
       // Need state transition ThreadBlockInVM so that this thread
       // will be handled by safepoint correctly when this thread is
@@ -124,7 +126,8 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
               (oopstorage_work = OopStorage::has_cleanup_work_and_reset()) |
              (oop_handles_to_release = JavaThread::has_oop_handles_to_release()) |
              (cldg_cleanup_work = ClassLoaderDataGraph::should_clean_metaspaces_and_reset()) |
-             (jvmti_tagmap_work = JvmtiTagMap::has_object_free_events_and_reset())
+             (jvmti_tagmap_work = JvmtiTagMap::has_object_free_events_and_reset()) |
+             (oopmap_cache_work = OopMapCache::has_cleanup_work())
              ) == 0) {
         // Wait until notified that there is some work to do or timer expires.
         // Some cleanup requests don't notify the ServiceThread so work needs to be done at periodic intervals.
@@ -196,6 +199,10 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
     if (jvmti_tagmap_work) {
       JvmtiTagMap::flush_all_object_free_events();
     }
+
+    if (oopmap_cache_work) {
+      OopMapCache::cleanup();
+    }
   }
 }