From eb95d49065b33f1c1331f734a0775212c340955e Mon Sep 17 00:00:00 2001 From: Ivan Walulya Date: Mon, 8 Sep 2025 13:57:32 +0200 Subject: [PATCH 01/13] init --- src/hotspot/share/gc/g1/g1CollectedHeap.cpp | 14 ++++++++++---- src/hotspot/share/gc/g1/g1CollectedHeap.hpp | 3 --- src/hotspot/share/gc/g1/g1ConcurrentMark.cpp | 2 +- src/hotspot/share/gc/g1/g1Policy.cpp | 3 ++- src/hotspot/share/gc/g1/g1RemSet.cpp | 2 +- src/hotspot/share/gc/g1/g1VMOperations.cpp | 17 +++++++++++++---- src/hotspot/share/gc/shared/collectedHeap.cpp | 10 ++++++++++ src/hotspot/share/gc/shared/collectedHeap.hpp | 5 +++++ src/hotspot/share/gc/shared/gcVMOperations.cpp | 2 +- 9 files changed, 43 insertions(+), 15 deletions(-) diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp index de3fc0f5da599..e5fd8d7a4ade2 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp @@ -468,6 +468,16 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(uint node_index, size_t word_ log_warning(gc, alloc)("%s: Retried allocation %u times for %zu words", Thread::current()->name(), try_count, word_size); } + + if (is_shutting_down()) { + // If the VM is shutting down, we may have skipped allocation-triggered GCs. + // To avoid returning nullptr (which could cause premature OOME), we stall + // allocation requests here until the VM shutdown is complete. + MonitorLocker ml(BeforeExit_lock); + while (is_shutting_down()) { + ml.wait(); + } + } } ShouldNotReachHere(); @@ -1504,10 +1514,6 @@ jint G1CollectedHeap::initialize() { return JNI_OK; } -bool G1CollectedHeap::concurrent_mark_is_terminating() const { - return _cm_thread->should_terminate(); -} - void G1CollectedHeap::stop() { // Stop all concurrent threads. We do this to make sure these threads // do not continue to execute and access resources (e.g. 
logging) diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp index 0bb16edaf78fa..adedeaffe0581 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp @@ -900,9 +900,6 @@ class G1CollectedHeap : public CollectedHeap { // specified by the policy object. jint initialize() override; - // Returns whether concurrent mark threads (and the VM) are about to terminate. - bool concurrent_mark_is_terminating() const; - void safepoint_synchronize_begin() override; void safepoint_synchronize_end() override; diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp index 5e42bf7188216..d7de0b223ff05 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp @@ -2041,7 +2041,7 @@ bool G1ConcurrentMark::concurrent_cycle_abort() { // nothing, but this situation should be extremely rare (a full gc after shutdown // has been signalled is already rare), and this work should be negligible compared // to actual full gc work. - if (!cm_thread()->in_progress() && !_g1h->concurrent_mark_is_terminating()) { + if (!cm_thread()->in_progress() && !_g1h->is_shutting_down()) { return false; } diff --git a/src/hotspot/share/gc/g1/g1Policy.cpp b/src/hotspot/share/gc/g1/g1Policy.cpp index 6141f1056fec0..559161ea88082 100644 --- a/src/hotspot/share/gc/g1/g1Policy.cpp +++ b/src/hotspot/share/gc/g1/g1Policy.cpp @@ -662,6 +662,7 @@ bool G1Policy::should_retain_evac_failed_region(uint index) const { } void G1Policy::record_pause_start_time() { + assert(!_g1h->is_shutting_down(), "Invariant!"); Ticks now = Ticks::now(); _cur_pause_start_sec = now.seconds(); @@ -1246,7 +1247,7 @@ void G1Policy::decide_on_concurrent_start_pause() { // We should not be starting a concurrent start pause if the concurrent mark // thread is terminating. 
- if (_g1h->concurrent_mark_is_terminating()) { + if (_g1h->is_shutting_down()) { return; } diff --git a/src/hotspot/share/gc/g1/g1RemSet.cpp b/src/hotspot/share/gc/g1/g1RemSet.cpp index 25790a00bd949..3de2f1f536435 100644 --- a/src/hotspot/share/gc/g1/g1RemSet.cpp +++ b/src/hotspot/share/gc/g1/g1RemSet.cpp @@ -1105,7 +1105,7 @@ class G1MergeHeapRootsTask : public WorkerTask { // There might actually have been scheduled multiple collections, but at that point we do // not care that much about performance and just do the work multiple times if needed. return (_g1h->collector_state()->clear_bitmap_in_progress() || - _g1h->concurrent_mark_is_terminating()) && + _g1h->is_shutting_down()) && hr->is_old(); } diff --git a/src/hotspot/share/gc/g1/g1VMOperations.cpp b/src/hotspot/share/gc/g1/g1VMOperations.cpp index 6757172b625a5..13fdc4000969d 100644 --- a/src/hotspot/share/gc/g1/g1VMOperations.cpp +++ b/src/hotspot/share/gc/g1/g1VMOperations.cpp @@ -74,6 +74,7 @@ bool VM_G1TryInitiateConcMark::doit_prologue() { // got scheduled and prevented the scheduling of the concurrent start GC. // In this case we want to retry the GC so that the concurrent start pause is // actually scheduled. + _terminating = G1CollectedHeap::heap()->is_shutting_down(); if (!result) _transient_failure = true; return result; } @@ -83,9 +84,6 @@ void VM_G1TryInitiateConcMark::doit() { GCCauseSetter x(g1h, _gc_cause); - // Record for handling by caller. - _terminating = g1h->concurrent_mark_is_terminating(); - _mark_in_progress = g1h->collector_state()->mark_in_progress(); _cycle_already_in_progress = g1h->concurrent_mark()->cm_thread()->in_progress(); @@ -119,7 +117,6 @@ VM_G1CollectForAllocation::VM_G1CollectForAllocation(size_t word_size, void VM_G1CollectForAllocation::doit() { G1CollectedHeap* g1h = G1CollectedHeap::heap(); - GCCauseSetter x(g1h, _gc_cause); // Try a partial collection of some kind. 
g1h->do_collection_pause_at_safepoint(); @@ -156,6 +153,18 @@ void VM_G1PauseConcurrent::doit() { bool VM_G1PauseConcurrent::doit_prologue() { Heap_lock->lock(); + G1CollectedHeap* g1h = G1CollectedHeap::heap(); + if (g1h->is_shutting_down()) { + Heap_lock->unlock(); + // JVM shutdown has started. Stall here until marking threads have been aborted. + // This ensures that any further operations will be properly aborted and will + // not interfere with the shutdown process. + MonitorLocker ml(G1CGC_lock, Mutex::_no_safepoint_check_flag); + while (!g1h->concurrent_mark()->has_aborted()) { + ml.wait(); + } + return false; + } return true; } diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp index 71017817d147f..b7e1dcdddb857 100644 --- a/src/hotspot/share/gc/shared/collectedHeap.cpp +++ b/src/hotspot/share/gc/shared/collectedHeap.cpp @@ -277,6 +277,7 @@ CollectedHeap::CollectedHeap() : _capacity_at_last_gc(0), _used_at_last_gc(0), _soft_ref_policy(), + _is_shutting_down(false), _is_stw_gc_active(false), _last_whole_heap_examined_time_ns(os::javaTimeNanos()), _total_collections(0), @@ -604,9 +605,18 @@ void CollectedHeap::post_initialize() { initialize_serviceability(); } +bool CollectedHeap::is_shutting_down() const { + return Atomic::load_acquire(&_is_shutting_down); +} + void CollectedHeap::before_exit() { print_tracing_info(); + { + // Acquire the Heap_lock to ensure mutual exclusion with VM_GC_Operations. + MutexLocker ml(Heap_lock); + Atomic::release_store(&_is_shutting_down, true); + } // Stop any on-going concurrent work and prepare for exit. 
stop(); } diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp index 57bd931673183..35cbcdd0332b0 100644 --- a/src/hotspot/share/gc/shared/collectedHeap.hpp +++ b/src/hotspot/share/gc/shared/collectedHeap.hpp @@ -110,6 +110,9 @@ class CollectedHeap : public CHeapObj { // Then, set it to FillerObject after the FillerObject_klass loading is complete. static Klass* _filler_object_klass; + // Flag to indicate that VM is shutting down. + volatile bool _is_shutting_down; + protected: // Not used by all GCs MemRegion _reserved; @@ -249,6 +252,8 @@ class CollectedHeap : public CHeapObj { // This is the correct place to place such initialization methods. virtual void post_initialize(); + bool is_shutting_down() const; + void before_exit(); // Stop and resume concurrent GC threads interfering with safepoint operations diff --git a/src/hotspot/share/gc/shared/gcVMOperations.cpp b/src/hotspot/share/gc/shared/gcVMOperations.cpp index 1299f64995fa4..083939a0aa4e0 100644 --- a/src/hotspot/share/gc/shared/gcVMOperations.cpp +++ b/src/hotspot/share/gc/shared/gcVMOperations.cpp @@ -85,7 +85,7 @@ bool VM_GC_Operation::skip_operation() const { if (_full && skip) { skip = (_full_gc_count_before != Universe::heap()->total_full_collections()); } - return skip; + return skip || Universe::heap()->is_shutting_down(); } static bool should_use_gclocker() { From 4f512f6e585ce719568f1b99801bfa600b232de3 Mon Sep 17 00:00:00 2001 From: Ivan Walulya Date: Tue, 9 Sep 2025 11:33:47 +0200 Subject: [PATCH 02/13] log_cpu_time --- src/hotspot/share/gc/g1/g1CollectedHeap.cpp | 10 +--- src/hotspot/share/gc/g1/g1VMOperations.cpp | 20 ++----- src/hotspot/share/gc/g1/g1VMOperations.hpp | 2 - .../gc/parallel/parallelScavengeHeap.cpp | 4 ++ src/hotspot/share/gc/serial/serialHeap.cpp | 4 ++ src/hotspot/share/gc/shared/collectedHeap.cpp | 56 ++++++++++++++++++- src/hotspot/share/gc/shared/collectedHeap.hpp | 4 ++ .../share/gc/shared/gcVMOperations.cpp | 6 +- 
src/hotspot/share/memory/universe.cpp | 41 -------------- src/hotspot/share/runtime/mutexLocker.cpp | 2 + src/hotspot/share/runtime/mutexLocker.hpp | 1 + 11 files changed, 80 insertions(+), 70 deletions(-) diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp index e5fd8d7a4ade2..d95a7e2e332c8 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp @@ -470,13 +470,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(uint node_index, size_t word_ } if (is_shutting_down()) { - // If the VM is shutting down, we may have skipped allocation-triggered GCs. - // To avoid returning nullptr (which could cause premature OOME), we stall - // allocation requests here until the VM shutdown is complete. - MonitorLocker ml(BeforeExit_lock); - while (is_shutting_down()) { - ml.wait(); - } + stall_for_vm_shutdown(); } } @@ -1817,7 +1811,7 @@ bool G1CollectedHeap::try_collect_concurrently(GCCause::Cause cause, // If VMOp skipped initiating concurrent marking cycle because // we're terminating, then we're done. - if (op.terminating()) { + if (is_shutting_down()) { LOG_COLLECT_CONCURRENTLY(cause, "skipped: terminating"); return false; } diff --git a/src/hotspot/share/gc/g1/g1VMOperations.cpp b/src/hotspot/share/gc/g1/g1VMOperations.cpp index 13fdc4000969d..34e14f742aee3 100644 --- a/src/hotspot/share/gc/g1/g1VMOperations.cpp +++ b/src/hotspot/share/gc/g1/g1VMOperations.cpp @@ -64,7 +64,6 @@ VM_G1TryInitiateConcMark::VM_G1TryInitiateConcMark(uint gc_count_before, _mark_in_progress(false), _cycle_already_in_progress(false), _whitebox_attached(false), - _terminating(false), _gc_succeeded(false) {} @@ -74,7 +73,6 @@ bool VM_G1TryInitiateConcMark::doit_prologue() { // got scheduled and prevented the scheduling of the concurrent start GC. // In this case we want to retry the GC so that the concurrent start pause is // actually scheduled. 
- _terminating = G1CollectedHeap::heap()->is_shutting_down(); if (!result) _transient_failure = true; return result; } @@ -87,13 +85,7 @@ void VM_G1TryInitiateConcMark::doit() { _mark_in_progress = g1h->collector_state()->mark_in_progress(); _cycle_already_in_progress = g1h->concurrent_mark()->cm_thread()->in_progress(); - if (_terminating && GCCause::is_user_requested_gc(_gc_cause)) { - // When terminating, the request to initiate a concurrent cycle will be - // ignored by do_collection_pause_at_safepoint; instead it will just do - // a young-only or mixed GC (depending on phase). For a user request - // there's no point in even doing that much, so done. For some non-user - // requests the alternative GC might still be needed. - } else if (!g1h->policy()->force_concurrent_start_if_outside_cycle(_gc_cause)) { + if (!g1h->policy()->force_concurrent_start_if_outside_cycle(_gc_cause)) { // Failure to force the next GC pause to be a concurrent start indicates // there is already a concurrent marking cycle in progress. Flags to indicate // that were already set, so return immediately. @@ -156,13 +148,9 @@ bool VM_G1PauseConcurrent::doit_prologue() { G1CollectedHeap* g1h = G1CollectedHeap::heap(); if (g1h->is_shutting_down()) { Heap_lock->unlock(); - // JVM shutdown has started. Stall here until marking threads have been aborted. - // This ensures that any further operations will be properly aborted and will - // not interfere with the shutdown process. - MonitorLocker ml(G1CGC_lock, Mutex::_no_safepoint_check_flag); - while (!g1h->concurrent_mark()->has_aborted()) { - ml.wait(); - } + // JVM shutdown has started. This ensures that any further operations will be properly aborted + // and will not interfere with the shutdown process. 
+ g1h->concurrent_mark()->abort_marking_threads(); return false; } return true; diff --git a/src/hotspot/share/gc/g1/g1VMOperations.hpp b/src/hotspot/share/gc/g1/g1VMOperations.hpp index 05b27c4508c98..2adeaed04d90e 100644 --- a/src/hotspot/share/gc/g1/g1VMOperations.hpp +++ b/src/hotspot/share/gc/g1/g1VMOperations.hpp @@ -48,7 +48,6 @@ class VM_G1TryInitiateConcMark : public VM_GC_Collect_Operation { bool _mark_in_progress; bool _cycle_already_in_progress; bool _whitebox_attached; - bool _terminating; // The concurrent start pause may be cancelled for some reasons. Keep track of // this. bool _gc_succeeded; @@ -63,7 +62,6 @@ class VM_G1TryInitiateConcMark : public VM_GC_Collect_Operation { bool mark_in_progress() const { return _mark_in_progress; } bool cycle_already_in_progress() const { return _cycle_already_in_progress; } bool whitebox_attached() const { return _whitebox_attached; } - bool terminating() const { return _terminating; } bool gc_succeeded() const { return _gc_succeeded && VM_GC_Operation::gc_succeeded(); } }; diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp index 9b40475288d41..315a46981aa36 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp @@ -318,6 +318,10 @@ HeapWord* ParallelScavengeHeap::mem_allocate_work(size_t size, bool is_tlab) { assert(is_in_or_null(op.result()), "result not in heap"); return op.result(); } + + if (is_shutting_down()) { + stall_for_vm_shutdown(); + } } // Was the gc-overhead reached inside the safepoint? 
If so, this mutator diff --git a/src/hotspot/share/gc/serial/serialHeap.cpp b/src/hotspot/share/gc/serial/serialHeap.cpp index 662a6be695b2d..6fec8767b9fb5 100644 --- a/src/hotspot/share/gc/serial/serialHeap.cpp +++ b/src/hotspot/share/gc/serial/serialHeap.cpp @@ -323,6 +323,10 @@ HeapWord* SerialHeap::mem_allocate_work(size_t size, bool is_tlab) { break; } + if (is_shutting_down()) { + stall_for_vm_shutdown(); + } + // Give a warning if we seem to be looping forever. if ((QueuedAllocationWarningCount > 0) && (try_count % QueuedAllocationWarningCount == 0)) { diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp index b7e1dcdddb857..014ea43053b6d 100644 --- a/src/hotspot/share/gc/shared/collectedHeap.cpp +++ b/src/hotspot/share/gc/shared/collectedHeap.cpp @@ -609,12 +609,66 @@ bool CollectedHeap::is_shutting_down() const { return Atomic::load_acquire(&_is_shutting_down); } +void CollectedHeap::stall_for_vm_shutdown() { + assert(is_shutting_down(), "Precondition"); + // If the VM is shutting down, we may have skipped VM_CollectForAllocation. + // To avoid returning nullptr (which could cause premature OOME), we stall + // allocation requests here until the VM shutdown is complete. + log_info(gc) ("Stalling until shutdown"); + MonitorLocker ml(VMExit_lock); + while (true) { + ml.wait(); + } +} + +static void log_cpu_time() { + LogTarget(Info, cpu) cpuLog; + if (!cpuLog.is_enabled()) { + return; + } + + const double process_cpu_time = os::elapsed_process_cpu_time(); + if (process_cpu_time == 0 || process_cpu_time == -1) { + // 0 can happen e.g. 
for short running processes with + // low CPU utilization + return; + } + + const double gc_threads_cpu_time = (double) CPUTimeUsage::GC::gc_threads() / NANOSECS_PER_SEC; + const double gc_vm_thread_cpu_time = (double) CPUTimeUsage::GC::vm_thread() / NANOSECS_PER_SEC; + const double gc_string_dedup_cpu_time = (double) CPUTimeUsage::GC::stringdedup() / NANOSECS_PER_SEC; + const double gc_cpu_time = (double) gc_threads_cpu_time + gc_vm_thread_cpu_time + gc_string_dedup_cpu_time; + + const double elasped_time = os::elapsedTime(); + const bool has_error = CPUTimeUsage::Error::has_error(); + + if (gc_cpu_time < process_cpu_time) { + cpuLog.print("=== CPU time Statistics ============================================================="); + if (has_error) { + cpuLog.print("WARNING: CPU time sampling reported errors, numbers may be unreliable"); + } + cpuLog.print(" CPUs"); + cpuLog.print(" s %% utilized"); + cpuLog.print(" Process"); + cpuLog.print(" Total %30.4f %6.2f %8.1f", process_cpu_time, 100.0, process_cpu_time / elasped_time); + cpuLog.print(" Garbage Collection %30.4f %6.2f %8.1f", gc_cpu_time, percent_of(gc_cpu_time, process_cpu_time), gc_cpu_time / elasped_time); + cpuLog.print(" GC Threads %30.4f %6.2f %8.1f", gc_threads_cpu_time, percent_of(gc_threads_cpu_time, process_cpu_time), gc_threads_cpu_time / elasped_time); + cpuLog.print(" VM Thread %30.4f %6.2f %8.1f", gc_vm_thread_cpu_time, percent_of(gc_vm_thread_cpu_time, process_cpu_time), gc_vm_thread_cpu_time / elasped_time); + + if (UseStringDeduplication) { + cpuLog.print(" String Deduplication %30.4f %6.2f %8.1f", gc_string_dedup_cpu_time, percent_of(gc_string_dedup_cpu_time, process_cpu_time), gc_string_dedup_cpu_time / elasped_time); + } + cpuLog.print("====================================================================================="); + } +} + void CollectedHeap::before_exit() { - print_tracing_info(); { // Acquire the Heap_lock to ensure mutual exclusion with VM_GC_Operations. 
MutexLocker ml(Heap_lock); + log_cpu_time(); + print_tracing_info(); Atomic::release_store(&_is_shutting_down, true); } // Stop any on-going concurrent work and prepare for exit. diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp index 35cbcdd0332b0..c639b42be2f98 100644 --- a/src/hotspot/share/gc/shared/collectedHeap.hpp +++ b/src/hotspot/share/gc/shared/collectedHeap.hpp @@ -254,8 +254,12 @@ class CollectedHeap : public CHeapObj { bool is_shutting_down() const; + // Stall allocation requests until the VM shutdown is complete. + void stall_for_vm_shutdown(); + void before_exit(); + // Stop and resume concurrent GC threads interfering with safepoint operations virtual void safepoint_synchronize_begin() {} virtual void safepoint_synchronize_end() {} diff --git a/src/hotspot/share/gc/shared/gcVMOperations.cpp b/src/hotspot/share/gc/shared/gcVMOperations.cpp index 083939a0aa4e0..8ecce27840506 100644 --- a/src/hotspot/share/gc/shared/gcVMOperations.cpp +++ b/src/hotspot/share/gc/shared/gcVMOperations.cpp @@ -85,7 +85,7 @@ bool VM_GC_Operation::skip_operation() const { if (_full && skip) { skip = (_full_gc_count_before != Universe::heap()->total_full_collections()); } - return skip || Universe::heap()->is_shutting_down(); + return skip; } static bool should_use_gclocker() { @@ -112,7 +112,9 @@ bool VM_GC_Operation::doit_prologue() { VM_Heap_Sync_Operation::doit_prologue(); // Check invocations - if (skip_operation()) { + if (skip_operation() || Universe::heap()->is_shutting_down()) { + if (Universe::heap()->is_shutting_down()) + log_info(gc) ("Skipping GC after shut down has started"); // skip collection Heap_lock->unlock(); if (should_use_gclocker()) { diff --git a/src/hotspot/share/memory/universe.cpp b/src/hotspot/share/memory/universe.cpp index f34a771138d42..36c31872cf5d1 100644 --- a/src/hotspot/share/memory/universe.cpp +++ b/src/hotspot/share/memory/universe.cpp @@ -1302,49 +1302,8 @@ void 
Universe::verify(VerifyOption option, const char* prefix) { } } -static void log_cpu_time() { - LogTarget(Info, cpu) cpuLog; - if (!cpuLog.is_enabled()) { - return; - } - - const double process_cpu_time = os::elapsed_process_cpu_time(); - if (process_cpu_time == 0 || process_cpu_time == -1) { - // 0 can happen e.g. for short running processes with - // low CPU utilization - return; - } - - const double gc_threads_cpu_time = (double) CPUTimeUsage::GC::gc_threads() / NANOSECS_PER_SEC; - const double gc_vm_thread_cpu_time = (double) CPUTimeUsage::GC::vm_thread() / NANOSECS_PER_SEC; - const double gc_string_dedup_cpu_time = (double) CPUTimeUsage::GC::stringdedup() / NANOSECS_PER_SEC; - const double gc_cpu_time = (double) gc_threads_cpu_time + gc_vm_thread_cpu_time + gc_string_dedup_cpu_time; - - const double elasped_time = os::elapsedTime(); - const bool has_error = CPUTimeUsage::Error::has_error(); - - if (gc_cpu_time < process_cpu_time) { - cpuLog.print("=== CPU time Statistics ============================================================="); - if (has_error) { - cpuLog.print("WARNING: CPU time sampling reported errors, numbers may be unreliable"); - } - cpuLog.print(" CPUs"); - cpuLog.print(" s %% utilized"); - cpuLog.print(" Process"); - cpuLog.print(" Total %30.4f %6.2f %8.1f", process_cpu_time, 100.0, process_cpu_time / elasped_time); - cpuLog.print(" Garbage Collection %30.4f %6.2f %8.1f", gc_cpu_time, percent_of(gc_cpu_time, process_cpu_time), gc_cpu_time / elasped_time); - cpuLog.print(" GC Threads %30.4f %6.2f %8.1f", gc_threads_cpu_time, percent_of(gc_threads_cpu_time, process_cpu_time), gc_threads_cpu_time / elasped_time); - cpuLog.print(" VM Thread %30.4f %6.2f %8.1f", gc_vm_thread_cpu_time, percent_of(gc_vm_thread_cpu_time, process_cpu_time), gc_vm_thread_cpu_time / elasped_time); - - if (UseStringDeduplication) { - cpuLog.print(" String Deduplication %30.4f %6.2f %8.1f", gc_string_dedup_cpu_time, percent_of(gc_string_dedup_cpu_time, process_cpu_time), 
gc_string_dedup_cpu_time / elasped_time); - } - cpuLog.print("====================================================================================="); - } -} void Universe::before_exit() { - log_cpu_time(); heap()->before_exit(); // Print GC/heap related information. diff --git a/src/hotspot/share/runtime/mutexLocker.cpp b/src/hotspot/share/runtime/mutexLocker.cpp index 3f8915973e27d..b70fc679573d3 100644 --- a/src/hotspot/share/runtime/mutexLocker.cpp +++ b/src/hotspot/share/runtime/mutexLocker.cpp @@ -83,6 +83,7 @@ Mutex* DirectivesStack_lock = nullptr; Monitor* Terminator_lock = nullptr; Monitor* InitCompleted_lock = nullptr; Monitor* BeforeExit_lock = nullptr; +Monitor* VMExit_lock = nullptr; Monitor* Notify_lock = nullptr; Mutex* ExceptionCache_lock = nullptr; Mutex* TrainingData_lock = nullptr; @@ -250,6 +251,7 @@ void mutex_init() { MUTEX_DEFN(FullGCALot_lock , PaddedMutex , safepoint); // a lock to make FullGCALot MT safe #endif MUTEX_DEFN(BeforeExit_lock , PaddedMonitor, safepoint); + MUTEX_DEFN(VMExit_lock , PaddedMonitor, safepoint); MUTEX_DEFN(NonJavaThreadsList_lock , PaddedMutex , nosafepoint-1); MUTEX_DEFN(NonJavaThreadsListSync_lock , PaddedMutex , nosafepoint); diff --git a/src/hotspot/share/runtime/mutexLocker.hpp b/src/hotspot/share/runtime/mutexLocker.hpp index f888c789eb738..d7670dab0baf6 100644 --- a/src/hotspot/share/runtime/mutexLocker.hpp +++ b/src/hotspot/share/runtime/mutexLocker.hpp @@ -85,6 +85,7 @@ extern Mutex* DirectivesStack_lock; // a lock held when mutating th extern Monitor* Terminator_lock; // a lock used to guard termination of the vm extern Monitor* InitCompleted_lock; // a lock used to signal threads waiting on init completed extern Monitor* BeforeExit_lock; // a lock used to guard cleanups and shutdown hooks +extern Monitor* VMExit_lock; // a lock used to stall threads until VM shuts down extern Monitor* Notify_lock; // a lock used to synchronize the start-up of the vm extern Mutex* ExceptionCache_lock; // a lock used to 
synchronize exception cache updates From 89cf3cdb90cce60e87884e1bd0948451c1df307b Mon Sep 17 00:00:00 2001 From: Ivan Walulya Date: Tue, 9 Sep 2025 11:37:34 +0200 Subject: [PATCH 03/13] remove debug logs --- src/hotspot/share/gc/shared/collectedHeap.cpp | 1 - src/hotspot/share/gc/shared/gcVMOperations.cpp | 1 - 2 files changed, 2 deletions(-) diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp index 014ea43053b6d..5db0b4c01ab6d 100644 --- a/src/hotspot/share/gc/shared/collectedHeap.cpp +++ b/src/hotspot/share/gc/shared/collectedHeap.cpp @@ -614,7 +614,6 @@ void CollectedHeap::stall_for_vm_shutdown() { // If the VM is shutting down, we may have skipped VM_CollectForAllocation. // To avoid returning nullptr (which could cause premature OOME), we stall // allocation requests here until the VM shutdown is complete. - log_info(gc) ("Stalling until shutdown"); MonitorLocker ml(VMExit_lock); while (true) { ml.wait(); diff --git a/src/hotspot/share/gc/shared/gcVMOperations.cpp b/src/hotspot/share/gc/shared/gcVMOperations.cpp index 8ecce27840506..2355db78c41c0 100644 --- a/src/hotspot/share/gc/shared/gcVMOperations.cpp +++ b/src/hotspot/share/gc/shared/gcVMOperations.cpp @@ -114,7 +114,6 @@ bool VM_GC_Operation::doit_prologue() { // Check invocations if (skip_operation() || Universe::heap()->is_shutting_down()) { if (Universe::heap()->is_shutting_down()) - log_info(gc) ("Skipping GC after shut down has started"); // skip collection Heap_lock->unlock(); if (should_use_gclocker()) { From 1c20e32b2a433ebdbf99f1f6ca56e68b61143d3c Mon Sep 17 00:00:00 2001 From: Ivan Walulya Date: Tue, 9 Sep 2025 11:58:47 +0200 Subject: [PATCH 04/13] remove debug logs --- src/hotspot/share/gc/shared/gcVMOperations.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/hotspot/share/gc/shared/gcVMOperations.cpp b/src/hotspot/share/gc/shared/gcVMOperations.cpp index 2355db78c41c0..a0e4004bf173c 100644 --- 
a/src/hotspot/share/gc/shared/gcVMOperations.cpp +++ b/src/hotspot/share/gc/shared/gcVMOperations.cpp @@ -113,7 +113,6 @@ bool VM_GC_Operation::doit_prologue() { // Check invocations if (skip_operation() || Universe::heap()->is_shutting_down()) { - if (Universe::heap()->is_shutting_down()) // skip collection Heap_lock->unlock(); if (should_use_gclocker()) { From b3e1000a7891911f0b10f967d3c6ba942611d79a Mon Sep 17 00:00:00 2001 From: Ivan Walulya Date: Wed, 10 Sep 2025 10:49:19 +0200 Subject: [PATCH 05/13] space --- src/hotspot/share/gc/shared/collectedHeap.hpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp index c639b42be2f98..38a27a406c351 100644 --- a/src/hotspot/share/gc/shared/collectedHeap.hpp +++ b/src/hotspot/share/gc/shared/collectedHeap.hpp @@ -259,7 +259,6 @@ class CollectedHeap : public CHeapObj { void before_exit(); - // Stop and resume concurrent GC threads interfering with safepoint operations virtual void safepoint_synchronize_begin() {} virtual void safepoint_synchronize_end() {} From d2f45cc4f8c4685bfce106f9cfb0ca94bb4407cd Mon Sep 17 00:00:00 2001 From: Ivan Walulya Date: Fri, 12 Sep 2025 11:20:48 +0200 Subject: [PATCH 06/13] timed wait --- src/hotspot/share/gc/shared/collectedHeap.cpp | 14 ++++++++++---- src/hotspot/share/runtime/java.cpp | 5 ++--- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp index 5db0b4c01ab6d..625db25db34f2 100644 --- a/src/hotspot/share/gc/shared/collectedHeap.cpp +++ b/src/hotspot/share/gc/shared/collectedHeap.cpp @@ -613,11 +613,17 @@ void CollectedHeap::stall_for_vm_shutdown() { assert(is_shutting_down(), "Precondition"); // If the VM is shutting down, we may have skipped VM_CollectForAllocation. 
// To avoid returning nullptr (which could cause premature OOME), we stall - // allocation requests here until the VM shutdown is complete. + // allocation requests here allow the VM shutdown is complete. + // + // We use a timed wait (2 seconds) instead of an indefinite wait to avoid blocking + // VM shutdown if it happens to trigger a GC. + // The 2-second timeout is: + // - long enough to keep daemon threads stalled, while the shutdown + // sequence completes in the common case. + // - short enough to avoid excessive stall time if the shutdown itself + // triggers a GC. MonitorLocker ml(VMExit_lock); - while (true) { - ml.wait(); - } + ml.wait(2 * 1000); } static void log_cpu_time() { diff --git a/src/hotspot/share/runtime/java.cpp b/src/hotspot/share/runtime/java.cpp index 997dd1f802a00..a39293c202973 100644 --- a/src/hotspot/share/runtime/java.cpp +++ b/src/hotspot/share/runtime/java.cpp @@ -476,9 +476,6 @@ void before_exit(JavaThread* thread, bool halt) { NativeHeapTrimmer::cleanup(); - // Run before exit and then stop concurrent GC threads - Universe::before_exit(); - if (PrintBytecodeHistogram) { BytecodeHistogram::print(); } @@ -511,6 +508,8 @@ void before_exit(JavaThread* thread, bool halt) { } #endif + // Run before exit and then stop concurrent GC threads + Universe::before_exit(); print_statistics(); { MutexLocker ml(BeforeExit_lock); From 187a463bb697ef79dfb2e5e6a65a8f73ddee747b Mon Sep 17 00:00:00 2001 From: Ivan Walulya Date: Fri, 12 Sep 2025 12:30:26 +0200 Subject: [PATCH 07/13] return on timeout --- src/hotspot/share/gc/g1/g1CollectedHeap.cpp | 6 ++++++ src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp | 1 + src/hotspot/share/gc/serial/serialHeap.cpp | 1 + src/hotspot/share/gc/shared/collectedHeap.cpp | 4 ++-- 4 files changed, 10 insertions(+), 2 deletions(-) diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp index d2aba05aca838..06dab5fe0e51f 100644 --- 
a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp @@ -471,6 +471,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(uint node_index, size_t word_ if (is_shutting_down()) { stall_for_vm_shutdown(); + return nullptr; } } @@ -707,6 +708,11 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) { log_warning(gc, alloc)("%s: Retried allocation %u times for %zu words", Thread::current()->name(), try_count, word_size); } + + if (is_shutting_down()) { + stall_for_vm_shutdown(); + return nullptr; + } } ShouldNotReachHere(); diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp index f37bc8aae2f44..df51d008aaecd 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp @@ -321,6 +321,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate_work(size_t size, bool is_tlab) { if (is_shutting_down()) { stall_for_vm_shutdown(); + return nullptr; } } diff --git a/src/hotspot/share/gc/serial/serialHeap.cpp b/src/hotspot/share/gc/serial/serialHeap.cpp index 6fec8767b9fb5..761fafbfbbe3c 100644 --- a/src/hotspot/share/gc/serial/serialHeap.cpp +++ b/src/hotspot/share/gc/serial/serialHeap.cpp @@ -325,6 +325,7 @@ HeapWord* SerialHeap::mem_allocate_work(size_t size, bool is_tlab) { if (is_shutting_down()) { stall_for_vm_shutdown(); + return nullptr; } // Give a warning if we seem to be looping forever. 
diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp index 625db25db34f2..3992e98978be1 100644 --- a/src/hotspot/share/gc/shared/collectedHeap.cpp +++ b/src/hotspot/share/gc/shared/collectedHeap.cpp @@ -606,7 +606,7 @@ void CollectedHeap::post_initialize() { } bool CollectedHeap::is_shutting_down() const { - return Atomic::load_acquire(&_is_shutting_down); + return AtomicAccess::load_acquire(&_is_shutting_down); } void CollectedHeap::stall_for_vm_shutdown() { @@ -674,7 +674,7 @@ void CollectedHeap::before_exit() { MutexLocker ml(Heap_lock); log_cpu_time(); print_tracing_info(); - Atomic::release_store(&_is_shutting_down, true); + AtomicAccess::release_store(&_is_shutting_down, true); } // Stop any on-going concurrent work and prepare for exit. stop(); From 0e45912bd6e1e53cf7bfc7341511fb29d2095293 Mon Sep 17 00:00:00 2001 From: Ivan Walulya Date: Mon, 15 Sep 2025 19:08:16 +0200 Subject: [PATCH 08/13] Thomas Review --- src/hotspot/share/gc/shared/collectedHeap.cpp | 11 ++++------- src/hotspot/share/gc/shared/collectedHeap.hpp | 3 +++ src/hotspot/share/memory/universe.hpp | 1 + src/hotspot/share/runtime/java.cpp | 1 - 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp index 3992e98978be1..2dad2702e0848 100644 --- a/src/hotspot/share/gc/shared/collectedHeap.cpp +++ b/src/hotspot/share/gc/shared/collectedHeap.cpp @@ -611,19 +611,16 @@ bool CollectedHeap::is_shutting_down() const { void CollectedHeap::stall_for_vm_shutdown() { assert(is_shutting_down(), "Precondition"); - // If the VM is shutting down, we may have skipped VM_CollectForAllocation. - // To avoid returning nullptr (which could cause premature OOME), we stall - // allocation requests here allow the VM shutdown is complete. 
- // - // We use a timed wait (2 seconds) instead of an indefinite wait to avoid blocking - // VM shutdown if it happens to trigger a GC. + // We use a timed wait (2 seconds) instead of an indefinite wait to avoid deadlock + // if the VM shutdown triggers a GC. // The 2-second timeout is: // - long enough to keep daemon threads stalled, while the shutdown // sequence completes in the common case. // - short enough to avoid excessive stall time if the shutdown itself // triggers a GC. MonitorLocker ml(VMExit_lock); - ml.wait(2 * 1000); + ml.wait(2 * MILLIUNITS); + log_warning(gc, alloc)("%s: Stall for VM-Shutdown timed out; allocation may fail with OOME", Thread::current()->name()); } static void log_cpu_time() { diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp index 38a27a406c351..3799910998fc0 100644 --- a/src/hotspot/share/gc/shared/collectedHeap.hpp +++ b/src/hotspot/share/gc/shared/collectedHeap.hpp @@ -252,6 +252,9 @@ class CollectedHeap : public CHeapObj { // This is the correct place to place such initialization methods. virtual void post_initialize(); + // If the VM is shutting down, we may have skipped VM_CollectForAllocation. + // In this case, stall the allocation request briefly in the hope that + // the VM shutdown completes before the allocation request returns. bool is_shutting_down() const; // Stall allocation requests until the VM shutdown is complete. diff --git a/src/hotspot/share/memory/universe.hpp b/src/hotspot/share/memory/universe.hpp index 90874d2392db8..33bf3b35564f9 100644 --- a/src/hotspot/share/memory/universe.hpp +++ b/src/hotspot/share/memory/universe.hpp @@ -305,6 +305,7 @@ class Universe: AllStatic { // The particular choice of collected heap. 
static CollectedHeap* heap() { return _collectedHeap; } + // Run heap before exit (stop concurrent GC threads, log GC/heap exit information) static void before_exit(); DEBUG_ONLY(static bool is_stw_gc_active();) diff --git a/src/hotspot/share/runtime/java.cpp b/src/hotspot/share/runtime/java.cpp index a39293c202973..8fda092dc7147 100644 --- a/src/hotspot/share/runtime/java.cpp +++ b/src/hotspot/share/runtime/java.cpp @@ -508,7 +508,6 @@ void before_exit(JavaThread* thread, bool halt) { } #endif - // Run before exit and then stop concurrent GC threads Universe::before_exit(); print_statistics(); From 2ebff06cad6a6632b3665d2827b126fa6f97b27d Mon Sep 17 00:00:00 2001 From: Ivan Walulya Date: Tue, 16 Sep 2025 11:19:51 +0200 Subject: [PATCH 09/13] Revert --- src/hotspot/share/memory/universe.hpp | 1 - src/hotspot/share/runtime/java.cpp | 4 +++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/hotspot/share/memory/universe.hpp b/src/hotspot/share/memory/universe.hpp index 33bf3b35564f9..90874d2392db8 100644 --- a/src/hotspot/share/memory/universe.hpp +++ b/src/hotspot/share/memory/universe.hpp @@ -305,7 +305,6 @@ class Universe: AllStatic { // The particular choice of collected heap. 
static CollectedHeap* heap() { return _collectedHeap; } - // Run heap before exit (stop concurrent GC threads, log GC/heap exit information) static void before_exit(); DEBUG_ONLY(static bool is_stw_gc_active();) diff --git a/src/hotspot/share/runtime/java.cpp b/src/hotspot/share/runtime/java.cpp index 8fda092dc7147..997dd1f802a00 100644 --- a/src/hotspot/share/runtime/java.cpp +++ b/src/hotspot/share/runtime/java.cpp @@ -476,6 +476,9 @@ void before_exit(JavaThread* thread, bool halt) { NativeHeapTrimmer::cleanup(); + // Run before exit and then stop concurrent GC threads + Universe::before_exit(); + if (PrintBytecodeHistogram) { BytecodeHistogram::print(); } @@ -508,7 +511,6 @@ void before_exit(JavaThread* thread, bool halt) { } #endif - Universe::before_exit(); print_statistics(); { MutexLocker ml(BeforeExit_lock); From 1a53c207eb8a8165dd6d785af9a418b848561f34 Mon Sep 17 00:00:00 2001 From: Ivan Walulya Date: Thu, 18 Sep 2025 15:12:55 +0200 Subject: [PATCH 10/13] make universal --- src/hotspot/share/gc/shared/collectedHeap.cpp | 58 +++---------------- src/hotspot/share/gc/shared/collectedHeap.hpp | 8 +-- .../share/gc/shared/gcVMOperations.cpp | 2 +- src/hotspot/share/memory/universe.cpp | 47 +++++++++++++++ src/hotspot/share/memory/universe.hpp | 5 ++ src/hotspot/share/services/cpuTimeUsage.cpp | 2 + 6 files changed, 65 insertions(+), 57 deletions(-) diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp index 77003d94c8352..6b1cd6ee01d5c 100644 --- a/src/hotspot/share/gc/shared/collectedHeap.cpp +++ b/src/hotspot/share/gc/shared/collectedHeap.cpp @@ -276,7 +276,6 @@ bool CollectedHeap::is_oop(oop object) const { CollectedHeap::CollectedHeap() : _capacity_at_last_gc(0), _used_at_last_gc(0), - _is_shutting_down(false), _is_stw_gc_active(false), _last_whole_heap_examined_time_ns(os::javaTimeNanos()), _total_collections(0), @@ -386,6 +385,12 @@ MetaWord* 
CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loa if (op.gc_succeeded()) { return op.result(); } + + if (is_shutting_down()) { + stall_for_vm_shutdown(); + return nullptr; + } + loop_count++; if ((QueuedAllocationWarningCount > 0) && (loop_count % QueuedAllocationWarningCount == 0)) { @@ -605,7 +610,7 @@ void CollectedHeap::post_initialize() { } bool CollectedHeap::is_shutting_down() const { - return AtomicAccess::load_acquire(&_is_shutting_down); + return Universe::is_shutting_down(); } void CollectedHeap::stall_for_vm_shutdown() { @@ -622,56 +627,9 @@ void CollectedHeap::stall_for_vm_shutdown() { log_warning(gc, alloc)("%s: Stall for VM-Shutdown timed out; allocation may fail with OOME", Thread::current()->name()); } -static void log_cpu_time() { - LogTarget(Info, cpu) cpuLog; - if (!cpuLog.is_enabled()) { - return; - } - - const double process_cpu_time = os::elapsed_process_cpu_time(); - if (process_cpu_time == 0 || process_cpu_time == -1) { - // 0 can happen e.g. 
for short running processes with - // low CPU utilization - return; - } - - const double gc_threads_cpu_time = (double) CPUTimeUsage::GC::gc_threads() / NANOSECS_PER_SEC; - const double gc_vm_thread_cpu_time = (double) CPUTimeUsage::GC::vm_thread() / NANOSECS_PER_SEC; - const double gc_string_dedup_cpu_time = (double) CPUTimeUsage::GC::stringdedup() / NANOSECS_PER_SEC; - const double gc_cpu_time = (double) gc_threads_cpu_time + gc_vm_thread_cpu_time + gc_string_dedup_cpu_time; - - const double elasped_time = os::elapsedTime(); - const bool has_error = CPUTimeUsage::Error::has_error(); - - if (gc_cpu_time < process_cpu_time) { - cpuLog.print("=== CPU time Statistics ============================================================="); - if (has_error) { - cpuLog.print("WARNING: CPU time sampling reported errors, numbers may be unreliable"); - } - cpuLog.print(" CPUs"); - cpuLog.print(" s %% utilized"); - cpuLog.print(" Process"); - cpuLog.print(" Total %30.4f %6.2f %8.1f", process_cpu_time, 100.0, process_cpu_time / elasped_time); - cpuLog.print(" Garbage Collection %30.4f %6.2f %8.1f", gc_cpu_time, percent_of(gc_cpu_time, process_cpu_time), gc_cpu_time / elasped_time); - cpuLog.print(" GC Threads %30.4f %6.2f %8.1f", gc_threads_cpu_time, percent_of(gc_threads_cpu_time, process_cpu_time), gc_threads_cpu_time / elasped_time); - cpuLog.print(" VM Thread %30.4f %6.2f %8.1f", gc_vm_thread_cpu_time, percent_of(gc_vm_thread_cpu_time, process_cpu_time), gc_vm_thread_cpu_time / elasped_time); - - if (UseStringDeduplication) { - cpuLog.print(" String Deduplication %30.4f %6.2f %8.1f", gc_string_dedup_cpu_time, percent_of(gc_string_dedup_cpu_time, process_cpu_time), gc_string_dedup_cpu_time / elasped_time); - } - cpuLog.print("====================================================================================="); - } -} - void CollectedHeap::before_exit() { + print_tracing_info(); - { - // Acquire the Heap_lock to ensure mutual exclusion with VM_GC_Operations. 
- MutexLocker ml(Heap_lock); - log_cpu_time(); - print_tracing_info(); - AtomicAccess::release_store(&_is_shutting_down, true); - } // Stop any on-going concurrent work and prepare for exit. stop(); } diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp index 050e7a6884db0..0516091228309 100644 --- a/src/hotspot/share/gc/shared/collectedHeap.hpp +++ b/src/hotspot/share/gc/shared/collectedHeap.hpp @@ -107,9 +107,6 @@ class CollectedHeap : public CHeapObj { // Then, set it to FillerObject after the FillerObject_klass loading is complete. static Klass* _filler_object_klass; - // Flag to indicate that VM is shutting down. - volatile bool _is_shutting_down; - protected: // Not used by all GCs MemRegion _reserved; @@ -249,12 +246,11 @@ class CollectedHeap : public CHeapObj { // This is the correct place to place such initialization methods. virtual void post_initialize(); + bool is_shutting_down() const; + // If the VM is shutting down, we may have skipped VM_CollectForAllocation. // In this case, stall the allocation request briefly in the hope that // the VM shutdown completes before the allocation request returns. - bool is_shutting_down() const; - - // Stall allocation requests until the VM shutdown is complete. 
void stall_for_vm_shutdown(); void before_exit(); diff --git a/src/hotspot/share/gc/shared/gcVMOperations.cpp b/src/hotspot/share/gc/shared/gcVMOperations.cpp index be684c52544d3..36aa0c9843dfe 100644 --- a/src/hotspot/share/gc/shared/gcVMOperations.cpp +++ b/src/hotspot/share/gc/shared/gcVMOperations.cpp @@ -111,7 +111,7 @@ bool VM_GC_Operation::doit_prologue() { VM_Heap_Sync_Operation::doit_prologue(); // Check invocations - if (skip_operation() || Universe::heap()->is_shutting_down()) { + if (skip_operation() || Universe::is_shutting_down()) { // skip collection Heap_lock->unlock(); if (should_use_gclocker()) { diff --git a/src/hotspot/share/memory/universe.cpp b/src/hotspot/share/memory/universe.cpp index 63a35d69a173d..a4cf248c7ac93 100644 --- a/src/hotspot/share/memory/universe.cpp +++ b/src/hotspot/share/memory/universe.cpp @@ -183,6 +183,7 @@ int Universe::_base_vtable_size = 0; bool Universe::_bootstrapping = false; bool Universe::_module_initialized = false; bool Universe::_fully_initialized = false; +volatile bool Universe::_is_shutting_down = false; OopStorage* Universe::_vm_weak = nullptr; OopStorage* Universe::_vm_global = nullptr; @@ -1302,8 +1303,54 @@ void Universe::verify(VerifyOption option, const char* prefix) { } } +static void log_cpu_time() { + LogTarget(Info, cpu) cpuLog; + if (!cpuLog.is_enabled()) { + return; + } + + const double process_cpu_time = os::elapsed_process_cpu_time(); + if (process_cpu_time == 0 || process_cpu_time == -1) { + // 0 can happen e.g. 
for short running processes with + // low CPU utilization + return; + } + + const double gc_threads_cpu_time = (double) CPUTimeUsage::GC::gc_threads() / NANOSECS_PER_SEC; + const double gc_vm_thread_cpu_time = (double) CPUTimeUsage::GC::vm_thread() / NANOSECS_PER_SEC; + const double gc_string_dedup_cpu_time = (double) CPUTimeUsage::GC::stringdedup() / NANOSECS_PER_SEC; + const double gc_cpu_time = (double) gc_threads_cpu_time + gc_vm_thread_cpu_time + gc_string_dedup_cpu_time; + + const double elapsed_time = os::elapsedTime(); + const bool has_error = CPUTimeUsage::Error::has_error(); + + if (gc_cpu_time < process_cpu_time) { + cpuLog.print("=== CPU time Statistics ============================================================="); + if (has_error) { + cpuLog.print("WARNING: CPU time sampling reported errors, numbers may be unreliable"); + } + cpuLog.print(" CPUs"); + cpuLog.print(" s %% utilized"); + cpuLog.print(" Process"); + cpuLog.print(" Total %30.4f %6.2f %8.1f", process_cpu_time, 100.0, process_cpu_time / elapsed_time); + cpuLog.print(" Garbage Collection %30.4f %6.2f %8.1f", gc_cpu_time, percent_of(gc_cpu_time, process_cpu_time), gc_cpu_time / elapsed_time); + cpuLog.print(" GC Threads %30.4f %6.2f %8.1f", gc_threads_cpu_time, percent_of(gc_threads_cpu_time, process_cpu_time), gc_threads_cpu_time / elapsed_time); + cpuLog.print(" VM Thread %30.4f %6.2f %8.1f", gc_vm_thread_cpu_time, percent_of(gc_vm_thread_cpu_time, process_cpu_time), gc_vm_thread_cpu_time / elapsed_time); + + if (UseStringDeduplication) { + cpuLog.print(" String Deduplication %30.4f %6.2f %8.1f", gc_string_dedup_cpu_time, percent_of(gc_string_dedup_cpu_time, process_cpu_time), gc_string_dedup_cpu_time / elapsed_time); + } + cpuLog.print("====================================================================================="); + } +} void Universe::before_exit() { + { + MutexLocker hl(Heap_lock); + log_cpu_time(); + AtomicAccess::release_store(&_is_shutting_down, true); + } + 
heap()->before_exit(); // Print GC/heap related information. diff --git a/src/hotspot/share/memory/universe.hpp b/src/hotspot/share/memory/universe.hpp index 90874d2392db8..3b1f2523ed845 100644 --- a/src/hotspot/share/memory/universe.hpp +++ b/src/hotspot/share/memory/universe.hpp @@ -128,6 +128,9 @@ class Universe: AllStatic { static bool _module_initialized; // true after call_initPhase2 called static bool _fully_initialized; // true after universe_init and initialize_vtables called + // Shutdown + static volatile bool _is_shutting_down; + // the array of preallocated errors with backtraces static objArrayOop preallocated_out_of_memory_errors(); @@ -324,6 +327,8 @@ class Universe: AllStatic { static bool is_module_initialized() { return _module_initialized; } static bool is_fully_initialized() { return _fully_initialized; } + static bool is_shutting_down() { return AtomicAccess::load_acquire(&_is_shutting_down); } + static bool on_page_boundary(void* addr); static bool should_fill_in_stack_trace(Handle throwable); static void check_alignment(uintx size, uintx alignment, const char* name); diff --git a/src/hotspot/share/services/cpuTimeUsage.cpp b/src/hotspot/share/services/cpuTimeUsage.cpp index d6b01bcbf9ae7..d81fddd977d3c 100644 --- a/src/hotspot/share/services/cpuTimeUsage.cpp +++ b/src/hotspot/share/services/cpuTimeUsage.cpp @@ -28,6 +28,7 @@ #include "memory/universe.hpp" #include "runtime/globals.hpp" #include "runtime/os.hpp" +#include "runtime/osThread.hpp" #include "runtime/perfData.hpp" #include "runtime/vmThread.hpp" #include "services/cpuTimeUsage.hpp" @@ -35,6 +36,7 @@ volatile bool CPUTimeUsage::Error::_has_error = false; static inline jlong thread_cpu_time_or_zero(Thread* thread) { + assert(!thread->has_terminated(), "Cannot get cpu time for terminated thread: %zu", thread->osthread()->thread_id_for_printing()); jlong cpu_time = os::thread_cpu_time(thread); if (cpu_time == -1) { CPUTimeUsage::Error::mark_error(); From 
46add7af34c45761012a5468323b6e7db15a8931 Mon Sep 17 00:00:00 2001 From: Ivan Walulya Date: Fri, 19 Sep 2025 07:11:34 +0200 Subject: [PATCH 11/13] remove assert --- src/hotspot/share/services/cpuTimeUsage.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/hotspot/share/services/cpuTimeUsage.cpp b/src/hotspot/share/services/cpuTimeUsage.cpp index d81fddd977d3c..27b5e90fbaf56 100644 --- a/src/hotspot/share/services/cpuTimeUsage.cpp +++ b/src/hotspot/share/services/cpuTimeUsage.cpp @@ -36,7 +36,6 @@ volatile bool CPUTimeUsage::Error::_has_error = false; static inline jlong thread_cpu_time_or_zero(Thread* thread) { - assert(!thread->has_terminated(), "Cannot get cpu time for terminated thread: %zu", thread->osthread()->thread_id_for_printing()); jlong cpu_time = os::thread_cpu_time(thread); if (cpu_time == -1) { CPUTimeUsage::Error::mark_error(); From 354e53cf375233a5cbfd28ced6852b689484c18c Mon Sep 17 00:00:00 2001 From: Ivan Walulya Date: Sun, 21 Sep 2025 08:34:52 +0200 Subject: [PATCH 12/13] Remove lock --- src/hotspot/share/gc/shared/collectedHeap.cpp | 7 +++---- src/hotspot/share/runtime/mutexLocker.cpp | 2 -- src/hotspot/share/runtime/mutexLocker.hpp | 1 - 3 files changed, 3 insertions(+), 7 deletions(-) diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp index 6b1cd6ee01d5c..8ef73c6a0b825 100644 --- a/src/hotspot/share/gc/shared/collectedHeap.cpp +++ b/src/hotspot/share/gc/shared/collectedHeap.cpp @@ -615,15 +615,14 @@ bool CollectedHeap::is_shutting_down() const { void CollectedHeap::stall_for_vm_shutdown() { assert(is_shutting_down(), "Precondition"); - // We use a timed wait (2 seconds) instead of an indefinite wait to avoid deadlock + // Stall the thread (2 seconds) instead of an indefinite wait to avoid deadlock // if the VM shutdown triggers a GC. 
- // The 2-second timeout is: + // The 2-second sleep is: // - long enough to keep daemon threads stalled, while the shutdown // sequence completes in the common case. // - short enough to avoid excessive stall time if the shutdown itself // triggers a GC. - MonitorLocker ml(VMExit_lock); - ml.wait(2 * MILLIUNITS); + JavaThread::current()->sleep(2 * MILLIUNITS); log_warning(gc, alloc)("%s: Stall for VM-Shutdown timed out; allocation may fail with OOME", Thread::current()->name()); } diff --git a/src/hotspot/share/runtime/mutexLocker.cpp b/src/hotspot/share/runtime/mutexLocker.cpp index 91b0a9577614e..e0eafbc416b1b 100644 --- a/src/hotspot/share/runtime/mutexLocker.cpp +++ b/src/hotspot/share/runtime/mutexLocker.cpp @@ -82,7 +82,6 @@ Mutex* DirectivesStack_lock = nullptr; Monitor* Terminator_lock = nullptr; Monitor* InitCompleted_lock = nullptr; Monitor* BeforeExit_lock = nullptr; -Monitor* VMExit_lock = nullptr; Monitor* Notify_lock = nullptr; Mutex* ExceptionCache_lock = nullptr; Mutex* TrainingData_lock = nullptr; @@ -248,7 +247,6 @@ void mutex_init() { MUTEX_DEFN(FullGCALot_lock , PaddedMutex , safepoint); // a lock to make FullGCALot MT safe #endif MUTEX_DEFN(BeforeExit_lock , PaddedMonitor, safepoint); - MUTEX_DEFN(VMExit_lock , PaddedMonitor, safepoint); MUTEX_DEFN(NonJavaThreadsList_lock , PaddedMutex , nosafepoint-1); MUTEX_DEFN(NonJavaThreadsListSync_lock , PaddedMutex , nosafepoint); diff --git a/src/hotspot/share/runtime/mutexLocker.hpp b/src/hotspot/share/runtime/mutexLocker.hpp index 04becfccd59d5..3a73edc7bf29c 100644 --- a/src/hotspot/share/runtime/mutexLocker.hpp +++ b/src/hotspot/share/runtime/mutexLocker.hpp @@ -84,7 +84,6 @@ extern Mutex* DirectivesStack_lock; // a lock held when mutating th extern Monitor* Terminator_lock; // a lock used to guard termination of the vm extern Monitor* InitCompleted_lock; // a lock used to signal threads waiting on init completed extern Monitor* BeforeExit_lock; // a lock used to guard cleanups and shutdown hooks 
-extern Monitor* VMExit_lock; // a lock used to stall threads until VM shuts down extern Monitor* Notify_lock; // a lock used to synchronize the start-up of the vm extern Mutex* ExceptionCache_lock; // a lock used to synchronize exception cache updates From f1ec96925480b051e092c59fb0f6051c04a2f43e Mon Sep 17 00:00:00 2001 From: Ivan Walulya Date: Mon, 22 Sep 2025 13:58:41 +0200 Subject: [PATCH 13/13] Albert suggestion --- src/hotspot/share/memory/universe.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/hotspot/share/memory/universe.cpp b/src/hotspot/share/memory/universe.cpp index a4cf248c7ac93..a3afcc5ba64b0 100644 --- a/src/hotspot/share/memory/universe.cpp +++ b/src/hotspot/share/memory/universe.cpp @@ -1346,6 +1346,8 @@ static void log_cpu_time() { void Universe::before_exit() { { + // Acquire the Heap_lock to synchronize with VM_Heap_Sync_Operations, + // which may depend on the value of _is_shutting_down flag. MutexLocker hl(Heap_lock); log_cpu_time(); AtomicAccess::release_store(&_is_shutting_down, true);