diff --git a/src/hotspot/share/gc/serial/defNewGeneration.hpp b/src/hotspot/share/gc/serial/defNewGeneration.hpp
index c5e1c2b152e9b..13c2851d0cf2c 100644
--- a/src/hotspot/share/gc/serial/defNewGeneration.hpp
+++ b/src/hotspot/share/gc/serial/defNewGeneration.hpp
@@ -179,6 +179,7 @@ class DefNewGeneration: public Generation {
   // Return true if the expansion was successful.
   bool expand(size_t bytes);
 
+  STWGCTimer* gc_timer() const { return _gc_timer; }
 
   // Iteration
   void object_iterate(ObjectClosure* blk);
diff --git a/src/hotspot/share/gc/serial/serialHeap.cpp b/src/hotspot/share/gc/serial/serialHeap.cpp
index 23f1df6fc66db..15e0fd5e7e0d6 100644
--- a/src/hotspot/share/gc/serial/serialHeap.cpp
+++ b/src/hotspot/share/gc/serial/serialHeap.cpp
@@ -329,9 +329,9 @@ HeapWord* SerialHeap::mem_allocate_work(size_t size,
     {
       MutexLocker ml(Heap_lock);
       log_trace(gc, alloc)("SerialHeap::mem_allocate_work: attempting locked slow path allocation");
-      // Note that only large objects get a shot at being
-      // allocated in later generations.
-      bool first_only = !should_try_older_generation_allocation(size);
+
+      bool first_only = !should_try_older_generation_allocation(size)
+                     && is_long_enough_from_prev_gc_pause_end();
 
       result = attempt_allocation(size, is_tlab, first_only);
       if (result != nullptr) {
@@ -634,6 +634,26 @@ void SerialHeap::scan_evacuated_objs(YoungGenScanClosure* young_cl,
   guarantee(young_gen()->promo_failure_scan_is_complete(), "Failed to finish scan");
 }
 
+bool SerialHeap::is_long_enough_from_prev_gc_pause_end() const {
+  Ticks young_gc_pause_end = _young_gen->gc_timer()->gc_end();
+  Ticks full_gc_pause_end = SerialFullGC::gc_timer()->gc_end();
+
+  Ticks prev_gc_pause_end;
+  Tickspan gc_pause;
+  if (full_gc_pause_end < young_gc_pause_end) {
+    // Previous is young-gc
+    prev_gc_pause_end = young_gc_pause_end;
+    gc_pause = young_gc_pause_end - _young_gen->gc_timer()->gc_start();
+  } else {
+    // Previous is full-gc
+    prev_gc_pause_end = full_gc_pause_end;
+    gc_pause = full_gc_pause_end - SerialFullGC::gc_timer()->gc_start();
+  }
+  Tickspan since_end_gc_pause = Ticks::now() - prev_gc_pause_end;
+
+  return since_end_gc_pause.seconds() * 100 >= gc_pause.seconds() * GCTimeRatio;
+}
+
 void SerialHeap::try_collect_at_safepoint(bool full) {
   assert(SafepointSynchronize::is_at_safepoint(), "precondition");
   if (GCLocker::check_active_before_gc()) {
diff --git a/src/hotspot/share/gc/serial/serialHeap.hpp b/src/hotspot/share/gc/serial/serialHeap.hpp
index d787d216e37ab..add154365b7e2 100644
--- a/src/hotspot/share/gc/serial/serialHeap.hpp
+++ b/src/hotspot/share/gc/serial/serialHeap.hpp
@@ -120,6 +120,8 @@ class SerialHeap : public CollectedHeap {
   bool is_in_reserved(const void* addr) const {
     return _reserved.contains(addr);
   }
 
+  bool is_long_enough_from_prev_gc_pause_end() const;
+
   // Performance Counter support
   GCPolicyCounters* counters() { return _gc_policy_counters; }
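Illustration only, not part of the patch: a minimal standalone sketch of the comparison performed by the new is_long_enough_from_prev_gc_pause_end(), using plain doubles in place of Ticks/Tickspan, a hypothetical helper name, and HotSpot's default -XX:GCTimeRatio=99. For an ordinary (non-large) allocation, first_only equals the helper's result, so with a 10 ms previous pause the older generation remains an allocation fallback for roughly 9.9 ms after the pause ends, after which allocation is restricted to the young generation again.

// Illustration only (not HotSpot code): mirrors the check
//   since_end_gc_pause.seconds() * 100 >= gc_pause.seconds() * GCTimeRatio
// from the patch, with doubles standing in for Ticks/Tickspan.
#include <cstdio>

// Hypothetical stand-in for SerialHeap::is_long_enough_from_prev_gc_pause_end().
static bool long_enough_since_prev_pause_end(double seconds_since_pause_end,
                                             double pause_seconds,
                                             unsigned gc_time_ratio) {
  return seconds_since_pause_end * 100 >= pause_seconds * gc_time_ratio;
}

int main() {
  const unsigned gc_time_ratio = 99;  // default value of -XX:GCTimeRatio
  const double pause = 0.010;         // previous GC pause took 10 ms
  const double samples[] = {0.005, 0.009, 0.020};  // time since that pause ended
  for (double since : samples) {
    std::printf("%.3f s since pause end -> %s\n", since,
                long_enough_since_prev_pause_end(since, pause, gc_time_ratio)
                    ? "long enough (young-gen only)"
                    : "too soon (old gen allowed)");
  }
  return 0;
}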