
8258431: Provide a JFR event with live set size estimate #2579

Closed · jbachorik wants to merge 23 commits

Commits
ddc5b5c
8258431: Provide a JFR event with live set size estimate
jbachorik Feb 12, 2021
3f92275
Update event definition and emission
jbachorik Mar 1, 2021
00d06ab
Rename ZStatHeap::live() to live_at_mark_end()
jbachorik Mar 1, 2021
0fff82a
Shenandoah related PR fixes
jbachorik Mar 1, 2021
3a68550
Epsilon related PR fixes
jbachorik Mar 1, 2021
1f6f87e
Minor G1 related PR fixes
jbachorik Mar 1, 2021
2afb47c
Common PR fixes
jbachorik Mar 1, 2021
b28e7a0
Change dead space calculation
jbachorik Mar 1, 2021
03a8617
Merge remote-tracking branch 'origin/master' into jb/live_set_1
jbachorik Mar 1, 2021
01c22ce
Do not track young, eden and old live size separately
jbachorik Mar 1, 2021
6a1aa73
Fix dangling space
jbachorik Mar 1, 2021
dd204d8
Fix syntax error
jbachorik Mar 1, 2021
08c715a
Attempt to fix G1 live set size computation
jbachorik Mar 1, 2021
aa180d1
Proper Shenandoah implementation of live size estimate
jbachorik Mar 2, 2021
6c6f8a8
Use '0' to indicate unavailable live estimate
jbachorik Mar 2, 2021
f695418
Add tests for the heap usage summary event
jbachorik Mar 2, 2021
5730f20
Minor cleanup
jbachorik Mar 8, 2021
f708023
Adjust the deadspace calculation
jbachorik Mar 8, 2021
343e480
Cache live size estimate for memory spaces
jbachorik Mar 8, 2021
67d7894
Remove unused field
jbachorik Mar 8, 2021
056f5fd
Change get_dead_space() to dead_space()
jbachorik Mar 15, 2021
81250d1
Capture live estimate for G1 full cycle
jbachorik Mar 15, 2021
f767f25
Update liveness for G1 mixed GC
jbachorik Mar 16, 2021
Files changed
1 change: 1 addition & 0 deletions src/hotspot/share/gc/epsilon/epsilonHeap.hpp
@@ -77,6 +77,7 @@ class EpsilonHeap : public CollectedHeap {
virtual size_t max_capacity() const { return _virtual_space.reserved_size(); }
virtual size_t capacity() const { return _virtual_space.committed_size(); }
virtual size_t used() const { return _space->used(); }
virtual size_t live() const { return _space->used(); }

virtual bool is_in(const void* p) const {
return _space->is_in(p);
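Epsilon never reclaims memory, so used() is the only live estimate it can offer: exact at startup, and an increasing overestimate as garbage accumulates. A hypothetical invariant sketch (not part of this change; EpsilonHeap::heap() is the existing static accessor):

// For Epsilon the two accessors always agree, because nothing is ever freed.
EpsilonHeap* heap = EpsilonHeap::heap();
assert(heap->live() == heap->used(), "no reclamation, so live() == used()");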
9 changes: 9 additions & 0 deletions src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -1403,6 +1403,7 @@ G1CollectedHeap::G1CollectedHeap() :
_archive_set("Archive Region Set", new ArchiveRegionSetChecker()),
_humongous_set("Humongous Region Set", new HumongousRegionSetChecker()),
_bot(NULL),
_live(0),
_listener(),
_numa(G1NUMA::create()),
_hrm(),
@@ -1841,6 +1842,10 @@ size_t G1CollectedHeap::used() const {
return result;
}

size_t G1CollectedHeap::live() const {
return Atomic::load(&_live);
}

size_t G1CollectedHeap::used_unlocked() const {
return _summary_bytes_used;
}
@@ -4565,6 +4570,10 @@ void G1CollectedHeap::set_used(size_t bytes) {
_summary_bytes_used = bytes;
}

void G1CollectedHeap::set_live(size_t bytes) {
Atomic::store(&_live, bytes);
}

class RebuildRegionSetsClosure : public HeapRegionClosure {
private:
bool _free_list_only;
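The load/store pair above is atomic because the writer and reader are different threads: _live is published at the end of the remark pause, while a periodic JFR thread may read it at any time, so a plain size_t would be a data race. A minimal portable-C++ sketch of the same pattern (illustrative only; HotSpot uses its own Atomic wrapper, not std::atomic):

#include <atomic>
#include <cstddef>

std::atomic<size_t> g_live_estimate{0};

// The writer (remark) publishes the new estimate; readers observe either the
// old or the new value, never a torn one. Relaxed ordering is enough because
// the value is a standalone statistic with no ordering ties to other data.
void set_live(size_t bytes) { g_live_estimate.store(bytes, std::memory_order_relaxed); }
size_t live()               { return g_live_estimate.load(std::memory_order_relaxed); }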
5 changes: 5 additions & 0 deletions src/hotspot/share/gc/g1/g1CollectedHeap.hpp
@@ -140,6 +140,7 @@ class G1CollectedHeap : public CollectedHeap {
friend class G1FullCollector;
friend class G1GCAllocRegion;
friend class G1HeapVerifier;
friend class G1ConcurrentMark;

// Closures used in implementation.
friend class G1ParScanThreadState;
@@ -178,6 +179,8 @@ class G1CollectedHeap : public CollectedHeap {
// The block offset table for the G1 heap.
G1BlockOffsetTable* _bot;

volatile size_t _live;
Contributor:
I'm not happy with naming this _live; better to use _live_estimate. The contents are not continuously updated and are basically out of date after the first allocation that follows.
This includes the naming in all other instances too.

Author (jbachorik):
I see your point, but that would probably lead to renaming the live() method to live_estimate() (to keep the variable and the accessor method in sync), and that would break the nice symmetry we now have with free(), used() and live().
I have no strong feelings about this; if we can get a quorum on this change I will do the renaming pass.
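For reference, a minimal sketch of what the suggested rename would look like (hypothetical; this revision of the PR keeps _live/live()):

// Field and accessor kept in sync per the review suggestion, at the cost of
// the free()/used()/live() symmetry mentioned above.
volatile size_t _live_estimate;
size_t live_estimate() const { return Atomic::load(&_live_estimate); }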


public:
void prepare_region_for_full_compaction(HeapRegion* hr);

@@ -211,6 +214,7 @@ class G1CollectedHeap : public CollectedHeap {
void decrease_used(size_t bytes);

void set_used(size_t bytes);
void set_live(size_t bytes);

// Number of bytes used in all regions during GC. Typically changed when
// retiring a GC alloc region.
@@ -1054,6 +1058,7 @@ class G1CollectedHeap : public CollectedHeap {

virtual size_t capacity() const;
virtual size_t used() const;
virtual size_t live() const;
// This should be called when we're not holding the heap lock. The
// result might be a bit inaccurate.
size_t used_unlocked() const;
11 changes: 9 additions & 2 deletions src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
@@ -972,6 +972,7 @@ class G1UpdateRemSetTrackingBeforeRebuildTask : public AbstractGangTask {
G1ConcurrentMark* _cm;
HeapRegionClaimer _hrclaimer;
uint volatile _total_selected_for_rebuild;
size_t volatile _live;

G1PrintRegionLivenessInfoClosure _cl;

@@ -982,6 +983,7 @@ class G1UpdateRemSetTrackingBeforeRebuildTask : public AbstractGangTask {
G1PrintRegionLivenessInfoClosure* _cl;

uint _num_regions_selected_for_rebuild; // The number of regions actually selected for rebuild.
size_t _live; // Cumulative live set size over iterated regions

void update_remset_before_rebuild(HeapRegion* hr) {
G1RemSetTrackingPolicy* tracking_policy = _g1h->policy()->remset_tracker();
@@ -1054,30 +1056,34 @@ class G1UpdateRemSetTrackingBeforeRebuildTask : public AbstractGangTask {

public:
G1UpdateRemSetTrackingBeforeRebuild(G1CollectedHeap* g1h, G1ConcurrentMark* cm, G1PrintRegionLivenessInfoClosure* cl) :
_g1h(g1h), _cm(cm), _cl(cl), _num_regions_selected_for_rebuild(0) { }
_g1h(g1h), _cm(cm), _cl(cl), _num_regions_selected_for_rebuild(0), _live(0) { }

virtual bool do_heap_region(HeapRegion* r) {
update_remset_before_rebuild(r);
update_marked_bytes(r);

_live += r->live_bytes();
return false;
}

uint num_selected_for_rebuild() const { return _num_regions_selected_for_rebuild; }
size_t live_estimate() const { return _live; }
};

public:
G1UpdateRemSetTrackingBeforeRebuildTask(G1CollectedHeap* g1h, G1ConcurrentMark* cm, uint num_workers) :
AbstractGangTask("G1 Update RemSet Tracking Before Rebuild"),
_g1h(g1h), _cm(cm), _hrclaimer(num_workers), _total_selected_for_rebuild(0), _cl("Post-Marking") { }
_g1h(g1h), _cm(cm), _hrclaimer(num_workers), _total_selected_for_rebuild(0), _live(0), _cl("Post-Marking") { }

virtual void work(uint worker_id) {
G1UpdateRemSetTrackingBeforeRebuild update_cl(_g1h, _cm, &_cl);
_g1h->heap_region_par_iterate_from_worker_offset(&update_cl, &_hrclaimer, worker_id);
Atomic::add(&_total_selected_for_rebuild, update_cl.num_selected_for_rebuild());
Atomic::add(&_live, update_cl.live_estimate());
}

uint total_selected_for_rebuild() const { return _total_selected_for_rebuild; }
size_t live_estimate() const { return _live; }

// Number of regions for which roughly one thread should be spawned for this work.
static const uint RegionsPerThread = 384;
@@ -1147,6 +1153,7 @@ void G1ConcurrentMark::remark() {
G1UpdateRemSetTrackingBeforeRebuildTask cl(_g1h, this, num_workers);
log_debug(gc,ergo)("Running %s using %u workers for %u regions in heap", cl.name(), num_workers, _g1h->num_regions());
_g1h->workers()->run_task(&cl, num_workers);
_g1h->set_live(cl.live_estimate());

log_debug(gc, remset, tracking)("Remembered Set Tracking update regions total %u, selected %u",
_g1h->num_regions(), cl.total_selected_for_rebuild());
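The liveness totals above follow a two-level reduction: each worker sums r->live_bytes() over the regions it claims into a private field, then merges its partial sum into the shared task total with a single Atomic::add, so there is one contended atomic per worker rather than one per region. A standalone sketch of the same pattern (illustrative only, with std::atomic and invented names standing in for HotSpot's Atomic and the task classes above):

#include <atomic>
#include <cstddef>
#include <vector>

std::atomic<size_t> total_live{0};

// Analogous to the per-worker closure: accumulate locally, publish once.
void worker_sum(const std::vector<size_t>& region_live_bytes,
                size_t begin, size_t end) {
  size_t local = 0;                                        // _live in the closure
  for (size_t i = begin; i < end; i++) {
    local += region_live_bytes[i];                         // r->live_bytes()
  }
  total_live.fetch_add(local, std::memory_order_relaxed);  // Atomic::add
}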
5 changes: 5 additions & 0 deletions src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
@@ -197,6 +197,10 @@ size_t ParallelScavengeHeap::used() const {
return value;
}

size_t ParallelScavengeHeap::live() const {
return _live;
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
return old_gen()->is_maximal_no_gc() && young_gen()->is_maximal_no_gc();
}
@@ -433,6 +437,7 @@ void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
// cause SoftRefs to be cleared.
bool maximum_compaction = clear_all_soft_refs;
PSParallelCompact::invoke(maximum_compaction);
capture_live();
}

// Failed allocation policy. Must be called from the VM thread, and
9 changes: 8 additions & 1 deletion src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
@@ -73,6 +73,8 @@ class ParallelScavengeHeap : public CollectedHeap {

WorkGang _workers;

size_t _live;

virtual void initialize_serviceability();

void trace_actual_reserved_page_size(const size_t reserved_heap_size, const ReservedSpace rs);
@@ -81,8 +83,12 @@
// Allocate in oldgen and record the allocation with the size_policy.
HeapWord* allocate_old_gen_and_record(size_t word_size);

// In order to provide an accurate estimate, this method must be called only
// when the heap has just been collected and compacted.
inline void capture_live();

protected:
static inline size_t total_invocations();
size_t live() const;
HeapWord* allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size);

inline bool should_alloc_in_eden(size_t size) const;
@@ -101,7 +107,8 @@
_workers("GC Thread",
ParallelGCThreads,
true /* are_GC_task_threads */,
false /* are_ConcurrentGC_threads */) { }
false /* are_ConcurrentGC_threads */),
_live(0) { }

// For use by VM operations
enum CollectionType {
5 changes: 5 additions & 0 deletions src/hotspot/share/gc/parallel/parallelScavengeHeap.inline.hpp
@@ -40,6 +40,11 @@ inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const

inline void ParallelScavengeHeap::invoke_scavenge() {
PSScavenge::invoke();
capture_live();
}

inline void ParallelScavengeHeap::capture_live() {
_live = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
}

inline bool ParallelScavengeHeap::is_in_young(oop p) {
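capture_live() snapshots used bytes at the moment they most closely track live bytes: exactly after a full compaction, approximately after a scavenge (old-generation garbage survives a young collection). The snapshot then decays as mutators allocate. A toy model of this capture-then-decay behavior (illustrative only, not HotSpot code):

#include <cstddef>

struct ToyHeap {
  size_t used = 0;
  size_t live_snapshot = 0;         // analogous to _live

  void allocate(size_t bytes) { used += bytes; }  // snapshot goes stale here

  void collect(size_t surviving_bytes) {
    used = surviving_bytes;         // after compaction only live data remains
    live_snapshot = used;           // analogous to capture_live()
  }
};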
1 change: 1 addition & 0 deletions src/hotspot/share/gc/parallel/psAdaptiveSizePolicy.hpp
@@ -57,6 +57,7 @@ class elapsedTimer;

class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
friend class PSGCAdaptivePolicyCounters;
friend class ParallelScavengeHeap;
private:
// These values are used to record decisions made during the
// policy. For example, if the young generation was decreased
5 changes: 5 additions & 0 deletions src/hotspot/share/gc/serial/defNewGeneration.cpp
@@ -433,6 +433,11 @@ size_t DefNewGeneration::free() const {
+ from()->free(); // to() is only used during scavenge
}

size_t DefNewGeneration::live() const {
return eden()->live()
+ from()->live(); // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
const size_t reserved_bytes = reserved().byte_size();
return reserved_bytes - compute_survivor_size(reserved_bytes, SpaceAlignment);
1 change: 1 addition & 0 deletions src/hotspot/share/gc/serial/defNewGeneration.hpp
@@ -214,6 +214,7 @@ class DefNewGeneration: public Generation {
size_t capacity() const;
size_t used() const;
size_t free() const;
size_t live() const;
size_t max_capacity() const;
size_t capacity_before_gc() const;
size_t unsafe_max_alloc_nogc() const;
2 changes: 2 additions & 0 deletions src/hotspot/share/gc/serial/serialHeap.hpp
@@ -41,6 +41,8 @@ class SerialHeap : public GenCollectedHeap {
MemoryPool* _survivor_pool;
MemoryPool* _old_pool;

size_t _live_size;

virtual void initialize_serviceability();

public:
1 change: 1 addition & 0 deletions src/hotspot/share/gc/shared/cardGeneration.hpp
@@ -85,6 +85,7 @@ class CardGeneration: public Generation {
size_t capacity() const;
size_t used() const;
size_t free() const;
size_t live() const;
MemRegion used_region() const;

void space_iterate(SpaceClosure* blk, bool usedOnly = false);
4 changes: 4 additions & 0 deletions src/hotspot/share/gc/shared/cardGeneration.inline.hpp
@@ -40,6 +40,10 @@ inline size_t CardGeneration::free() const {
return space()->free();
}

inline size_t CardGeneration::live() const {
return space()->live();
}

inline MemRegion CardGeneration::used_region() const {
return space()->used_region();
}
5 changes: 5 additions & 0 deletions src/hotspot/share/gc/shared/collectedHeap.hpp
@@ -215,6 +215,11 @@ class CollectedHeap : public CHeapObj<mtInternal> {

virtual size_t capacity() const = 0;
virtual size_t used() const = 0;
// Returns an estimate of the live set size. Because the live set changes over
// time, this is a best-effort estimate by each of the implementations. These
// are usually most precise right after a GC cycle. If no GC cycle has happened
// yet, the reported value will be 0.
virtual size_t live() const = 0;

// Returns unused capacity.
virtual size_t unused() const;
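Because 0 doubles as the "no estimate yet" sentinel (see the commit "Use '0' to indicate unavailable live estimate"), consumers must treat it specially. A hypothetical caller-side sketch (not part of this PR; reported_live is an invented helper, and the fallback to used() is one possible policy):

// Report the live estimate, falling back to plain used() until the first
// GC cycle has produced one.
size_t reported_live(CollectedHeap* heap) {
  size_t live = heap->live();
  return live != 0 ? live : heap->used(); // 0 == no completed GC cycle yet
}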
8 changes: 8 additions & 0 deletions src/hotspot/share/gc/shared/genCollectedHeap.cpp
@@ -100,6 +100,7 @@ GenCollectedHeap::GenCollectedHeap(Generation::Name young,
_gc_policy_counters(new GCPolicyCounters(policy_counters_name, 2, 2)),
_incremental_collection_failed(false),
_full_collections_completed(0),
_live(0),
_young_manager(NULL),
_old_manager(NULL) {
}
@@ -243,6 +244,10 @@ size_t GenCollectedHeap::used() const {
return _young_gen->used() + _old_gen->used();
}

size_t GenCollectedHeap::live() const {
return _live;
}

void GenCollectedHeap::save_used_regions() {
_old_gen->save_used_region();
_young_gen->save_used_region();
@@ -1254,6 +1259,9 @@ void GenCollectedHeap::gc_epilogue(bool full) {

MetaspaceCounters::update_performance_counters();
CompressedClassSpaceCounters::update_performance_counters();

// Update the live size after the last GC.
_live = _young_gen->live() + _old_gen->live();
};

#ifndef PRODUCT
3 changes: 3 additions & 0 deletions src/hotspot/share/gc/shared/genCollectedHeap.hpp
@@ -88,6 +88,8 @@ class GenCollectedHeap : public CollectedHeap {
// In support of ExplicitGCInvokesConcurrent functionality
unsigned int _full_collections_completed;

size_t _live;

// Collects the given generation.
void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
bool run_verification, bool clear_soft_refs,
@@ -175,6 +177,7 @@ class GenCollectedHeap : public CollectedHeap {

size_t capacity() const;
size_t used() const;
size_t live() const;

// Save the "used_region" for both generations.
void save_used_regions();
1 change: 1 addition & 0 deletions src/hotspot/share/gc/shared/generation.hpp
@@ -137,6 +137,7 @@ class Generation: public CHeapObj<mtGC> {
// generation can currently hold.
virtual size_t used() const = 0; // The number of used bytes in the gen.
virtual size_t free() const = 0; // The number of free bytes in the gen.
virtual size_t live() const = 0; // The estimate of live bytes in the gen.

// Support for java.lang.Runtime.maxMemory(); see CollectedHeap.
// Returns the total number of bytes available in a generation
5 changes: 5 additions & 0 deletions src/hotspot/share/gc/shared/space.hpp
@@ -156,6 +156,7 @@ class Space: public CHeapObj<mtGC> {
size_t capacity() const { return byte_size(bottom(), end()); }
virtual size_t used() const = 0;
virtual size_t free() const = 0;
virtual size_t live() const = 0;

// Iterate over all the ref-containing fields of all objects in the
// space, calling "cl.do_oop" on each. Fields in objects allocated by
@@ -444,6 +445,7 @@ class CompactibleSpace: public Space {
// Used during compaction.
HeapWord* _first_dead;
HeapWord* _end_of_live;
size_t _dead_space;

// This function is invoked when an allocation of an object covering
// "start" to "end" crosses the threshold; returns the next
@@ -548,6 +550,9 @@ class ContiguousSpace: public CompactibleSpace {
// Size computations: sizes in bytes.
size_t capacity() const { return byte_size(bottom(), end()); }
size_t used() const { return byte_size(bottom(), top()); }
size_t live() const {
return used() - _dead_space;
}
size_t free() const { return byte_size(top(), end()); }

virtual bool is_free_block(const HeapWord* p) const;
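live() here discounts the filler that the DeadSpacer deliberately leaves in place during compaction: those dead objects still sit between bottom() and top(), so used() counts them. A worked example with hypothetical numbers:

const size_t M = 1024 * 1024;
size_t used       = 100 * M;           // byte_size(bottom(), top())
size_t dead_space =   4 * M;           // dead objects kept as in-place filler
size_t live       = used - dead_space; // live() reports 96 MB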
9 changes: 7 additions & 2 deletions src/hotspot/share/gc/shared/space.inline.hpp
@@ -87,7 +87,9 @@ class DeadSpacer : StackObj {
CompactibleSpace* _space;

public:
DeadSpacer(CompactibleSpace* space) : _allowed_deadspace_words(0), _space(space) {
size_t _dead_space;

DeadSpacer(CompactibleSpace* space) : _allowed_deadspace_words(0), _space(space), _dead_space(0) {
size_t ratio = _space->allowed_dead_ratio();
_active = ratio > 0;

@@ -123,6 +125,7 @@ class DeadSpacer : StackObj {
log_develop_trace(gc, compaction)("Inserting object to dead space: " PTR_FORMAT ", " PTR_FORMAT ", " SIZE_FORMAT "b",
p2i(dead_start), p2i(dead_end), dead_length * HeapWordSize);

_dead_space += dead_length * HeapWordSize; // dead_length is in words; live() subtracts bytes from used()
return true;
} else {
_active = false;
@@ -183,7 +186,8 @@ inline void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* c
// we don't have to compact quite as often.
if (cur_obj == compact_top && dead_spacer.insert_deadspace(cur_obj, end)) {
oop obj = oop(cur_obj);
compact_top = cp->space->forward(obj, obj->size(), cp, compact_top);
size_t obj_size = obj->size();
compact_top = cp->space->forward(obj, obj_size, cp, compact_top);
end_of_live = end;
} else {
// otherwise, it really is a free region.
@@ -204,6 +208,7 @@

assert(cur_obj == scan_limit, "just checking");
space->_end_of_live = end_of_live;
space->_dead_space = dead_spacer._dead_space;
if (first_dead != NULL) {
space->_first_dead = first_dead;
} else {