Skip to content

Commit d19e6ea

Browse files
committed
8229189: Improve JFR leak profiler tracing to deal with discontiguous heaps
Reviewed-by: mgronlun, egahlin
1 parent 655cf14 commit d19e6ea

File tree

7 files changed

+195
-63
lines changed

7 files changed

+195
-63
lines changed

src/hotspot/share/jfr/leakprofiler/chains/bfsClosure.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@
2222
*
2323
*/
2424
#include "precompiled.hpp"
25-
#include "jfr/leakprofiler/chains/bitset.hpp"
25+
#include "jfr/leakprofiler/chains/bitset.inline.hpp"
2626
#include "jfr/leakprofiler/chains/bfsClosure.hpp"
2727
#include "jfr/leakprofiler/chains/dfsClosure.hpp"
2828
#include "jfr/leakprofiler/chains/edge.hpp"

src/hotspot/share/jfr/leakprofiler/chains/bitset.cpp

Lines changed: 15 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -22,37 +22,25 @@
2222
*
2323
*/
2424
#include "precompiled.hpp"
25-
#include "jfr/leakprofiler/chains/bitset.hpp"
26-
#include "jfr/recorder/storage/jfrVirtualMemory.hpp"
27-
#include "memory/memRegion.hpp"
25+
#include "jfr/leakprofiler/chains/bitset.inline.hpp"
2826

29-
BitSet::BitSet(const MemRegion& covered_region) :
30-
_vmm(NULL),
31-
_region_start(covered_region.start()),
32-
_region_size(covered_region.word_size()) {
27+
BitSet::BitMapFragment::BitMapFragment(uintptr_t granule, BitMapFragment* next) :
28+
_bits(_bitmap_granularity_size >> LogMinObjAlignmentInBytes, mtTracing, true /* clear */),
29+
_next(next) {
3330
}
3431

35-
BitSet::~BitSet() {
36-
delete _vmm;
32+
BitSet::BitSet() :
33+
_bitmap_fragments(32),
34+
_fragment_list(NULL),
35+
_last_fragment_bits(NULL),
36+
_last_fragment_granule(0) {
3737
}
3838

39-
bool BitSet::initialize() {
40-
assert(_vmm == NULL, "invariant");
41-
_vmm = new JfrVirtualMemory();
42-
if (_vmm == NULL) {
43-
return false;
44-
}
45-
46-
const BitMap::idx_t bits = _region_size >> LogMinObjAlignment;
47-
const size_t words = bits / BitsPerWord;
48-
const size_t raw_bytes = words * sizeof(BitMap::idx_t);
49-
50-
// the virtual memory invocation will reserve and commit the entire space
51-
BitMap::bm_word_t* map = (BitMap::bm_word_t*)_vmm->initialize(raw_bytes, raw_bytes);
52-
if (map == NULL) {
53-
return false;
39+
BitSet::~BitSet() {
40+
BitMapFragment* current = _fragment_list;
41+
while (current != NULL) {
42+
BitMapFragment* next = current->next();
43+
delete current;
44+
current = next;
5445
}
55-
_bits = BitMapView(map, bits);
56-
return true;
5746
}
58-

src/hotspot/share/jfr/leakprofiler/chains/bitset.hpp

Lines changed: 65 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -26,53 +26,91 @@
2626
#define SHARE_JFR_LEAKPROFILER_CHAINS_BITSET_HPP
2727

2828
#include "memory/allocation.hpp"
29+
#include "oops/oop.hpp"
2930
#include "oops/oopsHierarchy.hpp"
30-
#include "utilities/bitMap.inline.hpp"
31+
#include "utilities/bitMap.hpp"
32+
#include "utilities/hashtable.hpp"
3133

3234
class JfrVirtualMemory;
3335
class MemRegion;
3436

3537
class BitSet : public CHeapObj<mtTracing> {
36-
private:
37-
JfrVirtualMemory* _vmm;
38-
const HeapWord* const _region_start;
39-
BitMapView _bits;
40-
const size_t _region_size;
38+
const static size_t _bitmap_granularity_shift = 26; // 64M
39+
const static size_t _bitmap_granularity_size = (size_t)1 << _bitmap_granularity_shift;
40+
const static size_t _bitmap_granularity_mask = _bitmap_granularity_size - 1;
41+
42+
class BitMapFragment;
43+
44+
class BitMapFragmentTable : public BasicHashtable<mtTracing> {
45+
class Entry : public BasicHashtableEntry<mtTracing> {
46+
public:
47+
uintptr_t _key;
48+
CHeapBitMap* _value;
49+
50+
Entry* next() {
51+
return (Entry*)BasicHashtableEntry<mtTracing>::next();
52+
}
53+
};
54+
55+
protected:
56+
Entry* bucket(int i) const;
57+
58+
Entry* new_entry(unsigned int hashValue, uintptr_t key, CHeapBitMap* value);
59+
60+
unsigned hash_segment(uintptr_t key) {
61+
unsigned hash = (unsigned)key;
62+
return hash ^ (hash >> 3);
63+
}
64+
65+
unsigned hash_to_index(unsigned hash) {
66+
return hash & (BasicHashtable<mtTracing>::table_size() - 1);
67+
}
68+
69+
public:
70+
BitMapFragmentTable(int table_size) : BasicHashtable<mtTracing>(table_size, sizeof(Entry)) {}
71+
void add(uintptr_t key, CHeapBitMap* value);
72+
CHeapBitMap** lookup(uintptr_t key);
73+
};
74+
75+
CHeapBitMap* get_fragment_bits(uintptr_t addr);
76+
77+
BitMapFragmentTable _bitmap_fragments;
78+
BitMapFragment* _fragment_list;
79+
CHeapBitMap* _last_fragment_bits;
80+
uintptr_t _last_fragment_granule;
4181

4282
public:
43-
BitSet(const MemRegion& covered_region);
83+
BitSet();
4484
~BitSet();
4585

46-
bool initialize();
86+
BitMap::idx_t addr_to_bit(uintptr_t addr) const;
4787

48-
BitMap::idx_t mark_obj(const HeapWord* addr) {
49-
const BitMap::idx_t bit = addr_to_bit(addr);
50-
_bits.set_bit(bit);
51-
return bit;
52-
}
88+
void mark_obj(uintptr_t addr);
5389

54-
BitMap::idx_t mark_obj(oop obj) {
55-
return mark_obj((HeapWord*)obj);
90+
void mark_obj(oop obj) {
91+
return mark_obj(cast_from_oop<uintptr_t>(obj));
5692
}
5793

58-
bool is_marked(const HeapWord* addr) const {
59-
return is_marked(addr_to_bit(addr));
60-
}
94+
bool is_marked(uintptr_t addr);
6195

62-
bool is_marked(oop obj) const {
63-
return is_marked((HeapWord*)obj);
96+
bool is_marked(oop obj) {
97+
return is_marked(cast_from_oop<uintptr_t>(obj));
6498
}
99+
};
65100

66-
BitMap::idx_t size() const {
67-
return _bits.size();
68-
}
101+
class BitSet::BitMapFragment : public CHeapObj<mtTracing> {
102+
CHeapBitMap _bits;
103+
BitMapFragment* _next;
104+
105+
public:
106+
BitMapFragment(uintptr_t granule, BitMapFragment* next);
69107

70-
BitMap::idx_t addr_to_bit(const HeapWord* addr) const {
71-
return pointer_delta(addr, _region_start) >> LogMinObjAlignment;
108+
BitMapFragment* next() const {
109+
return _next;
72110
}
73111

74-
bool is_marked(const BitMap::idx_t bit) const {
75-
return _bits.at(bit);
112+
CHeapBitMap* bits() {
113+
return &_bits;
76114
}
77115
};
78116

Lines changed: 106 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,106 @@
1+
/*
2+
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
3+
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4+
*
5+
* This code is free software; you can redistribute it and/or modify it
6+
* under the terms of the GNU General Public License version 2 only, as
7+
* published by the Free Software Foundation.
8+
*
9+
* This code is distributed in the hope that it will be useful, but WITHOUT
10+
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11+
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12+
* version 2 for more details (a copy is included in the LICENSE file that
13+
* accompanied this code).
14+
*
15+
* You should have received a copy of the GNU General Public License version
16+
* 2 along with this work; if not, write to the Free Software Foundation,
17+
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18+
*
19+
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20+
* or visit www.oracle.com if you need additional information or have any
21+
* questions.
22+
*
23+
*/
24+
25+
#ifndef SHARE_JFR_LEAKPROFILER_CHAINS_BITSET_INLINE_HPP
26+
#define SHARE_JFR_LEAKPROFILER_CHAINS_BITSET_INLINE_HPP
27+
28+
#include "jfr/leakprofiler/chains/bitset.hpp"
29+
#include "jfr/recorder/storage/jfrVirtualMemory.hpp"
30+
#include "memory/memRegion.hpp"
31+
#include "utilities/bitMap.inline.hpp"
32+
#include "utilities/hashtable.inline.hpp"
33+
34+
inline BitSet::BitMapFragmentTable::Entry* BitSet::BitMapFragmentTable::bucket(int i) const {
35+
return (Entry*)BasicHashtable<mtTracing>::bucket(i);
36+
}
37+
38+
inline BitSet::BitMapFragmentTable::Entry* BitSet::BitMapFragmentTable::new_entry(unsigned int hash,
39+
uintptr_t key,
40+
CHeapBitMap* value) {
41+
Entry* entry = (Entry*)BasicHashtable<mtTracing>::new_entry(hash);
42+
entry->_key = key;
43+
entry->_value = value;
44+
return entry;
45+
}
46+
47+
inline void BitSet::BitMapFragmentTable::add(uintptr_t key, CHeapBitMap* value) {
48+
unsigned hash = hash_segment(key);
49+
Entry* entry = new_entry(hash, key, value);
50+
BasicHashtable<mtTracing>::add_entry(hash_to_index(hash), entry);
51+
}
52+
53+
inline CHeapBitMap** BitSet::BitMapFragmentTable::lookup(uintptr_t key) {
54+
unsigned hash = hash_segment(key);
55+
int index = hash_to_index(hash);
56+
for (Entry* e = bucket(index); e != NULL; e = e->next()) {
57+
if (e->hash() == hash && e->_key == key) {
58+
return &(e->_value);
59+
}
60+
}
61+
return NULL;
62+
}
63+
64+
inline BitMap::idx_t BitSet::addr_to_bit(uintptr_t addr) const {
65+
return (addr & _bitmap_granularity_mask) >> LogMinObjAlignmentInBytes;
66+
}
67+
68+
inline CHeapBitMap* BitSet::get_fragment_bits(uintptr_t addr) {
69+
uintptr_t granule = addr >> _bitmap_granularity_shift;
70+
if (granule == _last_fragment_granule) {
71+
return _last_fragment_bits;
72+
}
73+
CHeapBitMap* bits = NULL;
74+
75+
CHeapBitMap** found = _bitmap_fragments.lookup(granule);
76+
if (found != NULL) {
77+
bits = *found;
78+
} else {
79+
BitMapFragment* fragment = new BitMapFragment(granule, _fragment_list);
80+
bits = fragment->bits();
81+
_fragment_list = fragment;
82+
if (_bitmap_fragments.number_of_entries() * 100 / _bitmap_fragments.table_size() > 25) {
83+
_bitmap_fragments.resize(_bitmap_fragments.table_size() * 2);
84+
}
85+
_bitmap_fragments.add(granule, bits);
86+
}
87+
88+
_last_fragment_bits = bits;
89+
_last_fragment_granule = granule;
90+
91+
return bits;
92+
}
93+
94+
inline void BitSet::mark_obj(uintptr_t addr) {
95+
CHeapBitMap* bits = get_fragment_bits(addr);
96+
const BitMap::idx_t bit = addr_to_bit(addr);
97+
bits->set_bit(bit);
98+
}
99+
100+
inline bool BitSet::is_marked(uintptr_t addr) {
101+
CHeapBitMap* bits = get_fragment_bits(addr);
102+
const BitMap::idx_t bit = addr_to_bit(addr);
103+
return bits->at(bit);
104+
}
105+
106+
#endif // SHARE_JFR_LEAKPROFILER_CHAINS_BITSET_INLINE_HPP

src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@
2323
*/
2424

2525
#include "precompiled.hpp"
26-
#include "jfr/leakprofiler/chains/bitset.hpp"
26+
#include "jfr/leakprofiler/chains/bitset.inline.hpp"
2727
#include "jfr/leakprofiler/chains/dfsClosure.hpp"
2828
#include "jfr/leakprofiler/chains/edge.hpp"
2929
#include "jfr/leakprofiler/chains/edgeStore.hpp"

src/hotspot/share/jfr/leakprofiler/chains/pathToGcRootsOperation.cpp

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@
2626
#include "gc/shared/collectedHeap.hpp"
2727
#include "jfr/leakprofiler/leakProfiler.hpp"
2828
#include "jfr/leakprofiler/chains/bfsClosure.hpp"
29-
#include "jfr/leakprofiler/chains/bitset.hpp"
29+
#include "jfr/leakprofiler/chains/bitset.inline.hpp"
3030
#include "jfr/leakprofiler/chains/dfsClosure.hpp"
3131
#include "jfr/leakprofiler/chains/edge.hpp"
3232
#include "jfr/leakprofiler/chains/edgeQueue.hpp"
@@ -57,8 +57,8 @@ PathToGcRootsOperation::PathToGcRootsOperation(ObjectSampler* sampler, EdgeStore
5757
* Initial memory reservation: 5% of the heap OR at least 32 Mb
5858
* Commit ratio: 1 : 10 (subject to allocation granularities)
5959
*/
60-
static size_t edge_queue_memory_reservation(const MemRegion& heap_region) {
61-
const size_t memory_reservation_bytes = MAX2(heap_region.byte_size() / 20, 32*M);
60+
static size_t edge_queue_memory_reservation() {
61+
const size_t memory_reservation_bytes = MAX2(MaxHeapSize / 20, 32*M);
6262
assert(memory_reservation_bytes >= (size_t)32*M, "invariant");
6363
return memory_reservation_bytes;
6464
}
@@ -84,17 +84,16 @@ void PathToGcRootsOperation::doit() {
8484
assert(_cutoff_ticks > 0, "invariant");
8585

8686
// The bitset used for marking is dimensioned as a function of the heap size
87-
const MemRegion heap_region = Universe::heap()->reserved_region();
88-
BitSet mark_bits(heap_region);
87+
BitSet mark_bits;
8988

9089
// The edge queue is dimensioned as a fraction of the heap size
91-
const size_t edge_queue_reservation_size = edge_queue_memory_reservation(heap_region);
90+
const size_t edge_queue_reservation_size = edge_queue_memory_reservation();
9291
EdgeQueue edge_queue(edge_queue_reservation_size, edge_queue_memory_commit_size(edge_queue_reservation_size));
9392

9493
// The initialize() routines will attempt to reserve and allocate backing storage memory.
9594
// Failure to accommodate will render root chain processing impossible.
9695
// As a fallback on failure, just write out the existing samples, flat, without chains.
97-
if (!(mark_bits.initialize() && edge_queue.initialize())) {
96+
if (!edge_queue.initialize()) {
9897
log_warning(jfr)("Unable to allocate memory for root chain processing");
9998
return;
10099
}

src/hotspot/share/utilities/hashtable.cpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -306,6 +306,7 @@ template class BasicHashtable<mtCode>;
306306
template class BasicHashtable<mtInternal>;
307307
template class BasicHashtable<mtModule>;
308308
template class BasicHashtable<mtCompiler>;
309+
template class BasicHashtable<mtTracing>;
309310

310311
template void BasicHashtable<mtClass>::verify_table<DictionaryEntry>(char const*);
311312
template void BasicHashtable<mtModule>::verify_table<ModuleEntry>(char const*);

0 commit comments

Comments
 (0)