Skip to content

Commit 50baaf4

Browse files
committed
8321013: Parallel: Refactor ObjectStartArray
Reviewed-by: tschatzl, sjohanss
1 parent afb8964 commit 50baaf4

11 files changed

+152
-252
lines changed

src/hotspot/share/gc/parallel/objectStartArray.cpp

+68-74
Original file line numberDiff line numberDiff line change
@@ -25,32 +25,21 @@
2525
#include "precompiled.hpp"
2626
#include "gc/parallel/objectStartArray.inline.hpp"
2727
#include "gc/shared/cardTableBarrierSet.hpp"
28-
#include "memory/allocation.inline.hpp"
2928
#include "nmt/memTracker.hpp"
3029
#include "oops/oop.inline.hpp"
3130
#include "runtime/java.hpp"
3231
#include "utilities/align.hpp"
3332

34-
uint ObjectStartArray::_card_shift = 0;
35-
uint ObjectStartArray::_card_size = 0;
36-
uint ObjectStartArray::_card_size_in_words = 0;
33+
static size_t num_bytes_required(MemRegion mr) {
34+
assert(CardTable::is_card_aligned(mr.start()), "precondition");
35+
assert(CardTable::is_card_aligned(mr.end()), "precondition");
3736

38-
void ObjectStartArray::initialize_block_size(uint card_shift) {
39-
_card_shift = card_shift;
40-
_card_size = 1 << _card_shift;
41-
_card_size_in_words = _card_size / sizeof(HeapWord);
37+
return mr.word_size() / BOTConstants::card_size_in_words();
4238
}
4339

4440
void ObjectStartArray::initialize(MemRegion reserved_region) {
45-
// We're based on the assumption that we use the same
46-
// size blocks as the card table.
47-
assert(_card_size == CardTable::card_size(), "Sanity");
48-
assert(_card_size <= MaxBlockSize, "block_size must be less than or equal to " UINT32_FORMAT, MaxBlockSize);
49-
5041
// Calculate how much space must be reserved
51-
_reserved_region = reserved_region;
52-
53-
size_t bytes_to_reserve = reserved_region.word_size() / _card_size_in_words;
42+
size_t bytes_to_reserve = num_bytes_required(reserved_region);
5443
assert(bytes_to_reserve > 0, "Sanity");
5544

5645
bytes_to_reserve =
@@ -62,91 +51,96 @@ void ObjectStartArray::initialize(MemRegion reserved_region) {
6251
if (!backing_store.is_reserved()) {
6352
vm_exit_during_initialization("Could not reserve space for ObjectStartArray");
6453
}
65-
MemTracker::record_virtual_memory_type((address)backing_store.base(), mtGC);
54+
MemTracker::record_virtual_memory_type(backing_store.base(), mtGC);
6655

6756
// We do not commit any memory initially
6857
_virtual_space.initialize(backing_store);
6958

70-
_raw_base = (jbyte*)_virtual_space.low_boundary();
71-
assert(_raw_base != nullptr, "set from the backing_store");
72-
73-
_offset_base = _raw_base - (size_t(reserved_region.start()) >> _card_shift);
74-
75-
_covered_region.set_start(reserved_region.start());
76-
_covered_region.set_word_size(0);
59+
assert(_virtual_space.low_boundary() != nullptr, "set from the backing_store");
7760

78-
_blocks_region.set_start((HeapWord*)_raw_base);
79-
_blocks_region.set_word_size(0);
61+
_offset_base = (uint8_t*)(_virtual_space.low_boundary() - (uintptr_t(reserved_region.start()) >> BOTConstants::log_card_size()));
8062
}
8163

8264
void ObjectStartArray::set_covered_region(MemRegion mr) {
83-
assert(_reserved_region.contains(mr), "MemRegion outside of reserved space");
84-
assert(_reserved_region.start() == mr.start(), "Attempt to move covered region");
85-
86-
HeapWord* low_bound = mr.start();
87-
HeapWord* high_bound = mr.end();
88-
assert((uintptr_t(low_bound) & (_card_size - 1)) == 0, "heap must start at block boundary");
89-
assert((uintptr_t(high_bound) & (_card_size - 1)) == 0, "heap must end at block boundary");
90-
91-
size_t requested_blocks_size_in_bytes = mr.word_size() / _card_size_in_words;
65+
DEBUG_ONLY(_covered_region = mr;)
9266

67+
size_t requested_size = num_bytes_required(mr);
9368
// Only commit memory in page sized chunks
94-
requested_blocks_size_in_bytes =
95-
align_up(requested_blocks_size_in_bytes, os::vm_page_size());
69+
requested_size = align_up(requested_size, os::vm_page_size());
9670

97-
_covered_region = mr;
71+
size_t current_size = _virtual_space.committed_size();
9872

99-
size_t current_blocks_size_in_bytes = _blocks_region.byte_size();
73+
if (requested_size == current_size) {
74+
return;
75+
}
10076

101-
if (requested_blocks_size_in_bytes > current_blocks_size_in_bytes) {
77+
if (requested_size > current_size) {
10278
// Expand
103-
size_t expand_by = requested_blocks_size_in_bytes - current_blocks_size_in_bytes;
79+
size_t expand_by = requested_size - current_size;
10480
if (!_virtual_space.expand_by(expand_by)) {
10581
vm_exit_out_of_memory(expand_by, OOM_MMAP_ERROR, "object start array expansion");
10682
}
107-
// Clear *only* the newly allocated region
108-
memset(_blocks_region.end(), clean_block, expand_by);
109-
}
110-
111-
if (requested_blocks_size_in_bytes < current_blocks_size_in_bytes) {
83+
} else {
11284
// Shrink
113-
size_t shrink_by = current_blocks_size_in_bytes - requested_blocks_size_in_bytes;
85+
size_t shrink_by = current_size - requested_size;
11486
_virtual_space.shrink_by(shrink_by);
11587
}
116-
117-
_blocks_region.set_word_size(requested_blocks_size_in_bytes / sizeof(HeapWord));
118-
119-
assert(requested_blocks_size_in_bytes % sizeof(HeapWord) == 0, "Block table not expanded in word sized increment");
120-
assert(requested_blocks_size_in_bytes == _blocks_region.byte_size(), "Sanity");
121-
assert(block_for_addr(low_bound) == &_raw_base[0], "Checking start of map");
122-
assert(block_for_addr(high_bound-1) <= &_raw_base[_blocks_region.byte_size()-1], "Checking end of map");
12388
}
12489

125-
void ObjectStartArray::reset() {
126-
memset(_blocks_region.start(), clean_block, _blocks_region.byte_size());
90+
static void fill_range(uint8_t* start, uint8_t* end, uint8_t v) {
91+
// +1 because [start, end] is inclusive of the end entry
92+
memset(start, v, pointer_delta(end, start, sizeof(uint8_t)) + 1);
12793
}
12894

129-
bool ObjectStartArray::object_starts_in_range(HeapWord* start_addr,
130-
HeapWord* end_addr) const {
131-
assert(start_addr <= end_addr,
132-
"Range is wrong. start_addr (" PTR_FORMAT ") is after end_addr (" PTR_FORMAT ")",
133-
p2i(start_addr), p2i(end_addr));
134-
135-
assert(is_aligned(start_addr, _card_size), "precondition");
136-
137-
if (start_addr == end_addr) {
138-
// No objects in empty range.
139-
return false;
95+
void ObjectStartArray::update_for_block_work(HeapWord* blk_start,
96+
HeapWord* blk_end) {
97+
HeapWord* const cur_card_boundary = align_up_by_card_size(blk_start);
98+
uint8_t* const offset_entry = entry_for_addr(cur_card_boundary);
99+
100+
// The first card holds the actual offset.
101+
*offset_entry = checked_cast<uint8_t>(pointer_delta(cur_card_boundary, blk_start));
102+
103+
// Check if this block spans over other cards.
104+
uint8_t* const end_entry = entry_for_addr(blk_end - 1);
105+
assert(offset_entry <= end_entry, "inv");
106+
107+
if (offset_entry != end_entry) {
108+
// Handle the remaining entries.
109+
uint8_t* start_entry_for_region = offset_entry + 1;
110+
for (uint i = 0; i < BOTConstants::N_powers; i++) {
111+
// -1 so that the reach ends in this region and not at the start
112+
// of the next.
113+
uint8_t* reach = offset_entry + BOTConstants::power_to_cards_back(i + 1) - 1;
114+
uint8_t value = checked_cast<uint8_t>(BOTConstants::card_size_in_words() + i);
115+
116+
fill_range(start_entry_for_region, MIN2(reach, end_entry), value);
117+
start_entry_for_region = reach + 1;
118+
119+
if (reach >= end_entry) {
120+
break;
121+
}
122+
}
123+
assert(start_entry_for_region > end_entry, "Sanity check");
140124
}
141125

142-
jbyte* start_block = block_for_addr(start_addr);
143-
jbyte* end_block = block_for_addr(end_addr - 1);
126+
debug_only(verify_for_block(blk_start, blk_end);)
127+
}
144128

145-
for (jbyte* block = start_block; block <= end_block; block++) {
146-
if (*block != clean_block) {
147-
return true;
129+
void ObjectStartArray::verify_for_block(HeapWord* blk_start, HeapWord* blk_end) const {
130+
assert(is_crossing_card_boundary(blk_start, blk_end), "precondition");
131+
132+
const uint8_t* const start_entry = entry_for_addr(align_up_by_card_size(blk_start));
133+
const uint8_t* const end_entry = entry_for_addr(blk_end - 1);
134+
// Check entries in [start_entry, end_entry]
135+
assert(*start_entry < BOTConstants::card_size_in_words(), "offset entry");
136+
137+
for (const uint8_t* i = start_entry + 1; i <= end_entry; ++i) {
138+
const uint8_t prev = *(i-1);
139+
const uint8_t value = *i;
140+
if (prev != value) {
141+
assert(value >= prev, "monotonic");
142+
size_t n_cards_back = BOTConstants::entry_to_cards_back(value);
143+
assert(start_entry == (i - n_cards_back), "inv");
148144
}
149145
}
150-
151-
return false;
152146
}

src/hotspot/share/gc/parallel/objectStartArray.hpp

+31-106
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@
2626
#define SHARE_GC_PARALLEL_OBJECTSTARTARRAY_HPP
2727

2828
#include "gc/parallel/psVirtualspace.hpp"
29+
#include "gc/shared/blockOffsetTable.hpp"
2930
#include "memory/allocation.hpp"
3031
#include "memory/memRegion.hpp"
3132
#include "oops/oop.hpp"
@@ -36,141 +37,65 @@
3637
//
3738

3839
class ObjectStartArray : public CHeapObj<mtGC> {
39-
friend class VerifyObjectStartArrayClosure;
40-
41-
private:
42-
PSVirtualSpace _virtual_space;
43-
MemRegion _reserved_region;
4440
// The committed (old-gen heap) virtual space this object-start-array covers.
45-
MemRegion _covered_region;
46-
MemRegion _blocks_region;
47-
jbyte* _raw_base;
48-
jbyte* _offset_base;
49-
50-
static uint _card_shift;
51-
static uint _card_size;
52-
static uint _card_size_in_words;
53-
54-
public:
55-
56-
enum BlockValueConstants {
57-
clean_block = -1
58-
};
59-
60-
// Maximum size an offset table entry can cover. This maximum is derived from that
61-
// we need an extra bit for possible offsets in the byte for backskip values, leaving 2^7 possible offsets.
62-
// Minimum object alignment is 8 bytes (2^3), so we can at most represent 2^10 offsets within a BOT value.
63-
static const uint MaxBlockSize = 1024;
64-
65-
// Initialize block size based on card size
66-
static void initialize_block_size(uint card_shift);
41+
DEBUG_ONLY(MemRegion _covered_region;)
6742

68-
static uint card_shift() {
69-
return _card_shift;
70-
}
71-
72-
static uint card_size() {
73-
return _card_size;
74-
}
75-
static uint card_size_in_words() {
76-
return _card_size_in_words;
77-
}
43+
// BOT array
44+
PSVirtualSpace _virtual_space;
7845

79-
protected:
46+
// Biased array-start of BOT array for fast heap-addr / BOT entry translation
47+
uint8_t* _offset_base;
8048

8149
// Mapping from address to object start array entry
82-
jbyte* block_for_addr(void* p) const {
50+
uint8_t* entry_for_addr(const void* const p) const {
8351
assert(_covered_region.contains(p),
8452
"out of bounds access to object start array");
85-
jbyte* result = &_offset_base[uintptr_t(p) >> _card_shift];
86-
assert(_blocks_region.contains(result),
87-
"out of bounds result in byte_for");
53+
uint8_t* result = &_offset_base[uintptr_t(p) >> BOTConstants::log_card_size()];
8854
return result;
8955
}
9056

9157
// Mapping from object start array entry to address of first word
92-
HeapWord* addr_for_block(jbyte* p) {
93-
assert(_blocks_region.contains(p),
94-
"out of bounds access to object start array");
95-
size_t delta = pointer_delta(p, _offset_base, sizeof(jbyte));
96-
HeapWord* result = (HeapWord*) (delta << _card_shift);
58+
HeapWord* addr_for_entry(const uint8_t* const p) const {
59+
size_t delta = pointer_delta(p, _offset_base, sizeof(uint8_t));
60+
HeapWord* result = (HeapWord*) (delta << BOTConstants::log_card_size());
9761
assert(_covered_region.contains(result),
9862
"out of bounds accessor from card marking array");
9963
return result;
10064
}
10165

102-
// Mapping that includes the derived offset.
103-
// If the block is clean, returns the last address in the covered region.
104-
// If the block is < index 0, returns the start of the covered region.
105-
HeapWord* offset_addr_for_block(jbyte* p) const {
106-
// We have to do this before the assert
107-
if (p < _raw_base) {
108-
return _covered_region.start();
109-
}
110-
111-
assert(_blocks_region.contains(p),
112-
"out of bounds access to object start array");
113-
114-
if (*p == clean_block) {
115-
return _covered_region.end();
116-
}
66+
static HeapWord* align_up_by_card_size(HeapWord* const addr) {
67+
return align_up(addr, BOTConstants::card_size());
68+
}
11769

118-
size_t delta = pointer_delta(p, _offset_base, sizeof(jbyte));
119-
HeapWord* result = (HeapWord*) (delta << _card_shift);
120-
result += *p;
70+
void update_for_block_work(HeapWord* blk_start, HeapWord* blk_end);
12171

122-
assert(_covered_region.contains(result),
123-
"out of bounds accessor from card marking array");
124-
125-
return result;
126-
}
72+
void verify_for_block(HeapWord* blk_start, HeapWord* blk_end) const;
12773

12874
public:
129-
130-
// This method is in lieu of a constructor, so that this class can be
131-
// embedded inline in other classes.
13275
void initialize(MemRegion reserved_region);
13376

77+
// Heap old-gen resizing
13478
void set_covered_region(MemRegion mr);
13579

136-
void reset();
137-
138-
MemRegion covered_region() { return _covered_region; }
139-
140-
#define assert_covered_region_contains(addr) \
141-
assert(_covered_region.contains(addr), \
142-
#addr " (" PTR_FORMAT ") is not in covered region [" PTR_FORMAT ", " PTR_FORMAT "]", \
143-
p2i(addr), p2i(_covered_region.start()), p2i(_covered_region.end()))
144-
145-
void allocate_block(HeapWord* p) {
146-
assert_covered_region_contains(p);
147-
jbyte* block = block_for_addr(p);
148-
HeapWord* block_base = addr_for_block(block);
149-
size_t offset = pointer_delta(p, block_base, sizeof(HeapWord*));
150-
assert(offset < 128, "Sanity");
151-
// When doing MT offsets, we can't assert this.
152-
//assert(offset > *block, "Found backwards allocation");
153-
*block = (jbyte)offset;
80+
static bool is_crossing_card_boundary(HeapWord* const blk_start,
81+
HeapWord* const blk_end) {
82+
HeapWord* cur_card_boundary = align_up_by_card_size(blk_start);
83+
// Strictly greater-than, since we check if this block *crosses* card boundary.
84+
return blk_end > cur_card_boundary;
15485
}
15586

156-
// Optimized for finding the first object that crosses into
157-
// a given block. The blocks contain the offset of the last
158-
// object in that block. Scroll backwards by one, and the first
159-
// object hit should be at the beginning of the block
160-
inline HeapWord* object_start(HeapWord* addr) const;
87+
// Returns the address of the start of the block reaching into the card containing
88+
// "addr".
89+
inline HeapWord* block_start_reaching_into_card(HeapWord* const addr) const;
16190

162-
bool is_block_allocated(HeapWord* addr) {
163-
assert_covered_region_contains(addr);
164-
jbyte* block = block_for_addr(addr);
165-
return *block != clean_block;
91+
// [blk_start, blk_end) representing a block of memory in the heap.
92+
void update_for_block(HeapWord* blk_start, HeapWord* blk_end) {
93+
if (is_crossing_card_boundary(blk_start, blk_end)) {
94+
update_for_block_work(blk_start, blk_end);
95+
}
16696
}
16797

168-
// Return true iff an object starts in
169-
// [start_addr, end_addr_aligned_up)
170-
// where
171-
// end_addr_aligned_up = align_up(end_addr, _card_size)
172-
// Precondition: start_addr is card-size aligned
173-
bool object_starts_in_range(HeapWord* start_addr, HeapWord* end_addr) const;
98+
inline HeapWord* object_start(HeapWord* const addr) const;
17499
};
175100

176101
#endif // SHARE_GC_PARALLEL_OBJECTSTARTARRAY_HPP

0 commit comments

Comments
 (0)