
8233439: G1 zero_filled optimization when committing CardCountsTable does not work

Reviewed-by: tschatzl, kbarrett
kstefanj committed May 4, 2020
1 parent c7b1b1b commit cbfcae7746398fad9a71a4a8efef6f0034320f88
--- a/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp
+++ b/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp
@@ -46,7 +46,7 @@ G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
   _listener(NULL),
   _storage(rs, used_size, page_size),
   _region_granularity(region_granularity),
-  _commit_map(rs.size() * commit_factor / region_granularity, mtGC),
+  _region_commit_map(rs.size() * commit_factor / region_granularity, mtGC),
   _memory_type(type) {
   guarantee(is_power_of_2(page_size), "must be");
   guarantee(is_power_of_2(region_granularity), "must be");
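
The sizing expression the rename touches is worth unpacking: rs.size() * commit_factor / region_granularity converts the table's reservation back into the amount of heap it describes, so the bitmap holds one bit per heap region rather than one per page of the backing table. A small worked example, with hypothetical sizes rather than values from the patch:

#include <cstddef>
#include <cstdio>

// Hypothetical sizes, not values from the patch: 1 MB heap regions and a
// table that spends 1 byte per 512 heap bytes (commit_factor = 512).
int main() {
  const size_t region_granularity = 1024 * 1024;     // bytes per heap region
  const size_t table_reservation  = 2 * 1024 * 1024; // rs.size() of the table
  const size_t commit_factor      = 512;             // heap bytes per table byte

  // rs.size() * commit_factor is the span of heap the table covers
  // (2 MB * 512 = 1 GB); dividing by the region size yields one bit per
  // heap region: 1024 bits here.
  const size_t bits = table_reservation * commit_factor / region_granularity;
  printf("region commit map: %zu bits\n", bits);
  return 0;
}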
@@ -88,32 +88,40 @@ class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
     if (AlwaysPreTouch) {
       _storage.pretouch(start_page, size_in_pages, pretouch_gang);
     }
-    _commit_map.set_range(start_idx, start_idx + num_regions);
+    _region_commit_map.set_range(start_idx, start_idx + num_regions);
     fire_on_commit(start_idx, num_regions, zero_filled);
   }
 
   virtual void uncommit_regions(uint start_idx, size_t num_regions) {
     _storage.uncommit((size_t)start_idx * _pages_per_region, num_regions * _pages_per_region);
-    _commit_map.clear_range(start_idx, start_idx + num_regions);
+    _region_commit_map.clear_range(start_idx, start_idx + num_regions);
   }
 };
 
 // G1RegionToSpaceMapper implementation where the region granularity is smaller
 // than the commit granularity.
 // Basically, the contents of one OS page span several regions.
 class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
  private:
-  class CommitRefcountArray : public G1BiasedMappedArray<uint> {
-   protected:
-    virtual uint default_value() const { return 0; }
-  };
-
   size_t _regions_per_page;
 
-  CommitRefcountArray _refcounts;
+  size_t region_idx_to_page_idx(uint region_idx) const {
+    return region_idx / _regions_per_page;
+  }
 
+  bool is_page_committed(size_t page_idx) {
+    size_t region = page_idx * _regions_per_page;
+    size_t region_limit = region + _regions_per_page;
+    // Committed if there is a bit set in the range.
+    return _region_commit_map.get_next_one_offset(region, region_limit) != region_limit;
+  }
 
-  uintptr_t region_idx_to_page_idx(uint region) const {
-    return region / _regions_per_page;
+  void numa_request_on_node(size_t page_idx) {
+    if (_memory_type == mtJavaHeap) {
+      uint region = (uint)(page_idx * _regions_per_page);
+      void* address = _storage.page_start(page_idx);
+      size_t size_in_bytes = _storage.page_size();
+      G1NUMA::numa()->request_memory_on_node(address, size_in_bytes, region);
+    }
   }
 
  public:
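
The helpers introduced above replace the old per-page refcount with a property derived from the region bitmap: a page counts as committed exactly when at least one region it backs has its bit set. The sketch below is a standalone rendering of that test (not JDK code); it assumes, as the patch does, that get_next_one_offset(beg, end) returns the index of the first set bit in [beg, end), or end if there is none.

#include <cstddef>
#include <vector>

// Stand-in for CHeapBitMap with the one operation is_page_committed needs.
struct RegionCommitMap {
  std::vector<bool> bits;

  // First set bit in [beg, end), or end when the range is all zero.
  size_t get_next_one_offset(size_t beg, size_t end) const {
    for (size_t i = beg; i < end; i++) {
      if (bits[i]) return i;
    }
    return end;
  }
};

// Mirrors G1RegionsSmallerThanCommitSizeMapper::is_page_committed: scan the
// page's slice of the region map and report whether any bit is set.
bool is_page_committed(const RegionCommitMap& map, size_t page_idx,
                       size_t regions_per_page) {
  size_t region = page_idx * regions_per_page;
  size_t region_limit = region + regions_per_page;
  return map.get_next_one_offset(region, region_limit) != region_limit;
}

With regions_per_page = 4, a map with only bit 5 set reports page 0 (regions 0-3) as uncommitted and page 1 (regions 4-7) as committed.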
@@ -124,63 +132,76 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
                                        size_t commit_factor,
                                        MemoryType type) :
     G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
-    _regions_per_page((page_size * commit_factor) / alloc_granularity), _refcounts() {
+    _regions_per_page((page_size * commit_factor) / alloc_granularity) {
 
     guarantee((page_size * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
-    _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + align_up(rs.size(), page_size)), page_size);
   }
 
   virtual void commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) {
+    uint region_limit = (uint)(start_idx + num_regions);
+    assert(num_regions > 0, "Must commit at least one region");
+    assert(_region_commit_map.get_next_one_offset(start_idx, region_limit) == region_limit,
+           "Should be no committed regions in the range [%u, %u)", start_idx, region_limit);
+
     size_t const NoPage = ~(size_t)0;
 
     size_t first_committed = NoPage;
     size_t num_committed = 0;
 
+    size_t start_page = region_idx_to_page_idx(start_idx);
+    size_t end_page = region_idx_to_page_idx(region_limit - 1);
+
     bool all_zero_filled = true;
-    G1NUMA* numa = G1NUMA::numa();
 
-    for (uint region_idx = start_idx; region_idx < start_idx + num_regions; region_idx++) {
-      assert(!_commit_map.at(region_idx), "Trying to commit storage at region %u that is already committed", region_idx);
-      size_t page_idx = region_idx_to_page_idx(region_idx);
-      uint old_refcount = _refcounts.get_by_index(page_idx);
-
-      bool zero_filled = false;
-      if (old_refcount == 0) {
-        if (first_committed == NoPage) {
-          first_committed = page_idx;
-          num_committed = 1;
-        } else {
-          num_committed++;
+    for (size_t page = start_page; page <= end_page; page++) {
+      if (!is_page_committed(page)) {
+        // Page not committed.
+        if (num_committed == 0) {
+          first_committed = page;
         }
-        zero_filled = _storage.commit(page_idx, 1);
-        if (_memory_type == mtJavaHeap) {
-          void* address = _storage.page_start(page_idx);
-          size_t size_in_bytes = _storage.page_size();
-          numa->request_memory_on_node(address, size_in_bytes, region_idx);
+        num_committed++;
+
+        if (!_storage.commit(page, 1)) {
+          // Found dirty region during commit.
+          all_zero_filled = false;
         }
-      }
-      all_zero_filled &= zero_filled;
-
-      _refcounts.set_by_index(page_idx, old_refcount + 1);
-      _commit_map.set_bit(region_idx);
+
+        // Move memory to correct NUMA node for the heap.
+        numa_request_on_node(page);
+      } else {
+        // Page already committed.
+        all_zero_filled = false;
+      }
     }
 
+    // Update the commit map for the given range.
+    _region_commit_map.set_range(start_idx, region_limit);
+
     if (AlwaysPreTouch && num_committed > 0) {
       _storage.pretouch(first_committed, num_committed, pretouch_gang);
     }
 
     fire_on_commit(start_idx, num_regions, all_zero_filled);
   }
 
   virtual void uncommit_regions(uint start_idx, size_t num_regions) {
-    for (uint i = start_idx; i < start_idx + num_regions; i++) {
-      assert(_commit_map.at(i), "Trying to uncommit storage at region %u that is not committed", i);
-      size_t idx = region_idx_to_page_idx(i);
-      uint old_refcount = _refcounts.get_by_index(idx);
-      assert(old_refcount > 0, "must be");
-      if (old_refcount == 1) {
-        _storage.uncommit(idx, 1);
+    uint region_limit = (uint)(start_idx + num_regions);
+    assert(num_regions > 0, "Must uncommit at least one region");
+    assert(_region_commit_map.get_next_zero_offset(start_idx, region_limit) == region_limit,
+           "Should only be committed regions in the range [%u, %u)", start_idx, region_limit);
+
+    size_t start_page = region_idx_to_page_idx(start_idx);
+    size_t end_page = region_idx_to_page_idx(region_limit - 1);
+
+    // Clear commit map for the given range.
+    _region_commit_map.clear_range(start_idx, region_limit);
+
+    for (size_t page = start_page; page <= end_page; page++) {
+      // We know all pages were committed before clearing the map. If the
+      // page is still marked as committed after the clear we should not
+      // uncommit it.
+      if (!is_page_committed(page)) {
+        _storage.uncommit(page, 1);
      }
-      _refcounts.set_by_index(idx, old_refcount - 1);
-      _commit_map.clear_bit(i);
    }
  }
};
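
Two details of the rewritten pair deserve a note. First, the probable failure the bug title describes: in the refcounted loop above, zero_filled starts false per region and is only set when that region's page is freshly committed, so as soon as several regions share one page, every region after the first sees a nonzero refcount and drags all_zero_filled to false, even though the page was just committed and zeroed; the new code tracks zero-filling per page instead. Second, uncommit_regions clears the commit map before touching storage: a page at either end of the range may still back regions outside it, and after the clear is_page_committed is true exactly for those shared pages, which must stay committed. A self-contained toy of that boundary case (not JDK code, hypothetical geometry):

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  const size_t regions_per_page = 4;               // one OS page spans 4 regions
  std::vector<bool> region_commit_map(8, false);   // 8 regions on 2 pages

  // Same test as is_page_committed: any region bit set within the page?
  auto page_committed = [&](size_t page) {
    for (size_t r = page * regions_per_page; r < (page + 1) * regions_per_page; r++) {
      if (region_commit_map[r]) return true;
    }
    return false;
  };

  // Commit regions [0, 6); they span pages 0 and 1.
  for (size_t r = 0; r < 6; r++) region_commit_map[r] = true;

  // Uncommit regions [2, 6): clear the map first, as the patch does...
  for (size_t r = 2; r < 6; r++) region_commit_map[r] = false;

  // ...then release only pages with no committed region left. Page 0 still
  // backs regions 0 and 1, so it stays; page 1 is free to go.
  for (size_t page = 0; page < 2; page++) {
    printf("page %zu: %s\n", page, page_committed(page) ? "keep committed" : "uncommit");
  }
  return 0;
}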
--- a/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.hpp
+++ b/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.hpp
@@ -51,7 +51,7 @@ class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
 
   size_t _region_granularity;
   // Mapping management
-  CHeapBitMap _commit_map;
+  CHeapBitMap _region_commit_map;
 
   MemoryType _memory_type;
@@ -68,10 +68,6 @@ class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
 
   virtual ~G1RegionToSpaceMapper() {}
 
-  bool is_committed(uintptr_t idx) const {
-    return _commit_map.at(idx);
-  }
-
   void commit_and_set_special();
   virtual void commit_regions(uint start_idx, size_t num_regions = 1, WorkGang* pretouch_workers = NULL) = 0;
   virtual void uncommit_regions(uint start_idx, size_t num_regions = 1) = 0;