8261527: Record page size used for underlying mapping in ReservedSpace
Reviewed-by: rkennke, iwalulya
kstefanj committed May 4, 2021
1 parent 8e071c4 commit 141cc2f2a35abdce48397071e2ce7ea862cf5755
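
Every hunk below applies one of the same two mechanical changes: the boolean "large pages" argument of the ReservedSpace constructors is replaced by an explicit page size, and the static ReservedSpace::actual_reserved_page_size(rs) query is replaced by the page size the reservation now records itself. A minimal before/after sketch of the call-site pattern (variable names here are placeholders, not taken from any single file in the diff):

// Before: request large pages with a flag, then recompute the page size
// actually used for the mapping through a static helper.
ReservedSpace rs(size, alignment, false /* large */, requested_addr);
size_t page_size = ReservedSpace::actual_reserved_page_size(rs);

// After: pass the desired page size up front; ReservedSpace records it,
// so callers can simply read it back.
ReservedSpace rs(size, alignment, os::vm_page_size(), requested_addr);
size_t page_size = rs.page_size();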
@@ -341,7 +341,7 @@ size_t ArchiveBuilder::estimate_archive_size() {

address ArchiveBuilder::reserve_buffer() {
size_t buffer_size = estimate_archive_size();
- ReservedSpace rs(buffer_size, MetaspaceShared::core_region_alignment(), false);
+ ReservedSpace rs(buffer_size, MetaspaceShared::core_region_alignment(), os::vm_page_size());
if (!rs.is_reserved()) {
log_error(cds)("Failed to reserve " SIZE_FORMAT " bytes of output buffer.", buffer_size);
vm_direct_exit(0);
@@ -1220,7 +1220,7 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
// Get the simple case out of the way first:
// no compressed class space, simple allocation.
archive_space_rs = ReservedSpace(archive_space_size, archive_space_alignment,
- false /* bool large */, (char*)base_address);
+ os::vm_page_size(), (char*)base_address);
if (archive_space_rs.is_reserved()) {
assert(base_address == NULL ||
(address)archive_space_rs.base() == base_address, "Sanity");
@@ -1269,9 +1269,9 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
// via sequential file IO.
address ccs_base = base_address + archive_space_size + gap_size;
archive_space_rs = ReservedSpace(archive_space_size, archive_space_alignment,
- false /* large */, (char*)base_address);
+ os::vm_page_size(), (char*)base_address);
class_space_rs = ReservedSpace(class_space_size, class_space_alignment,
- false /* large */, (char*)ccs_base);
+ os::vm_page_size(), (char*)ccs_base);
}
if (!archive_space_rs.is_reserved() || !class_space_rs.is_reserved()) {
release_reserved_spaces(total_space_rs, archive_space_rs, class_space_rs);
@@ -1280,7 +1280,7 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
} else {
if (use_archive_base_addr && base_address != nullptr) {
total_space_rs = ReservedSpace(total_range_size, archive_space_alignment,
- false /* bool large */, (char*) base_address);
+ os::vm_page_size(), (char*) base_address);
} else {
// Reserve at any address, but leave it up to the platform to choose a good one.
total_space_rs = Metaspace::reserve_address_space_for_compressed_classes(total_range_size);
@@ -336,7 +336,7 @@ ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
const size_t rs_ps = page_size();
const size_t rs_align = MAX2(rs_ps, (size_t) os::vm_allocation_granularity());
const size_t rs_size = align_up(size, rs_align);
- ReservedCodeSpace rs(rs_size, rs_align, rs_ps > (size_t) os::vm_page_size());
+ ReservedCodeSpace rs(rs_size, rs_align, rs_ps);
if (!rs.is_reserved()) {
vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
rs_size/K));
@@ -1497,7 +1497,7 @@ G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* des
size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);
// Allocate a new reserved space, preferring to use large pages.
ReservedSpace rs(size, preferred_page_size);
- size_t page_size = ReservedSpace::actual_reserved_page_size(rs);
+ size_t page_size = rs.page_size();
G1RegionToSpaceMapper* result =
G1RegionToSpaceMapper::create_mapper(rs,
size,
@@ -1589,7 +1589,7 @@ jint G1CollectedHeap::initialize() {
_hot_card_cache = new G1HotCardCache(this);

// Create space mappers.
- size_t page_size = ReservedSpace::actual_reserved_page_size(heap_rs);
+ size_t page_size = heap_rs.page_size();
G1RegionToSpaceMapper* heap_storage =
G1RegionToSpaceMapper::create_mapper(heap_rs,
heap_rs.size(),
@@ -49,8 +49,8 @@ ParMarkBitMap::initialize(MemRegion covered_region)

const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
MAX2(page_sz, granularity);
- ReservedSpace rs(_reserved_byte_size, rs_align, rs_align > 0);
- const size_t used_page_sz = ReservedSpace::actual_reserved_page_size(rs);
+ ReservedSpace rs(_reserved_byte_size, rs_align, page_sz);
+ const size_t used_page_sz = rs.page_size();
os::trace_page_sizes("Mark Bitmap", raw_bytes, raw_bytes, used_page_sz,
rs.base(), rs.size());
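
The mapper hunks (G1's aux memory mapper above, this mark bitmap, and the CodeHeap further down) all reduce to the same reserve-then-query pattern. A sketch assembled from the calls visible in these hunks, with error handling omitted; `size` and the "Aux region" label are placeholders:

// Let the OS layer suggest a page size for the region (possibly a large page).
size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);
ReservedSpace rs(size, preferred_page_size);
// The reservation records the page size it was actually created with, so it
// can be read back directly instead of being recomputed from the mapping.
size_t page_size = rs.page_size();
os::trace_page_sizes("Aux region", size, size, page_size, rs.base(), rs.size());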

@@ -748,7 +748,7 @@ void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
void ParallelScavengeHeap::trace_actual_reserved_page_size(const size_t reserved_heap_size, const ReservedSpace rs) {
// Check if Info level is enabled, since os::trace_page_sizes() logs on Info level.
if(log_is_enabled(Info, pagesize)) {
- const size_t page_size = ReservedSpace::actual_reserved_page_size(rs);
+ const size_t page_size = rs.page_size();
os::trace_page_sizes("Heap",
MinHeapSize,
reserved_heap_size,
@@ -446,7 +446,7 @@ ParallelCompactData::create_vspace(size_t count, size_t element_size)

const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
MAX2(page_sz, granularity);
- ReservedSpace rs(_reserved_byte_size, rs_align, rs_align > 0);
+ ReservedSpace rs(_reserved_byte_size, rs_align, page_sz);
os::trace_page_sizes("Parallel Compact Data", raw_bytes, raw_bytes, page_sz, rs.base(),
rs.size());

@@ -78,7 +78,7 @@ void CardTable::initialize() {

const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
MAX2(_page_size, (size_t) os::vm_allocation_granularity());
- ReservedSpace heap_rs(_byte_map_size, rs_align, false);
+ ReservedSpace heap_rs(_byte_map_size, rs_align, _page_size);

MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

@@ -172,7 +172,7 @@ ReservedHeapSpace GenCollectedHeap::allocate(size_t alignment) {
SIZE_FORMAT, total_reserved, alignment);

ReservedHeapSpace heap_rs = Universe::reserve_heap(total_reserved, alignment);
- size_t used_page_size = ReservedSpace::actual_reserved_page_size(heap_rs);
+ size_t used_page_size = heap_rs.page_size();

os::trace_page_sizes("Heap",
MinHeapSize,
@@ -298,7 +298,7 @@ jint ShenandoahHeap::initialize() {
for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
char* req_addr = (char*)addr;
assert(is_aligned(req_addr, cset_align), "Should be aligned");
- ReservedSpace cset_rs(cset_size, cset_align, false, req_addr);
+ ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size(), req_addr);
if (cset_rs.is_reserved()) {
assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
_collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
@@ -307,7 +307,7 @@ jint ShenandoahHeap::initialize() {
}

if (_collection_set == NULL) {
- ReservedSpace cset_rs(cset_size, cset_align, false);
+ ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size());
_collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
}
}
@@ -104,7 +104,7 @@ bool JfrVirtualMemorySegment::initialize(size_t reservation_size_request_bytes)
assert(is_aligned(reservation_size_request_bytes, os::vm_allocation_granularity()), "invariant");
_rs = ReservedSpace(reservation_size_request_bytes,
os::vm_allocation_granularity(),
- UseLargePages && os::can_commit_large_page_memory());
+ os::vm_page_size());
if (!_rs.is_reserved()) {
return false;
}
@@ -207,7 +207,7 @@ bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_s
_log2_segment_size = exact_log2(segment_size);

// Reserve and initialize space for _memory.
- const size_t page_size = ReservedSpace::actual_reserved_page_size(rs);
+ const size_t page_size = rs.page_size();
const size_t granularity = os::vm_allocation_granularity();
const size_t c_size = align_up(committed_size, page_size);
assert(c_size <= rs.size(), "alignment made committed size to large");
@@ -563,7 +563,7 @@ ReservedSpace Metaspace::reserve_address_space_for_compressed_classes(size_t siz
assert(CompressedKlassPointers::is_valid_base(a), "Sanity");
while (a < search_ranges[i].to) {
ReservedSpace rs(size, Metaspace::reserve_alignment(),
- false /*large_pages*/, (char*)a);
+ os::vm_page_size(), (char*)a);
if (rs.is_reserved()) {
assert(a == (address)rs.base(), "Sanity");
return rs;
@@ -579,7 +579,7 @@ ReservedSpace Metaspace::reserve_address_space_for_compressed_classes(size_t siz
return ReservedSpace();
#else
// Default implementation: Just reserve anywhere.
- return ReservedSpace(size, Metaspace::reserve_alignment(), false, (char*)NULL);
+ return ReservedSpace(size, Metaspace::reserve_alignment(), os::vm_page_size(), (char*)NULL);
#endif // AARCH64
}

@@ -717,7 +717,7 @@ void Metaspace::global_initialize() {
if (base != NULL) {
if (CompressedKlassPointers::is_valid_base(base)) {
rs = ReservedSpace(size, Metaspace::reserve_alignment(),
- false /* large */, (char*)base);
+ os::vm_page_size(), (char*)base);
}
}

@@ -71,7 +71,7 @@ MetaspaceTestContext::MetaspaceTestContext(const char* name, size_t commit_limit
reserve_limit, Metaspace::reserve_alignment_words());
if (reserve_limit > 0) {
// have reserve limit -> non-expandable context
- _rs = ReservedSpace(reserve_limit * BytesPerWord, Metaspace::reserve_alignment(), false);
+ _rs = ReservedSpace(reserve_limit * BytesPerWord, Metaspace::reserve_alignment(), os::vm_page_size());
_context = MetaspaceContext::create_nonexpandable_context(name, _rs, &_commit_limiter);
} else {
// no reserve limit -> expandable vslist
@@ -244,8 +244,7 @@ VirtualSpaceNode* VirtualSpaceNode::create_node(size_t word_size,
DEBUG_ONLY(assert_is_aligned(word_size, chunklevel::MAX_CHUNK_WORD_SIZE);)
ReservedSpace rs(word_size * BytesPerWord,
Settings::virtual_space_node_reserve_alignment_words() * BytesPerWord,
- false // large
- );
+ os::vm_page_size());
if (!rs.is_reserved()) {
vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to reserve memory for metaspace");
}
@@ -826,13 +826,17 @@ ReservedHeapSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
"heap size is too big for compressed oops");

- bool use_large_pages = UseLargePages && is_aligned(alignment, os::large_page_size());
- assert(!UseLargePages
-        || UseParallelGC
-        || use_large_pages, "Wrong alignment to use large pages");
+ size_t page_size = os::vm_page_size();
+ if (UseLargePages && is_aligned(alignment, os::large_page_size())) {
+   page_size = os::large_page_size();
+ } else {
+   // Parallel is the only collector that might opt out of using large pages
+   // for the heap.
+   assert(!UseLargePages || UseParallelGC , "Wrong alignment to use large pages");
+ }

// Now create the space.
- ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages, AllocateHeapAt);
+ ReservedHeapSpace total_rs(total_reserved, alignment, page_size, AllocateHeapAt);

if (total_rs.is_reserved()) {
assert((total_reserved == total_rs.size()) && ((uintptr_t)total_rs.base() % alignment == 0),
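
For the heap itself the flag is gone as well: the page size is chosen once, up front, and handed to ReservedHeapSpace. Condensing the selection added above (2M/4M are used purely as illustrative sizes; the real large-page size is platform dependent):

// Use large pages only when the chosen heap alignment is itself a multiple
// of the large page size, e.g. a 4M alignment with 2M large pages qualifies.
size_t page_size = os::vm_page_size();
if (UseLargePages && is_aligned(alignment, os::large_page_size())) {
  page_size = os::large_page_size();
}
ReservedHeapSpace total_rs(total_reserved, alignment, page_size, AllocateHeapAt);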
@@ -858,7 +862,7 @@ ReservedHeapSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {

// satisfy compiler
ShouldNotReachHere();
- return ReservedHeapSpace(0, 0, false);
+ return ReservedHeapSpace(0, 0, os::vm_page_size());
}

OopStorage* Universe::vm_weak() {
