Skip to content

Commit

Permalink
8330076: NMT: add/make a mandatory MEMFLAGS argument to family of os::reserve/commit/uncommit memory API
Browse files Browse the repository at this point in the history

Reviewed-by: stefank, jsjolen, stuefe
  • Loading branch information
Afshin Zafari committed May 2, 2024
1 parent 9108091 commit 4036d7d
Show file tree
Hide file tree
Showing 62 changed files with 398 additions and 420 deletions.
4 changes: 2 additions & 2 deletions src/hotspot/cpu/aarch64/compressedKlass_aarch64.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ static char* reserve_at_eor_compatible_address(size_t size, bool aslr) {
const uint64_t immediate = ((uint64_t)immediates[index]) << 32;
assert(immediate > 0 && Assembler::operand_valid_for_logical_immediate(/*is32*/false, immediate),
"Invalid immediate %d " UINT64_FORMAT, index, immediate);
result = os::attempt_reserve_memory_at((char*)immediate, size, false);
result = os::attempt_reserve_memory_at((char*)immediate, size, !ExecMem, mtClass);
if (result == nullptr) {
log_trace(metaspace, map)("Failed to attach at " UINT64_FORMAT_X, immediate);
}
Expand Down Expand Up @@ -112,7 +112,7 @@ char* CompressedKlassPointers::reserve_address_space_for_compressed_classes(size
if (result == nullptr) {
constexpr size_t alignment = nth_bit(32);
log_debug(metaspace, map)("Trying to reserve at a 32-bit-aligned address");
result = os::reserve_memory_aligned(size, alignment, false);
result = os::reserve_memory_aligned(size, alignment, !ExecMem, mtClass);
}

return result;
Expand Down
10 changes: 5 additions & 5 deletions src/hotspot/os/aix/os_aix.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1805,7 +1805,7 @@ bool os::remove_stack_guard_pages(char* addr, size_t size) {
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}

void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint, MEMFLAGS flag) {
}

size_t os::pd_pretouch_memory(void* first, void* last, size_t page_size) {
Expand Down Expand Up @@ -1847,7 +1847,7 @@ bool os::numa_get_group_ids_for_range(const void** addresses, int* lgrp_ids, siz
}

// Reserves and attaches a shared memory segment.
char* os::pd_reserve_memory(size_t bytes, bool exec) {
char* os::pd_reserve_memory(size_t bytes, bool exec, MEMFLAGS flag) {
// Always round to os::vm_page_size(), which may be larger than 4K.
bytes = align_up(bytes, os::vm_page_size());

Expand Down Expand Up @@ -1996,7 +1996,7 @@ void os::large_page_init() {
return; // Nothing to do. See query_multipage_support and friends.
}

char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size, char* req_addr, bool exec) {
char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size, char* req_addr, bool exec, MEMFLAGS flag) {
fatal("os::reserve_memory_special should not be called on AIX.");
return nullptr;
}
Expand All @@ -2015,7 +2015,7 @@ bool os::can_commit_large_page_memory() {
return false;
}

char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc) {
char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc, MEMFLAGS flag) {
assert(file_desc >= 0, "file_desc is not valid");
char* result = nullptr;

Expand All @@ -2033,7 +2033,7 @@ char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, i

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec) {
char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec, MEMFLAGS flag) {
char* addr = nullptr;

// Always round to os::vm_page_size(), which may be larger than 4K.
Expand Down
4 changes: 2 additions & 2 deletions src/hotspot/os/bsd/gc/x/xPhysicalMemoryBacking_bsd.cpp
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
Expand Down Expand Up @@ -78,7 +78,7 @@ XPhysicalMemoryBacking::XPhysicalMemoryBacking(size_t max_capacity) :
_initialized(false) {

// Reserve address space for backing memory
_base = (uintptr_t)os::reserve_memory(max_capacity);
_base = (uintptr_t)os::reserve_memory(max_capacity, !ExecMem, mtJavaHeap);
if (_base == 0) {
// Failed
log_error_pd(gc)("Failed to reserve address space for backing memory");
Expand Down
4 changes: 2 additions & 2 deletions src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
Expand Down Expand Up @@ -79,7 +79,7 @@ ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity)
_initialized(false) {

// Reserve address space for backing memory
_base = (uintptr_t)os::reserve_memory(max_capacity);
_base = (uintptr_t)os::reserve_memory(max_capacity, !ExecMem, mtJavaHeap);
if (_base == 0) {
// Failed
log_error_pd(gc)("Failed to reserve address space for backing memory");
Expand Down
16 changes: 8 additions & 8 deletions src/hotspot/os/bsd/os_bsd.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1668,7 +1668,7 @@ void os::pd_commit_memory_or_exit(char* addr, size_t size,
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}

void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint, MEMFLAGS flag) {
::madvise(addr, bytes, MADV_DONTNEED);
}

Expand Down Expand Up @@ -1766,13 +1766,13 @@ bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) {
}

bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
return os::commit_memory(addr, size, !ExecMem);
return os::commit_memory(addr, size, !ExecMem, mtThreadStack);
}

// If this is a growable mapping, remove the guard pages entirely by
// munmap()ping them. If not, just call uncommit_memory().
bool os::remove_stack_guard_pages(char* addr, size_t size) {
return os::uncommit_memory(addr, size);
return os::uncommit_memory(addr, size, !ExecMem, mtThreadStack);
}

// 'requested_addr' is only treated as a hint, the return value may or
Expand Down Expand Up @@ -1809,7 +1809,7 @@ static int anon_munmap(char * addr, size_t size) {
}
}

char* os::pd_reserve_memory(size_t bytes, bool exec) {
char* os::pd_reserve_memory(size_t bytes, bool exec, MEMFLAGS flag) {
return anon_mmap(nullptr /* addr */, bytes, exec);
}

Expand Down Expand Up @@ -1869,7 +1869,7 @@ void os::large_page_init() {
}


char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size, char* req_addr, bool exec) {
char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size, char* req_addr, bool exec, MEMFLAGS flag) {
fatal("os::reserve_memory_special should not be called on BSD.");
return nullptr;
}
Expand All @@ -1888,9 +1888,9 @@ bool os::can_commit_large_page_memory() {
return false;
}

char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc) {
char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc, MEMFLAGS flag) {
assert(file_desc >= 0, "file_desc is not valid");
char* result = pd_attempt_reserve_memory_at(requested_addr, bytes, !ExecMem);
char* result = pd_attempt_reserve_memory_at(requested_addr, bytes, !ExecMem, flag);
if (result != nullptr) {
if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == nullptr) {
vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
Expand All @@ -1902,7 +1902,7 @@ char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, i
// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).

char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec) {
char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec, MEMFLAGS flag) {
// Assert only that the size is a multiple of the page size, since
// that's all that mmap requires, and since that's all we really know
// about at this low abstraction level. If we need higher alignment,
Expand Down
26 changes: 12 additions & 14 deletions src/hotspot/os/linux/os_linux.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3023,14 +3023,14 @@ void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
}

void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint, MEMFLAGS flag) {
// This method works by doing an mmap over an existing mmapping and effectively discarding
// the existing pages. However it won't work for SHM-based large pages that cannot be
// uncommitted at all. We don't do anything in this case to avoid creating a segment with
// small pages on top of the SHM segment. This method always works for small pages, so we
// allow that in any case.
if (alignment_hint <= os::vm_page_size() || can_commit_large_page_memory()) {
commit_memory(addr, bytes, alignment_hint, !ExecMem);
commit_memory(addr, bytes, alignment_hint, !ExecMem, flag);
}
}

Expand Down Expand Up @@ -3637,7 +3637,7 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
}
}

return os::commit_memory(addr, size, !ExecMem);
return os::commit_memory(addr, size, !ExecMem, mtThreadStack);
}

// If this is a growable mapping, remove the guard pages entirely by
Expand All @@ -3653,7 +3653,7 @@ bool os::remove_stack_guard_pages(char* addr, size_t size) {
return ::munmap(addr, size) == 0;
}

return os::uncommit_memory(addr, size);
return os::uncommit_memory(addr, size, !ExecMem, mtThreadStack);
}

// 'requested_addr' is only treated as a hint, the return value may or
Expand Down Expand Up @@ -3757,7 +3757,7 @@ static int anon_munmap(char * addr, size_t size) {
return 1;
}

char* os::pd_reserve_memory(size_t bytes, bool exec) {
char* os::pd_reserve_memory(size_t bytes, bool exec, MEMFLAGS flag) {
return anon_mmap(nullptr, bytes);
}

Expand Down Expand Up @@ -4214,7 +4214,7 @@ static char* reserve_memory_special_huge_tlbfs(size_t bytes,
}

char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size,
char* req_addr, bool exec) {
char* req_addr, bool exec, MEMFLAGS flag) {
assert(UseLargePages, "only for large pages");

char* const addr = reserve_memory_special_huge_tlbfs(bytes, alignment, page_size, req_addr, exec);
Expand Down Expand Up @@ -4249,9 +4249,9 @@ bool os::can_commit_large_page_memory() {
return UseTransparentHugePages;
}

char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc) {
char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc, MEMFLAGS flag) {
assert(file_desc >= 0, "file_desc is not valid");
char* result = pd_attempt_reserve_memory_at(requested_addr, bytes, !ExecMem);
char* result = pd_attempt_reserve_memory_at(requested_addr, bytes, !ExecMem, flag);
if (result != nullptr) {
if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == nullptr) {
vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
Expand All @@ -4263,7 +4263,7 @@ char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, i
// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).

char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec) {
char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes, bool exec, MEMFLAGS flag) {
// Assert only that the size is a multiple of the page size, since
// that's all that mmap requires, and since that's all we really know
// about at this low abstraction level. If we need higher alignment,
Expand Down Expand Up @@ -4655,23 +4655,21 @@ static void workaround_expand_exec_shield_cs_limit() {
*/
char* hint = (char*)(os::Linux::initial_thread_stack_bottom() -
(StackOverflow::stack_guard_zone_size() + page_size));
char* codebuf = os::attempt_reserve_memory_at(hint, page_size);
char* codebuf = os::attempt_reserve_memory_at(hint, page_size, !ExecMem, mtInternal);

if (codebuf == nullptr) {
// JDK-8197429: There may be a stack gap of one megabyte between
// the limit of the stack and the nearest memory region: this is a
// Linux kernel workaround for CVE-2017-1000364. If we failed to
// map our codebuf, try again at an address one megabyte lower.
hint -= 1 * M;
codebuf = os::attempt_reserve_memory_at(hint, page_size);
codebuf = os::attempt_reserve_memory_at(hint, page_size, !ExecMem, mtInternal);
}

if ((codebuf == nullptr) || (!os::commit_memory(codebuf, page_size, true))) {
if ((codebuf == nullptr) || (!os::commit_memory(codebuf, page_size, ExecMem, mtInternal))) {
return; // No matter, we tried, best effort.
}

MemTracker::record_virtual_memory_type((address)codebuf, mtInternal);

log_info(os)("[CS limit NX emulation work-around, exec code at: %p]", codebuf);

// Some code to exec: the 'ret' instruction
Expand Down
6 changes: 3 additions & 3 deletions src/hotspot/os/posix/os_posix.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -395,9 +395,9 @@ static char* chop_extra_memory(size_t size, size_t alignment, char* extra_base,
// Multiple threads can race in this code, and can remap over each other with MAP_FIXED,
// so on posix, unmap the section at the start and at the end of the chunk that we mapped
// rather than unmapping and remapping the whole chunk to get requested alignment.
char* os::reserve_memory_aligned(size_t size, size_t alignment, bool exec) {
char* os::reserve_memory_aligned(size_t size, size_t alignment, bool exec, MEMFLAGS flag) {
size_t extra_size = calculate_aligned_extra_size(size, alignment);
char* extra_base = os::reserve_memory(extra_size, exec);
char* extra_base = os::reserve_memory(extra_size, exec, flag);
if (extra_base == nullptr) {
return nullptr;
}
Expand All @@ -421,7 +421,7 @@ char* os::map_memory_to_file_aligned(size_t size, size_t alignment, int file_des
if (replace_existing_mapping_with_file_mapping(aligned_base, size, file_desc) == nullptr) {
vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
}
MemTracker::record_virtual_memory_commit((address)aligned_base, size, CALLER_PC);
MemTracker::record_virtual_memory_commit((address)aligned_base, size, CALLER_PC, flag);
return aligned_base;
}

Expand Down
6 changes: 3 additions & 3 deletions src/hotspot/os/posix/perfMemory_posix.cpp
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2021 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
Expand Down Expand Up @@ -65,14 +65,14 @@ static char* backing_store_file_name = nullptr; // name of the backing store
static char* create_standard_memory(size_t size) {

// allocate an aligned chunk of memory
char* mapAddress = os::reserve_memory(size);
char* mapAddress = os::reserve_memory(size, !ExecMem, mtInternal);

if (mapAddress == nullptr) {
return nullptr;
}

// commit memory
if (!os::commit_memory(mapAddress, size, !ExecMem)) {
if (!os::commit_memory(mapAddress, size, !ExecMem, mtInternal)) {
if (PrintMiscellaneous && Verbose) {
warning("Could not commit PerfData memory\n");
}
Expand Down
Loading

1 comment on commit 4036d7d

@openjdk-notifier
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please sign in to comment.