8252973: ZGC: Implement Large Pages support on Windows #1184

Closed

src/hotspot/os/posix/gc/z/zVirtualMemory_posix.cpp

@@ -29,7 +29,11 @@
 #include <sys/mman.h>
 #include <sys/types.h>
 
-void ZVirtualMemoryManager::pd_initialize() {
+void ZVirtualMemoryManager::pd_initialize_before_reserve() {
   // Does nothing
 }
+
+void ZVirtualMemoryManager::pd_initialize_after_reserve() {
+  // Does nothing
+}

src/hotspot/os/windows/gc/z/zLargePages_windows.cpp

@@ -22,8 +22,19 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/shared/gcLogPrecious.hpp"
 #include "gc/z/zLargePages.hpp"
+#include "gc/z/zSyscall_windows.hpp"
+#include "runtime/globals.hpp"
 
 void ZLargePages::pd_initialize() {
+  if (UseLargePages) {
+    if (ZSyscall::is_large_pages_supported()) {
+      _state = Explicit;
+      return;
+    }
+    log_info_p(gc, init)("Shared large pages not supported on this OS version");
+  }
+
   _state = Disabled;
 }
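
The gate above is ZSyscall::is_large_pages_supported(), whose definition is not part of this excerpt. Since ZSyscall resolves the newer memory-management entry points at runtime, a plausible shape for such a check is the sketch below; the DLL and symbol lookup are illustrative assumptions, not the patch's actual code.

#include <windows.h>

// Hypothetical probe: shared AWE large pages need the CreateFileMapping2
// family, which older Windows versions lack, so test whether the symbol
// can be resolved at runtime.
static bool probe_large_pages_support() {
  const HMODULE kernelbase = GetModuleHandleW(L"KernelBase.dll");
  return kernelbase != NULL &&
         GetProcAddress(kernelbase, "CreateFileMapping2") != NULL;
}
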
src/hotspot/os/windows/gc/z/zMapper_windows.cpp

@@ -199,6 +199,62 @@ void ZMapper::close_paging_file_mapping(HANDLE file_handle) {
   }
 }
 
+HANDLE ZMapper::create_shared_awe_section() {
+  MEM_EXTENDED_PARAMETER parameter = { 0 };
+  parameter.Type = MemSectionExtendedParameterUserPhysicalFlags;
+  parameter.ULong64 = 0;
+
+  HANDLE section = ZSyscall::CreateFileMapping2(
+    INVALID_HANDLE_VALUE,                 // File
+    NULL,                                 // SecurityAttributes
+    SECTION_MAP_READ | SECTION_MAP_WRITE, // DesiredAccess
+    PAGE_READWRITE,                       // PageProtection
+    SEC_RESERVE | SEC_LARGE_PAGES,        // AllocationAttributes
+    0,                                    // MaximumSize
+    NULL,                                 // Name
+    &parameter,                           // ExtendedParameters
+    1                                     // ParameterCount
+    );
+
+  if (section == NULL) {
+    fatal("Could not create shared AWE section (%d)", GetLastError());
+  }
+
+  return section;
+}
+
+uintptr_t ZMapper::reserve_for_shared_awe(HANDLE awe_section, uintptr_t addr, size_t size) {
+  MEM_EXTENDED_PARAMETER parameter = { 0 };
+  parameter.Type = MemExtendedParameterUserPhysicalHandle;
+  parameter.Handle = awe_section;
+
+  void* const res = ZSyscall::VirtualAlloc2(
+    GetCurrentProcess(),        // Process
+    (void*)addr,                // BaseAddress
+    size,                       // Size
+    MEM_RESERVE | MEM_PHYSICAL, // AllocationType
+    PAGE_READWRITE,             // PageProtection
+    &parameter,                 // ExtendedParameters
+    1                           // ParameterCount
+    );
+
+  // Caller responsible for error handling
+  return (uintptr_t)res;
+}
+
+void ZMapper::unreserve_for_shared_awe(uintptr_t addr, size_t size) {
+  bool res = VirtualFree(
+    (void*)addr, // lpAddress
+    0,           // dwSize
+    MEM_RELEASE  // dwFreeType
+    );
+
+  if (!res) {
+    fatal("Failed to unreserve memory: " PTR_FORMAT " " SIZE_FORMAT "M (%d)",
+          addr, size / M, GetLastError());
+  }
+}
+
 void ZMapper::split_placeholder(uintptr_t addr, size_t size) {
   const bool res = VirtualFree(
     (void*)addr, // lpAddress
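
Taken together, the three new ZMapper functions above are the reservation half of the shared AWE scheme: create a large-page AWE section once, then carve address space out of it. The commit/map half lives in ZPhysicalMemoryBackingLargePages further down. A minimal standalone sketch of the whole flow (not part of the patch: it calls the APIs directly instead of through ZSyscall, assumes a recent Windows SDK, and needs the "Lock pages in memory" privilege; one AWE page here is one large page, tied to ZGranuleSize in the patch):

#include <windows.h>
#include <stdio.h>

int main() {
  const SIZE_T granule = 2 * 1024 * 1024; // one large page, ZGranuleSize in ZGC

  // 1) Create a shared AWE section (cf. ZMapper::create_shared_awe_section)
  MEM_EXTENDED_PARAMETER sp = { 0 };
  sp.Type = MemSectionExtendedParameterUserPhysicalFlags;
  sp.ULong64 = 0;
  HANDLE section = CreateFileMapping2(INVALID_HANDLE_VALUE, NULL,
                                      SECTION_MAP_READ | SECTION_MAP_WRITE,
                                      PAGE_READWRITE,
                                      SEC_RESERVE | SEC_LARGE_PAGES,
                                      0, NULL, &sp, 1);
  if (section == NULL) { printf("CreateFileMapping2: %lu\n", GetLastError()); return 1; }

  // 2) Reserve address space backed by the section (cf. reserve_for_shared_awe)
  MEM_EXTENDED_PARAMETER rp = { 0 };
  rp.Type = MemExtendedParameterUserPhysicalHandle;
  rp.Handle = section;
  void* addr = VirtualAlloc2(GetCurrentProcess(), NULL, granule,
                             MEM_RESERVE | MEM_PHYSICAL, PAGE_READWRITE, &rp, 1);
  if (addr == NULL) { printf("VirtualAlloc2: %lu\n", GetLastError()); return 1; }

  // 3) Commit one physical large page and map it into the reservation
  //    (cf. ZPhysicalMemoryBackingLargePages::commit and ::map)
  ULONG_PTR page = 0;
  ULONG_PTR npages = 1;
  if (!AllocateUserPhysicalPages(section, &npages, &page) ||
      !MapUserPhysicalPages(addr, npages, &page)) {
    printf("AWE commit/map: %lu\n", GetLastError());
    return 1;
  }

  ((char*)addr)[0] = 1; // touch the mapped large page

  // 4) Unmap and release (cf. ::unmap, ::uncommit and unreserve_for_shared_awe)
  MapUserPhysicalPages(addr, npages, NULL);
  FreeUserPhysicalPages(section, &npages, &page);
  VirtualFree(addr, 0, MEM_RELEASE);
  CloseHandle(section);
  return 0;
}
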
src/hotspot/os/windows/gc/z/zMapper_windows.hpp

@@ -59,6 +59,15 @@ class ZMapper : public AllStatic {
   // Close paging file mapping
   static void close_paging_file_mapping(HANDLE file_handle);
 
+  // Create a shared AWE section
+  static HANDLE create_shared_awe_section();
+
+  // Reserve memory attached to the shared AWE section
+  static uintptr_t reserve_for_shared_awe(HANDLE awe_section, uintptr_t addr, size_t size);
+
+  // Unreserve memory attached to a shared AWE section
+  static void unreserve_for_shared_awe(uintptr_t addr, size_t size);
+
   // Split a placeholder
   //
   // A view can only replace an entire placeholder, so placeholders need to be
src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp

@@ -24,98 +24,229 @@
 #include "precompiled.hpp"
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zGranuleMap.inline.hpp"
+#include "gc/z/zLargePages.inline.hpp"
 #include "gc/z/zMapper_windows.hpp"
 #include "gc/z/zPhysicalMemoryBacking_windows.hpp"
 #include "logging/log.hpp"
 #include "runtime/globals.hpp"
 #include "utilities/debug.hpp"
 
-ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity) :
-    _handles(max_capacity) {}
-
-bool ZPhysicalMemoryBacking::is_initialized() const {
-  return true;
-}
-
-void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
-  // Does nothing
-}
-
-HANDLE ZPhysicalMemoryBacking::get_handle(uintptr_t offset) const {
-  HANDLE const handle = _handles.get(offset);
-  assert(handle != 0, "Should be set");
-  return handle;
-}
-
-void ZPhysicalMemoryBacking::put_handle(uintptr_t offset, HANDLE handle) {
-  assert(handle != INVALID_HANDLE_VALUE, "Invalid handle");
-  assert(_handles.get(offset) == 0, "Should be cleared");
-  _handles.put(offset, handle);
-}
-
-void ZPhysicalMemoryBacking::clear_handle(uintptr_t offset) {
-  assert(_handles.get(offset) != 0, "Should be set");
-  _handles.put(offset, 0);
-}
-
-size_t ZPhysicalMemoryBacking::commit_from_paging_file(size_t offset, size_t size) {
-  for (size_t i = 0; i < size; i += ZGranuleSize) {
-    HANDLE const handle = ZMapper::create_and_commit_paging_file_mapping(ZGranuleSize);
-    if (handle == 0) {
-      return i;
-    }
-
-    put_handle(offset + i, handle);
-  }
-
-  return size;
-}
-
-size_t ZPhysicalMemoryBacking::uncommit_from_paging_file(size_t offset, size_t size) {
-  for (size_t i = 0; i < size; i += ZGranuleSize) {
-    HANDLE const handle = get_handle(offset + i);
-    clear_handle(offset + i);
-    ZMapper::close_paging_file_mapping(handle);
-  }
-
-  return size;
-}
+class ZPhysicalMemoryBackingImpl : public CHeapObj<mtGC> {
+public:
+  virtual size_t commit(size_t offset, size_t size) = 0;
+  virtual size_t uncommit(size_t offset, size_t size) = 0;
+  virtual void map(uintptr_t addr, size_t size, size_t offset) const = 0;
+  virtual void unmap(uintptr_t addr, size_t size) const = 0;
+};
+
+// Implements small pages (paged) support using placeholder reservation.
+//
+// The backing commits and uncommits physical memory that can be
+// multi-mapped into the virtual address space. To support fine-grained
+// committing and uncommitting, each ZGranuleSize'd chunk is mapped to
+// a separate paging file mapping.
+
+class ZPhysicalMemoryBackingSmallPages : public ZPhysicalMemoryBackingImpl {
+private:
+  ZGranuleMap<HANDLE> _handles;
+
+  HANDLE get_handle(uintptr_t offset) const {
+    HANDLE const handle = _handles.get(offset);
+    assert(handle != 0, "Should be set");
+    return handle;
+  }
+
+  void put_handle(uintptr_t offset, HANDLE handle) {
+    assert(handle != INVALID_HANDLE_VALUE, "Invalid handle");
+    assert(_handles.get(offset) == 0, "Should be cleared");
+    _handles.put(offset, handle);
+  }
+
+  void clear_handle(uintptr_t offset) {
+    assert(_handles.get(offset) != 0, "Should be set");
+    _handles.put(offset, 0);
+  }
+
+public:
+  ZPhysicalMemoryBackingSmallPages(size_t max_capacity) :
+      ZPhysicalMemoryBackingImpl(),
+      _handles(max_capacity) {}
+
+  size_t commit(size_t offset, size_t size) {
+    for (size_t i = 0; i < size; i += ZGranuleSize) {
+      HANDLE const handle = ZMapper::create_and_commit_paging_file_mapping(ZGranuleSize);
+      if (handle == 0) {
+        return i;
+      }
+
+      put_handle(offset + i, handle);
+    }
+
+    return size;
+  }
+
+  size_t uncommit(size_t offset, size_t size) {
+    for (size_t i = 0; i < size; i += ZGranuleSize) {
+      HANDLE const handle = get_handle(offset + i);
+      clear_handle(offset + i);
+      ZMapper::close_paging_file_mapping(handle);
+    }
+
+    return size;
+  }
+
+  void map(uintptr_t addr, size_t size, size_t offset) const {
+    assert(is_aligned(offset, ZGranuleSize), "Misaligned");
+    assert(is_aligned(addr, ZGranuleSize), "Misaligned");
+    assert(is_aligned(size, ZGranuleSize), "Misaligned");
+
+    for (size_t i = 0; i < size; i += ZGranuleSize) {
+      HANDLE const handle = get_handle(offset + i);
+      ZMapper::map_view_replace_placeholder(handle, 0 /* offset */, addr + i, ZGranuleSize);
+    }
+  }
+
+  void unmap(uintptr_t addr, size_t size) const {
+    assert(is_aligned(addr, ZGranuleSize), "Misaligned");
+    assert(is_aligned(size, ZGranuleSize), "Misaligned");
+
+    for (size_t i = 0; i < size; i += ZGranuleSize) {
+      ZMapper::unmap_view_preserve_placeholder(addr + i, ZGranuleSize);
+    }
+  }
+};
+
+// Implements Large Pages (locked) support using shared AWE physical memory.
+//
+// Shared AWE physical memory also works with small pages, but it has
+// a few drawbacks that make it a no-go to use at this point:
+//
+// 1) It seems to use 8 bytes of committed memory per *reserved* memory.
+// Given our scheme to use a large address space range this turns out to
+// use too much memory.
+//
+// 2) It requires memory locking privileges, even for small pages. This
+// has always been a requirement for large pages, and would be an extra
+// restriction for usage with small pages.
+//
+// Note: The large page size is tied to our ZGranuleSize.
+
+extern HANDLE ZAWESection;
+
+class ZPhysicalMemoryBackingLargePages : public ZPhysicalMemoryBackingImpl {
+private:
+  ULONG_PTR* const _page_array;
+
+  static ULONG_PTR* alloc_page_array(size_t max_capacity) {
+    const size_t npages = max_capacity / ZGranuleSize;
+    const size_t array_size = npages * sizeof(ULONG_PTR);
+
+    return (ULONG_PTR*)os::malloc(array_size, mtGC);
+  }
+
+public:
+  ZPhysicalMemoryBackingLargePages(size_t max_capacity) :
+      ZPhysicalMemoryBackingImpl(),
+      _page_array(alloc_page_array(max_capacity)) {}
+
+  size_t commit(size_t offset, size_t size) {
+    const size_t index = offset >> ZGranuleSizeShift;
+    const size_t npages = size >> ZGranuleSizeShift;
+
+    size_t npages_res = npages;
+    const bool res = AllocateUserPhysicalPages(ZAWESection, &npages_res, &_page_array[index]);
+    if (!res) {
+      fatal("Failed to allocate physical memory " SIZE_FORMAT "M @ " PTR_FORMAT " (%d)",
+            size / M, offset, GetLastError());
+    } else {
+      log_debug(gc)("Allocated physical memory: " SIZE_FORMAT "M @ " PTR_FORMAT, size / M, offset);
+    }
+
+    // AllocateUserPhysicalPages might not be able to allocate the requested amount of memory.
+    // The number of pages actually allocated is written to npages_res.
+    return npages_res << ZGranuleSizeShift;
+  }
+
+  size_t uncommit(size_t offset, size_t size) {
+    const size_t index = offset >> ZGranuleSizeShift;
+    const size_t npages = size >> ZGranuleSizeShift;
+
+    size_t npages_res = npages;
+    const bool res = FreeUserPhysicalPages(ZAWESection, &npages_res, &_page_array[index]);
+    if (!res) {
+      fatal("Failed to uncommit physical memory " SIZE_FORMAT "M @ " PTR_FORMAT " (%d)",
+            size / M, offset, GetLastError());
+    }
+
+    return npages_res << ZGranuleSizeShift;
+  }
+
+  void map(uintptr_t addr, size_t size, size_t offset) const {
+    const size_t npages = size >> ZGranuleSizeShift;
+    const size_t index = offset >> ZGranuleSizeShift;
+
+    const bool res = MapUserPhysicalPages((char*)addr, npages, &_page_array[index]);
+    if (!res) {
+      fatal("Failed to map view " PTR_FORMAT " " SIZE_FORMAT "M @ " PTR_FORMAT " (%d)",
+            addr, size / M, offset, GetLastError());
+    }
+  }
+
+  void unmap(uintptr_t addr, size_t size) const {
+    const size_t npages = size >> ZGranuleSizeShift;
+
+    const bool res = MapUserPhysicalPages((char*)addr, npages, NULL);
+    if (!res) {
+      fatal("Failed to unmap view " PTR_FORMAT " " SIZE_FORMAT "M (%d)",
+            addr, size / M, GetLastError());
+    }
+  }
+};
+
+static ZPhysicalMemoryBackingImpl* select_impl(size_t max_capacity) {
+  if (ZLargePages::is_enabled()) {
+    return new ZPhysicalMemoryBackingLargePages(max_capacity);
+  }
+
+  return new ZPhysicalMemoryBackingSmallPages(max_capacity);
+}
+
+ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity) :
+    _impl(select_impl(max_capacity)) {}
+
+bool ZPhysicalMemoryBacking::is_initialized() const {
+  return true;
+}
+
+void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
+  // Does nothing
+}
 
 size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) {
   log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
                       offset / M, (offset + length) / M, length / M);
 
-  return commit_from_paging_file(offset, length);
+  return _impl->commit(offset, length);
 }
 
 size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) {
   log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
                       offset / M, (offset + length) / M, length / M);
 
-  return uncommit_from_paging_file(offset, length);
+  return _impl->uncommit(offset, length);
 }
 
 void ZPhysicalMemoryBacking::map(uintptr_t addr, size_t size, size_t offset) const {
-  assert(is_aligned(offset, ZGranuleSize), "Misaligned");
-  assert(is_aligned(addr, ZGranuleSize), "Misaligned");
-  assert(is_aligned(size, ZGranuleSize), "Misaligned");
+  assert(is_aligned(offset, ZGranuleSize), "Misaligned: " PTR_FORMAT, offset);
+  assert(is_aligned(addr, ZGranuleSize), "Misaligned: " PTR_FORMAT, addr);
+  assert(is_aligned(size, ZGranuleSize), "Misaligned: " PTR_FORMAT, size);
 
-  for (size_t i = 0; i < size; i += ZGranuleSize) {
-    HANDLE const handle = get_handle(offset + i);
-    ZMapper::map_view_replace_placeholder(handle, 0 /* offset */, addr + i, ZGranuleSize);
-  }
+  _impl->map(addr, size, offset);
 }
 
 void ZPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const {
   assert(is_aligned(addr, ZGranuleSize), "Misaligned");
   assert(is_aligned(size, ZGranuleSize), "Misaligned");
 
-  for (size_t i = 0; i < size; i += ZGranuleSize) {
-    ZMapper::unmap_view_preserve_placeholder(addr + i, ZGranuleSize);
-  }
+  _impl->unmap(addr, size);
 }