Automatic merge of jdk:master into master

duke committed May 20, 2020
2 parents 7cb2026 + 3d659ef commit 69753d9d1789e0356518c8686ba914bc16c9699d
Showing with 214 additions and 61 deletions.
  1. +214 −61 src/hotspot/os/windows/os_windows.cpp
@@ -2716,8 +2716,121 @@ int os::vm_allocation_granularity() {
#define MEM_LARGE_PAGES 0x20000000
#endif

- static HANDLE _hProcess;
- static HANDLE _hToken;
#define VirtualFreeChecked(mem, size, type) \
do { \
bool ret = VirtualFree(mem, size, type); \
assert(ret, "Failed to free memory: " PTR_FORMAT, p2i(mem)); \
} while (false)

// The number of bytes is set up to match 1 pixel and 32 bits per pixel.
static const int gdi_tiny_bitmap_width_bytes = 4;

static HBITMAP gdi_create_tiny_bitmap(void* mem) {
// The documentation for CreateBitmap states a word-alignment requirement.
STATIC_ASSERT(is_aligned_(gdi_tiny_bitmap_width_bytes, sizeof(WORD)));

// Some callers use this function to test if memory crossing separate memory
// reservations can be used. Create a height of 2 to make sure that one pixel
// ends up in the first reservation and the other in the second.
int nHeight = 2;

assert(is_aligned(mem, gdi_tiny_bitmap_width_bytes), "Incorrect alignment");

// Width is one pixel and correlates with gdi_tiny_bitmap_width_bytes.
int nWidth = 1;

// Calculate bit count - will be 32.
UINT nBitCount = gdi_tiny_bitmap_width_bytes / nWidth * BitsPerByte;

return CreateBitmap(
nWidth,
nHeight,
1, // nPlanes
nBitCount,
mem); // lpBits
}

// It has been found that some of the GDI functions fail under these two situations:
// 1) When used with large pages
// 2) When mem crosses the boundary between two separate memory reservations.
//
// This is a small test used to see if the current GDI implementation is
// susceptible to any of these problems.
static bool gdi_can_use_memory(void* mem) {
HBITMAP bitmap = gdi_create_tiny_bitmap(mem);
if (bitmap != NULL) {
DeleteObject(bitmap);
return true;
}

// Verify that the bitmap could be created with a normal page.
// If this fails, the testing method above isn't reliable.
#ifdef ASSERT
void* verify_mem = ::malloc(4 * 1024);
HBITMAP verify_bitmap = gdi_create_tiny_bitmap(verify_mem);
if (verify_bitmap == NULL) {
fatal("Couldn't create test bitmap with malloced memory");
} else {
DeleteObject(verify_bitmap);
}
::free(verify_mem);
#endif

return false;
}

// Test if GDI functions work when memory spans
// two adjacent memory reservations.
static bool gdi_can_use_split_reservation_memory(bool use_large_pages, size_t granule) {
DWORD mem_large_pages = use_large_pages ? MEM_LARGE_PAGES : 0;

// Find virtual memory range. Two granules for regions and one for alignment.
void* reserved = VirtualAlloc(NULL,
granule * 3,
MEM_RESERVE,
PAGE_NOACCESS);
if (reserved == NULL) {
// Can't proceed with test - pessimistically report false
return false;
}
VirtualFreeChecked(reserved, 0, MEM_RELEASE);

// Ensure proper alignment
void* res0 = align_up(reserved, granule);
void* res1 = (char*)res0 + granule;

// Reserve and commit the first part
void* mem0 = VirtualAlloc(res0,
granule,
MEM_RESERVE|MEM_COMMIT|mem_large_pages,
PAGE_READWRITE);
if (mem0 != res0) {
// Can't proceed with test - pessimistically report false
return false;
}

// Reserve and commit the second part
void* mem1 = VirtualAlloc(res1,
granule,
MEM_RESERVE|MEM_COMMIT|mem_large_pages,
PAGE_READWRITE);
if (mem1 != res1) {
VirtualFreeChecked(mem0, 0, MEM_RELEASE);
// Can't proceed with test - pessimistically report false
return false;
}

// Set the bitmap's bits to point one "width" bytes before, so that
// the bitmap extends across the reservation boundary.
void* bitmapBits = (char*)mem1 - gdi_tiny_bitmap_width_bytes;

bool success = gdi_can_use_memory(bitmapBits);

VirtualFreeChecked(mem1, 0, MEM_RELEASE);
VirtualFreeChecked(mem0, 0, MEM_RELEASE);

return success;
}
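
// --- Illustration (not part of this change): the probe above can be exercised
// outside HotSpot with a small standalone program. A minimal sketch, assuming
// only <windows.h> and gdi32.lib; the name probe_gdi_split_reservation is
// hypothetical.
#include <windows.h>
#include <stdio.h>

static bool probe_gdi_split_reservation() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  const SIZE_T granule = si.dwAllocationGranularity;  // typically 64 KiB

  // Find a free range, release it, then re-reserve it as two adjacent,
  // independent reservations (best effort; another thread could race us).
  void* probe = VirtualAlloc(NULL, granule * 3, MEM_RESERVE, PAGE_NOACCESS);
  if (probe == NULL) return false;
  VirtualFree(probe, 0, MEM_RELEASE);

  char* base = (char*)(((ULONG_PTR)probe + granule - 1) & ~(ULONG_PTR)(granule - 1));
  void* lo = VirtualAlloc(base,           granule, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  void* hi = VirtualAlloc(base + granule, granule, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (lo == NULL || hi == NULL) {
    if (lo != NULL) VirtualFree(lo, 0, MEM_RELEASE);
    if (hi != NULL) VirtualFree(hi, 0, MEM_RELEASE);
    return false;  // could not set up the test - report pessimistically
  }

  // 1x2 pixel, 32 bpp bitmap whose 8 bytes of pixel data straddle the boundary:
  // first pixel at the end of 'lo', second pixel at the start of 'hi'.
  HBITMAP bm = CreateBitmap(1, 2, 1, 32, base + granule - 4);
  bool ok = (bm != NULL);
  if (bm != NULL) DeleteObject(bm);

  VirtualFree(hi, 0, MEM_RELEASE);
  VirtualFree(lo, 0, MEM_RELEASE);
  return ok;
}

int main() {
  printf("GDI handles split reservations: %s\n",
         probe_gdi_split_reservation() ? "yes" : "no");
  return 0;
}
// --- End of illustration.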

// Container for NUMA node list info
class NUMANodeListHolder {
@@ -2766,17 +2879,17 @@ class NUMANodeListHolder {

} numa_node_list_holder;



static size_t _large_page_size = 0;

static bool request_lock_memory_privilege() {
- _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
- os::current_process_id());
+ HANDLE hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
+ os::current_process_id());

+ bool success = false;
+ HANDLE hToken = NULL;
LUID luid;
- if (_hProcess != NULL &&
- OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
+ if (hProcess != NULL &&
+ OpenProcessToken(hProcess, TOKEN_ADJUST_PRIVILEGES, &hToken) &&
LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {

TOKEN_PRIVILEGES tp;
@@ -2786,51 +2899,58 @@ static bool request_lock_memory_privilege() {

// AdjustTokenPrivileges() may return TRUE even when it couldn't change the
// privilege. Check GetLastError() too. See MSDN document.
- if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
+ if (AdjustTokenPrivileges(hToken, false, &tp, sizeof(tp), NULL, NULL) &&
(GetLastError() == ERROR_SUCCESS)) {
- return true;
+ success = true;
}
}

- return false;
- }
+ // Cleanup
+ if (hProcess != NULL) {
+ CloseHandle(hProcess);
+ }
+ if (hToken != NULL) {
+ CloseHandle(hToken);
+ }

- static void cleanup_after_large_page_init() {
- if (_hProcess) CloseHandle(_hProcess);
- _hProcess = NULL;
- if (_hToken) CloseHandle(_hToken);
- _hToken = NULL;
+ return success;
}

static bool numa_interleaving_init() {
- bool success = false;
- bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);

// print a warning if UseNUMAInterleaving flag is specified on command line
- bool warn_on_failure = use_numa_interleaving_specified;
+ bool warn_on_failure = !FLAG_IS_DEFAULT(UseNUMAInterleaving);

#define WARN(msg) if (warn_on_failure) { warning(msg); }

// NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);

- if (numa_node_list_holder.build()) {
- if (log_is_enabled(Debug, os, cpu)) {
- Log(os, cpu) log;
- log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
- for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
- log.debug(" %d ", numa_node_list_holder.get_node_list_entry(i));
- }
- }
- success = true;
- } else {
+ if (!numa_node_list_holder.build()) {
WARN("Process does not cover multiple NUMA nodes.");
+ WARN("...Ignoring UseNUMAInterleaving flag.");
+ return false;
}

+ if (!gdi_can_use_split_reservation_memory(UseLargePages, min_interleave_granularity)) {
+ WARN("Windows GDI cannot handle split reservations.");
+ WARN("...Ignoring UseNUMAInterleaving flag.");
+ return false;
+ }
- if (!success) {
- if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");

+ if (log_is_enabled(Debug, os, cpu)) {
+ Log(os, cpu) log;
+ log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
+ for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
+ log.debug(" %d ", numa_node_list_holder.get_node_list_entry(i));
+ }
+ }
- return success;

#undef WARN

+ return true;
}

// this routine is used whenever we need to reserve a contiguous VA range
@@ -2951,51 +3071,84 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
return p_buf;
}



- void os::large_page_init() {
- if (!UseLargePages) return;

+ static size_t large_page_init_decide_size() {
// print a warning if any large page related flag is specified on command line
bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
!FLAG_IS_DEFAULT(LargePageSizeInBytes);
- bool success = false;

#define WARN(msg) if (warn_on_failure) { warning(msg); }
- if (request_lock_memory_privilege()) {
- size_t s = GetLargePageMinimum();
- if (s) {
- #if defined(IA32) || defined(AMD64)
- if (s > 4*M || LargePageSizeInBytes > 4*M) {
- WARN("JVM cannot use large pages bigger than 4mb.");
- } else {
- #endif
- if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
- _large_page_size = LargePageSizeInBytes;
- } else {
- _large_page_size = s;
- }
- success = true;

+ if (!request_lock_memory_privilege()) {
+ WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
+ return 0;
+ }

+ size_t size = GetLargePageMinimum();
+ if (size == 0) {
+ WARN("Large page is not supported by the processor.");
+ return 0;
+ }

#if defined(IA32) || defined(AMD64)
- }
+ if (size > 4*M || LargePageSizeInBytes > 4*M) {
+ WARN("JVM cannot use large pages bigger than 4mb.");
+ return 0;
+ }
#endif
- } else {
- WARN("Large page is not supported by the processor.");
- }
- } else {
- WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");

+ if (LargePageSizeInBytes > 0 && LargePageSizeInBytes % size == 0) {
+ size = LargePageSizeInBytes;
+ }

// Now test allocating a page
void* large_page = VirtualAlloc(NULL,
size,
MEM_RESERVE|MEM_COMMIT|MEM_LARGE_PAGES,
PAGE_READWRITE);
if (large_page == NULL) {
WARN("JVM cannot allocate one single large page.");
return 0;
}

// Detect if GDI can use memory backed by large pages
if (!gdi_can_use_memory(large_page)) {
WARN("JVM cannot use large pages because of bug in Windows GDI.");
return 0;
}

// Release test page
VirtualFreeChecked(large_page, 0, MEM_RELEASE);

#undef WARN

return size;
}
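
// --- Illustration (not part of this change): the same decision sequence,
// privilege -> GetLargePageMinimum() -> trial allocation -> GDI probe, as a
// standalone sketch. Assumes <windows.h>, advapi32.lib/gdi32.lib, and that the
// current user holds the "Lock pages in memory" right; helper names are hypothetical.
#include <windows.h>
#include <stdio.h>

static bool enable_lock_memory_privilege() {
  HANDLE token = NULL;
  if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES, &token)) {
    return false;
  }
  TOKEN_PRIVILEGES tp;
  tp.PrivilegeCount = 1;
  tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
  // AdjustTokenPrivileges() can return TRUE without granting the privilege,
  // so GetLastError() must be checked as well (same caveat as above).
  bool ok = LookupPrivilegeValue(NULL, SE_LOCK_MEMORY_NAME, &tp.Privileges[0].Luid) &&
            AdjustTokenPrivileges(token, FALSE, &tp, sizeof(tp), NULL, NULL) &&
            GetLastError() == ERROR_SUCCESS;
  CloseHandle(token);
  return ok;
}

int main() {
  if (!enable_lock_memory_privilege()) {
    printf("cannot enable SeLockMemoryPrivilege\n");
    return 1;
  }
  SIZE_T size = GetLargePageMinimum();  // 0 means large pages are unsupported
  if (size == 0) {
    printf("large pages not supported by the processor\n");
    return 1;
  }
  void* page = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
                            PAGE_READWRITE);
  if (page == NULL) {
    printf("cannot allocate one large page (error %lu)\n", GetLastError());
    return 1;
  }
  // Same tiny-bitmap probe as earlier: 1x2 pixels, 32 bpp, backed by the large page.
  HBITMAP bm = CreateBitmap(1, 2, 1, 32, page);
  printf("GDI can use large-page memory: %s\n", bm != NULL ? "yes" : "no");
  if (bm != NULL) DeleteObject(bm);
  VirtualFree(page, 0, MEM_RELEASE);
  return 0;
}
// --- End of illustration.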

void os::large_page_init() {
if (!UseLargePages) {
return;
}

_large_page_size = large_page_init_decide_size();

const size_t default_page_size = (size_t) vm_page_size();
- if (success && _large_page_size > default_page_size) {
+ if (_large_page_size > default_page_size) {
_page_sizes[0] = _large_page_size;
_page_sizes[1] = default_page_size;
_page_sizes[2] = 0;
}

- cleanup_after_large_page_init();
- UseLargePages = success;
+ UseLargePages = _large_page_size != 0;

if (UseLargePages && UseLargePagesIndividualAllocation) {
if (!gdi_can_use_split_reservation_memory(true /* use_large_pages */, _large_page_size)) {
if (FLAG_IS_CMDLINE(UseLargePagesIndividualAllocation)) {
warning("Windows GDI cannot handle split reservations.");
warning("...Ignoring UseLargePagesIndividualAllocation flag.");
}
UseLargePagesIndividualAllocation = false;
}
}
}

int os::create_file_for_heap(const char* dir) {
