Skip to content
Permalink
Browse files
8213269: convert test/hotspot/jtreg/runtime/memory/RunUnitTestsConcurrently to gtest

Reviewed-by: iignatyev, coleenp, stuefe
  • Loading branch information
Mikhailo Seledtsov committed Mar 8, 2021
1 parent 17853ee commit 9221540e2a3bd0e1b39297dc4c31f7dcba30e9c8
Show file tree
Hide file tree
Showing 13 changed files with 638 additions and 668 deletions.
@@ -3224,12 +3224,6 @@ int os::get_core_path(char* buffer, size_t bufferSize) {
return strlen(buffer);
}

#ifndef PRODUCT
// Non-PRODUCT test hook for special (large page) memory reservation.
// Intentionally empty: this platform implements no special-reservation
// tests, but the symbol must still exist for the common test driver.
void TestReserveMemorySpecial_test() {
// No tests available for this platform
}
#endif

bool os::start_debugging(char *buf, int buflen) {
int len = (int)strlen(buf);
char *p = &buf[len];
@@ -2718,12 +2718,6 @@ bool os::supports_map_sync() {
return false;
}

#ifndef PRODUCT
// Non-PRODUCT test hook for special (large page) memory reservation.
// Intentionally empty: this platform implements no special-reservation
// tests, but the symbol must still exist for the common test driver.
void TestReserveMemorySpecial_test() {
// No tests available for this platform
}
#endif

bool os::start_debugging(char *buf, int buflen) {
int len = (int)strlen(buf);
char *p = &buf[len];
@@ -5535,172 +5535,3 @@ void os::print_memory_mappings(char* addr, size_t bytes, outputStream* st) {
st->cr();
}
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT

// In-VM unit tests for the Linux "special" (large page) memory reservation
// paths: hugetlbfs-backed reservations and SysV-SHM-backed reservations.
// All members are static (AllStatic); the suite is entered via test() from
// TestReserveMemorySpecial_test(). Non-PRODUCT builds only.
class TestReserveMemorySpecial : AllStatic {
 public:
  // Touch one byte per small page of [addr, addr+size) so the reservation
  // is actually backed/committed, not merely mapped.
  static void small_page_write(void* addr, size_t size) {
    size_t page_size = os::vm_page_size();

    char* end = (char*)addr + size;
    for (char* p = (char*)addr; p < end; p += page_size) {
      *p = 1;
    }
  }

  // Reserve 'size' bytes via the hugetlbfs-only path at an OS-chosen
  // address, write to it, and release it. No-op unless UseHugeTLBFS.
  // A NULL result is tolerated (huge pages may be unavailable at runtime).
  static void test_reserve_memory_special_huge_tlbfs_only(size_t size) {
    if (!UseHugeTLBFS) {
      return;
    }

    char* addr = os::Linux::reserve_memory_special_huge_tlbfs_only(size, NULL, false);

    if (addr != NULL) {
      small_page_write(addr, size);

      os::Linux::release_memory_special_huge_tlbfs(addr, size);
    }
  }

  // Drive the hugetlbfs-only test over multiples of the large page size
  // (1x .. 10x). No-op unless UseHugeTLBFS.
  static void test_reserve_memory_special_huge_tlbfs_only() {
    if (!UseHugeTLBFS) {
      return;
    }

    size_t lp = os::large_page_size();

    for (size_t size = lp; size <= lp * 10; size += lp) {
      test_reserve_memory_special_huge_tlbfs_only(size);
    }
  }

  // Exercise the mixed (large + small page) hugetlbfs reservation path with
  // a range of sizes and alignments, and with NULL / free / occupied
  // request addresses.
  static void test_reserve_memory_special_huge_tlbfs_mixed() {
    size_t lp = os::large_page_size();
    size_t ag = os::vm_allocation_granularity();

    // sizes to test
    const size_t sizes[] = {
      lp, lp + ag, lp + lp / 2, lp * 2,
      lp * 2 + ag, lp * 2 - ag, lp * 2 + lp / 2,
      lp * 10, lp * 10 + lp / 2
    };
    const int num_sizes = sizeof(sizes) / sizeof(size_t);

    // For each size/alignment combination, we test three scenarios:
    // 1) with req_addr == NULL
    // 2) with a non-null req_addr at which we expect to successfully allocate
    // 3) with a non-null req_addr which contains a pre-existing mapping, at which we
    //    expect the allocation to either fail or to ignore req_addr

    // Pre-allocate two areas; they shall be as large as the largest allocation
    // and aligned to the largest alignment we will be testing.
    const size_t mapping_size = sizes[num_sizes - 1] * 2;
    char* const mapping1 = (char*) ::mmap(NULL, mapping_size,
      PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
      -1, 0);
    assert(mapping1 != MAP_FAILED, "should work");

    char* const mapping2 = (char*) ::mmap(NULL, mapping_size,
      PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE,
      -1, 0);
    assert(mapping2 != MAP_FAILED, "should work");

    // Unmap the first mapping, but leave the second mapping intact: the first
    // mapping will serve as a value for a "good" req_addr (case 2). The second
    // mapping, still intact, as "bad" req_addr (case 3).
    ::munmap(mapping1, mapping_size);

    // Case 1: OS chooses the address; result (if any) must honor alignment.
    for (int i = 0; i < num_sizes; i++) {
      const size_t size = sizes[i];
      for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
        char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, NULL, false);
        if (p != NULL) {
          assert(is_aligned(p, alignment), "must be");
          small_page_write(p, size);
          os::Linux::release_memory_special_huge_tlbfs(p, size);
        }
      }
    }

    // Case 2: request an address inside the (now unmapped) mapping1 range;
    // a non-NULL result must be exactly the requested address.
    for (int i = 0; i < num_sizes; i++) {
      const size_t size = sizes[i];
      for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
        char* const req_addr = align_up(mapping1, alignment);
        char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
        if (p != NULL) {
          assert(p == req_addr, "must be");
          small_page_write(p, size);
          os::Linux::release_memory_special_huge_tlbfs(p, size);
        }
      }
    }

    // Case 3: request an address inside the still-mapped mapping2 range.
    for (int i = 0; i < num_sizes; i++) {
      const size_t size = sizes[i];
      for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
        char* const req_addr = align_up(mapping2, alignment);
        char* p = os::Linux::reserve_memory_special_huge_tlbfs_mixed(size, alignment, req_addr, false);
        // as the area around req_addr contains already existing mappings, the API should always
        // return NULL (as per contract, it cannot return another address)
        assert(p == NULL, "must be");
      }
    }

    ::munmap(mapping2, mapping_size);

  }

  // Run both hugetlbfs sub-suites. No-op unless UseHugeTLBFS.
  static void test_reserve_memory_special_huge_tlbfs() {
    if (!UseHugeTLBFS) {
      return;
    }

    test_reserve_memory_special_huge_tlbfs_only();
    test_reserve_memory_special_huge_tlbfs_mixed();
  }

  // Reserve 'size' bytes via SysV SHM with the given alignment, check the
  // alignment guarantees, write to it, and release it. No-op unless UseSHM.
  static void test_reserve_memory_special_shm(size_t size, size_t alignment) {
    if (!UseSHM) {
      return;
    }

    char* addr = os::Linux::reserve_memory_special_shm(size, alignment, NULL, false);

    if (addr != NULL) {
      assert(is_aligned(addr, alignment), "Check");
      assert(is_aligned(addr, os::large_page_size()), "Check");

      small_page_write(addr, size);

      os::Linux::release_memory_special_shm(addr, size);
    }
  }

  // Drive the SHM test over sizes up to 3 large pages, stepping by the
  // allocation granularity, for every alignment that divides the size.
  static void test_reserve_memory_special_shm() {
    size_t lp = os::large_page_size();
    size_t ag = os::vm_allocation_granularity();

    for (size_t size = ag; size < lp * 3; size += ag) {
      for (size_t alignment = ag; is_aligned(size, alignment); alignment *= 2) {
        test_reserve_memory_special_shm(size, alignment);
      }
    }
  }

  // Suite entry point: run the hugetlbfs and SHM sub-suites.
  static void test() {
    test_reserve_memory_special_huge_tlbfs();
    test_reserve_memory_special_shm();
  }
};

// Non-PRODUCT test hook: forwards to the TestReserveMemorySpecial suite.
void TestReserveMemorySpecial_test() {
  TestReserveMemorySpecial::test();
}

#endif
@@ -5790,58 +5790,6 @@ char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
return agent_entry_name;
}

#ifndef PRODUCT

// Test the code path in reserve_memory_special() that tries to allocate memory
// in a single contiguous memory block at a particular address.
// The test first tries to find a good approximate address to allocate at by using
// the same method to allocate some memory at any address. The test then tries to
// allocate memory in the vicinity (not directly after it to avoid possible
// by-chance use of that location). This is of course only some dodgy assumption,
// there is no guarantee that the vicinity of the previously allocated memory is
// available for allocation. The only actual failure that is reported is when the
// test tries to allocate at a particular location but gets a different valid one.
// A NULL return value at this point is not considered an error but may be legitimate.
void TestReserveMemorySpecial_test() {
  if (!UseLargePages) {
    return;
  }
  // Save current values of the globals we are about to override.
  bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
  bool old_use_numa_interleaving = UseNUMAInterleaving;

  // Set globals to make sure we hit the correct code path
  // (single contiguous allocation, no NUMA interleaving).
  UseLargePagesIndividualAllocation = false;
  UseNUMAInterleaving = false;

  // Do an allocation at an address selected by the OS to get a good one.
  const size_t large_allocation_size = os::large_page_size() * 4;
  char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
  if (result != NULL) {
    os::release_memory_special(result, large_allocation_size);

    // Allocate another page within the recently allocated memory area, which
    // seems to be a good location. At least we managed to get it once.
    const size_t expected_allocation_size = os::large_page_size();
    char* expected_location = result + os::large_page_size();
    char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
    if (actual_location != NULL) {
      // Release memory first, then check: avoids leaking on assert failure.
      os::release_memory_special(actual_location, expected_allocation_size);
      assert(actual_location == expected_location,
        "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
        expected_location, expected_allocation_size, actual_location);
    }
    // A NULL actual_location is legitimate here (see comment above).
  }
  // A NULL result is also legitimate: large page allocation may simply fail.

  // Restore the overridden globals.
  UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
  UseNUMAInterleaving = old_use_numa_interleaving;
}
#endif // PRODUCT

/*
All the defined signal names for Windows.

0 comments on commit 9221540

Please sign in to comment.