8245208: ZGC: Don't hold the ZPageAllocator lock while committing/uncommitting memory

Reviewed-by: eosterlund, stefank
pliden committed Jun 9, 2020
1 parent cd16b56 commit d7e68f375cf063ea6aefc402ac71364c258a3a4e
Showing with 1,206 additions and 935 deletions.
  1. +4 −4 src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp
  2. +4 −4 src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.hpp
  3. +24 −24 src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp
  4. +14 −14 src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.hpp
  5. +1 −1 src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp
  6. +1 −1 src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.hpp
  7. +3 −3 src/hotspot/share/gc/z/vmStructs_z.hpp
  8. +25 −14 src/hotspot/share/gc/z/zAllocationFlags.hpp
  9. +13 −1 src/hotspot/share/gc/z/zArguments.cpp
  10. +15 −7 src/hotspot/share/gc/z/zCollectedHeap.cpp
  11. +4 −5 src/hotspot/share/gc/z/zCollectedHeap.hpp
  12. +1 −2 src/hotspot/share/gc/z/zFuture.hpp
  13. +1 −6 src/hotspot/share/gc/z/zFuture.inline.hpp
  14. +6 −27 src/hotspot/share/gc/z/zHeap.cpp
  15. +2 −10 src/hotspot/share/gc/z/zHeap.hpp
  16. +9 −4 src/hotspot/share/gc/z/zHeuristics.cpp
  17. +3 −1 src/hotspot/share/gc/z/zHeuristics.hpp
  18. +1 −2 src/hotspot/share/gc/z/zInitialize.cpp
  19. +12 −1 src/hotspot/share/gc/z/zMemory.cpp
  20. +3 −1 src/hotspot/share/gc/z/zMemory.hpp
  21. +23 −1 src/hotspot/share/gc/z/zPage.cpp
  22. +4 −5 src/hotspot/share/gc/z/zPage.hpp
  23. +6 −13 src/hotspot/share/gc/z/zPage.inline.hpp
  24. +453 −389 src/hotspot/share/gc/z/zPageAllocator.cpp
  25. +35 −27 src/hotspot/share/gc/z/zPageAllocator.hpp
  26. +110 −18 src/hotspot/share/gc/z/zPageCache.cpp
  27. +8 −18 src/hotspot/share/gc/z/zPageCache.hpp
  28. +0 −35 src/hotspot/share/gc/z/zPageCache.inline.hpp
  29. +255 −112 src/hotspot/share/gc/z/zPhysicalMemory.cpp
  30. +27 −12 src/hotspot/share/gc/z/zPhysicalMemory.hpp
  31. +16 −6 src/hotspot/share/gc/z/zPhysicalMemory.inline.hpp
  32. +50 −29 src/hotspot/share/gc/z/zUncommitter.cpp
  33. +10 −6 src/hotspot/share/gc/z/zUncommitter.hpp
  34. +5 −5 src/hotspot/share/gc/z/zVirtualMemory.cpp
  35. +2 −2 src/hotspot/share/gc/z/zVirtualMemory.hpp
  36. +3 −10 src/hotspot/share/jfr/metadata/metadata.xml
  37. +0 −6 src/jdk.jfr/share/conf/jfr/default.jfc
  38. +0 −6 src/jdk.jfr/share/conf/jfr/profile.jfc
  39. +2 −2 test/hotspot/gtest/gc/z/test_zForwarding.cpp
  40. +46 −14 test/hotspot/gtest/gc/z/test_zPhysicalMemory.cpp
  41. +5 −11 test/hotspot/jtreg/gc/z/TestUncommit.java
  42. +0 −69 test/jdk/jdk/jfr/event/gc/detailed/TestZPageCacheFlushEvent.java
  43. +0 −6 test/jdk/jdk/jfr/event/gc/detailed/TestZUncommitEvent.java
  44. +0 −1 test/lib/jdk/test/lib/jfr/EventNames.java
@@ -93,11 +93,11 @@ bool ZPhysicalMemoryBacking::is_initialized() const {
return _initialized;
}

void ZPhysicalMemoryBacking::warn_commit_limits(size_t max) const {
void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
// Does nothing
}

bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) {
bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const {
assert(is_aligned(offset, os::vm_page_size()), "Invalid offset");
assert(is_aligned(length, os::vm_page_size()), "Invalid length");

@@ -116,7 +116,7 @@ bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) {
return true;
}

size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) {
size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) const {
// Try to commit the whole region
if (commit_inner(offset, length)) {
// Success
@@ -144,7 +144,7 @@ size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) {
}
}

size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) {
size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const {
assert(is_aligned(offset, os::vm_page_size()), "Invalid offset");
assert(is_aligned(length, os::vm_page_size()), "Invalid length");

@@ -29,17 +29,17 @@ class ZPhysicalMemoryBacking {
uintptr_t _base;
bool _initialized;

bool commit_inner(size_t offset, size_t length);
bool commit_inner(size_t offset, size_t length) const;

public:
ZPhysicalMemoryBacking(size_t max_capacity);

bool is_initialized() const;

void warn_commit_limits(size_t max) const;
void warn_commit_limits(size_t max_capacity) const;

size_t commit(size_t offset, size_t length);
size_t uncommit(size_t offset, size_t length);
size_t commit(size_t offset, size_t length) const;
size_t uncommit(size_t offset, size_t length) const;

void map(uintptr_t addr, size_t size, uintptr_t offset) const;
void unmap(uintptr_t addr, size_t size) const;
@@ -302,7 +302,7 @@ bool ZPhysicalMemoryBacking::is_initialized() const {
return _initialized;
}

void ZPhysicalMemoryBacking::warn_available_space(size_t max) const {
void ZPhysicalMemoryBacking::warn_available_space(size_t max_capacity) const {
// Note that the available space on a tmpfs or a hugetlbfs filesystem
// will be zero if no size limit was specified when it was mounted.
if (_available == 0) {
@@ -316,18 +316,18 @@ void ZPhysicalMemoryBacking::warn_available_space(size_t max) const {
// Warn if the filesystem doesn't currently have enough space available to hold
// the max heap size. The max heap size will be capped if we later hit this limit
// when trying to expand the heap.
if (_available < max) {
if (_available < max_capacity) {
log_warning_p(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
log_warning_p(gc)("Not enough space available on the backing filesystem to hold the current max Java heap");
log_warning_p(gc)("size (" SIZE_FORMAT "M). Please adjust the size of the backing filesystem accordingly "
"(available", max / M);
"(available", max_capacity / M);
log_warning_p(gc)("space is currently " SIZE_FORMAT "M). Continuing execution with the current filesystem "
"size could", _available / M);
log_warning_p(gc)("lead to a premature OutOfMemoryError being thrown, due to failure to map memory.");
"size could", _available / M);
log_warning_p(gc)("lead to a premature OutOfMemoryError being thrown, due to failure to commit memory.");
}
}

void ZPhysicalMemoryBacking::warn_max_map_count(size_t max) const {
void ZPhysicalMemoryBacking::warn_max_map_count(size_t max_capacity) const {
const char* const filename = ZFILENAME_PROC_MAX_MAP_COUNT;
FILE* const file = fopen(filename, "r");
if (file == NULL) {
@@ -350,24 +350,24 @@ void ZPhysicalMemoryBacking::warn_max_map_count(size_t max) const {
// However, ZGC tends to create the most mappings and dominate the total count.
// In the worst cases, ZGC will map each granule three times, i.e. once per heap view.
// We speculate that we need another 20% to allow for non-ZGC subsystems to map memory.
const size_t required_max_map_count = (max / ZGranuleSize) * 3 * 1.2;
const size_t required_max_map_count = (max_capacity / ZGranuleSize) * 3 * 1.2;
if (actual_max_map_count < required_max_map_count) {
log_warning_p(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
log_warning_p(gc)("The system limit on number of memory mappings per process might be too low for the given");
log_warning_p(gc)("max Java heap size (" SIZE_FORMAT "M). Please adjust %s to allow for at",
max / M, filename);
max_capacity / M, filename);
log_warning_p(gc)("least " SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). Continuing execution "
"with the current", required_max_map_count, actual_max_map_count);
log_warning_p(gc)("limit could lead to a fatal error, due to failure to map memory.");
"with the current", required_max_map_count, actual_max_map_count);
log_warning_p(gc)("limit could lead to a premature OutOfMemoryError being thrown, due to failure to map memory.");
}
}
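// Worked example of the estimate above (illustrative, not part of this change,
// assuming the default 2M ZGranuleSize): a 64G max heap needs
//   (64G / 2M) * 3 * 1.2 = 32768 * 3 * 1.2 = 117964 mappings,
// which exceeds the usual Linux default of vm.max_map_count = 65530 and would
// trigger this warning, while a 16G heap needs only 29491 and would not.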

void ZPhysicalMemoryBacking::warn_commit_limits(size_t max) const {
void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
// Warn if available space is too low
warn_available_space(max);
warn_available_space(max_capacity);

// Warn if max map count is too low
warn_max_map_count(max);
warn_max_map_count(max_capacity);
}

bool ZPhysicalMemoryBacking::is_tmpfs() const {
@@ -477,7 +477,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate_compat_pwrite(size_t offset, size_t len
return 0;
}

ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(size_t offset, size_t length) {
ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(size_t offset, size_t length) const {
// fallocate(2) is only supported by tmpfs since Linux 3.5, and by hugetlbfs
// since Linux 4.3. When fallocate(2) is not supported we emulate it using
// mmap/munmap (for hugetlbfs and tmpfs with transparent huge pages) or pwrite
@@ -491,7 +491,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(size_t offset, size_t
}
}
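// Illustrative sketch of the pwrite(2)-based emulation mentioned above (an
// assumption about how the compat path can be realized, not necessarily the
// exact body of fallocate_compat_pwrite): writing one zero byte into every
// block forces the filesystem to allocate backing space for that block.
//
//   const char zero = 0;
//   for (size_t pos = offset; pos < offset + length; pos += _block_size) {
//     if (pwrite(_fd, &zero, sizeof(zero), pos) == -1) {
//       return errno;  // Out of space, or other I/O error
//     }
//   }
//   return 0;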

ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_syscall(size_t offset, size_t length) {
ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_syscall(size_t offset, size_t length) const {
const int mode = 0; // Allocate
const int res = ZSyscall::fallocate(_fd, mode, offset, length);
if (res == -1) {
@@ -503,7 +503,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_syscall(size_t offset, size_t
return 0;
}

ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(size_t offset, size_t length) {
ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(size_t offset, size_t length) const {
// Using compat mode is more efficient when allocating space on hugetlbfs.
// Note that allocating huge pages this way will only reserve them, and not
// associate them with segments of the file. We must guarantee that we at
@@ -530,7 +530,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(size_t offset, size_t length)
return fallocate_fill_hole_compat(offset, length);
}

ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length) {
ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length) const {
if (ZLargePages::is_explicit()) {
// We can only punch hole in pages that have been touched. Non-touched
// pages are only reserved, and not associated with any specific file
@@ -553,7 +553,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length
return 0;
}

ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, size_t offset, size_t length) {
ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, size_t offset, size_t length) const {
// Try first half
const size_t offset0 = offset;
const size_t length0 = align_up(length / 2, _block_size);
@@ -574,7 +574,7 @@ ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, size_t offse
return 0;
}

ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, size_t offset, size_t length) {
ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, size_t offset, size_t length) const {
assert(is_aligned(offset, _block_size), "Invalid offset");
assert(is_aligned(length, _block_size), "Invalid length");

@@ -590,7 +590,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, size_t offset, size_t
return err;
}

bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) {
bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const {
log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
offset / M, (offset + length) / M, length / M);

@@ -627,7 +627,7 @@ static int offset_to_node(size_t offset) {
return mapping->at((int)nindex);
}

size_t ZPhysicalMemoryBacking::commit_numa_interleaved(size_t offset, size_t length) {
size_t ZPhysicalMemoryBacking::commit_numa_interleaved(size_t offset, size_t length) const {
size_t committed = 0;

// Commit one granule at a time, so that each granule
@@ -652,7 +652,7 @@ size_t ZPhysicalMemoryBacking::commit_numa_interleaved(size_t offset, size_t len
return committed;
}

size_t ZPhysicalMemoryBacking::commit_default(size_t offset, size_t length) {
size_t ZPhysicalMemoryBacking::commit_default(size_t offset, size_t length) const {
// Try to commit the whole region
if (commit_inner(offset, length)) {
// Success
@@ -680,7 +680,7 @@ size_t ZPhysicalMemoryBacking::commit_default(size_t offset, size_t length) {
}
}

size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) {
size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) const {
if (ZNUMA::is_enabled() && !ZLargePages::is_explicit()) {
// To get granule-level NUMA interleaving when using non-large pages,
// we must explicitly interleave the memory at commit/fallocate time.
@@ -690,7 +690,7 @@ size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) {
return commit_default(offset, length);
}

size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) {
size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const {
log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
offset / M, (offset + length) / M, length / M);

@@ -35,8 +35,8 @@ class ZPhysicalMemoryBacking {
size_t _available;
bool _initialized;

void warn_available_space(size_t max) const;
void warn_max_map_count(size_t max) const;
void warn_available_space(size_t max_capacity) const;
void warn_max_map_count(size_t max_capacity) const;

int create_mem_fd(const char* name) const;
int create_file_fd(const char* name) const;
@@ -49,26 +49,26 @@ class ZPhysicalMemoryBacking {
ZErrno fallocate_compat_mmap_hugetlbfs(size_t offset, size_t length, bool touch) const;
ZErrno fallocate_compat_mmap_tmpfs(size_t offset, size_t length) const;
ZErrno fallocate_compat_pwrite(size_t offset, size_t length) const;
ZErrno fallocate_fill_hole_compat(size_t offset, size_t length);
ZErrno fallocate_fill_hole_syscall(size_t offset, size_t length);
ZErrno fallocate_fill_hole(size_t offset, size_t length);
ZErrno fallocate_punch_hole(size_t offset, size_t length);
ZErrno split_and_fallocate(bool punch_hole, size_t offset, size_t length);
ZErrno fallocate(bool punch_hole, size_t offset, size_t length);
ZErrno fallocate_fill_hole_compat(size_t offset, size_t length) const;
ZErrno fallocate_fill_hole_syscall(size_t offset, size_t length) const;
ZErrno fallocate_fill_hole(size_t offset, size_t length) const;
ZErrno fallocate_punch_hole(size_t offset, size_t length) const;
ZErrno split_and_fallocate(bool punch_hole, size_t offset, size_t length) const;
ZErrno fallocate(bool punch_hole, size_t offset, size_t length) const;

bool commit_inner(size_t offset, size_t length);
size_t commit_numa_interleaved(size_t offset, size_t length);
size_t commit_default(size_t offset, size_t length);
bool commit_inner(size_t offset, size_t length) const;
size_t commit_numa_interleaved(size_t offset, size_t length) const;
size_t commit_default(size_t offset, size_t length) const;

public:
ZPhysicalMemoryBacking(size_t max_capacity);

bool is_initialized() const;

void warn_commit_limits(size_t max) const;
void warn_commit_limits(size_t max_capacity) const;

size_t commit(size_t offset, size_t length);
size_t uncommit(size_t offset, size_t length);
size_t commit(size_t offset, size_t length) const;
size_t uncommit(size_t offset, size_t length) const;

void map(uintptr_t addr, size_t size, uintptr_t offset) const;
void unmap(uintptr_t addr, size_t size) const;
@@ -42,7 +42,7 @@ bool ZPhysicalMemoryBacking::is_initialized() const {
return true;
}

void ZPhysicalMemoryBacking::warn_commit_limits(size_t max) const {
void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
// Does nothing
}

@@ -44,7 +44,7 @@ class ZPhysicalMemoryBacking {

bool is_initialized() const;

void warn_commit_limits(size_t max) const;
void warn_commit_limits(size_t max_capacity) const;

size_t commit(size_t offset, size_t length);
size_t uncommit(size_t offset, size_t length);
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -77,8 +77,8 @@ typedef ZAttachedArray<ZForwarding, ZForwardingEntry> ZAttachedArrayForForwardin
volatile_nonstatic_field(ZPage, _top, uintptr_t) \
\
nonstatic_field(ZPageAllocator, _max_capacity, const size_t) \
nonstatic_field(ZPageAllocator, _capacity, size_t) \
nonstatic_field(ZPageAllocator, _used, size_t) \
volatile_nonstatic_field(ZPageAllocator, _capacity, size_t) \
volatile_nonstatic_field(ZPageAllocator, _used, size_t) \
\
nonstatic_field(ZPageTable, _map, ZGranuleMapForPageTable) \
\
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,20 +31,22 @@
// Allocation flags layout
// -----------------------
//
// 7 3 2 1 0
// +----+-+-+-+-+
// |0000|1|1|1|1|
// +----+-+-+-+-+
// | | | | |
// | | | | * 0-0 Worker Thread Flag (1-bit)
// | | | |
// | | | * 1-1 Non-Blocking Flag (1-bit)
// | | |
// | | * 2-2 Relocation Flag (1-bit)
// | |
// | * 3-3 No Reserve Flag (1-bit)
// 7 4 3 2 1 0
// +---+-+-+-+-+-+
// |000|1|1|1|1|1|
// +---+-+-+-+-+-+
// | | | | | |
// | | | | | * 0-0 Worker Thread Flag (1-bit)
// | | | | |
// | | | | * 1-1 Non-Blocking Flag (1-bit)
// | | | |
// | | | * 2-2 Relocation Flag (1-bit)
// | | |
// | | * 3-3 No Reserve Flag (1-bit)
// | |
// | * 4-4 Low Address Flag (1-bit)
// |
// * 7-4 Unused (4-bits)
// * 7-5 Unused (3-bits)
//

class ZAllocationFlags {
@@ -53,6 +55,7 @@ class ZAllocationFlags {
typedef ZBitField<uint8_t, bool, 1, 1> field_non_blocking;
typedef ZBitField<uint8_t, bool, 2, 1> field_relocation;
typedef ZBitField<uint8_t, bool, 3, 1> field_no_reserve;
typedef ZBitField<uint8_t, bool, 4, 1> field_low_address;

uint8_t _flags;

@@ -76,6 +79,10 @@ class ZAllocationFlags {
_flags |= field_no_reserve::encode(true);
}

void set_low_address() {
_flags |= field_low_address::encode(true);
}

bool worker_thread() const {
return field_worker_thread::decode(_flags);
}
@@ -91,6 +98,10 @@ class ZAllocationFlags {
bool no_reserve() const {
return field_no_reserve::decode(_flags);
}

bool low_address() const {
return field_low_address::decode(_flags);
}
};

#endif // SHARE_GC_Z_ZALLOCATIONFLAGS_HPP
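
For reference, a small standalone sketch of the same bit layout with the new Low Address flag, using plain masks instead of ZBitField (the class name, trimmed method set, and main() driver are invented for illustration; the real class is the one shown in the diff above):

#include <cstdint>
#include <cstdio>

// Bit positions matching the layout comment above.
class AllocationFlagsSketch {
private:
  static const uint8_t WorkerThreadBit = 1u << 0;
  static const uint8_t NonBlockingBit  = 1u << 1;
  static const uint8_t RelocationBit   = 1u << 2;
  static const uint8_t NoReserveBit    = 1u << 3;
  static const uint8_t LowAddressBit   = 1u << 4; // New in this change

  uint8_t _flags;

public:
  AllocationFlagsSketch() : _flags(0) {}

  void set_non_blocking() { _flags |= NonBlockingBit; }
  void set_low_address()  { _flags |= LowAddressBit; }

  bool non_blocking() const { return (_flags & NonBlockingBit) != 0; }
  bool low_address() const  { return (_flags & LowAddressBit) != 0; }
};

int main() {
  AllocationFlagsSketch flags;
  flags.set_non_blocking();
  flags.set_low_address();
  printf("non_blocking=%d low_address=%d\n", flags.non_blocking(), flags.low_address());
  return 0;
}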
