8276055: ZGC: Defragment address space
Reviewed-by: eosterlund, stefank
pliden committed Oct 28, 2021
1 parent d9b0138 commit 1750a6e2c06960b734f646018fc99b336bd966a5
Showing 8 changed files with 80 additions and 17 deletions.
src/hotspot/share/gc/z/zMemory.cpp
@@ -85,7 +85,19 @@ void ZMemoryManager::register_callbacks(const Callbacks& callbacks) {
   _callbacks = callbacks;
 }
 
-uintptr_t ZMemoryManager::alloc_from_front(size_t size) {
+uintptr_t ZMemoryManager::peek_low_address() const {
+  ZLocker<ZLock> locker(&_lock);
+
+  const ZMemory* const area = _freelist.first();
+  if (area != NULL) {
+    return area->start();
+  }
+
+  // Out of memory
+  return UINTPTR_MAX;
+}
+
+uintptr_t ZMemoryManager::alloc_low_address(size_t size) {
   ZLocker<ZLock> locker(&_lock);
 
   ZListIterator<ZMemory> iter(&_freelist);
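
Note on the renames above: ZMemoryManager keeps its free list sorted by start
address, so "front" and "back" really meant "lowest address" and "highest
address". The new names say so directly, and the new peek_low_address() simply
reports the head of the list without allocating. A minimal standalone sketch
of the idea, with toy types standing in for ZMemoryManager's internals (this
is not the JDK code):

  #include <cstddef>
  #include <cstdint>
  #include <map>

  class ToyMemoryManager {
    // start -> size, ordered by start address (lowest first)
    std::map<uintptr_t, size_t> _free;

   public:
    static constexpr uintptr_t npos = UINTPTR_MAX;

    // Lowest available address, or npos if empty (cf. peek_low_address())
    uintptr_t peek_low_address() const {
      return _free.empty() ? npos : _free.begin()->first;
    }

    // First fit, scanning from the low end (cf. alloc_low_address())
    uintptr_t alloc_low_address(size_t size) {
      for (auto it = _free.begin(); it != _free.end(); ++it) {
        if (it->second >= size) {
          const uintptr_t start = it->first;
          const size_t tail = it->second - size;
          _free.erase(it);
          if (tail > 0) {
            _free[start + size] = tail;  // keep the tail of the area free
          }
          return start;
        }
      }
      return npos;  // out of address space
    }

    void free(uintptr_t start, size_t size) {
      _free[start] = size;  // the real manager also coalesces neighbors
    }
  };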
@@ -110,7 +122,7 @@ uintptr_t ZMemoryManager::alloc_from_front(size_t size) {
   return UINTPTR_MAX;
 }
 
-uintptr_t ZMemoryManager::alloc_from_front_at_most(size_t size, size_t* allocated) {
+uintptr_t ZMemoryManager::alloc_low_address_at_most(size_t size, size_t* allocated) {
   ZLocker<ZLock> locker(&_lock);
 
   ZMemory* area = _freelist.first();
@@ -136,7 +148,7 @@ uintptr_t ZMemoryManager::alloc_from_front_at_most(size_t size, size_t* allocated) {
   return UINTPTR_MAX;
 }
 
-uintptr_t ZMemoryManager::alloc_from_back(size_t size) {
+uintptr_t ZMemoryManager::alloc_high_address(size_t size) {
   ZLocker<ZLock> locker(&_lock);
 
   ZListReverseIterator<ZMemory> iter(&_freelist);
@@ -160,7 +172,7 @@ uintptr_t ZMemoryManager::alloc_from_back(size_t size) {
   return UINTPTR_MAX;
 }
 
-uintptr_t ZMemoryManager::alloc_from_back_at_most(size_t size, size_t* allocated) {
+uintptr_t ZMemoryManager::alloc_high_address_at_most(size_t size, size_t* allocated) {
   ZLocker<ZLock> locker(&_lock);
 
   ZMemory* area = _freelist.last();
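
The bodies of the "_at_most" variants fall outside this diff, but the caller
in ZPhysicalMemoryManager::alloc() (further down) loops until the request is
satisfied, which suggests the contract: hand out at most `size` bytes from a
single free area, report the actual amount through *allocated, and let the
caller come back for the remainder. A hypothetical sketch of that contract,
again with a std::map standing in for the sorted free list:

  #include <algorithm>
  #include <cstddef>
  #include <cstdint>
  #include <map>

  static constexpr uintptr_t npos = UINTPTR_MAX;

  // freelist maps start -> size, ordered by start address (lowest first)
  uintptr_t alloc_low_address_at_most(std::map<uintptr_t, size_t>& freelist,
                                      size_t size, size_t* allocated) {
    if (freelist.empty()) {
      *allocated = 0;
      return npos;                                 // out of memory
    }
    const auto it = freelist.begin();              // lowest free area
    const uintptr_t start = it->first;
    const size_t area_size = it->second;
    freelist.erase(it);
    const size_t take = std::min(size, area_size); // never spans two areas
    if (take < area_size) {
      freelist[start + take] = area_size - take;   // return the unused tail
    }
    *allocated = take;
    return start;
  }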
src/hotspot/share/gc/z/zMemory.hpp
@@ -66,7 +66,7 @@ class ZMemoryManager {
   };
 
 private:
-  ZLock          _lock;
+  mutable ZLock  _lock;
   ZList<ZMemory> _freelist;
   Callbacks      _callbacks;
 
@@ -82,10 +82,11 @@ class ZMemoryManager {
 
   void register_callbacks(const Callbacks& callbacks);
 
-  uintptr_t alloc_from_front(size_t size);
-  uintptr_t alloc_from_front_at_most(size_t size, size_t* allocated);
-  uintptr_t alloc_from_back(size_t size);
-  uintptr_t alloc_from_back_at_most(size_t size, size_t* allocated);
+  uintptr_t peek_low_address() const;
+  uintptr_t alloc_low_address(size_t size);
+  uintptr_t alloc_low_address_at_most(size_t size, size_t* allocated);
+  uintptr_t alloc_high_address(size_t size);
+  uintptr_t alloc_high_address_at_most(size_t size, size_t* allocated);
 
   void free(uintptr_t start, size_t size);
 };
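
The `mutable` added to _lock exists so that the new peek_low_address() can be
declared const and still take the lock: the method is logically read-only but
must physically lock. The same idiom in standard C++, for comparison (Counter
is illustrative only, not ZGC code):

  #include <mutex>

  class Counter {
    mutable std::mutex _lock;  // mutable: lockable even through a const ref
    long _value = 0;

   public:
    // Logically const read; physically locks, which requires `mutable`
    long value() const {
      std::lock_guard<std::mutex> guard(_lock);
      return _value;
    }

    void increment() {
      std::lock_guard<std::mutex> guard(_lock);
      ++_value;
    }
  };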
src/hotspot/share/gc/z/zPageAllocator.cpp
@@ -48,6 +48,7 @@
 
 static const ZStatCounter       ZCounterAllocationRate("Memory", "Allocation Rate", ZStatUnitBytesPerSecond);
 static const ZStatCounter       ZCounterPageCacheFlush("Memory", "Page Cache Flush", ZStatUnitBytesPerSecond);
+static const ZStatCounter       ZCounterDefragment("Memory", "Defragment", ZStatUnitOpsPerSecond);
 static const ZStatCriticalPhase ZCriticalPhaseAllocationStall("Allocation Stall");
 
 enum ZPageAllocationStall {
@@ -559,12 +560,43 @@ ZPage* ZPageAllocator::alloc_page_create(ZPageAllocation* allocation) {
   return new ZPage(allocation->type(), vmem, pmem);
 }
 
-static bool is_alloc_satisfied(ZPageAllocation* allocation) {
+bool ZPageAllocator::should_defragment(const ZPage* page) const {
+  // A small page can end up at a high address (second half of the address space)
+  // if we've split a larger page or we have a constrained address space. To help
+  // fight address space fragmentation we remap such pages to a lower address, if
+  // a lower address is available.
+  return page->type() == ZPageTypeSmall &&
+         page->start() >= _virtual.reserved() / 2 &&
+         page->start() > _virtual.lowest_available_address();
+}
+
+bool ZPageAllocator::is_alloc_satisfied(ZPageAllocation* allocation) const {
   // The allocation is immediately satisfied if the list of pages contains
-  // exactly one page, with the type and size that was requested.
-  return allocation->pages()->size() == 1 &&
-         allocation->pages()->first()->type() == allocation->type() &&
-         allocation->pages()->first()->size() == allocation->size();
+  // exactly one page, with the type and size that was requested. However,
+  // even if the allocation is immediately satisfied we might still want to
+  // return false here to force the page to be remapped to fight address
+  // space fragmentation.
+
+  if (allocation->pages()->size() != 1) {
+    // Not a single page
+    return false;
+  }
+
+  const ZPage* const page = allocation->pages()->first();
+  if (page->type() != allocation->type() ||
+      page->size() != allocation->size()) {
+    // Wrong type or size
+    return false;
+  }
+
+  if (should_defragment(page)) {
+    // Defragment address space
+    ZStatInc(ZCounterDefragment);
+    return false;
+  }
+
+  // Allocation immediately satisfied
+  return true;
 }
 
 ZPage* ZPageAllocator::alloc_page_finalize(ZPageAllocation* allocation) {
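
To make should_defragment() concrete, here is a worked example with made-up
numbers (real reserved sizes and page offsets depend on platform and heap
size; the function below is a simplified standalone copy of the predicate,
not the member function itself):

  #include <cstdint>
  #include <cstdio>

  constexpr uintptr_t G = 1024ULL * 1024 * 1024;

  bool should_defragment(bool is_small_page, uintptr_t page_start,
                         uintptr_t reserved, uintptr_t lowest_available) {
    return is_small_page &&
           page_start >= reserved / 2 &&   // page sits in the upper half
           page_start > lowest_available;  // and a lower address is free
  }

  int main() {
    const uintptr_t reserved = 16 * G;  // say 16G of reserved address space
    const uintptr_t lowest = 1 * G;     // lowest free address at offset 1G
    printf("%d\n", should_defragment(true,  9 * G, reserved, lowest)); // 1: remap
    printf("%d\n", should_defragment(true,  3 * G, reserved, lowest)); // 0: already low
    printf("%d\n", should_defragment(false, 9 * G, reserved, lowest)); // 0: not small
    return 0;
  }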
src/hotspot/share/gc/z/zPageAllocator.hpp
@@ -89,6 +89,8 @@ class ZPageAllocator {
   bool alloc_page_common(ZPageAllocation* allocation);
   bool alloc_page_stall(ZPageAllocation* allocation);
   bool alloc_page_or_stall(ZPageAllocation* allocation);
+  bool should_defragment(const ZPage* page) const;
+  bool is_alloc_satisfied(ZPageAllocation* allocation) const;
   ZPage* alloc_page_create(ZPageAllocation* allocation);
   ZPage* alloc_page_finalize(ZPageAllocation* allocation);
   void alloc_page_failed(ZPageAllocation* allocation);
src/hotspot/share/gc/z/zPhysicalMemory.cpp
@@ -295,7 +295,7 @@ void ZPhysicalMemoryManager::alloc(ZPhysicalMemory& pmem, size_t size) {
   // Allocate segments
   while (size > 0) {
     size_t allocated = 0;
-    const uintptr_t start = _manager.alloc_from_front_at_most(size, &allocated);
+    const uintptr_t start = _manager.alloc_low_address_at_most(size, &allocated);
     assert(start != UINTPTR_MAX, "Allocation should never fail");
     pmem.add_segment(ZPhysicalMemorySegment(start, allocated, false /* committed */));
     size -= allocated;
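
For illustration, a hypothetical trace of that loop when the physical free
list is fragmented (toy numbers, not real ZGC offsets). Asking for 6M with
free areas [0, 4M) and [8M, 16M):

  iteration 1: alloc_low_address_at_most(6M) -> start = 0,  allocated = 4M
               segments so far: (0, 4M)              size left: 2M
  iteration 2: alloc_low_address_at_most(2M) -> start = 8M, allocated = 2M
               segments so far: (0, 4M), (8M, 2M)    size left: 0

The resulting ZPhysicalMemory holds discontiguous segments, which is fine:
the segments are mapped back-to-back into whatever contiguous virtual range
the page was given.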
src/hotspot/share/gc/z/zVirtualMemory.cpp
@@ -33,6 +33,7 @@
 
 ZVirtualMemoryManager::ZVirtualMemoryManager(size_t max_capacity) :
     _manager(),
+    _reserved(0),
     _initialized(false) {
 
   // Check max supported heap size
@@ -173,6 +174,9 @@ bool ZVirtualMemoryManager::reserve(size_t max_capacity) {
   log_info_p(gc, init)("Address Space Size: " SIZE_FORMAT "M x " SIZE_FORMAT " = " SIZE_FORMAT "M",
                        reserved / M, ZHeapViews, (reserved * ZHeapViews) / M);
 
+  // Record reserved
+  _reserved = reserved;
+
   return reserved >= max_capacity;
 }

@@ -191,9 +195,9 @@ ZVirtualMemory ZVirtualMemoryManager::alloc(size_t size, bool force_low_address) {
   // Small pages are allocated at low addresses, while medium/large pages
   // are allocated at high addresses (unless forced to be at a low address).
   if (force_low_address || size <= ZPageSizeSmall) {
-    start = _manager.alloc_from_front(size);
+    start = _manager.alloc_low_address(size);
   } else {
-    start = _manager.alloc_from_back(size);
+    start = _manager.alloc_high_address(size);
  }
 
   return ZVirtualMemory(start, size);
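
The low/high split keeps each size class contiguous: small pages grow up from
the bottom of the reserved range while medium/large pages grow down from the
top, so the two never interleave and the middle stays free. A toy bump-pointer
illustration of the effect (not the JDK code):

  #include <cstdint>
  #include <cstdio>

  int main() {
    uintptr_t low = 0;    // next small allocation starts here
    uintptr_t high = 64;  // one-past-the-end for large allocations
    const uintptr_t small = 2, large = 16;  // toy sizes

    const uintptr_t s1 = low;          low += small;  // small -> [0, 2)
    const uintptr_t l1 = high - large; high = l1;     // large -> [48, 64)
    const uintptr_t s2 = low;          low += small;  // small -> [2, 4)
    const uintptr_t l2 = high - large; high = l2;     // large -> [32, 48)

    printf("small at %ju, %ju; large at %ju, %ju; free middle: [%ju, %ju)\n",
           (uintmax_t)s1, (uintmax_t)s2, (uintmax_t)l1, (uintmax_t)l2,
           (uintmax_t)low, (uintmax_t)high);
    return 0;
  }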
src/hotspot/share/gc/z/zVirtualMemory.hpp
@@ -48,6 +48,7 @@ class ZVirtualMemory {
 class ZVirtualMemoryManager {
 private:
   ZMemoryManager _manager;
+  uintptr_t      _reserved;
   bool           _initialized;
 
   // Platform specific implementation
@@ -69,6 +70,9 @@ class ZVirtualMemoryManager {
 
   bool is_initialized() const;
 
+  size_t reserved() const;
+  uintptr_t lowest_available_address() const;
+
   ZVirtualMemory alloc(size_t size, bool force_low_address);
   void free(const ZVirtualMemory& vmem);
 };
src/hotspot/share/gc/z/zVirtualMemory.inline.hpp
@@ -57,4 +57,12 @@ inline ZVirtualMemory ZVirtualMemory::split(size_t size) {
   return ZVirtualMemory(_start - size, size);
 }
 
+inline size_t ZVirtualMemoryManager::reserved() const {
+  return _reserved;
+}
+
+inline uintptr_t ZVirtualMemoryManager::lowest_available_address() const {
+  return _manager.peek_low_address();
+}
+
 #endif // SHARE_GC_Z_ZVIRTUALMEMORY_INLINE_HPP
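
One edge case worth noting: with an empty free list, peek_low_address()
returns UINTPTR_MAX, so lowest_available_address() does too, and the
page->start() > lowest_available_address() test in should_defragment() is
then always false; no remap is attempted when no lower address actually
exists. A tiny check of that sentinel behavior:

  #include <cassert>
  #include <cstdint>

  int main() {
    const uintptr_t lowest = UINTPTR_MAX;  // empty-freelist sentinel
    const uintptr_t page_start = 0x1000;   // any hypothetical page offset
    assert(!(page_start > lowest));        // never "defragment" toward nothing
    return 0;
  }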
