8246220: ZGC: Introduce ZUnmapper to asynchronously unmap pages
Reviewed-by: eosterlund, stefank
pliden committed Jun 9, 2020
1 parent d7e68f3 commit 9d0ba7ae7fa0f9f13ca5ed532231221942df999e
src/hotspot/share/gc/z/zPageAllocator.cpp

@@ -37,6 +37,7 @@
 #include "gc/z/zTask.hpp"
 #include "gc/z/zTracer.inline.hpp"
 #include "gc/z/zUncommitter.hpp"
+#include "gc/z/zUnmapper.hpp"
 #include "gc/z/zWorkers.hpp"
 #include "jfr/jfrEvents.hpp"
 #include "logging/log.hpp"
@@ -149,6 +150,7 @@ ZPageAllocator::ZPageAllocator(ZWorkers* workers,
     _reclaimed(0),
     _stalled(),
     _satisfied(),
+    _unmapper(new ZUnmapper(this)),
     _uncommitter(new ZUncommitter(this)),
     _safe_delete(),
     _initialized(false) {
@@ -381,12 +383,12 @@ void ZPageAllocator::uncommit_page(ZPage* page) {

 void ZPageAllocator::map_page(const ZPage* page) const {
   // Map physical memory
-  _physical.map(page->physical_memory(), page->start());
+  _physical.map(page->start(), page->physical_memory());
 }

 void ZPageAllocator::unmap_page(const ZPage* page) const {
   // Unmap physical memory
-  _physical.unmap(page->physical_memory(), page->start());
+  _physical.unmap(page->start(), page->size());
 }

 void ZPageAllocator::destroy_page(ZPage* page) {
@@ -550,6 +552,8 @@ ZPage* ZPageAllocator::alloc_page_create(ZPageAllocation* allocation) {

   // Allocate virtual memory. To make error handling a lot more straight
   // forward, we allocate virtual memory before destroying flushed pages.
+  // Flushed pages are also unmapped and destroyed asynchronously, so we
+  // can't immediately reuse that part of the address space anyway.
   const ZVirtualMemory vmem = _virtual.alloc(size, allocation->flags().low_address());
   if (vmem.is_null()) {
     log_error(gc)("Out of address space");
@@ -564,14 +568,13 @@ ZPage* ZPageAllocator::alloc_page_create(ZPageAllocation* allocation) {
   for (ZPage* page; iter.next(&page);) {
     flushed += page->size();

-    unmap_page(page);
-
     // Harvest flushed physical memory
     ZPhysicalMemory& fmem = page->physical_memory();
     pmem.add_segments(fmem);
     fmem.remove_segments();

-    destroy_page(page);
+    // Unmap and destroy page
+    _unmapper->unmap_and_destroy_page(page);
   }

   if (flushed > 0) {
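
Note the ordering in this hunk: the loop harvests each flushed page's physical segments before handing the page to the unmapper, so the ZUnmapper thread only ever tears down a virtual mapping whose physical memory has already been reclaimed for the new allocation. This is also why unmap_page() above switched from page->physical_memory() to page->size(): by the time a flushed page is unmapped, its segment list is empty. A minimal sketch of that discipline, using a hypothetical helper name (harvest_and_enqueue is not in the patch):

// Hypothetical helper illustrating the harvest-then-enqueue ordering;
// the types are the ZGC ones used in the hunk above.
void harvest_and_enqueue(ZPage* page, ZPhysicalMemory* pmem, ZUnmapper* unmapper) {
  // Steal the physical segments for reuse by the new page
  ZPhysicalMemory& fmem = page->physical_memory();
  pmem->add_segments(fmem);
  fmem.remove_segments();

  // The page now owns only its virtual range; the unmapper can tear it
  // down later without racing against reuse of the physical memory
  unmapper->unmap_and_destroy_page(page);
}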
@@ -811,21 +814,21 @@ void ZPageAllocator::disable_deferred_delete() const {

 void ZPageAllocator::debug_map_page(const ZPage* page) const {
   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
-  _physical.debug_map(page->physical_memory(), page->start());
+  _physical.debug_map(page->start(), page->physical_memory());
 }

 void ZPageAllocator::debug_unmap_page(const ZPage* page) const {
   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
-  _physical.debug_unmap(page->physical_memory(), page->start());
+  _physical.debug_unmap(page->start(), page->size());
 }

 void ZPageAllocator::pages_do(ZPageClosure* cl) const {
   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

-  ZListIterator<ZPageAllocation> iter(&_satisfied);
-  for (ZPageAllocation* allocation; iter.next(&allocation);) {
-    ZListIterator<ZPage> iter(allocation->pages());
-    for (ZPage* page; iter.next(&page);) {
+  ZListIterator<ZPageAllocation> iter_satisfied(&_satisfied);
+  for (ZPageAllocation* allocation; iter_satisfied.next(&allocation);) {
+    ZListIterator<ZPage> iter_pages(allocation->pages());
+    for (ZPage* page; iter_pages.next(&page);) {
       cl->do_page(page);
     }
   }
@@ -858,5 +861,6 @@ void ZPageAllocator::check_out_of_memory() {
 }

 void ZPageAllocator::threads_do(ThreadClosure* tc) const {
+  tc->do_thread(_unmapper);
   tc->do_thread(_uncommitter);
 }
src/hotspot/share/gc/z/zPageAllocator.hpp

@@ -36,9 +36,11 @@ class ThreadClosure;
 class ZPageAllocation;
 class ZWorkers;
 class ZUncommitter;
+class ZUnmapper;

 class ZPageAllocator {
   friend class VMStructs;
+  friend class ZUnmapper;
   friend class ZUncommitter;

 private:
@@ -59,6 +61,7 @@ class ZPageAllocator {
   ssize_t _reclaimed;
   ZList<ZPageAllocation> _stalled;
   ZList<ZPageAllocation> _satisfied;
+  ZUnmapper* _unmapper;
   ZUncommitter* _uncommitter;
   mutable ZSafeDelete<ZPage> _safe_delete;
   bool _initialized;
src/hotspot/share/gc/z/zPhysicalMemory.cpp

@@ -319,17 +319,15 @@ void ZPhysicalMemoryManager::try_enable_uncommit(size_t min_capacity, size_t max
   log_info(gc, init)("Uncommit Delay: " UINTX_FORMAT "s", ZUncommitDelay);
 }

-void ZPhysicalMemoryManager::nmt_commit(const ZPhysicalMemory& pmem, uintptr_t offset) const {
+void ZPhysicalMemoryManager::nmt_commit(uintptr_t offset, size_t size) const {
   // From an NMT point of view we treat the first heap view (marked0) as committed
   const uintptr_t addr = ZAddress::marked0(offset);
-  const size_t size = pmem.size();
   MemTracker::record_virtual_memory_commit((void*)addr, size, CALLER_PC);
 }

-void ZPhysicalMemoryManager::nmt_uncommit(const ZPhysicalMemory& pmem, uintptr_t offset) const {
+void ZPhysicalMemoryManager::nmt_uncommit(uintptr_t offset, size_t size) const {
   if (MemTracker::tracking_level() > NMT_minimal) {
     const uintptr_t addr = ZAddress::marked0(offset);
-    const size_t size = pmem.size();
     Tracker tracker(Tracker::uncommit);
     tracker.record((address)addr, size);
   }
@@ -403,7 +401,7 @@ void ZPhysicalMemoryManager::pretouch_view(uintptr_t addr, size_t size) const {
   os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
 }

-void ZPhysicalMemoryManager::map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
+void ZPhysicalMemoryManager::map_view(uintptr_t addr, const ZPhysicalMemory& pmem) const {
   size_t size = 0;

   // Map segments
@@ -422,8 +420,8 @@
   }
 }

-void ZPhysicalMemoryManager::unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
-  _backing.unmap(addr, pmem.size());
+void ZPhysicalMemoryManager::unmap_view(uintptr_t addr, size_t size) const {
+  _backing.unmap(addr, size);
 }

 void ZPhysicalMemoryManager::pretouch(uintptr_t offset, size_t size) const {
@@ -438,42 +436,44 @@ void ZPhysicalMemoryManager::pretouch(uintptr_t offset, size_t size) const {
   }
 }

-void ZPhysicalMemoryManager::map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
+void ZPhysicalMemoryManager::map(uintptr_t offset, const ZPhysicalMemory& pmem) const {
+  const size_t size = pmem.size();
+
   if (ZVerifyViews) {
     // Map good view
-    map_view(pmem, ZAddress::good(offset));
+    map_view(ZAddress::good(offset), pmem);
   } else {
     // Map all views
-    map_view(pmem, ZAddress::marked0(offset));
-    map_view(pmem, ZAddress::marked1(offset));
-    map_view(pmem, ZAddress::remapped(offset));
+    map_view(ZAddress::marked0(offset), pmem);
+    map_view(ZAddress::marked1(offset), pmem);
+    map_view(ZAddress::remapped(offset), pmem);
   }

-  nmt_commit(pmem, offset);
+  nmt_commit(offset, size);
 }

-void ZPhysicalMemoryManager::unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
-  nmt_uncommit(pmem, offset);
+void ZPhysicalMemoryManager::unmap(uintptr_t offset, size_t size) const {
+  nmt_uncommit(offset, size);

   if (ZVerifyViews) {
     // Unmap good view
-    unmap_view(pmem, ZAddress::good(offset));
+    unmap_view(ZAddress::good(offset), size);
   } else {
     // Unmap all views
-    unmap_view(pmem, ZAddress::marked0(offset));
-    unmap_view(pmem, ZAddress::marked1(offset));
-    unmap_view(pmem, ZAddress::remapped(offset));
+    unmap_view(ZAddress::marked0(offset), size);
+    unmap_view(ZAddress::marked1(offset), size);
+    unmap_view(ZAddress::remapped(offset), size);
   }
 }

-void ZPhysicalMemoryManager::debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
+void ZPhysicalMemoryManager::debug_map(uintptr_t offset, const ZPhysicalMemory& pmem) const {
   // Map good view
   assert(ZVerifyViews, "Should be enabled");
-  map_view(pmem, ZAddress::good(offset));
+  map_view(ZAddress::good(offset), pmem);
 }

-void ZPhysicalMemoryManager::debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
+void ZPhysicalMemoryManager::debug_unmap(uintptr_t offset, size_t size) const {
   // Unmap good view
   assert(ZVerifyViews, "Should be enabled");
-  unmap_view(pmem, ZAddress::good(offset));
+  unmap_view(ZAddress::good(offset), size);
 }
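
All of the signature changes in this file follow one pattern: mapping still takes the ZPhysicalMemory, because wiring a view in requires knowing which backing segments to map, while unmapping now takes only an offset and a size, because tearing a view down is purely a virtual-range operation (and, with asynchronous unmap, the page's segments may already have been harvested and reused). The underlying OS primitives have the same asymmetry; a rough POSIX analogue, illustrative only and not ZGC's actual backing code:

#include <stddef.h>
#include <sys/types.h>
#include <sys/mman.h>

// Mapping must name the backing memory (here a file descriptor plus an
// offset into it), much like passing the ZPhysicalMemory segments.
void* map_range(void* addr, size_t size, int fd, off_t phys_offset) {
  return mmap(addr, size, PROT_READ | PROT_WRITE,
              MAP_FIXED | MAP_SHARED, fd, phys_offset);
}

// Unmapping needs only the virtual range, like unmap(offset, size).
int unmap_range(void* addr, size_t size) {
  return munmap(addr, size);
}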
src/hotspot/share/gc/z/zPhysicalMemory.hpp

@@ -85,12 +85,12 @@ class ZPhysicalMemoryManager {
   ZPhysicalMemoryBacking _backing;
   ZMemoryManager _manager;

-  void nmt_commit(const ZPhysicalMemory& pmem, uintptr_t offset) const;
-  void nmt_uncommit(const ZPhysicalMemory& pmem, uintptr_t offset) const;
+  void nmt_commit(uintptr_t offset, size_t size) const;
+  void nmt_uncommit(uintptr_t offset, size_t size) const;

   void pretouch_view(uintptr_t addr, size_t size) const;
-  void map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
-  void unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
+  void map_view(uintptr_t addr, const ZPhysicalMemory& pmem) const;
+  void unmap_view(uintptr_t addr, size_t size) const;

 public:
   ZPhysicalMemoryManager(size_t max_capacity);
@@ -108,11 +108,11 @@ class ZPhysicalMemoryManager {

   void pretouch(uintptr_t offset, size_t size) const;

-  void map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
-  void unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
+  void map(uintptr_t offset, const ZPhysicalMemory& pmem) const;
+  void unmap(uintptr_t offset, size_t size) const;

-  void debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
-  void debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
+  void debug_map(uintptr_t offset, const ZPhysicalMemory& pmem) const;
+  void debug_unmap(uintptr_t offset, size_t size) const;
 };

 #endif // SHARE_GC_Z_ZPHYSICALMEMORY_HPP
src/hotspot/share/gc/z/zUnmapper.cpp (new file)

@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zList.inline.hpp"
+#include "gc/z/zLock.inline.hpp"
+#include "gc/z/zPage.inline.hpp"
+#include "gc/z/zPageAllocator.hpp"
+#include "gc/z/zUnmapper.hpp"
+#include "jfr/jfrEvents.hpp"
+#include "runtime/globals.hpp"
+
+ZUnmapper::ZUnmapper(ZPageAllocator* page_allocator) :
+    _page_allocator(page_allocator),
+    _lock(),
+    _queue(),
+    _stop(false) {
+  set_name("ZUnmapper");
+  create_and_start();
+}
+
+ZPage* ZUnmapper::dequeue() {
+  ZLocker<ZConditionLock> locker(&_lock);
+
+  for (;;) {
+    if (_stop) {
+      return NULL;
+    }
+
+    ZPage* const page = _queue.remove_first();
+    if (page != NULL) {
+      return page;
+    }
+
+    _lock.wait();
+  }
+}
+
+void ZUnmapper::do_unmap_and_destroy_page(ZPage* page) const {
+  EventZUnmap event;
+  const size_t unmapped = page->size();
+
+  // Unmap and destroy
+  _page_allocator->unmap_page(page);
+  _page_allocator->destroy_page(page);
+
+  // Send event
+  event.commit(unmapped);
+}
+
+void ZUnmapper::unmap_and_destroy_page(ZPage* page) {
+  // Asynchronous unmap and destroy is not supported with ZVerifyViews
+  if (ZVerifyViews) {
+    // Immediately unmap and destroy
+    do_unmap_and_destroy_page(page);
+  } else {
+    // Enqueue for asynchronous unmap and destroy
+    ZLocker<ZConditionLock> locker(&_lock);
+    _queue.insert_last(page);
+    _lock.notify_all();
+  }
+}
+
+void ZUnmapper::run_service() {
+  for (;;) {
+    ZPage* const page = dequeue();
+    if (page == NULL) {
+      // Stop
+      return;
+    }
+
+    do_unmap_and_destroy_page(page);
+  }
+}
+
+void ZUnmapper::stop_service() {
+  ZLocker<ZConditionLock> locker(&_lock);
+  _stop = true;
+  _lock.notify_all();
+}
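
ZUnmapper follows a standard single-consumer service-thread pattern: producers enqueue pages under a condition lock, the service loop blocks in dequeue() until a page arrives or stop_service() is called, and stop takes precedence over draining the remaining queue. For readers more familiar with the standard library than with HotSpot's ZConditionLock and ZList, an equivalent sketch (hypothetical class, std types standing in for the ZGC ones):

#include <condition_variable>
#include <deque>
#include <mutex>

// Portable sketch of the ZUnmapper queue discipline. Not HotSpot code;
// std::mutex/std::condition_variable replace ZConditionLock, and a
// std::deque of opaque pointers replaces ZList<ZPage>.
class AsyncUnmapQueue {
private:
  std::mutex              _mutex;
  std::condition_variable _cv;
  std::deque<void*>       _queue;
  bool                    _stop = false;

public:
  void enqueue(void* page) {
    std::lock_guard<std::mutex> guard(_mutex);
    _queue.push_back(page);
    _cv.notify_all();
  }

  // Blocks until a page is available or stop() is called; returns
  // nullptr on stop, mirroring ZUnmapper::dequeue()
  void* dequeue() {
    std::unique_lock<std::mutex> lock(_mutex);
    _cv.wait(lock, [this] { return _stop || !_queue.empty(); });
    if (_stop) {
      return nullptr;
    }
    void* const page = _queue.front();
    _queue.pop_front();
    return page;
  }

  void stop() {
    std::lock_guard<std::mutex> guard(_mutex);
    _stop = true;
    _cv.notify_all();
  }
};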
