8252221: Use multiple workers for Parallel GC pre-touching
Reviewed-by: kbarrett, tschatzl
amitdpawar authored and Thomas Schatzl committed Oct 15, 2020
1 parent f44fc6d commit 9359ff03ae6b9e09e7defef148864f40e949b669
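Before the per-file hunks, a minimal standalone sketch of the technique this patch brings to Parallel GC may help: a gang of workers divides the address range into fixed-size chunks, each worker claims the next chunk with an atomic fetch-and-add, and then touches one byte per page so the operating system backs the memory up front. The sketch below is plain C++ (std::atomic/std::thread) with made-up names such as pretouch_parallel; it illustrates the idea only and is not the HotSpot code — the actual implementation is the PretouchTask added further down.

```cpp
#include <atomic>
#include <cstddef>
#include <thread>
#include <vector>

// Workers repeatedly claim the next chunk of [start, start + total) via an
// atomic fetch-and-add on a shared offset, then write one byte per page in
// the claimed chunk. Chunk claiming mirrors the PretouchTask loop in the
// diff; names and the threading setup here are illustrative only.
static void pretouch_parallel(char* start, std::size_t total, std::size_t page_size,
                              std::size_t chunk_size, unsigned num_workers) {
  std::atomic<std::size_t> next_offset{0};
  auto worker = [&]() {
    while (true) {
      std::size_t offset = next_offset.fetch_add(chunk_size);  // claim next chunk
      if (offset >= total) {
        break;                                                 // whole range claimed
      }
      std::size_t limit = offset + chunk_size < total ? offset + chunk_size : total;
      for (volatile char* p = start + offset; p < start + limit; p += page_size) {
        *p = 0;                                                // touch one byte per page
      }
    }
  };
  std::vector<std::thread> gang;
  for (unsigned i = 0; i < num_workers; ++i) {
    gang.emplace_back(worker);
  }
  for (std::thread& t : gang) {
    t.join();
  }
}

int main() {
  // Demo only: a heap-allocated buffer stands in for freshly committed GC memory.
  const std::size_t size = 64u * 1024 * 1024;                  // 64 MB range
  std::vector<char> buffer(size);
  pretouch_parallel(buffer.data(), size, 4096, 1024 * 1024, 4); // 4 KB pages, 1 MB chunks, 4 workers
  return 0;
}
```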
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@

#include "precompiled.hpp"
#include "gc/g1/g1PageBasedVirtualSpace.hpp"
#include "gc/shared/pretouchTask.hpp"
#include "gc/shared/workgroup.hpp"
#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"
@@ -234,56 +235,10 @@ void G1PageBasedVirtualSpace::uncommit(size_t start_page, size_t size_in_pages)
   _committed.clear_range(start_page, end_page);
 }
 
-class G1PretouchTask : public AbstractGangTask {
- private:
-  char* volatile _cur_addr;
-  char* const _start_addr;
-  char* const _end_addr;
-  size_t _page_size;
- public:
-  G1PretouchTask(char* start_address, char* end_address, size_t page_size) :
-    AbstractGangTask("G1 PreTouch"),
-    _cur_addr(start_address),
-    _start_addr(start_address),
-    _end_addr(end_address),
-    _page_size(0) {
-#ifdef LINUX
-    _page_size = UseTransparentHugePages ? (size_t)os::vm_page_size(): page_size;
-#else
-    _page_size = page_size;
-#endif
-  }
-
-  virtual void work(uint worker_id) {
-    size_t const actual_chunk_size = MAX2(chunk_size(), _page_size);
-    while (true) {
-      char* touch_addr = Atomic::fetch_and_add(&_cur_addr, actual_chunk_size);
-      if (touch_addr < _start_addr || touch_addr >= _end_addr) {
-        break;
-      }
-      char* end_addr = touch_addr + MIN2(actual_chunk_size, pointer_delta(_end_addr, touch_addr, sizeof(char)));
-      os::pretouch_memory(touch_addr, end_addr, _page_size);
-    }
-  }
-
-  static size_t chunk_size() { return PreTouchParallelChunkSize; }
-};
 
 void G1PageBasedVirtualSpace::pretouch(size_t start_page, size_t size_in_pages, WorkGang* pretouch_gang) {
-  G1PretouchTask cl(page_start(start_page), bounded_end_addr(start_page + size_in_pages), _page_size);
-
-  if (pretouch_gang != NULL) {
-    size_t num_chunks = MAX2((size_t)1, size_in_pages * _page_size / MAX2(G1PretouchTask::chunk_size(), _page_size));
-
-    uint num_workers = MIN2((uint)num_chunks, pretouch_gang->total_workers());
-    log_debug(gc, heap)("Running %s with %u workers for " SIZE_FORMAT " work units pre-touching " SIZE_FORMAT "B.",
-                        cl.name(), num_workers, num_chunks, size_in_pages * _page_size);
-    pretouch_gang->run_task(&cl, num_workers);
-  } else {
-    log_debug(gc, heap)("Running %s pre-touching " SIZE_FORMAT "B.",
-                        cl.name(), size_in_pages * _page_size);
-    cl.work(0);
-  }
+  PretouchTask::pretouch("G1 PreTouch", page_start(start_page), bounded_end_addr(start_page + size_in_pages),
+                         _page_size, pretouch_gang);
 }
 
 bool G1PageBasedVirtualSpace::contains(const void* p) const {
@@ -26,6 +26,7 @@
#include "gc/parallel/mutableNUMASpace.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/workgroup.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
@@ -572,7 +573,8 @@ void MutableNUMASpace::merge_regions(MemRegion new_region, MemRegion* intersecti
 void MutableNUMASpace::initialize(MemRegion mr,
                                   bool clear_space,
                                   bool mangle_space,
-                                  bool setup_pages) {
+                                  bool setup_pages,
+                                  WorkGang* pretouch_gang) {
   assert(clear_space, "Reallocation will destroy data!");
   assert(lgrp_spaces()->length() > 0, "There should be at least one space");

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -195,7 +195,11 @@ class MutableNUMASpace : public MutableSpace {
   MutableNUMASpace(size_t alignment);
   virtual ~MutableNUMASpace();
   // Space initialization.
-  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space, bool setup_pages = SetupPages);
+  virtual void initialize(MemRegion mr,
+                          bool clear_space,
+                          bool mangle_space,
+                          bool setup_pages = SetupPages,
+                          WorkGang* pretouch_gang = NULL);
   // Update space layout if necessary. Do all adaptive resizing job.
   virtual void update();
   // Update allocation rate averages.
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@

#include "precompiled.hpp"
#include "gc/parallel/mutableSpace.hpp"
#include "gc/shared/pretouchTask.hpp"
#include "gc/shared/spaceDecorator.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/universe.hpp"
@@ -60,14 +61,11 @@ void MutableSpace::numa_setup_pages(MemRegion mr, bool clear_space) {
   }
 }
 
-void MutableSpace::pretouch_pages(MemRegion mr) {
-  os::pretouch_memory(mr.start(), mr.end());
-}
-
 void MutableSpace::initialize(MemRegion mr,
                               bool clear_space,
                               bool mangle_space,
-                              bool setup_pages) {
+                              bool setup_pages,
+                              WorkGang* pretouch_gang) {
 
   assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()),
          "invalid space boundaries");
@@ -114,8 +112,13 @@ void MutableSpace::initialize(MemRegion mr,
   }
 
   if (AlwaysPreTouch) {
-    pretouch_pages(head);
-    pretouch_pages(tail);
+    size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
+
+    PretouchTask::pretouch("ParallelGC PreTouch head", (char*)head.start(), (char*)head.end(),
+                           page_size, pretouch_gang);
+
+    PretouchTask::pretouch("ParallelGC PreTouch tail", (char*)tail.start(), (char*)tail.end(),
+                           page_size, pretouch_gang);
   }
 
   // Remember where we stopped so that we can continue later.
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,8 @@
#include "memory/memRegion.hpp"
#include "utilities/copy.hpp"

class WorkGang;

// A MutableSpace is a subtype of ImmutableSpace that supports the
// concept of allocation. This includes the concepts that a space may
// be only partially full, and the query methods that go with such
@@ -56,7 +58,6 @@ class MutableSpace: public ImmutableSpace {
   MutableSpaceMangler* mangler() { return _mangler; }
 
   void numa_setup_pages(MemRegion mr, bool clear_space);
-  void pretouch_pages(MemRegion mr);
 
   void set_last_setup_region(MemRegion mr) { _last_setup_region = mr; }
   MemRegion last_setup_region() const { return _last_setup_region; }
@@ -87,7 +88,8 @@ class MutableSpace: public ImmutableSpace {
   virtual void initialize(MemRegion mr,
                           bool clear_space,
                           bool mangle_space,
-                          bool setup_pages = SetupPages);
+                          bool setup_pages = SetupPages,
+                          WorkGang* pretouch_gang = NULL);
 
   virtual void clear(bool mangle_space);
   // Does the usual initialization but optionally resets top to bottom.
@@ -88,6 +88,9 @@ jint ParallelScavengeHeap::initialize() {
   ReservedSpace young_rs = heap_rs.last_part(MaxOldSize);
   assert(young_rs.size() == MaxNewSize, "Didn't reserve all of the heap");
 
+  // Set up WorkGang
+  _workers.initialize_workers();
+
   // Create and initialize the generations.
   _young_gen = new PSYoungGen(
       young_rs,
@@ -132,9 +135,6 @@ jint ParallelScavengeHeap::initialize() {
     return JNI_ENOMEM;
   }
 
-  // Set up WorkGang
-  _workers.initialize_workers();
-
   GCInitLogger::print();
 
   return JNI_OK;
@@ -131,7 +131,9 @@ void PSOldGen::initialize_work(const char* perf_data_name, int level) {
   _object_space = new MutableSpace(virtual_space()->alignment());
   object_space()->initialize(cmr,
                              SpaceDecorator::Clear,
-                             SpaceDecorator::Mangle);
+                             SpaceDecorator::Mangle,
+                             MutableSpace::SetupPages,
+                             &ParallelScavengeHeap::heap()->workers());
 
   // Update the start_array
   start_array()->set_covered_region(cmr);
@@ -351,10 +353,15 @@ void PSOldGen::post_resize() {
   start_array()->set_covered_region(new_memregion);
   ParallelScavengeHeap::heap()->card_table()->resize_covered_region(new_memregion);
 
+  WorkGang* workers = Thread::current()->is_VM_thread() ?
+                      &ParallelScavengeHeap::heap()->workers() : NULL;
+
   // ALWAYS do this last!!
   object_space()->initialize(new_memregion,
                              SpaceDecorator::DontClear,
-                             SpaceDecorator::DontMangle);
+                             SpaceDecorator::DontMangle,
+                             MutableSpace::SetupPages,
+                             workers);
 
   assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
          "Sanity");
@@ -189,9 +189,10 @@ void PSYoungGen::set_space_boundaries(size_t eden_size, size_t survivor_size) {
   MemRegion to_mr  ((HeapWord*)to_start, (HeapWord*)from_start);
   MemRegion from_mr((HeapWord*)from_start, (HeapWord*)from_end);
 
-  eden_space()->initialize(eden_mr, true, ZapUnusedHeapArea);
-    to_space()->initialize(to_mr  , true, ZapUnusedHeapArea);
-  from_space()->initialize(from_mr, true, ZapUnusedHeapArea);
+  WorkGang& pretouch_workers = ParallelScavengeHeap::heap()->workers();
+  eden_space()->initialize(eden_mr, true, ZapUnusedHeapArea, MutableSpace::SetupPages, &pretouch_workers);
+    to_space()->initialize(to_mr  , true, ZapUnusedHeapArea, MutableSpace::SetupPages, &pretouch_workers);
+  from_space()->initialize(from_mr, true, ZapUnusedHeapArea, MutableSpace::SetupPages, &pretouch_workers);
 }
 
 #ifndef PRODUCT
@@ -636,17 +637,26 @@ void PSYoungGen::resize_spaces(size_t requested_eden_size,
     from_space()->check_mangled_unused_area(limit);
     to_space()->check_mangled_unused_area(limit);
   }
+
+  WorkGang* workers = &ParallelScavengeHeap::heap()->workers();
+
   // When an existing space is being initialized, it is not
   // mangled because the space has been previously mangled.
   eden_space()->initialize(edenMR,
                            SpaceDecorator::Clear,
-                           SpaceDecorator::DontMangle);
+                           SpaceDecorator::DontMangle,
+                           MutableSpace::SetupPages,
+                           workers);
     to_space()->initialize(toMR,
                            SpaceDecorator::Clear,
-                           SpaceDecorator::DontMangle);
+                           SpaceDecorator::DontMangle,
+                           MutableSpace::SetupPages,
+                           workers);
   from_space()->initialize(fromMR,
                            SpaceDecorator::DontClear,
-                           SpaceDecorator::DontMangle);
+                           SpaceDecorator::DontMangle,
+                           MutableSpace::SetupPages,
+                           workers);
 
   assert(from_space()->top() == old_from_top, "from top changed!");

@@ -783,9 +793,12 @@ void PSYoungGen::reset_survivors_after_shrink() {
   // Was there a shrink of the survivor space?
   if (new_end < space_shrinking->end()) {
     MemRegion mr(space_shrinking->bottom(), new_end);
+
     space_shrinking->initialize(mr,
                                 SpaceDecorator::DontClear,
-                                SpaceDecorator::Mangle);
+                                SpaceDecorator::Mangle,
+                                MutableSpace::SetupPages,
+                                &ParallelScavengeHeap::heap()->workers());
   }
 }

@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/pretouchTask.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/os.hpp"
+
+PretouchTask::PretouchTask(const char* task_name, char* start_address, char* end_address, size_t page_size) :
+    AbstractGangTask(task_name),
+    _cur_addr(start_address),
+    _start_addr(start_address),
+    _end_addr(end_address),
+    _page_size(0) {
+#ifdef LINUX
+  _page_size = UseTransparentHugePages ? (size_t)os::vm_page_size(): page_size;
+#else
+  _page_size = page_size;
+#endif
+}
+
+size_t PretouchTask::chunk_size() {
+  return PreTouchParallelChunkSize;
+}
+
+void PretouchTask::work(uint worker_id) {
+  size_t const actual_chunk_size = MAX2(chunk_size(), _page_size);
+
+  while (true) {
+    char* touch_addr = Atomic::fetch_and_add(&_cur_addr, actual_chunk_size);
+    if (touch_addr < _start_addr || touch_addr >= _end_addr) {
+      break;
+    }
+
+    char* end_addr = touch_addr + MIN2(actual_chunk_size, pointer_delta(_end_addr, touch_addr, sizeof(char)));
+
+    os::pretouch_memory(touch_addr, end_addr, _page_size);
+  }
+}
+
+void PretouchTask::pretouch(const char* task_name, char* start_address, char* end_address,
+                            size_t page_size, WorkGang* pretouch_gang) {
+  PretouchTask task(task_name, start_address, end_address, page_size);
+  size_t total_bytes = pointer_delta(end_address, start_address, sizeof(char));
+
+  if (pretouch_gang != NULL) {
+    size_t num_chunks = MAX2((size_t)1, total_bytes / MAX2(PretouchTask::chunk_size(), page_size));
+
+    uint num_workers = MIN2((uint)num_chunks, pretouch_gang->total_workers());
+    log_debug(gc, heap)("Running %s with %u workers for " SIZE_FORMAT " work units pre-touching " SIZE_FORMAT "B.",
+                        task.name(), num_workers, num_chunks, total_bytes);
+
+    pretouch_gang->run_task(&task, num_workers);
+  } else {
+    log_debug(gc, heap)("Running %s pre-touching " SIZE_FORMAT "B.",
+                        task.name(), total_bytes);
+    task.work(0);
+  }
+}
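A worked example of the sizing logic in PretouchTask::pretouch, using assumed numbers rather than defaults from the source: pre-touching an 8 GB range with a 4 MB chunk size and 4 KB pages gives num_chunks = 8 GB / MAX2(4 MB, 4 KB) = 2048 work units, so num_workers = MIN2(2048, total_workers()) and the gang is simply capped at its own size. With a much larger assumed chunk size, say 1 GB, only 8 work units exist and at most 8 workers would be dispatched.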
