Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

8254739: G1: Optimize evacuation failure for regions with few failed objects #5181

Closed
Changes from 4 commits
Commits
Show all changes
21 commits
Select commit Hold shift + click to select a range
954b5ee
speed up iterate evac failure objs in one region.
Hamlin-Li Aug 18, 2021
4d88f42
fix: reset state after iteration
Hamlin-Li Aug 18, 2021
74bc4d9
Use a segmented array rather than a linked list to record evacuation …
Hamlin-Li Aug 25, 2021
05f026a
Fix compilation issues on some platform.
Hamlin-Li Aug 25, 2021
ded8275
Fix test failures; Fix compilation failures on some platforms
Hamlin-Li Aug 26, 2021
51d19eb
Fix test failures; Fix compilation failures on some platforms
Hamlin-Li Aug 26, 2021
f454925
Fix compilation error on windows
Hamlin-Li Aug 26, 2021
34aed3f
Fix compilation error on windows
Hamlin-Li Aug 26, 2021
43a8b59
Merge branch 'master' into speedup-iterate-evac-failure-objs-in-one-r…
Hamlin-Li Oct 25, 2021
6132372
Fix wrong merge
Hamlin-Li Oct 25, 2021
44e3562
Use refactored G1SegmentedArray rather than home-made Array+Node
Hamlin-Li Oct 25, 2021
e779c3a
Add asserts, comments
Hamlin-Li Oct 26, 2021
924fec5
Rename from g1EvacuationFailureObjsInHR to g1EvacFailureObjsInHR
Hamlin-Li Oct 26, 2021
0070826
Refactor as Thomas suggested
Hamlin-Li Oct 29, 2021
c4ca77c
Fix compilation error
Hamlin-Li Oct 29, 2021
ab04f1c
Fix compilation error
Hamlin-Li Oct 29, 2021
3712037
Merge branch 'openjdk:master' into speedup-iterate-evac-failure-objs-…
Hamlin-Li Oct 29, 2021
82c172a
Refine code based on Thomas' suggestion
Hamlin-Li Oct 29, 2021
d33f87b
Merge branch 'openjdk:master' into speedup-iterate-evac-failure-objs-…
Hamlin-Li Nov 3, 2021
e588cad
Move allocation/deallocation in one place
Hamlin-Li Nov 3, 2021
3efa90b
Fix typo
Hamlin-Li Nov 5, 2021
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
@@ -114,47 +114,46 @@ class RemoveSelfForwardPtrObjClosure: public ObjectClosure {
HeapWord* obj_addr = cast_from_oop<HeapWord*>(obj);
assert(_hr->is_in(obj_addr), "sanity");

if (obj->is_forwarded() && obj->forwardee() == obj) {
// The object failed to move.

zap_dead_objects(_last_forwarded_object_end, obj_addr);
// We consider all objects that we find self-forwarded to be
// live. What we'll do is that we'll update the prev marking
// info so that they are all under PTAMS and explicitly marked.
if (!_cm->is_marked_in_prev_bitmap(obj)) {
_cm->mark_in_prev_bitmap(obj);
}
if (_during_concurrent_start) {
// For the next marking info we'll only mark the
// self-forwarded objects explicitly if we are during
// concurrent start (since, normally, we only mark objects pointed
// to by roots if we succeed in copying them). By marking all
// self-forwarded objects we ensure that we mark any that are
// still pointed to be roots. During concurrent marking, and
// after concurrent start, we don't need to mark any objects
// explicitly and all objects in the CSet are considered
// (implicitly) live. So, we won't mark them explicitly and
// we'll leave them over NTAMS.
_cm->mark_in_next_bitmap(_worker_id, _hr, obj);
}
size_t obj_size = obj->size();

_marked_bytes += (obj_size * HeapWordSize);
PreservedMarks::init_forwarded_mark(obj);

// During evacuation failure we do not record inter-region
// references referencing regions that need a remembered set
// update originating from young regions (including eden) that
// failed evacuation. Make up for that omission now by rescanning
// these failed objects.
if (_is_young) {
obj->oop_iterate(_log_buffer_cl);
}

HeapWord* obj_end = obj_addr + obj_size;
_last_forwarded_object_end = obj_end;
_hr->cross_threshold(obj_addr, obj_end);
// The object failed to move.
assert(obj->is_forwarded() && obj->forwardee() == obj, "sanity");

zap_dead_objects(_last_forwarded_object_end, obj_addr);
// We consider all objects that we find self-forwarded to be
// live. What we'll do is that we'll update the prev marking
// info so that they are all under PTAMS and explicitly marked.
if (!_cm->is_marked_in_prev_bitmap(obj)) {
_cm->mark_in_prev_bitmap(obj);
}
if (_during_concurrent_start) {
// For the next marking info we'll only mark the
// self-forwarded objects explicitly if we are during
// concurrent start (since, normally, we only mark objects pointed
// to by roots if we succeed in copying them). By marking all
// self-forwarded objects we ensure that we mark any that are
// still pointed to be roots. During concurrent marking, and
// after concurrent start, we don't need to mark any objects
// explicitly and all objects in the CSet are considered
// (implicitly) live. So, we won't mark them explicitly and
// we'll leave them over NTAMS.
_cm->mark_in_next_bitmap(_worker_id, _hr, obj);
}
size_t obj_size = obj->size();

_marked_bytes += (obj_size * HeapWordSize);
PreservedMarks::init_forwarded_mark(obj);

// During evacuation failure we do not record inter-region
// references referencing regions that need a remembered set
// update originating from young regions (including eden) that
// failed evacuation. Make up for that omission now by rescanning
// these failed objects.
if (_is_young) {
obj->oop_iterate(_log_buffer_cl);
}

HeapWord* obj_end = obj_addr + obj_size;
_last_forwarded_object_end = obj_end;
_hr->cross_threshold(obj_addr, obj_end);
}

// Fill the memory area from start to end with filler objects, and update the BOT
@@ -223,7 +222,8 @@ class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
&_log_buffer_cl,
during_concurrent_start,
_worker_id);
hr->object_iterate(&rspc);
// Iterates evac failure objs which are recorded during evacuation.
hr->iterate_evac_failure_objs(&rspc);
// Need to zap the remainder area of the processed region.
rspc.zap_remainder();

@@ -0,0 +1,106 @@
/*
* Copyright (c) 2021, Huawei and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "precompiled.hpp"
#include "g1EvacuationFailureObjsInHR.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "utilities/quickSort.hpp"



// === G1EvacuationFailureObjsInHR ===

// Copy a single recorded element from the segmented array into the next
// free slot of the flat offset array.
void G1EvacuationFailureObjsInHR::visit(Elem elem) {
  _offset_array[_objs_num++] = elem;
}

// Bulk-copy one node's worth of recorded offsets into the flat offset
// array. 'limit' is the number of valid elements stored in this node.
void G1EvacuationFailureObjsInHR::visit(Array<NODE_LENGTH, Elem>::NODE_XXX* node, uint32_t limit) {
::memcpy(&_offset_array[_objs_num], node->_oop_offsets, limit * sizeof(Elem));
_objs_num += limit;
}

// Flatten the segmented node array into one contiguous C-heap array of
// offsets so it can be sorted and iterated cheaply. The segmented array is
// reset afterwards so it can be reused for the next evacuation failure.
void G1EvacuationFailureObjsInHR::compact() {
  assert(_offset_array == NULL, "must be");
  _offset_array = NEW_C_HEAP_ARRAY(Elem, _nodes_array.objs_num(), mtGC);
  // Visit whole nodes (bulk memcpy) rather than individual elements.
  _nodes_array.iterate_nodes(this);
  uint expected = _nodes_array.objs_num();
  assert(_objs_num == expected, "must be %u, %u", _objs_num, expected);
  _nodes_array.reset();
}

// Comparator for QuickSort: orders the recorded region-relative offsets
// ascending. Uses explicit comparisons instead of 'static_cast<int>(a-b)'
// so the result does not depend on implementation-defined behavior of
// unsigned-to-signed conversion (and stays correct for any Elem values).
static int order_oop(G1EvacuationFailureObjsInHR::Elem a,
                     G1EvacuationFailureObjsInHR::Elem b) {
  if (a < b) {
    return -1;
  }
  if (a > b) {
    return 1;
  }
  return 0;
}

// Sort the compacted offsets ascending so iteration visits the failed
// objects in address order (iterate_internal asserts strict monotonicity).
void G1EvacuationFailureObjsInHR::sort() {
QuickSort::sort(_offset_array, _objs_num, order_oop, true);
}

void G1EvacuationFailureObjsInHR::clear_array() {
FREE_C_HEAP_ARRAY(oop, _offset_array);
_offset_array = NULL;
_objs_num = 0;
}

// Apply 'closure' to every recorded failed object, in ascending address
// order, then release the offset array. Must be preceded by compact() and
// sort().
void G1EvacuationFailureObjsInHR::iterate_internal(ObjectClosure* closure) {
Elem prev = 0;
for (uint i = 0; i < _objs_num; i++) {
// Offsets must be strictly increasing, i.e. sorted and duplicate-free.
// NOTE(review): for i == 0 this also asserts the first offset is > 0,
// i.e. assumes no recorded object sits exactly at the region bottom —
// confirm that cannot happen.
assert(prev < _offset_array[i], "must be");
// 'prev = ...' deliberately updates the running previous offset while
// converting the current offset back to an oop for the closure.
closure->do_object(cast_from_offset(prev = _offset_array[i]));
}
clear_array();
}

// Tracks objects that failed evacuation within a single heap region,
// identified by region index and region bottom address.
G1EvacuationFailureObjsInHR::G1EvacuationFailureObjsInHR(uint region_idx, HeapWord* bottom) :
_region_idx(region_idx),
_bottom(bottom),
// Size the segmented array so NODE_LENGTH-element nodes can cover the whole
// region; the +1 accounts for integer-division rounding.
_nodes_array(HeapRegion::GrainWords / NODE_LENGTH + 1),
_offset_array(NULL),
_objs_num(0) {
// Offsets from region bottom must fit in the 32-bit Elem type.
assert(HeapRegion::LogOfHRGrainBytes < 32, "must be");
}

// Free any leftover compacted offset array (no-op if already cleared).
G1EvacuationFailureObjsInHR::~G1EvacuationFailureObjsInHR() {
clear_array();
}

// Record an object that failed evacuation in this region. The object is
// stored as its offset from the region bottom in the segmented array.
void G1EvacuationFailureObjsInHR::record(oop obj) {
assert(obj != NULL, "must be");
// The object must actually live in the region this structure tracks.
assert(G1CollectedHeap::heap()->heap_region_containing(obj)->hrm_index() == _region_idx, "must be");
Elem offset = cast_from_oop_addr(obj);
// Round-trip check: the offset must convert back to the same oop.
assert(obj == cast_from_offset(offset), "must be");
// NOTE(review): 1<<25 looks like a bound derived from the maximum region
// size — consider expressing it via HeapRegion::LogOfHRGrainBytes instead
// of a magic number; confirm the intended bound.
assert(offset < (1<<25), "must be");
_nodes_array.add(offset);
}

// Iterate all recorded failed objects in address order: first compact the
// segmented records into a flat array, then sort it, then apply 'closure'
// to each object. The recorded state is consumed by this call; afterwards
// the structure is empty and ready to record again.
void G1EvacuationFailureObjsInHR::iterate(ObjectClosure* closure) {
compact();
sort();
iterate_internal(closure);
}