Commit 6555996

Thomas Schatzl authored and committed
8253600: G1: Fully support pinned regions for full gc
Reviewed-by: sjohanss, ayang
1 parent 97d6e4a commit 6555996

31 files changed: 368 additions, 295 deletions
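
For orientation before the per-file hunks: this change drops G1's global archive side table (G1ArchiveRegionMap and the static check/marking methods on G1ArchiveAllocator) and instead answers "is this an archived object?" from the type of the HeapRegion containing the object, via the new virtual G1CollectedHeap::is_archived_object(). The sketch below only illustrates that region-attribute lookup pattern; it is standalone example code, not HotSpot code, and every name in it (ToyHeap, ToyRegion, RegionKind) is a hypothetical stand-in.

// Illustration only: a toy heap that answers "is this object archived?" from the
// kind of the region containing it, mirroring the pattern the diff below moves to.
// All names here are hypothetical stand-ins, not HotSpot API.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

enum class RegionKind { Free, Young, Old, Humongous, OpenArchive, ClosedArchive };

struct ToyRegion {
  RegionKind kind;
  bool is_archive() const {
    return kind == RegionKind::OpenArchive || kind == RegionKind::ClosedArchive;
  }
};

class ToyHeap {
  std::vector<ToyRegion> _regions;
  uintptr_t _base;
  size_t _region_bytes;

public:
  ToyHeap(uintptr_t base, size_t region_bytes, size_t num_regions)
    : _regions(num_regions, ToyRegion{RegionKind::Free}),
      _base(base),
      _region_bytes(region_bytes) {}

  ToyRegion& region_containing(uintptr_t addr) {
    return _regions[(addr - _base) / _region_bytes];
  }

  // Same shape as the new G1CollectedHeap::is_archived_object() added below:
  // no global side table, just the containing region's attribute.
  bool is_archived_object(uintptr_t addr) {
    return addr != 0 && region_containing(addr).is_archive();
  }
};

int main() {
  ToyHeap heap(0x100000, 0x1000, 8);                // 8 regions of 4 KiB each
  heap.region_containing(0x103000).kind = RegionKind::ClosedArchive;
  std::printf("%d %d\n",
              heap.is_archived_object(0x103010),    // 1: lies in an archive region
              heap.is_archived_object(0x101000));   // 0: ordinary region
  return 0;
}
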

src/hotspot/share/gc/g1/g1Allocator.cpp

Lines changed: 1 addition & 11 deletions
@@ -398,15 +398,8 @@ size_t G1PLABAllocator::undo_waste() const {
   return result;
 }
 
-bool G1ArchiveAllocator::_archive_check_enabled = false;
-G1ArchiveRegionMap G1ArchiveAllocator::_archive_region_map;
-
 G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h, bool open) {
-  // Create the archive allocator, and also enable archive object checking
-  // in mark-sweep, since we will be creating archive regions.
-  G1ArchiveAllocator* result = new G1ArchiveAllocator(g1h, open);
-  enable_archive_object_check();
-  return result;
+  return new G1ArchiveAllocator(g1h, open);
 }
 
 bool G1ArchiveAllocator::alloc_new_region() {

@@ -434,9 +427,6 @@ bool G1ArchiveAllocator::alloc_new_region() {
   _bottom = hr->bottom();
   _max = _bottom + HeapRegion::min_region_size_in_words();
 
-  // Tell mark-sweep that objects in this region are not to be marked.
-  set_range_archive(MemRegion(_bottom, HeapRegion::GrainWords), _open);
-
   // Since we've modified the old set, call update_sizes.
   _g1h->g1mm()->update_sizes();
   return true;

src/hotspot/share/gc/g1/g1Allocator.hpp

Lines changed: 0 additions & 40 deletions
@@ -192,19 +192,6 @@ class G1PLABAllocator : public CHeapObj<mtGC> {
   void undo_allocation(G1HeapRegionAttr dest, HeapWord* obj, size_t word_sz, uint node_index);
 };
 
-// G1ArchiveRegionMap is an array used to mark G1 regions as
-// archive regions. This allows a quick check for whether an object
-// should not be marked because it is in an archive region.
-class G1ArchiveRegionMap : public G1BiasedMappedArray<uint8_t> {
-public:
-  static const uint8_t NoArchive = 0;
-  static const uint8_t OpenArchive = 1;
-  static const uint8_t ClosedArchive = 2;
-
-protected:
-  uint8_t default_value() const { return NoArchive; }
-};
-
 // G1ArchiveAllocator is used to allocate memory in archive
 // regions. Such regions are not scavenged nor compacted by GC.
 // There are two types of archive regions, which are

@@ -278,33 +265,6 @@ class G1ArchiveAllocator : public CHeapObj<mtGC> {
   void clear_used() {
     _summary_bytes_used = 0;
   }
-
-  // Create the _archive_region_map which is used to identify archive objects.
-  static inline void enable_archive_object_check();
-
-  // Mark regions containing the specified address range as archive/non-archive.
-  static inline void set_range_archive(MemRegion range, bool open);
-  static inline void clear_range_archive(MemRegion range);
-
-  // Check if the object is in closed archive
-  static inline bool is_closed_archive_object(oop object);
-  // Check if the object is in open archive
-  static inline bool is_open_archive_object(oop object);
-  // Check if the object is either in closed archive or open archive
-  static inline bool is_archived_object(oop object);
-
-private:
-  static bool _archive_check_enabled;
-  static G1ArchiveRegionMap _archive_region_map;
-
-  // Check if an object is in a closed archive region using the _closed_archive_region_map.
-  static inline bool in_closed_archive_range(oop object);
-  // Check if an object is in open archive region using the _open_archive_region_map.
-  static inline bool in_open_archive_range(oop object);
-
-  // Check if archive object checking is enabled, to avoid calling in_open/closed_archive_range
-  // unnecessarily.
-  static inline bool archive_check_enabled();
 };
 
 #endif // SHARE_GC_G1_G1ALLOCATOR_HPP

src/hotspot/share/gc/g1/g1Allocator.inline.hpp

Lines changed: 0 additions & 59 deletions
@@ -119,63 +119,4 @@ inline HeapWord* G1PLABAllocator::allocate(G1HeapRegionAttr dest,
   return allocate_direct_or_new_plab(dest, word_sz, refill_failed, node_index);
 }
 
-// Create the maps which is used to identify archive objects.
-inline void G1ArchiveAllocator::enable_archive_object_check() {
-  if (_archive_check_enabled) {
-    return;
-  }
-
-  _archive_check_enabled = true;
-  _archive_region_map.initialize(G1CollectedHeap::heap()->reserved(),
-                                 HeapRegion::GrainBytes);
-}
-
-// Set the regions containing the specified address range as archive.
-inline void G1ArchiveAllocator::set_range_archive(MemRegion range, bool open) {
-  assert(_archive_check_enabled, "archive range check not enabled");
-  log_info(gc, cds)("Mark %s archive regions in map: [" PTR_FORMAT ", " PTR_FORMAT "]",
-                    open ? "open" : "closed",
-                    p2i(range.start()),
-                    p2i(range.last()));
-  uint8_t const value = open ? G1ArchiveRegionMap::OpenArchive : G1ArchiveRegionMap::ClosedArchive;
-  _archive_region_map.set_by_address(range, value);
-}
-
-// Clear the archive regions map containing the specified address range.
-inline void G1ArchiveAllocator::clear_range_archive(MemRegion range) {
-  assert(_archive_check_enabled, "archive range check not enabled");
-  log_info(gc, cds)("Clear archive regions in map: [" PTR_FORMAT ", " PTR_FORMAT "]",
-                    p2i(range.start()),
-                    p2i(range.last()));
-  _archive_region_map.set_by_address(range, G1ArchiveRegionMap::NoArchive);
-}
-
-// Check if an object is in a closed archive region using the _archive_region_map.
-inline bool G1ArchiveAllocator::in_closed_archive_range(oop object) {
-  return _archive_region_map.get_by_address(cast_from_oop<HeapWord*>(object)) == G1ArchiveRegionMap::ClosedArchive;
-}
-
-inline bool G1ArchiveAllocator::in_open_archive_range(oop object) {
-  return _archive_region_map.get_by_address(cast_from_oop<HeapWord*>(object)) == G1ArchiveRegionMap::OpenArchive;
-}
-
-// Check if archive object checking is enabled, to avoid calling in_open/closed_archive_range
-// unnecessarily.
-inline bool G1ArchiveAllocator::archive_check_enabled() {
-  return _archive_check_enabled;
-}
-
-inline bool G1ArchiveAllocator::is_closed_archive_object(oop object) {
-  return (archive_check_enabled() && in_closed_archive_range(object));
-}
-
-inline bool G1ArchiveAllocator::is_open_archive_object(oop object) {
-  return (archive_check_enabled() && in_open_archive_range(object));
-}
-
-inline bool G1ArchiveAllocator::is_archived_object(oop object) {
-  return archive_check_enabled() &&
-         (_archive_region_map.get_by_address(cast_from_oop<HeapWord*>(object)) != G1ArchiveRegionMap::NoArchive);
-}
-
 #endif // SHARE_GC_G1_G1ALLOCATOR_INLINE_HPP

src/hotspot/share/gc/g1/g1CollectedHeap.cpp

Lines changed: 20 additions & 60 deletions
@@ -570,10 +570,6 @@ bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
   // when mmap'ing archived heap data in, so pre-touching is wasted.
   FlagSetting fs(AlwaysPreTouch, false);
 
-  // Enable archive object checking used by G1MarkSweep. We have to let it know
-  // about each archive range, so that objects in those ranges aren't marked.
-  G1ArchiveAllocator::enable_archive_object_check();
-
   // For each specified MemRegion range, allocate the corresponding G1
   // regions and mark them as archive regions. We expect the ranges
   // in ascending starting address order, without overlap.

@@ -649,9 +645,6 @@ bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
       curr_region->set_top(top);
       curr_region = next_region;
     }
-
-    // Notify mark-sweep of the archive
-    G1ArchiveAllocator::set_range_archive(curr_range, open);
   }
   return true;
 }

@@ -802,9 +795,6 @@ void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
       _hrm->shrink_at(curr_index, 1);
       uncommitted_regions++;
     }
-
-    // Notify mark-sweep that this is no longer an archive range.
-    G1ArchiveAllocator::clear_range_archive(ranges[i]);
   }
 
   if (uncommitted_regions != 0) {

@@ -815,8 +805,7 @@ void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
 }
 
 oop G1CollectedHeap::materialize_archived_object(oop obj) {
-  assert(obj != NULL, "archived obj is NULL");
-  assert(G1ArchiveAllocator::is_archived_object(obj), "must be archived object");
+  assert(is_archived_object(obj), "not an archived obj");
 
   // Loading an archived object makes it strongly reachable. If it is
   // loaded during concurrent marking, it must be enqueued to the SATB

@@ -1016,8 +1005,7 @@ void G1CollectedHeap::prepare_heap_for_full_collection() {
   // after this full GC.
   abandon_collection_set(collection_set());
 
-  tear_down_region_sets(false /* free_list_only */);
-
+  hrm()->remove_all_free_regions();
   hrm()->prepare_for_full_collection_start();
 }
 

@@ -1073,17 +1061,7 @@ void G1CollectedHeap::verify_after_full_collection() {
   _hrm->verify_optional();
   _verifier->verify_region_sets_optional();
   _verifier->verify_after_gc(G1HeapVerifier::G1VerifyFull);
-  // Clear the previous marking bitmap, if needed for bitmap verification.
-  // Note we cannot do this when we clear the next marking bitmap in
-  // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
-  // objects marked during a full GC against the previous bitmap.
-  // But we need to clear it before calling check_bitmaps below since
-  // the full GC has compacted objects and updated TAMS but not updated
-  // the prev bitmap.
-  if (G1VerifyBitmaps) {
-    GCTraceTime(Debug, gc) tm("Clear Prev Bitmap for Verification");
-    _cm->clear_prev_bitmap(workers());
-  }
+
   // This call implicitly verifies that the next bitmap is clear after Full GC.
   _verifier->check_bitmaps("Full GC End");
 

@@ -1347,7 +1325,7 @@ void G1CollectedHeap::shrink(size_t shrink_bytes) {
   // Instead of tearing down / rebuilding the free lists here, we
   // could instead use the remove_all_pending() method on free_list to
   // remove only the ones that we need to remove.
-  tear_down_region_sets(true /* free_list_only */);
+  hrm()->remove_all_free_regions();
   shrink_helper(shrink_bytes);
   rebuild_region_sets(true /* free_list_only */);
 

@@ -2395,6 +2373,10 @@ bool G1CollectedHeap::is_heterogeneous_heap() const {
   return G1Arguments::is_heterogeneous_heap();
 }
 
+bool G1CollectedHeap::is_archived_object(oop object) const {
+  return object != NULL && heap_region_containing(object)->is_archive();
+}
+
 class PrintRegionClosure: public HeapRegionClosure {
   outputStream* _st;
 public:

@@ -4565,45 +4547,23 @@ bool G1CollectedHeap::check_young_list_empty() {
 
 #endif // ASSERT
 
-class TearDownRegionSetsClosure : public HeapRegionClosure {
-  HeapRegionSet *_old_set;
-
-public:
-  TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { }
-
-  bool do_heap_region(HeapRegion* r) {
-    if (r->is_old()) {
-      _old_set->remove(r);
-    } else if(r->is_young()) {
-      r->uninstall_surv_rate_group();
-    } else {
-      // We ignore free regions, we'll empty the free list afterwards.
-      // We ignore humongous and archive regions, we're not tearing down these
-      // sets.
-      assert(r->is_archive() || r->is_free() || r->is_humongous(),
-             "it cannot be another type");
-    }
-    return false;
-  }
-
-  ~TearDownRegionSetsClosure() {
-    assert(_old_set->is_empty(), "post-condition");
-  }
-};
-
-void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
-  assert_at_safepoint_on_vm_thread();
-
-  if (!free_list_only) {
-    TearDownRegionSetsClosure cl(&_old_set);
-    heap_region_iterate(&cl);
-
+// Remove the given HeapRegion from the appropriate region set.
+void G1CollectedHeap::prepare_region_for_full_compaction(HeapRegion* hr) {
+  if (hr->is_old()) {
+    _old_set.remove(hr);
+  } else if (hr->is_young()) {
     // Note that emptying the _young_list is postponed and instead done as
     // the first step when rebuilding the regions sets again. The reason for
     // this is that during a full GC string deduplication needs to know if
     // a collected region was young or old when the full GC was initiated.
+    hr->uninstall_surv_rate_group();
+  } else {
+    // We ignore free regions, we'll empty the free list afterwards.
+    // We ignore humongous and archive regions, we're not tearing down these
+    // sets.
+    assert(hr->is_archive() || hr->is_free() || hr->is_humongous(),
+           "it cannot be another type");
   }
-  _hrm->remove_all_free_regions();
 }
 
 void G1CollectedHeap::increase_used(size_t bytes) {

src/hotspot/share/gc/g1/g1CollectedHeap.hpp

Lines changed: 6 additions & 10 deletions
@@ -177,20 +177,14 @@ class G1CollectedHeap : public CollectedHeap {
   // The block offset table for the G1 heap.
   G1BlockOffsetTable* _bot;
 
-  // Tears down the region sets / lists so that they are empty and the
-  // regions on the heap do not belong to a region set / list. The
-  // only exception is the humongous set which we leave unaltered. If
-  // free_list_only is true, it will only tear down the master free
-  // list. It is called before a Full GC (free_list_only == false) or
-  // before heap shrinking (free_list_only == true).
-  void tear_down_region_sets(bool free_list_only);
+public:
+  void prepare_region_for_full_compaction(HeapRegion* hr);
 
+private:
   // Rebuilds the region sets / lists so that they are repopulated to
   // reflect the contents of the heap. The only exception is the
   // humongous set which was not torn down in the first place. If
-  // free_list_only is true, it will only rebuild the master free
-  // list. It is called after a Full GC (free_list_only == false) or
-  // after heap shrinking (free_list_only == true).
+  // free_list_only is true, it will only rebuild the free list.
   void rebuild_region_sets(bool free_list_only);
 
   // Callback for region mapping changed events.

@@ -1424,6 +1418,8 @@ class G1CollectedHeap : public CollectedHeap {
 
   virtual WorkGang* safepoint_workers() { return _workers; }
 
+  virtual bool is_archived_object(oop object) const;
+
   // The methods below are here for convenience and dispatch the
   // appropriate method depending on value of the given VerifyOption
   // parameter. The values for that parameter, and their meanings,

src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp

Lines changed: 7 additions & 2 deletions
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -54,7 +54,12 @@ void G1CollectionSetCandidates::verify() const {
   for (; idx < _num_regions; idx++) {
     HeapRegion *cur = _regions[idx];
     guarantee(cur != NULL, "Regions after _front_idx %u cannot be NULL but %u is", _front_idx, idx);
-    guarantee(G1CollectionSetChooser::should_add(cur), "Region %u should be eligible for addition.", cur->hrm_index());
+    // The first disjunction filters out regions with objects that were explicitly
+    // pinned after being added to the collection set candidates. Archive regions
+    // should never have been added to the collection set though.
+    guarantee((cur->is_pinned() && !cur->is_archive()) ||
+              G1CollectionSetChooser::should_add(cur),
+              "Region %u should be eligible for addition.", cur->hrm_index());
     if (prev != NULL) {
       guarantee(prev->gc_efficiency() >= cur->gc_efficiency(),
                 "GC efficiency for region %u: %1.4f smaller than for region %u: %1.4f",

src/hotspot/share/gc/g1/g1ConcurrentMark.cpp

Lines changed: 5 additions & 4 deletions
@@ -675,9 +675,9 @@ void G1ConcurrentMark::cleanup_for_next_mark() {
   guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
 }
 
-void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
+void G1ConcurrentMark::clear_next_bitmap(WorkGang* workers) {
   assert_at_safepoint_on_vm_thread();
-  clear_bitmap(_prev_mark_bitmap, workers, false);
+  clear_bitmap(_next_mark_bitmap, workers, false);
 }
 
 class NoteStartOfMarkHRClosure : public HeapRegionClosure {

@@ -1132,6 +1132,8 @@ void G1ConcurrentMark::remark() {
 
   // Install newly created mark bitmap as "prev".
   swap_mark_bitmaps();
+
+  _g1h->collector_state()->set_clearing_next_bitmap(true);
   {
     GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking Before Rebuild", _gc_timer_cm);
 

@@ -1696,7 +1698,6 @@ void G1ConcurrentMark::swap_mark_bitmaps() {
   G1CMBitMap* temp = _prev_mark_bitmap;
   _prev_mark_bitmap = _next_mark_bitmap;
   _next_mark_bitmap = temp;
-  _g1h->collector_state()->set_clearing_next_bitmap(true);
 }
 
 // Closure for marking entries in SATB buffers.

@@ -1975,7 +1976,7 @@ void G1ConcurrentMark::concurrent_cycle_abort() {
   // concurrent bitmap clearing.
   {
     GCTraceTime(Debug, gc) debug("Clear Next Bitmap");
-    clear_bitmap(_next_mark_bitmap, _g1h->workers(), false);
+    clear_next_bitmap(_g1h->workers());
   }
   // Note we cannot clear the previous marking bitmap here
   // since VerifyDuringGC verifies the objects marked during