8253600: G1: Fully support pinned regions for full gc
Reviewed-by: sjohanss, ayang
Thomas Schatzl committed Nov 10, 2020
1 parent 97d6e4a commit 6555996
Showing 31 changed files with 368 additions and 295 deletions.
12 changes: 1 addition & 11 deletions src/hotspot/share/gc/g1/g1Allocator.cpp
@@ -398,15 +398,8 @@ size_t G1PLABAllocator::undo_waste() const {
return result;
}

-bool G1ArchiveAllocator::_archive_check_enabled = false;
-G1ArchiveRegionMap G1ArchiveAllocator::_archive_region_map;
-
G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h, bool open) {
-  // Create the archive allocator, and also enable archive object checking
-  // in mark-sweep, since we will be creating archive regions.
-  G1ArchiveAllocator* result = new G1ArchiveAllocator(g1h, open);
-  enable_archive_object_check();
-  return result;
+  return new G1ArchiveAllocator(g1h, open);
}

bool G1ArchiveAllocator::alloc_new_region() {
@@ -434,9 +427,6 @@ bool G1ArchiveAllocator::alloc_new_region() {
_bottom = hr->bottom();
_max = _bottom + HeapRegion::min_region_size_in_words();

-  // Tell mark-sweep that objects in this region are not to be marked.
-  set_range_archive(MemRegion(_bottom, HeapRegion::GrainWords), _open);

// Since we've modified the old set, call update_sizes.
_g1h->g1mm()->update_sizes();
return true;
40 changes: 0 additions & 40 deletions src/hotspot/share/gc/g1/g1Allocator.hpp
@@ -192,19 +192,6 @@ class G1PLABAllocator : public CHeapObj<mtGC> {
void undo_allocation(G1HeapRegionAttr dest, HeapWord* obj, size_t word_sz, uint node_index);
};

-// G1ArchiveRegionMap is an array used to mark G1 regions as
-// archive regions. This allows a quick check for whether an object
-// should not be marked because it is in an archive region.
-class G1ArchiveRegionMap : public G1BiasedMappedArray<uint8_t> {
-public:
-  static const uint8_t NoArchive = 0;
-  static const uint8_t OpenArchive = 1;
-  static const uint8_t ClosedArchive = 2;
-
-protected:
-  uint8_t default_value() const { return NoArchive; }
-};

// G1ArchiveAllocator is used to allocate memory in archive
// regions. Such regions are not scavenged nor compacted by GC.
// There are two types of archive regions, which are
@@ -278,33 +265,6 @@ class G1ArchiveAllocator : public CHeapObj<mtGC> {
void clear_used() {
_summary_bytes_used = 0;
}

-  // Create the _archive_region_map which is used to identify archive objects.
-  static inline void enable_archive_object_check();
-
-  // Mark regions containing the specified address range as archive/non-archive.
-  static inline void set_range_archive(MemRegion range, bool open);
-  static inline void clear_range_archive(MemRegion range);
-
-  // Check if the object is in closed archive
-  static inline bool is_closed_archive_object(oop object);
-  // Check if the object is in open archive
-  static inline bool is_open_archive_object(oop object);
-  // Check if the object is either in closed archive or open archive
-  static inline bool is_archived_object(oop object);
-
-private:
-  static bool _archive_check_enabled;
-  static G1ArchiveRegionMap _archive_region_map;
-
-  // Check if an object is in a closed archive region using the _closed_archive_region_map.
-  static inline bool in_closed_archive_range(oop object);
-  // Check if an object is in open archive region using the _open_archive_region_map.
-  static inline bool in_open_archive_range(oop object);
-
-  // Check if archive object checking is enabled, to avoid calling in_open/closed_archive_range
-  // unnecessarily.
-  static inline bool archive_check_enabled();
};

#endif // SHARE_GC_G1_G1ALLOCATOR_HPP
59 changes: 0 additions & 59 deletions src/hotspot/share/gc/g1/g1Allocator.inline.hpp
@@ -119,63 +119,4 @@ inline HeapWord* G1PLABAllocator::allocate(G1HeapRegionAttr dest,
return allocate_direct_or_new_plab(dest, word_sz, refill_failed, node_index);
}

-// Create the maps which is used to identify archive objects.
-inline void G1ArchiveAllocator::enable_archive_object_check() {
-  if (_archive_check_enabled) {
-    return;
-  }
-
-  _archive_check_enabled = true;
-  _archive_region_map.initialize(G1CollectedHeap::heap()->reserved(),
-                                 HeapRegion::GrainBytes);
-}
-
-// Set the regions containing the specified address range as archive.
-inline void G1ArchiveAllocator::set_range_archive(MemRegion range, bool open) {
-  assert(_archive_check_enabled, "archive range check not enabled");
-  log_info(gc, cds)("Mark %s archive regions in map: [" PTR_FORMAT ", " PTR_FORMAT "]",
-                    open ? "open" : "closed",
-                    p2i(range.start()),
-                    p2i(range.last()));
-  uint8_t const value = open ? G1ArchiveRegionMap::OpenArchive : G1ArchiveRegionMap::ClosedArchive;
-  _archive_region_map.set_by_address(range, value);
-}
-
-// Clear the archive regions map containing the specified address range.
-inline void G1ArchiveAllocator::clear_range_archive(MemRegion range) {
-  assert(_archive_check_enabled, "archive range check not enabled");
-  log_info(gc, cds)("Clear archive regions in map: [" PTR_FORMAT ", " PTR_FORMAT "]",
-                    p2i(range.start()),
-                    p2i(range.last()));
-  _archive_region_map.set_by_address(range, G1ArchiveRegionMap::NoArchive);
-}
-
-// Check if an object is in a closed archive region using the _archive_region_map.
-inline bool G1ArchiveAllocator::in_closed_archive_range(oop object) {
-  return _archive_region_map.get_by_address(cast_from_oop<HeapWord*>(object)) == G1ArchiveRegionMap::ClosedArchive;
-}
-
-inline bool G1ArchiveAllocator::in_open_archive_range(oop object) {
-  return _archive_region_map.get_by_address(cast_from_oop<HeapWord*>(object)) == G1ArchiveRegionMap::OpenArchive;
-}
-
-// Check if archive object checking is enabled, to avoid calling in_open/closed_archive_range
-// unnecessarily.
-inline bool G1ArchiveAllocator::archive_check_enabled() {
-  return _archive_check_enabled;
-}
-
-inline bool G1ArchiveAllocator::is_closed_archive_object(oop object) {
-  return (archive_check_enabled() && in_closed_archive_range(object));
-}
-
-inline bool G1ArchiveAllocator::is_open_archive_object(oop object) {
-  return (archive_check_enabled() && in_open_archive_range(object));
-}
-
-inline bool G1ArchiveAllocator::is_archived_object(oop object) {
-  return archive_check_enabled() &&
-         (_archive_region_map.get_by_address(cast_from_oop<HeapWord*>(object)) != G1ArchiveRegionMap::NoArchive);
-}

#endif // SHARE_GC_G1_G1ALLOCATOR_INLINE_HPP
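For context on what was deleted: G1ArchiveRegionMap was a G1BiasedMappedArray side table holding one tag byte per heap region, so the static is_archived_object() checks could answer from an object address alone. The standalone sketch below is not from the patch; names, sizes, and the driver are illustrative assumptions. It models the biased-lookup pattern that the commit replaces with a direct query on the containing region (see g1CollectedHeap.cpp further down).

// Standalone model of the removed side-table check. One tag byte per
// fixed-size region; an address is biased by the heap base and shifted
// by the region size to find its tag in O(1).
#include <cstdint>
#include <cstdio>
#include <vector>

enum ArchiveTag : uint8_t { NoArchive = 0, OpenArchive = 1, ClosedArchive = 2 };

class ArchiveRegionMap {
  uintptr_t _heap_base;
  unsigned _region_shift;              // log2 of the region size ("GrainBytes")
  std::vector<uint8_t> _tags;          // one tag per region, NoArchive by default
public:
  ArchiveRegionMap(uintptr_t heap_base, unsigned region_shift, size_t num_regions)
    : _heap_base(heap_base), _region_shift(region_shift), _tags(num_regions, NoArchive) {}

  void set_region(size_t index, ArchiveTag tag) { _tags[index] = tag; }

  // Equivalent of get_by_address(): bias, shift, index.
  ArchiveTag tag_for(uintptr_t addr) const {
    return static_cast<ArchiveTag>(_tags[(addr - _heap_base) >> _region_shift]);
  }
};

int main() {
  const uintptr_t heap_base = 0x40000000;
  ArchiveRegionMap map(heap_base, /*region_shift=*/20, /*num_regions=*/8); // 1 MiB regions
  map.set_region(3, ClosedArchive);
  uintptr_t obj = heap_base + (3u << 20) + 64;   // an "object" inside region 3
  std::printf("closed archive? %d\n", map.tag_for(obj) == ClosedArchive);  // prints 1
  return 0;
}

The cost of this design, and the reason the commit can delete it, is that the table is global mutable state that must be kept in sync with region allocation and deallocation; the region's own type bit carries the same information for free.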
80 changes: 20 additions & 60 deletions src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -570,10 +570,6 @@ bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
// when mmap'ing archived heap data in, so pre-touching is wasted.
FlagSetting fs(AlwaysPreTouch, false);

-  // Enable archive object checking used by G1MarkSweep. We have to let it know
-  // about each archive range, so that objects in those ranges aren't marked.
-  G1ArchiveAllocator::enable_archive_object_check();

// For each specified MemRegion range, allocate the corresponding G1
// regions and mark them as archive regions. We expect the ranges
// in ascending starting address order, without overlap.
@@ -649,9 +645,6 @@ bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
curr_region->set_top(top);
curr_region = next_region;
}

-    // Notify mark-sweep of the archive
-    G1ArchiveAllocator::set_range_archive(curr_range, open);
}
return true;
}
@@ -802,9 +795,6 @@ void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
_hrm->shrink_at(curr_index, 1);
uncommitted_regions++;
}

-    // Notify mark-sweep that this is no longer an archive range.
-    G1ArchiveAllocator::clear_range_archive(ranges[i]);
}

if (uncommitted_regions != 0) {
@@ -815,8 +805,7 @@
}

oop G1CollectedHeap::materialize_archived_object(oop obj) {
-  assert(obj != NULL, "archived obj is NULL");
-  assert(G1ArchiveAllocator::is_archived_object(obj), "must be archived object");
+  assert(is_archived_object(obj), "not an archived obj");

// Loading an archived object makes it strongly reachable. If it is
// loaded during concurrent marking, it must be enqueued to the SATB
@@ -1016,8 +1005,7 @@ void G1CollectedHeap::prepare_heap_for_full_collection() {
// after this full GC.
abandon_collection_set(collection_set());

-  tear_down_region_sets(false /* free_list_only */);
-
+  hrm()->remove_all_free_regions();
hrm()->prepare_for_full_collection_start();
}

@@ -1073,17 +1061,7 @@ void G1CollectedHeap::verify_after_full_collection() {
_hrm->verify_optional();
_verifier->verify_region_sets_optional();
_verifier->verify_after_gc(G1HeapVerifier::G1VerifyFull);
-  // Clear the previous marking bitmap, if needed for bitmap verification.
-  // Note we cannot do this when we clear the next marking bitmap in
-  // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
-  // objects marked during a full GC against the previous bitmap.
-  // But we need to clear it before calling check_bitmaps below since
-  // the full GC has compacted objects and updated TAMS but not updated
-  // the prev bitmap.
-  if (G1VerifyBitmaps) {
-    GCTraceTime(Debug, gc) tm("Clear Prev Bitmap for Verification");
-    _cm->clear_prev_bitmap(workers());
-  }

// This call implicitly verifies that the next bitmap is clear after Full GC.
_verifier->check_bitmaps("Full GC End");

@@ -1347,7 +1325,7 @@ void G1CollectedHeap::shrink(size_t shrink_bytes) {
// Instead of tearing down / rebuilding the free lists here, we
// could instead use the remove_all_pending() method on free_list to
// remove only the ones that we need to remove.
-  tear_down_region_sets(true /* free_list_only */);
+  hrm()->remove_all_free_regions();
shrink_helper(shrink_bytes);
rebuild_region_sets(true /* free_list_only */);

@@ -2395,6 +2373,10 @@ bool G1CollectedHeap::is_heterogeneous_heap() const {
return G1Arguments::is_heterogeneous_heap();
}

+bool G1CollectedHeap::is_archived_object(oop object) const {
+  return object != NULL && heap_region_containing(object)->is_archive();
+}

class PrintRegionClosure: public HeapRegionClosure {
outputStream* _st;
public:
@@ -4565,45 +4547,23 @@ bool G1CollectedHeap::check_young_list_empty() {

#endif // ASSERT

-class TearDownRegionSetsClosure : public HeapRegionClosure {
-  HeapRegionSet *_old_set;
-
-public:
-  TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { }
-
-  bool do_heap_region(HeapRegion* r) {
-    if (r->is_old()) {
-      _old_set->remove(r);
-    } else if(r->is_young()) {
-      r->uninstall_surv_rate_group();
-    } else {
-      // We ignore free regions, we'll empty the free list afterwards.
-      // We ignore humongous and archive regions, we're not tearing down these
-      // sets.
-      assert(r->is_archive() || r->is_free() || r->is_humongous(),
-             "it cannot be another type");
-    }
-    return false;
-  }
-
-  ~TearDownRegionSetsClosure() {
-    assert(_old_set->is_empty(), "post-condition");
-  }
-};
-
-void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
-  assert_at_safepoint_on_vm_thread();
-
-  if (!free_list_only) {
-    TearDownRegionSetsClosure cl(&_old_set);
-    heap_region_iterate(&cl);
-
+// Remove the given HeapRegion from the appropriate region set.
+void G1CollectedHeap::prepare_region_for_full_compaction(HeapRegion* hr) {
+  if (hr->is_old()) {
+    _old_set.remove(hr);
+  } else if (hr->is_young()) {
+    // Note that emptying the _young_list is postponed and instead done as
+    // the first step when rebuilding the regions sets again. The reason for
+    // this is that during a full GC string deduplication needs to know if
+    // a collected region was young or old when the full GC was initiated.
+    hr->uninstall_surv_rate_group();
+  } else {
+    // We ignore free regions, we'll empty the free list afterwards.
+    // We ignore humongous and archive regions, we're not tearing down these
+    // sets.
+    assert(hr->is_archive() || hr->is_free() || hr->is_humongous(),
+           "it cannot be another type");
+  }
-  _hrm->remove_all_free_regions();
}

void G1CollectedHeap::increase_used(size_t bytes) {
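Two replacements above carry the weight of the change in this file: is_archived_object() now just asks the containing region for its type instead of consulting the deleted side table, and the heap-wide TearDownRegionSetsClosure becomes a per-region prepare_region_for_full_compaction() hook, so the full GC can prepare exactly the regions it will compact and leave pinned ones alone. Below is a standalone sketch of that per-region shape; the types are stand-ins and the driver loop is a guess at how the full-GC preparation calls the hook, not the patch's actual caller.

// Standalone sketch: per-region full-GC preparation with pinned regions skipped.
#include <cassert>
#include <vector>

enum RegionType { Free, Young, Old, Humongous, Archive };

struct Region {
  RegionType type = Free;
  bool pinned = false;   // pinned regions must not be moved by a full gc
};

struct RegionSet {
  void remove(Region* r) { (void)r; /* unlink the region from this set */ }
};

struct Heap {
  RegionSet old_set;
  std::vector<Region> regions;

  // Per-region hook, like the new prepare_region_for_full_compaction():
  // detach the region from whatever set tracks it before compaction.
  void prepare_region_for_full_compaction(Region* r) {
    if (r->type == Old) {
      old_set.remove(r);
    } else if (r->type == Young) {
      // young-list bookkeeping is finished when the sets are rebuilt after gc
    } else {
      assert(r->type == Archive || r->type == Free || r->type == Humongous);
    }
  }

  void prepare_heap_for_full_collection() {
    for (Region& r : regions) {
      if (!r.pinned) {             // pinned regions are simply left in place
        prepare_region_for_full_compaction(&r);
      }
    }
  }
};

The design point is the inversion of control: the old closure unconditionally visited every region, so there was no natural place to exempt a pinned region; a per-region hook makes the exemption a one-line decision at the call site.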
16 changes: 6 additions & 10 deletions src/hotspot/share/gc/g1/g1CollectedHeap.hpp
@@ -177,20 +177,14 @@ class G1CollectedHeap : public CollectedHeap {
// The block offset table for the G1 heap.
G1BlockOffsetTable* _bot;

-  // Tears down the region sets / lists so that they are empty and the
-  // regions on the heap do not belong to a region set / list. The
-  // only exception is the humongous set which we leave unaltered. If
-  // free_list_only is true, it will only tear down the master free
-  // list. It is called before a Full GC (free_list_only == false) or
-  // before heap shrinking (free_list_only == true).
-  void tear_down_region_sets(bool free_list_only);
+public:
+  void prepare_region_for_full_compaction(HeapRegion* hr);
+
+private:
// Rebuilds the region sets / lists so that they are repopulated to
// reflect the contents of the heap. The only exception is the
// humongous set which was not torn down in the first place. If
-  // free_list_only is true, it will only rebuild the master free
-  // list. It is called after a Full GC (free_list_only == false) or
-  // after heap shrinking (free_list_only == true).
+  // free_list_only is true, it will only rebuild the free list.
void rebuild_region_sets(bool free_list_only);

// Callback for region mapping changed events.
@@ -1424,6 +1418,8 @@ class G1CollectedHeap : public CollectedHeap {

virtual WorkGang* safepoint_workers() { return _workers; }

+  virtual bool is_archived_object(oop object) const;

// The methods below are here for convenience and dispatch the
// appropriate method depending on value of the given VerifyOption
// parameter. The values for that parameter, and their meanings,
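The header change mirrors the .cpp: the archived-object test becomes a virtual method overridden by G1, so shared runtime code can ask any heap without reaching for G1ArchiveAllocator's static state. A sketch of that interface shape, with stand-in class names rather than the real CollectedHeap hierarchy:

// Stand-in sketch of the virtual-hook pattern; not the VM's actual classes.
#include <cstddef>

class CollectedHeapModel {
public:
  virtual ~CollectedHeapModel() {}
  // Default for collectors without archive support.
  virtual bool is_archived_object(const void* obj) const { return false; }
};

class G1HeapModel : public CollectedHeapModel {
  bool region_is_archive(const void* obj) const {
    (void)obj;  // stand-in for heap_region_containing(obj)->is_archive()
    return false;
  }
public:
  virtual bool is_archived_object(const void* obj) const {
    return obj != NULL && region_is_archive(obj);
  }
};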
9 changes: 7 additions & 2 deletions src/hotspot/share/gc/g1/g1CollectionSetCandidates.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -54,7 +54,12 @@ void G1CollectionSetCandidates::verify() const {
for (; idx < _num_regions; idx++) {
HeapRegion *cur = _regions[idx];
guarantee(cur != NULL, "Regions after _front_idx %u cannot be NULL but %u is", _front_idx, idx);
-    guarantee(G1CollectionSetChooser::should_add(cur), "Region %u should be eligible for addition.", cur->hrm_index());
+    // The first disjunction filters out regions with objects that were explicitly
+    // pinned after being added to the collection set candidates. Archive regions
+    // should never have been added to the collection set though.
+    guarantee((cur->is_pinned() && !cur->is_archive()) ||
+              G1CollectionSetChooser::should_add(cur),
+              "Region %u should be eligible for addition.", cur->hrm_index());
if (prev != NULL) {
guarantee(prev->gc_efficiency() >= cur->gc_efficiency(),
"GC efficiency for region %u: %1.4f smaller than for region %u: %1.4f",
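Restated as a standalone predicate (stand-in types, not the patch's code), the relaxed guarantee reads: a candidate is still acceptable if it satisfies the normal selection rule, or if it became pinned only after selection; an archive region, being permanently pinned, must never have been selected at all.

// Minimal model of the verification condition in the hunk above.
#include <cassert>

struct Region {
  bool pinned;
  bool archive;   // archive regions are always pinned
};

// Stand-in for G1CollectionSetChooser::should_add().
bool should_add(const Region& r) { return !r.pinned && !r.archive; }

bool candidate_still_valid(const Region& r) {
  return (r.pinned && !r.archive) || should_add(r);
}

int main() {
  Region pinned_after_selection{true, false};
  Region archive_region{true, true};
  assert(candidate_still_valid(pinned_after_selection));  // tolerated
  assert(!candidate_still_valid(archive_region));         // never a candidate
  return 0;
}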
9 changes: 5 additions & 4 deletions src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
@@ -675,9 +675,9 @@ void G1ConcurrentMark::cleanup_for_next_mark() {
guarantee(!_g1h->collector_state()->mark_or_rebuild_in_progress(), "invariant");
}

-void G1ConcurrentMark::clear_prev_bitmap(WorkGang* workers) {
+void G1ConcurrentMark::clear_next_bitmap(WorkGang* workers) {
   assert_at_safepoint_on_vm_thread();
-  clear_bitmap(_prev_mark_bitmap, workers, false);
+  clear_bitmap(_next_mark_bitmap, workers, false);
}

class NoteStartOfMarkHRClosure : public HeapRegionClosure {
Expand Down Expand Up @@ -1132,6 +1132,8 @@ void G1ConcurrentMark::remark() {

// Install newly created mark bitmap as "prev".
swap_mark_bitmaps();

+  _g1h->collector_state()->set_clearing_next_bitmap(true);
{
GCTraceTime(Debug, gc, phases) debug("Update Remembered Set Tracking Before Rebuild", _gc_timer_cm);

@@ -1696,7 +1698,6 @@ void G1ConcurrentMark::swap_mark_bitmaps() {
G1CMBitMap* temp = _prev_mark_bitmap;
_prev_mark_bitmap = _next_mark_bitmap;
_next_mark_bitmap = temp;
-  _g1h->collector_state()->set_clearing_next_bitmap(true);
}

// Closure for marking entries in SATB buffers.
@@ -1975,7 +1976,7 @@ void G1ConcurrentMark::concurrent_cycle_abort() {
// concurrent bitmap clearing.
{
GCTraceTime(Debug, gc) debug("Clear Next Bitmap");
-    clear_bitmap(_next_mark_bitmap, _g1h->workers(), false);
+    clear_next_bitmap(_g1h->workers());
}
// Note we cannot clear the previous marking bitmap here
// since VerifyDuringGC verifies the objects marked during
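The bitmap edits fit one theme: after a marking cycle the "next" bitmap is swapped in as "prev", and whatever gets cleared afterwards is always the next bitmap, hence the clear_prev_bitmap() to clear_next_bitmap() rename and the set_clearing_next_bitmap(true) call moving from swap_mark_bitmaps() to remark(). A minimal standalone model of the two-bitmap rotation, with stand-in types and the collector-state flag simplified to a bool:

// Standalone model of G1's prev/next mark bitmap rotation.
#include <cstring>

struct MarkBitmap {
  unsigned char bits[1024];                        // one bit per heap word, say
  void clear() { std::memset(bits, 0, sizeof bits); }
};

class ConcurrentMark {
  MarkBitmap _bitmap_a, _bitmap_b;
  MarkBitmap* _prev = &_bitmap_a;   // marks published by the finished cycle
  MarkBitmap* _next = &_bitmap_b;   // marks being built by the current cycle
  bool _clearing_next_bitmap = false;
public:
  // After the rename: only ever clear the next bitmap; the published prev
  // bitmap stays intact for verification.
  void clear_next_bitmap() { _next->clear(); }

  void swap_mark_bitmaps() {
    MarkBitmap* temp = _prev;
    _prev = _next;
    _next = temp;
  }

  void remark() {
    swap_mark_bitmaps();
    _clearing_next_bitmap = true;   // moved here from swap_mark_bitmaps()
  }
};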