Skip to content

Commit

Permalink
Browse files Browse the repository at this point in the history
8275582: Don't purge metaspace mapping lists
Reviewed-by: coleenp, lkorinth
  • Loading branch information
tstuefe committed Oct 26, 2021
1 parent 10e1610 commit 3ff085e
Show file tree
Hide file tree
Showing 9 changed files with 19 additions and 162 deletions.
24 changes: 5 additions & 19 deletions src/hotspot/share/memory/metaspace/chunkManager.cpp
Expand Up @@ -316,24 +316,11 @@ void ChunkManager::purge() {

const size_t reserved_before = _vslist->reserved_words();
const size_t committed_before = _vslist->committed_words();
int num_nodes_purged = 0;

// We purge to return unused memory to the Operating System. We do this in
// two independent steps.

// 1) We purge the virtual space list: any memory mappings which are
// completely deserted can be potentially unmapped. We iterate over the list
// of mappings (VirtualSpaceList::purge) and delete every node whose memory
// only contains free chunks. Deleting that node includes unmapping its memory,
// so all chunk vanish automatically.
// Of course we need to remove the chunk headers of those vanished chunks from
// the ChunkManager freelist.
num_nodes_purged = _vslist->purge(&_chunks);
InternalStats::inc_num_purges();

// 2) Since (1) is rather ineffective - it is rare that a whole node only contains
// free chunks - we now iterate over all remaining free chunks and
// and uncommit those which can be uncommitted (>= commit granule size).

// We return unused memory to the Operating System: we iterate over all
// free chunks and uncommit the backing memory of those large enough to
// contain one or multiple commit granules (chunks larger than a granule
// always cover a whole number of granules and start at a granule boundary).
if (Settings::uncommit_free_chunks()) {
const chunklevel_t max_level =
chunklevel::level_fitting_word_size(Settings::commit_granule_words());
Expand Down Expand Up @@ -365,7 +352,6 @@ void ChunkManager::purge() {
ls.print("committed: ");
print_word_size_delta(&ls, committed_before, committed_after);
ls.cr();
ls.print_cr("full nodes purged: %d", num_nodes_purged);
}
}
DEBUG_ONLY(_vslist->verify_locked());
Expand Down
3 changes: 0 additions & 3 deletions src/hotspot/share/memory/metaspace/internalStats.hpp
Expand Up @@ -92,9 +92,6 @@ class InternalStats : public AllStatic {
/* Number of chunk in place enlargements */ \
x(num_chunks_enlarged) \
\
/* Number of times we did a purge */ \
x(num_purges) \
\
/* Number of times we read inconsistent stats. */ \
x(num_inconsistent_stats) \

Expand Down
10 changes: 0 additions & 10 deletions src/hotspot/share/memory/metaspace/rootChunkArea.cpp
Expand Up @@ -481,16 +481,6 @@ RootChunkAreaLUT::~RootChunkAreaLUT() {
FREE_C_HEAP_ARRAY(RootChunkArea, _arr);
}

// Returns true if all areas in this area table are free (only contain free chunks).
bool RootChunkAreaLUT::is_free() const {
// The table is free iff every single area reports itself free; stop
// scanning as soon as one area is found to be in use.
bool all_free = true;
for (int idx = 0; all_free && idx < _num; idx++) {
all_free = _arr[idx].is_free();
}
return all_free;
}

#ifdef ASSERT

void RootChunkAreaLUT::verify() const {
Expand Down
12 changes: 0 additions & 12 deletions src/hotspot/share/memory/metaspace/rootChunkArea.hpp
Expand Up @@ -107,10 +107,6 @@ class RootChunkArea {
size_t word_size() const { return chunklevel::MAX_CHUNK_WORD_SIZE; }
const MetaWord* end() const { return _base + word_size(); }

// Direct access to the first chunk (use with care)
Metachunk* first_chunk() { return _first_chunk; }
const Metachunk* first_chunk() const { return _first_chunk; }

// Returns true if this root chunk area is completely free:
// In that case, it should only contain one chunk (maximally merged, so a root chunk)
// and it should be free.
Expand Down Expand Up @@ -182,20 +178,12 @@ class RootChunkAreaLUT {
return _arr + idx;
}

// Access area by its index
int number_of_areas() const { return _num; }
RootChunkArea* get_area_by_index(int index) { assert(index >= 0 && index < _num, "oob"); return _arr + index; }
const RootChunkArea* get_area_by_index(int index) const { assert(index >= 0 && index < _num, "oob"); return _arr + index; }

/// range ///

const MetaWord* base() const { return _base; }
size_t word_size() const { return _num * chunklevel::MAX_CHUNK_WORD_SIZE; }
const MetaWord* end() const { return _base + word_size(); }

// Returns true if all areas in this area table are free (only contain free chunks).
bool is_free() const;

DEBUG_ONLY(void verify() const;)

void print_on(outputStream* st) const;
Expand Down
37 changes: 0 additions & 37 deletions src/hotspot/share/memory/metaspace/virtualSpaceList.cpp
Expand Up @@ -134,43 +134,6 @@ Metachunk* VirtualSpaceList::allocate_root_chunk() {
return c;
}

// Attempts to purge nodes. This will remove and delete nodes which only contain free chunks.
// The free chunks are removed from the freelists before the nodes are deleted.
// Return number of purged nodes.
int VirtualSpaceList::purge(FreeChunkListVector* freelists) {
assert_lock_strong(Metaspace_lock);
UL(debug, "purging.");

// Walk the singly linked node list, keeping a trailing pointer (prev_vsn)
// so a purged node can be unlinked from its predecessor.
VirtualSpaceNode* vsn = _first_node;
VirtualSpaceNode* prev_vsn = NULL;
int num = 0, num_purged = 0;
while (vsn != NULL) {
// Read the successor up front: attempt_purge() deletes vsn on success.
VirtualSpaceNode* next_vsn = vsn->next();
bool purged = vsn->attempt_purge(freelists);
if (purged) {
// Note: from now on do not dereference vsn!
// (The log below only prints the stale pointer value, it does not dereference it.)
UL2(debug, "purged node @" PTR_FORMAT ".", p2i(vsn));
if (_first_node == vsn) {
_first_node = next_vsn;
}
// Poison the dangling pointer in debug builds so accidental use crashes loudly.
DEBUG_ONLY(vsn = (VirtualSpaceNode*)((uintptr_t)(0xdeadbeef));)
if (prev_vsn != NULL) {
prev_vsn->set_next(next_vsn);
}
num_purged++;
_nodes_counter.decrement();
} else {
// Node survives; it becomes the predecessor for the next iteration.
prev_vsn = vsn;
}
vsn = next_vsn;
num ++;
}

UL2(debug, "purged %d nodes (before: %d, now: %d)",
num_purged, num, num_nodes());
return num_purged;
}

// Print all nodes in this space list.
void VirtualSpaceList::print_on(outputStream* st) const {
MutexLocker fcl(Metaspace_lock, Mutex::_no_safepoint_check_flag);
Expand Down
9 changes: 0 additions & 9 deletions src/hotspot/share/memory/metaspace/virtualSpaceList.hpp
Expand Up @@ -48,10 +48,6 @@ class FreeChunkListVector;
// managing a single contiguous memory region. The first node of
// this list is the current node and used for allocation of new
// root chunks.
//
// Beyond access to those nodes and the ability to grow new nodes
// (if expandable) it allows for purging: purging this list means
// removing and unmapping all memory regions which are unused.

class VirtualSpaceList : public CHeapObj<mtClass> {

Expand Down Expand Up @@ -101,11 +97,6 @@ class VirtualSpaceList : public CHeapObj<mtClass> {
// the list cannot be expanded (in practice this means we reached CompressedClassSpaceSize).
Metachunk* allocate_root_chunk();

// Attempts to purge nodes. This will remove and delete nodes which only contain free chunks.
// The free chunks are removed from the freelists before the nodes are deleted.
// Return number of purged nodes.
int purge(FreeChunkListVector* freelists);

//// Statistics ////

// Return sum of reserved words in all nodes.
Expand Down
42 changes: 0 additions & 42 deletions src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp
Expand Up @@ -369,48 +369,6 @@ bool VirtualSpaceNode::attempt_enlarge_chunk(Metachunk* c, FreeChunkListVector*
return rc;
}

// Attempts to purge the node:
//
// If all chunks living in this node are free, they will all be removed from
// the freelist they currently reside in. Then, the node will be deleted.
//
// Returns true if the node has been deleted, false if not.
// !! If this returns true, do not access the node from this point on. !!
bool VirtualSpaceNode::attempt_purge(FreeChunkListVector* freelists) {
assert_lock_strong(Metaspace_lock);

if (!_owns_rs) {
// We do not allow purging of nodes if we do not own the
// underlying ReservedSpace (CompressClassSpace case).
return false;
}

// First find out if all areas are empty. Since empty chunks collapse to root chunk
// size, if all chunks in this node are free root chunks we are good to go.
if (!_root_chunk_area_lut.is_free()) {
return false;
}

UL(debug, ": purging.");

// Okay, we can purge. Before we can do this, we need to remove all chunks from the freelist.
// Otherwise the freelists would be left holding dangling pointers into the unmapped node.
for (int narea = 0; narea < _root_chunk_area_lut.number_of_areas(); narea++) {
RootChunkArea* ra = _root_chunk_area_lut.get_area_by_index(narea);
Metachunk* c = ra->first_chunk();
if (c != NULL) {
UL2(trace, "removing chunk from to-be-purged node: "
METACHUNK_FULL_FORMAT ".", METACHUNK_FULL_FORMAT_ARGS(c));
// The is_free() check above guarantees every area holds exactly one
// maximally-merged, free root chunk.
assert(c->is_free() && c->is_root_chunk(), "Sanity");
freelists->remove(c);
}
}

// Now, delete the node, then right away return since this object is invalid.
// ("delete this" is safe here only because no member is touched afterwards.)
delete this;

return true;
}

void VirtualSpaceNode::print_on(outputStream* st) const {
size_t scale = K;

Expand Down
9 changes: 0 additions & 9 deletions src/hotspot/share/memory/metaspace/virtualSpaceNode.hpp
Expand Up @@ -208,15 +208,6 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
// On success, true is returned, false otherwise.
bool attempt_enlarge_chunk(Metachunk* c, FreeChunkListVector* freelists);

// Attempts to purge the node:
//
// If all chunks living in this node are free, they will all be removed from
// the freelist they currently reside in. Then, the node will be deleted.
//
// Returns true if the node has been deleted, false if not.
// !! If this returns true, do not access the node from this point on. !!
bool attempt_purge(FreeChunkListVector* freelists);

// Attempts to uncommit free areas according to the rules set in settings.
// Returns number of words uncommitted.
size_t uncommit_free_areas();
Expand Down
Expand Up @@ -59,40 +59,33 @@ protected void stopAllThreads() throws InterruptedException {

void destroyArenasAndPurgeSpace() {

// This deletes the arenas, which will cause them to return all their accumulated
// metaspace chunks into the context' chunk manager (freelist) before vanishing.
// It then purges the context.
// We may return memory to the operating system:
// - with -XX:MetaspaceReclaimPolicy=balanced|aggressive (balanced is the default),
// we will scour the freelist for chunks larger than a commit granule, and uncommit
// their backing memory. Note that since we deleted all arenas, all their chunks are
// in the freelist, should have been maximally folded by the buddy allocator, and
// therefore should all be eligible for uncommitting. Meaning the context should
// retain no memory at all, its committed counter should be zero.
// - with -XX:MetaspaceReclaimPolicy=none, we omit the purging and retain memory in the
// metaspace allocator, so the context should retain its memory.

for (RandomAllocatorThread t: threads) {
if (t.allocator.arena.isLive()) {
context.destroyArena(t.allocator.arena);
}
}

context.checkStatistics();

// After deleting all arenas, we should have no committed space left: all arena chunks have been returned to
// the freelist and should have been maximally merged to a bunch of root chunks, which should be uncommitted
// in one go.
// Exception: if reclamation policy is none.
if (Settings.settings().doesReclaim()) {
if (context.committedWords() > 0) {
throw new RuntimeException("Expected no committed words after purging empty metaspace context (was: " + context.committedWords() + ")");
}
}

context.purge();

context.checkStatistics();

// After purging - if all arenas had been deleted before - we should have no committed space left even in
// reclamation=none mode:
// purging deletes all nodes with only free chunks, and in this case no node should still house in-use chunks,
// so all nodes would have been unmapped.
// This is independent of reclamation policy. Only one exception: if the area was created with a reserve limit
// (mimicking compressed class space), the underlying virtual space list cannot be purged.
if (context.reserveLimit == 0) {
if (Settings.settings().doesReclaim()) {
if (context.committedWords() > 0) {
throw new RuntimeException("Expected no committed words after purging empty metaspace context (was: " + context.committedWords() + ")");
}
}

}

@Override
Expand Down

3 comments on commit 3ff085e

@openjdk-notifier
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@tstuefe
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

/backport jdk17u-dev

@openjdk
Copy link

@openjdk openjdk bot commented on 3ff085e Jan 10, 2023

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@tstuefe the backport was successfully created on the branch tstuefe-backport-3ff085e2 in my personal fork of openjdk/jdk17u-dev. To create a pull request with this backport targeting openjdk/jdk17u-dev:master, just click the following link:

➡️ Create pull request

The title of the pull request is automatically filled in correctly and below you find a suggestion for the pull request body:

Hi all,

This pull request contains a backport of commit 3ff085e2 from the openjdk/jdk repository.

The commit being backported was authored by Thomas Stuefe on 26 Oct 2021 and was reviewed by Coleen Phillimore and Leo Korinth.

Thanks!

If you need to update the source branch of the pull then run the following commands in a local clone of your personal fork of openjdk/jdk17u-dev:

$ git fetch https://github.com/openjdk-bots/jdk17u-dev tstuefe-backport-3ff085e2:tstuefe-backport-3ff085e2
$ git checkout tstuefe-backport-3ff085e2
# make changes
$ git add paths/to/changed/files
$ git commit --message 'Describe additional changes made'
$ git push https://github.com/openjdk-bots/jdk17u-dev tstuefe-backport-3ff085e2

Please sign in to comment.