src/hotspot/share/nmt/memBaseline.cpp (7 changes: 1 addition & 6 deletions)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -151,11 +151,6 @@ bool MemBaseline::baseline_allocation_sites() {
     return false;
   }
 
-  // Walk simple thread stacks
-  if (!ThreadStackTracker::walk_simple_thread_stack_site(&malloc_walker)) {
-    return false;
-  }
-
   _malloc_sites.move(malloc_walker.malloc_sites());
   // The malloc sites are collected in size order
   _malloc_sites_order = by_size;
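With thread stacks always tracked as virtual memory, the baseline no longer merges a separate thread-stack site list into the malloc sites; the walker visits the malloc site table alone. Below is a standalone sketch of that remaining walk. The MallocSite/MallocSiteWalker shapes are hypothetical stand-ins for the HotSpot types named in this diff, not actual HotSpot source.

#include <cstddef>
#include <vector>

struct MallocSite {
  size_t size;  // bytes attributed to this call site
};

struct MallocSiteWalker {
  virtual bool do_malloc_site(const MallocSite* s) = 0;
  virtual ~MallocSiteWalker() = default;
};

// The malloc baseline now has a single source: visit every site in the malloc
// site table, aborting if the walker reports failure. There is no longer a
// second, thread-stack-specific walk to merge in afterwards.
bool baseline_allocation_sites_sketch(const std::vector<MallocSite>& site_table,
                                      MallocSiteWalker* walker) {
  for (const MallocSite& s : site_table) {
    if (!walker->do_malloc_site(&s)) {
      return false;
    }
  }
  return true;
}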
src/hotspot/share/nmt/memReporter.cpp (61 changes: 18 additions & 43 deletions)
@@ -193,17 +193,10 @@ void MemSummaryReporter::report_summary_of_type(MEMFLAGS flag,

   // Count thread's native stack in "Thread" category
   if (flag == mtThread) {
-    if (ThreadStackTracker::track_as_vm()) {
-      const VirtualMemory* thread_stack_usage =
-        (const VirtualMemory*)_vm_snapshot->by_type(mtThreadStack);
-      reserved_amount += thread_stack_usage->reserved();
-      committed_amount += thread_stack_usage->committed();
-    } else {
-      const MallocMemory* thread_stack_usage =
-        (const MallocMemory*)_malloc_snapshot->by_type(mtThreadStack);
-      reserved_amount += thread_stack_usage->malloc_size();
-      committed_amount += thread_stack_usage->malloc_size();
-    }
+    const VirtualMemory* thread_stack_usage =
+      (const VirtualMemory*)_vm_snapshot->by_type(mtThreadStack);
+    reserved_amount += thread_stack_usage->reserved();
+    committed_amount += thread_stack_usage->committed();
   } else if (flag == mtNMT) {
     // Count malloc headers in "NMT" category
     reserved_amount += _malloc_snapshot->malloc_overhead();
@@ -240,21 +233,12 @@
     out->print_cr("%27s ( instance classes #" SIZE_FORMAT ", array classes #" SIZE_FORMAT ")",
       " ", _instance_class_count, _array_class_count);
   } else if (flag == mtThread) {
-    if (ThreadStackTracker::track_as_vm()) {
-      const VirtualMemory* thread_stack_usage =
-        _vm_snapshot->by_type(mtThreadStack);
-      // report thread count
-      out->print_cr("%27s (threads #" SIZE_FORMAT ")", " ", ThreadStackTracker::thread_count());
-      out->print("%27s (stack: ", " ");
-      print_total(thread_stack_usage->reserved(), thread_stack_usage->committed(), thread_stack_usage->peak_size());
-    } else {
-      MallocMemory* thread_stack_memory = _malloc_snapshot->by_type(mtThreadStack);
-      const char* scale = current_scale();
-      // report thread count
-      out->print_cr("%27s (threads #" SIZE_FORMAT ")", " ", thread_stack_memory->malloc_count());
-      out->print("%27s (Stack: " SIZE_FORMAT "%s", " ",
-        amount_in_current_scale(thread_stack_memory->malloc_size()), scale);
-    }
+    const VirtualMemory* thread_stack_usage =
+      _vm_snapshot->by_type(mtThreadStack);
+    // report thread count
+    out->print_cr("%27s (threads #" SIZE_FORMAT ")", " ", ThreadStackTracker::thread_count());
+    out->print("%27s (stack: ", " ");
+    print_total(thread_stack_usage->reserved(), thread_stack_usage->committed(), thread_stack_usage->peak_size());
     out->print_cr(")");
   }
 
@@ -627,24 +611,15 @@ void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag,
     out->print_cr(")");
 
     out->print("%27s (stack: ", " ");
-    if (ThreadStackTracker::track_as_vm()) {
-      // report thread stack
-      const VirtualMemory* current_thread_stack =
-        _current_baseline.virtual_memory(mtThreadStack);
-      const VirtualMemory* early_thread_stack =
-        _early_baseline.virtual_memory(mtThreadStack);
-
-      print_virtual_memory_diff(current_thread_stack->reserved(), current_thread_stack->committed(),
-        early_thread_stack->reserved(), early_thread_stack->committed());
-    } else {
-      const MallocMemory* current_thread_stack =
-        _current_baseline.malloc_memory(mtThreadStack);
-      const MallocMemory* early_thread_stack =
-        _early_baseline.malloc_memory(mtThreadStack);
+    // report thread stack
+    const VirtualMemory* current_thread_stack =
+      _current_baseline.virtual_memory(mtThreadStack);
+    const VirtualMemory* early_thread_stack =
+      _early_baseline.virtual_memory(mtThreadStack);
+
+    print_virtual_memory_diff(current_thread_stack->reserved(), current_thread_stack->committed(),
+      early_thread_stack->reserved(), early_thread_stack->committed());
 
-    print_malloc_diff(current_thread_stack->malloc_size(), current_thread_stack->malloc_count(),
-      early_thread_stack->malloc_size(), early_thread_stack->malloc_count(), flag);
-    }
     out->print_cr(")");
   }
 
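Both reporters now read thread-stack numbers from the virtual-memory snapshot unconditionally, instead of branching between virtual-memory and malloc-based bookkeeping. A minimal sketch of the resulting single-path accounting; VirtualMemory here is a hypothetical stand-in struct, not the HotSpot class.

#include <cstddef>
#include <cstdio>

struct VirtualMemory {
  size_t reserved;
  size_t committed;
  size_t peak;
};

void report_thread_summary(const VirtualMemory& thread_stacks, size_t thread_count) {
  // Single code path: no track_as_vm() branch and no malloc-size fallback.
  size_t reserved_amount  = thread_stacks.reserved;
  size_t committed_amount = thread_stacks.committed;
  std::printf("Thread (threads #%zu)\n", thread_count);
  std::printf("  (stack: reserved=%zu, committed=%zu, peak=%zu)\n",
              reserved_amount, committed_amount, thread_stacks.peak);
}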
src/hotspot/share/nmt/memTracker.cpp (3 changes: 1 addition & 2 deletions)
@@ -67,8 +67,7 @@ void MemTracker::initialize() {

   if (level > NMT_off) {
     if (!MallocTracker::initialize(level) ||
-        !VirtualMemoryTracker::initialize(level) ||
-        !ThreadStackTracker::initialize(level)) {
+        !VirtualMemoryTracker::initialize(level)) {
       assert(false, "NMT initialization failed");
       level = NMT_off;
       log_warning(nmt)("NMT initialization failed. NMT disabled.");
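ThreadStackTracker::initialize() is gone because the tracker no longer owns any state that needs setting up, so it drops out of NMT's short-circuiting init chain. A standalone sketch of the pattern, with illustrative names only:

#include <cstdio>

enum NMTLevel { NMT_off, NMT_summary, NMT_detail };

static bool malloc_tracker_init(NMTLevel)         { return true; }
static bool virtual_memory_tracker_init(NMTLevel) { return true; }

NMTLevel initialize_nmt(NMTLevel requested) {
  if (requested > NMT_off) {
    // Each remaining initializer can still veto NMT; on failure the level
    // falls back to off, mirroring the assert/log path in the diff.
    if (!malloc_tracker_init(requested) ||
        !virtual_memory_tracker_init(requested)) {
      std::puts("NMT initialization failed. NMT disabled.");
      return NMT_off;
    }
  }
  return requested;
}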
src/hotspot/share/nmt/nmtUsage.cpp (14 changes: 6 additions & 8 deletions)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,13 +43,11 @@ NMTUsage::NMTUsage(NMTUsageOptions options) :
   _usage_options(options) { }
 
 void NMTUsage::walk_thread_stacks() {
-  // If backed by virtual memory, snapping the thread stacks involves walking
-  // them to to figure out how much memory is committed if they are backed by
-  // virtual memory. This needs ot happen before we take the snapshot of the
-  // virtual memory since it will update this information.
-  if (ThreadStackTracker::track_as_vm()) {
-    VirtualMemoryTracker::snapshot_thread_stacks();
-  }
+  // Snapping the thread stacks involves walking the areas to figure out how
+  // much memory had been committed if they are backed by virtual memory. This
+  // needs to happen before we take the snapshot of the virtual memory since it
+  // will update this information.
+  VirtualMemoryTracker::snapshot_thread_stacks();
 }
 
 void NMTUsage::update_malloc_usage() {
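The ordering constraint in that comment is the load-bearing part: walking the stacks refreshes the committed sizes that the snapshot copy then reads. A toy standalone illustration of the dependency, with hypothetical names:

#include <cstddef>

struct VMSnapshot {
  size_t thread_stack_committed = 0;
};

static VMSnapshot g_live;  // tracker-side state, updated by the walk

// Stand-in for VirtualMemoryTracker::snapshot_thread_stacks(): walking the
// stack regions discovers how many pages are actually committed.
static void snapshot_thread_stacks() {
  g_live.thread_stack_committed = 42 * 4096;
}

void update_vm_usage(VMSnapshot& out) {
  snapshot_thread_stacks();  // must run first: refreshes committed sizes
  out = g_live;              // then copy the now-consistent snapshot
}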
src/hotspot/share/nmt/threadStackTracker.cpp (96 changes: 24 additions & 72 deletions)
@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2019, 2021, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2019, 2024, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,95 +25,46 @@

#include "precompiled.hpp"

#include "nmt/mallocTracker.hpp"
#include "nmt/memTracker.hpp"
#include "nmt/threadStackTracker.hpp"
#include "nmt/virtualMemoryTracker.hpp"
#include "runtime/os.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

volatile size_t ThreadStackTracker::_thread_count = 0;
SortedLinkedList<SimpleThreadStackSite, ThreadStackTracker::compare_thread_stack_base>* ThreadStackTracker::_simple_thread_stacks = nullptr;

bool ThreadStackTracker::initialize(NMT_TrackingLevel level) {
if (level == NMT_detail && !track_as_vm()) {
_simple_thread_stacks = new (std::nothrow, mtNMT)
SortedLinkedList<SimpleThreadStackSite, ThreadStackTracker::compare_thread_stack_base>();
return (_simple_thread_stacks != nullptr);
}
return true;
}

int ThreadStackTracker::compare_thread_stack_base(const SimpleThreadStackSite& s1, const SimpleThreadStackSite& s2) {
return primitive_compare(s1.base(), s2.base());
static void align_thread_stack_boundaries_inward(void*& base, size_t& size) {
// Thread stack boundaries don't have to be aligned to page boundaries. For cases where they
// are not aligned (e.g. AIX, Alpine), this function corrects boundaries inward to the next
// page boundaries. This ensures that we can track thread stacks piggybacking on the virtual
// memory tracker.
void* const base_aligned = align_up(base, os::vm_page_size());
const size_t size_aligned = align_down(size, os::vm_page_size());
assert(size_aligned > 0, "stack size less than a page?");
base = base_aligned;
size = size_aligned;
}

void ThreadStackTracker::new_thread_stack(void* base, size_t size, const NativeCallStack& stack) {
assert(MemTracker::tracking_level() >= NMT_summary, "Must be");
assert(MemTracker::enabled(), "Must be");
assert(base != nullptr, "Should have been filtered");
align_thread_stack_boundaries_inward(base, size);

ThreadCritical tc;
if (track_as_vm()) {
VirtualMemoryTracker::add_reserved_region((address)base, size, stack, mtThreadStack);
} else {
// Use a slot in mallocMemorySummary for thread stack bookkeeping
MallocMemorySummary::record_malloc(size, mtThreadStack);
if (MemTracker::tracking_level() == NMT_detail) {
assert(_simple_thread_stacks != nullptr, "Must be initialized");
SimpleThreadStackSite site((address)base, size, stack);
_simple_thread_stacks->add(site);
}
}
VirtualMemoryTracker::add_reserved_region((address)base, size, stack, mtThreadStack);
_thread_count++;
}

void ThreadStackTracker::delete_thread_stack(void* base, size_t size) {
assert(MemTracker::tracking_level() >= NMT_summary, "Must be");
assert(MemTracker::enabled(), "Must be");
assert(base != nullptr, "Should have been filtered");
align_thread_stack_boundaries_inward(base, size);

ThreadCritical tc;
if(track_as_vm()) {
VirtualMemoryTracker::remove_released_region((address)base, size);
} else {
// Use a slot in mallocMemorySummary for thread stack bookkeeping
MallocMemorySummary::record_free(size, mtThreadStack);
if (MemTracker::tracking_level() == NMT_detail) {
assert(_simple_thread_stacks != nullptr, "Must be initialized");
SimpleThreadStackSite site((address)base, size, NativeCallStack::empty_stack()); // Fake object just to serve as compare target for delete
bool removed = _simple_thread_stacks->remove(site);
assert(removed, "Must exist");
}
}
VirtualMemoryTracker::remove_released_region((address)base, size);
_thread_count--;
}

bool ThreadStackTracker::walk_simple_thread_stack_site(MallocSiteWalker* walker) {
if (!track_as_vm()) {
LinkedListImpl<MallocSite> _sites;
{
ThreadCritical tc;
assert(_simple_thread_stacks != nullptr, "Must be initialized");
LinkedListIterator<SimpleThreadStackSite> itr(_simple_thread_stacks->head());
const SimpleThreadStackSite* ts = itr.next();
// Consolidate sites and convert to MallocSites, so we can piggyback into
// malloc snapshot
while (ts != nullptr) {
MallocSite site(*ts->call_stack(), mtThreadStack);
MallocSite* exist = _sites.find(site);
if (exist != nullptr) {
exist->allocate(ts->size());
} else {
site.allocate(ts->size());
_sites.add(site);
}
ts = itr.next();
}
}

// Piggyback to malloc snapshot
LinkedListIterator<MallocSite> site_itr(_sites.head());
const MallocSite* s = site_itr.next();
while (s != nullptr) {
walker->do_malloc_site(s);
s = site_itr.next();
}
}
return true;
}
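A worked example of the inward correction, assuming 4 KiB pages; align_up/align_down are small local stand-ins for the HotSpot utilities of the same name:

#include <cassert>
#include <cstdint>
#include <cstdio>

static uintptr_t align_up(uintptr_t p, uintptr_t a)   { return (p + a - 1) & ~(a - 1); }
static uintptr_t align_down(uintptr_t p, uintptr_t a) { return p & ~(a - 1); }

int main() {
  const uintptr_t page = 0x1000;      // 4 KiB
  uintptr_t base = 0x7f0000000800;    // stack base not page aligned (e.g. AIX, Alpine)
  size_t size    = 0x5800;            // 22 KiB, not a page multiple

  // Correct inward: first whole page at or above base, whole pages of size.
  base = align_up(base, page);                // 0x7f0000001000
  size = (size_t)align_down(size, page);      // 0x5000
  assert(size > 0 && "stack size less than a page?");

  std::printf("tracked region: base=%#llx size=%#zx\n",
              (unsigned long long)base, size);
  return 0;
}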
src/hotspot/share/nmt/threadStackTracker.hpp (51 changes: 4 additions & 47 deletions)
@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2019, 2021, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2019, 2024, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,61 +26,17 @@
 #ifndef SHARE_NMT_THREADSTACKTRACKER_HPP
 #define SHARE_NMT_THREADSTACKTRACKER_HPP
 
-#include "nmt/allocationSite.hpp"
-#include "nmt/mallocSiteTable.hpp"
-#include "nmt/nmtCommon.hpp"
-#include "utilities/linkedlist.hpp"
+#include "memory/allStatic.hpp"
+#include "utilities/globalDefinitions.hpp"
 #include "utilities/nativeCallStack.hpp"
 
-class SimpleThreadStackSite : public AllocationSite {
-  const address _base;
-  const size_t _size;
- public:
-  SimpleThreadStackSite(address base, size_t size, const NativeCallStack& stack) :
-    AllocationSite(stack, mtThreadStack),
-    _base(base),
-    _size(size) {}
-
-  bool equals(const SimpleThreadStackSite& mts) const {
-    bool eq = base() == mts.base();
-    assert(!eq || size() == mts.size(), "Must match");
-    return eq;
-  }
-
-  size_t size() const { return _size; }
-  address base() const { return _base; }
-};
-
-/*
- * Most of platforms, that hotspot support, have their thread stacks backed by
- * virtual memory by default. For these cases, thread stack tracker simply
- * delegates tracking to virtual memory tracker.
- * However, there are exceptions, (e.g. AIX), that platforms can provide stacks
- * that are not page aligned. A hypothetical VM implementation, it can provide
- * it own stacks. In these case, track_as_vm() should return false and manage
- * stack tracking by this tracker internally.
- * During memory snapshot, tracked thread stacks memory data is walked and stored
- * along with malloc'd data inside baseline. The regions are not scanned and assumed
- * all committed for now. Can add scanning phase when there is a need.
- */
 class ThreadStackTracker : AllStatic {
  private:
   static volatile size_t _thread_count;
 
-  static int compare_thread_stack_base(const SimpleThreadStackSite& s1, const SimpleThreadStackSite& s2);
-  static SortedLinkedList<SimpleThreadStackSite, compare_thread_stack_base>* _simple_thread_stacks;
  public:
-  static bool initialize(NMT_TrackingLevel level);
-
   static void new_thread_stack(void* base, size_t size, const NativeCallStack& stack);
   static void delete_thread_stack(void* base, size_t size);
 
-  static bool track_as_vm() { return AIX_ONLY(false) NOT_AIX(true); }
   static size_t thread_count() { return _thread_count; }
-
-  // Snapshot support. Piggyback thread stack data in malloc slot, NMT always handles
-  // thread stack slot specially since beginning.
-  static bool walk_simple_thread_stack_site(MallocSiteWalker* walker);
 };
 
 #endif // SHARE_NMT_THREADSTACKTRACKER_HPP
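For reference, the deleted track_as_vm() leaned on HotSpot's AIX_ONLY/NOT_AIX macros, which expand to their argument on AIX and off AIX respectively, making the predicate a compile-time constant: false on AIX, true everywhere else. With stack boundaries now corrected inward to page boundaries at registration, the AIX carve-out is no longer needed. A sketch of the expansion, with the macro definitions inlined here for illustration:

#ifdef AIX
  #define AIX_ONLY(x) x
  #define NOT_AIX(x)
#else
  #define AIX_ONLY(x)
  #define NOT_AIX(x) x
#endif

// Expands to "return false;" on AIX and "return true;" elsewhere.
static bool track_as_vm() { return AIX_ONLY(false) NOT_AIX(true); }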
src/hotspot/share/nmt/virtualMemoryTracker.cpp (9 changes: 3 additions & 6 deletions)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,11 +47,8 @@ void VirtualMemory::update_peak(size_t size) {
 }
 
 void VirtualMemorySummary::snapshot(VirtualMemorySnapshot* s) {
-  // Only if thread stack is backed by virtual memory
-  if (ThreadStackTracker::track_as_vm()) {
-    // Snapshot current thread stacks
-    VirtualMemoryTracker::snapshot_thread_stacks();
-  }
+  // Snapshot current thread stacks
+  VirtualMemoryTracker::snapshot_thread_stacks();
   as_snapshot()->copy_to(s);
 }
 