1 | 1 | /* |
2 | | - * Copyright (c) 2019, 2021, Red Hat, Inc. All rights reserved. |
| 2 | + * Copyright (c) 2019, 2024, Red Hat, Inc. All rights reserved. |
| 3 | + * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. |
3 | 4 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | 5 | * |
5 | 6 | * This code is free software; you can redistribute it and/or modify it |
|
24 | 25 |
|
25 | 26 | #include "precompiled.hpp" |
26 | 27 |
|
27 | | -#include "nmt/mallocTracker.hpp" |
28 | 28 | #include "nmt/memTracker.hpp" |
29 | 29 | #include "nmt/threadStackTracker.hpp" |
30 | 30 | #include "nmt/virtualMemoryTracker.hpp" |
| 31 | +#include "runtime/os.hpp" |
31 | 32 | #include "runtime/threadCritical.hpp" |
| 33 | +#include "utilities/align.hpp" |
| 34 | +#include "utilities/debug.hpp" |
| 35 | +#include "utilities/globalDefinitions.hpp" |
32 | 36 |
|
33 | 37 | volatile size_t ThreadStackTracker::_thread_count = 0; |
34 | | -SortedLinkedList<SimpleThreadStackSite, ThreadStackTracker::compare_thread_stack_base>* ThreadStackTracker::_simple_thread_stacks = nullptr; |
35 | 38 |
|
36 | | -bool ThreadStackTracker::initialize(NMT_TrackingLevel level) { |
37 | | - if (level == NMT_detail && !track_as_vm()) { |
38 | | - _simple_thread_stacks = new (std::nothrow, mtNMT) |
39 | | - SortedLinkedList<SimpleThreadStackSite, ThreadStackTracker::compare_thread_stack_base>(); |
40 | | - return (_simple_thread_stacks != nullptr); |
41 | | - } |
42 | | - return true; |
43 | | -} |
44 | | - |
45 | | -int ThreadStackTracker::compare_thread_stack_base(const SimpleThreadStackSite& s1, const SimpleThreadStackSite& s2) { |
46 | | - return primitive_compare(s1.base(), s2.base()); |
| 39 | +static void align_thread_stack_boundaries_inward(void*& base, size_t& size) { |
| 40 | + // Thread stack boundaries don't have to be aligned to page boundaries. For cases where they |
| 41 | + // are not aligned (e.g. AIX, Alpine), this function corrects boundaries inward to the next |
| 42 | + // page boundaries. This ensures that we can track thread stacks piggybacking on the virtual |
| 43 | + // memory tracker. |
| 44 | + void* const base_aligned = align_up(base, os::vm_page_size()); |
| 45 | + const size_t size_aligned = align_down(size, os::vm_page_size()); |
| 46 | + assert(size_aligned > 0, "stack size less than a page?"); |
| 47 | + base = base_aligned; |
| 48 | + size = size_aligned; |
47 | 49 | } |
48 | 50 |
|
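As an illustration of the new helper (not part of the patch): a minimal, self-contained sketch of the inward alignment, assuming a 4 KiB page size, a 64-bit build, and local stand-ins for HotSpot's align_up/align_down; the base address and size below are made up.

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Local stand-ins for HotSpot's align_up/align_down (power-of-two alignment assumed).
static uintptr_t align_up_to(uintptr_t p, uintptr_t alignment)   { return (p + alignment - 1) & ~(alignment - 1); }
static uintptr_t align_down_to(uintptr_t p, uintptr_t alignment) { return p & ~(alignment - 1); }

int main() {
  const uintptr_t page = 4096;           // assumed page size
  uintptr_t base = 0x7f0000001234;       // hypothetical, unaligned stack base
  size_t    size = 512 * 1024 + 100;     // hypothetical, unaligned stack size

  // Mirror align_thread_stack_boundaries_inward(): move the base up to the next
  // page boundary and round the size down to a whole number of pages.
  const uintptr_t base_aligned = align_up_to(base, page);
  const size_t    size_aligned = align_down_to(size, page);
  assert(size_aligned > 0 && "stack size less than a page?");

  std::printf("base: %#llx -> %#llx\n", (unsigned long long)base, (unsigned long long)base_aligned);
  std::printf("size: %zu -> %zu\n", size, size_aligned);
  return 0;
}
```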
49 | 51 | void ThreadStackTracker::new_thread_stack(void* base, size_t size, const NativeCallStack& stack) { |
50 | | - assert(MemTracker::tracking_level() >= NMT_summary, "Must be"); |
| 52 | + assert(MemTracker::enabled(), "Must be"); |
51 | 53 | assert(base != nullptr, "Should have been filtered"); |
| 54 | + align_thread_stack_boundaries_inward(base, size); |
| 55 | + |
52 | 56 | ThreadCritical tc; |
53 | | - if (track_as_vm()) { |
54 | | - VirtualMemoryTracker::add_reserved_region((address)base, size, stack, mtThreadStack); |
55 | | - } else { |
56 | | - // Use a slot in mallocMemorySummary for thread stack bookkeeping |
57 | | - MallocMemorySummary::record_malloc(size, mtThreadStack); |
58 | | - if (MemTracker::tracking_level() == NMT_detail) { |
59 | | - assert(_simple_thread_stacks != nullptr, "Must be initialized"); |
60 | | - SimpleThreadStackSite site((address)base, size, stack); |
61 | | - _simple_thread_stacks->add(site); |
62 | | - } |
63 | | - } |
| 57 | + VirtualMemoryTracker::add_reserved_region((address)base, size, stack, mtThreadStack); |
64 | 58 | _thread_count++; |
65 | 59 | } |
66 | 60 |
|
67 | 61 | void ThreadStackTracker::delete_thread_stack(void* base, size_t size) { |
68 | | - assert(MemTracker::tracking_level() >= NMT_summary, "Must be"); |
| 62 | + assert(MemTracker::enabled(), "Must be"); |
69 | 63 | assert(base != nullptr, "Should have been filtered"); |
| 64 | + align_thread_stack_boundaries_inward(base, size); |
| 65 | + |
70 | 66 | ThreadCritical tc; |
71 | | - if(track_as_vm()) { |
72 | | - VirtualMemoryTracker::remove_released_region((address)base, size); |
73 | | - } else { |
74 | | - // Use a slot in mallocMemorySummary for thread stack bookkeeping |
75 | | - MallocMemorySummary::record_free(size, mtThreadStack); |
76 | | - if (MemTracker::tracking_level() == NMT_detail) { |
77 | | - assert(_simple_thread_stacks != nullptr, "Must be initialized"); |
78 | | - SimpleThreadStackSite site((address)base, size, NativeCallStack::empty_stack()); // Fake object just to serve as compare target for delete |
79 | | - bool removed = _simple_thread_stacks->remove(site); |
80 | | - assert(removed, "Must exist"); |
81 | | - } |
82 | | - } |
| 67 | + VirtualMemoryTracker::remove_released_region((address)base, size); |
83 | 68 | _thread_count--; |
84 | 69 | } |
85 | 70 |
|
86 | | -bool ThreadStackTracker::walk_simple_thread_stack_site(MallocSiteWalker* walker) { |
87 | | - if (!track_as_vm()) { |
88 | | - LinkedListImpl<MallocSite> _sites; |
89 | | - { |
90 | | - ThreadCritical tc; |
91 | | - assert(_simple_thread_stacks != nullptr, "Must be initialized"); |
92 | | - LinkedListIterator<SimpleThreadStackSite> itr(_simple_thread_stacks->head()); |
93 | | - const SimpleThreadStackSite* ts = itr.next(); |
94 | | - // Consolidate sites and convert to MallocSites, so we can piggyback into |
95 | | - // malloc snapshot |
96 | | - while (ts != nullptr) { |
97 | | - MallocSite site(*ts->call_stack(), mtThreadStack); |
98 | | - MallocSite* exist = _sites.find(site); |
99 | | - if (exist != nullptr) { |
100 | | - exist->allocate(ts->size()); |
101 | | - } else { |
102 | | - site.allocate(ts->size()); |
103 | | - _sites.add(site); |
104 | | - } |
105 | | - ts = itr.next(); |
106 | | - } |
107 | | - } |
108 | | - |
109 | | - // Piggyback to malloc snapshot |
110 | | - LinkedListIterator<MallocSite> site_itr(_sites.head()); |
111 | | - const MallocSite* s = site_itr.next(); |
112 | | - while (s != nullptr) { |
113 | | - walker->do_malloc_site(s); |
114 | | - s = site_itr.next(); |
115 | | - } |
116 | | - } |
117 | | - return true; |
118 | | -} |
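Also not part of the patch: a toy model of the simplified bookkeeping, with a std::map standing in for the real VirtualMemoryTracker. It illustrates why new_thread_stack and delete_thread_stack must both apply the same inward alignment, so the release is keyed by exactly the (base, size) that was recorded; names, addresses, and the 4 KiB page size are assumptions for the sketch.

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <map>

// Toy stand-in for the reserved-region bookkeeping, keyed by the aligned base.
static std::map<uintptr_t, size_t> g_regions;

static uintptr_t align_up_to(uintptr_t p, uintptr_t a)   { return (p + a - 1) & ~(a - 1); }
static uintptr_t align_down_to(uintptr_t p, uintptr_t a) { return p & ~(a - 1); }

static void align_inward(uintptr_t& base, size_t& size, uintptr_t page) {
  base = align_up_to(base, page);
  size = align_down_to(size, page);
}

static void new_thread_stack(uintptr_t base, size_t size) {
  align_inward(base, size, 4096);
  g_regions[base] = size;               // add_reserved_region(...) in the real tracker
}

static void delete_thread_stack(uintptr_t base, size_t size) {
  align_inward(base, size, 4096);       // same alignment, so the lookup matches
  auto it = g_regions.find(base);
  assert(it != g_regions.end() && it->second == size);
  g_regions.erase(it);                  // remove_released_region(...) in the real tracker
}

int main() {
  new_thread_stack(0x7f0000001234, 512 * 1024);
  delete_thread_stack(0x7f0000001234, 512 * 1024);  // unaligned inputs, consistent result
  assert(g_regions.empty());
  return 0;
}
```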