  1 |   1 |  /*
  2 |     | - * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
    |   2 | + * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
  3 |   3 |   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4 |   4 |   *
  5 |   5 |   * This code is free software; you can redistribute it and/or modify it

@@ -33,12 +33,6 @@ MallocSiteHashtableEntry* MallocSiteTable::_table[MallocSiteTable::table_size];

 33 |  33 |  const NativeCallStack* MallocSiteTable::_hash_entry_allocation_stack = NULL;
 34 |  34 |  const MallocSiteHashtableEntry* MallocSiteTable::_hash_entry_allocation_site = NULL;
 35 |  35 |
 36 |     | -// concurrent access counter
 37 |     | -volatile int MallocSiteTable::_access_count = 0;
 38 |     | -
 39 |     | -// Tracking hashtable contention
 40 |     | -NOT_PRODUCT(int MallocSiteTable::_peak_count = 0;)
 41 |     | -
 42 |  36 |  /*
 43 |  37 |   * Initialize malloc site table.
 44 |  38 |   * Hashtable entry is malloc'd, so it can cause infinite recursion.
@@ -202,122 +196,81 @@ void MallocSiteTable::delete_linked_list(MallocSiteHashtableEntry* head) {

202 | 196 |    }
203 | 197 |  }
204 | 198 |
205 |     | -void MallocSiteTable::shutdown() {
206 |     | -  AccessLock locker(&_access_count);
207 |     | -  locker.exclusiveLock();
208 |     | -  reset();
209 |     | -}
210 |     | -
211 | 199 |  bool MallocSiteTable::walk_malloc_site(MallocSiteWalker* walker) {
212 | 200 |    assert(walker != NULL, "NuLL walker");
213 |     | -  AccessLock locker(&_access_count);
214 |     | -  if (locker.sharedLock()) {
215 |     | -    NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
216 |     | -    return walk(walker);
217 |     | -  }
218 |     | -  return false;
219 |     | -}
220 |     | -
221 |     | -
222 |     | -void MallocSiteTable::AccessLock::exclusiveLock() {
223 |     | -  int target;
224 |     | -  int val;
225 |     | -
226 |     | -  assert(_lock_state != ExclusiveLock, "Can only call once");
227 |     | -  assert(*_lock >= 0, "Can not content exclusive lock");
228 |     | -
229 |     | -  // make counter negative to block out shared locks
230 |     | -  do {
231 |     | -    val = *_lock;
232 |     | -    target = _MAGIC_ + *_lock;
233 |     | -  } while (Atomic::cmpxchg(_lock, val, target) != val);
234 |     | -
235 |     | -  // wait for all readers to exit
236 |     | -  while (*_lock != _MAGIC_) {
237 |     | -#ifdef _WINDOWS
238 |     | -    os::naked_short_sleep(1);
239 |     | -#else
240 |     | -    os::naked_yield();
241 |     | -#endif
242 |     | -  }
243 |     | -  _lock_state = ExclusiveLock;
    | 201 | +  return walk(walker);
244 | 202 |  }
245 | 203 |
246 | 204 |  void MallocSiteTable::print_tuning_statistics(outputStream* st) {
247 |     | -
248 |     | -  AccessLock locker(&_access_count);
249 |     | -  if (locker.sharedLock()) {
250 |     | -    // Total number of allocation sites, include empty sites
251 |     | -    int total_entries = 0;
252 |     | -    // Number of allocation sites that have all memory freed
253 |     | -    int empty_entries = 0;
254 |     | -    // Number of captured call stack distribution
255 |     | -    int stack_depth_distribution[NMT_TrackingStackDepth + 1] = { 0 };
256 |     | -    // Chain lengths
257 |     | -    int lengths[table_size] = { 0 };
258 |     | -
259 |     | -    for (int i = 0; i < table_size; i ++) {
260 |     | -      int this_chain_length = 0;
261 |     | -      const MallocSiteHashtableEntry* head = _table[i];
262 |     | -      while (head != NULL) {
263 |     | -        total_entries ++;
264 |     | -        this_chain_length ++;
265 |     | -        if (head->size() == 0) {
266 |     | -          empty_entries ++;
267 |     | -        }
268 |     | -        const int callstack_depth = head->peek()->call_stack()->frames();
269 |     | -        assert(callstack_depth >= 0 && callstack_depth <= NMT_TrackingStackDepth,
270 |     | -               "Sanity (%d)", callstack_depth);
271 |     | -        stack_depth_distribution[callstack_depth] ++;
272 |     | -        head = head->next();
273 |     | -      }
274 |     | -      lengths[i] = this_chain_length;
275 |     | -    }
276 |     | -
277 |     | -    st->print_cr("Malloc allocation site table:");
278 |     | -    st->print_cr("\tTotal entries: %d", total_entries);
279 |     | -    st->print_cr("\tEmpty entries: %d (%2.2f%%)", empty_entries, ((float)empty_entries * 100) / total_entries);
280 |     | -    st->cr();
281 |     | -
282 |     | -    // We report the hash distribution (chain length distribution) of the n shortest chains
283 |     | -    // - under the assumption that this usually contains all lengths. Reporting threshold
284 |     | -    // is 20, and the expected avg chain length is 5..6 (see table size).
285 |     | -    static const int chain_length_threshold = 20;
286 |     | -    int chain_length_distribution[chain_length_threshold] = { 0 };
287 |     | -    int over_threshold = 0;
288 |     | -    int longest_chain_length = 0;
289 |     | -    for (int i = 0; i < table_size; i ++) {
290 |     | -      if (lengths[i] >= chain_length_threshold) {
291 |     | -        over_threshold ++;
292 |     | -      } else {
293 |     | -        chain_length_distribution[lengths[i]] ++;
    | 205 | +  // Total number of allocation sites, include empty sites
    | 206 | +  int total_entries = 0;
    | 207 | +  // Number of allocation sites that have all memory freed
    | 208 | +  int empty_entries = 0;
    | 209 | +  // Number of captured call stack distribution
    | 210 | +  int stack_depth_distribution[NMT_TrackingStackDepth + 1] = { 0 };
    | 211 | +  // Chain lengths
    | 212 | +  int lengths[table_size] = { 0 };
    | 213 | +
    | 214 | +  for (int i = 0; i < table_size; i ++) {
    | 215 | +    int this_chain_length = 0;
    | 216 | +    const MallocSiteHashtableEntry* head = _table[i];
    | 217 | +    while (head != NULL) {
    | 218 | +      total_entries ++;
    | 219 | +      this_chain_length ++;
    | 220 | +      if (head->size() == 0) {
    | 221 | +        empty_entries ++;
294 | 222 |        }
295 |     | -      longest_chain_length = MAX2(longest_chain_length, lengths[i]);
    | 223 | +      const int callstack_depth = head->peek()->call_stack()->frames();
    | 224 | +      assert(callstack_depth >= 0 && callstack_depth <= NMT_TrackingStackDepth,
    | 225 | +             "Sanity (%d)", callstack_depth);
    | 226 | +      stack_depth_distribution[callstack_depth] ++;
    | 227 | +      head = head->next();
296 | 228 |      }
    | 229 | +    lengths[i] = this_chain_length;
    | 230 | +  }
297 | 231 |
298 |     | -    st->print_cr("Hash distribution:");
299 |     | -    if (chain_length_distribution[0] == 0) {
300 |     | -      st->print_cr("no empty buckets.");
    | 232 | +  st->print_cr("Malloc allocation site table:");
    | 233 | +  st->print_cr("\tTotal entries: %d", total_entries);
    | 234 | +  st->print_cr("\tEmpty entries: %d (%2.2f%%)", empty_entries, ((float)empty_entries * 100) / total_entries);
    | 235 | +  st->cr();
    | 236 | +
    | 237 | +  // We report the hash distribution (chain length distribution) of the n shortest chains
    | 238 | +  // - under the assumption that this usually contains all lengths. Reporting threshold
    | 239 | +  // is 20, and the expected avg chain length is 5..6 (see table size).
    | 240 | +  static const int chain_length_threshold = 20;
    | 241 | +  int chain_length_distribution[chain_length_threshold] = { 0 };
    | 242 | +  int over_threshold = 0;
    | 243 | +  int longest_chain_length = 0;
    | 244 | +  for (int i = 0; i < table_size; i ++) {
    | 245 | +    if (lengths[i] >= chain_length_threshold) {
    | 246 | +      over_threshold ++;
301 | 247 |      } else {
302 |     | -      st->print_cr("%d buckets are empty.", chain_length_distribution[0]);
303 |     | -    }
304 |     | -    for (int len = 1; len < MIN2(longest_chain_length + 1, chain_length_threshold); len ++) {
305 |     | -      st->print_cr("%2d %s: %d.", len, (len == 1 ? " entry" : "entries"), chain_length_distribution[len]);
    | 248 | +      chain_length_distribution[lengths[i]] ++;
306 | 249 |      }
307 |     | -    if (longest_chain_length >= chain_length_threshold) {
308 |     | -      st->print_cr(">=%2d entries: %d.", chain_length_threshold, over_threshold);
309 |     | -    }
310 |     | -    st->print_cr("most entries: %d.", longest_chain_length);
311 |     | -    st->cr();
    | 250 | +    longest_chain_length = MAX2(longest_chain_length, lengths[i]);
    | 251 | +  }
312 | 252 |
313 |     | -    st->print_cr("Call stack depth distribution:");
314 |     | -    for (int i = 0; i <= NMT_TrackingStackDepth; i ++) {
315 |     | -      st->print_cr("\t%d: %d", i, stack_depth_distribution[i]);
316 |     | -    }
317 |     | -    st->cr();
318 |     | -  } // lock
319 |     | -}
    | 253 | +  st->print_cr("Hash distribution:");
    | 254 | +  if (chain_length_distribution[0] == 0) {
    | 255 | +    st->print_cr("no empty buckets.");
    | 256 | +  } else {
    | 257 | +    st->print_cr("%d buckets are empty.", chain_length_distribution[0]);
    | 258 | +  }
    | 259 | +  for (int len = 1; len < MIN2(longest_chain_length + 1, chain_length_threshold); len ++) {
    | 260 | +    st->print_cr("%2d %s: %d.", len, (len == 1 ? " entry" : "entries"), chain_length_distribution[len]);
    | 261 | +  }
    | 262 | +  if (longest_chain_length >= chain_length_threshold) {
    | 263 | +    st->print_cr(">=%2d entries: %d.", chain_length_threshold, over_threshold);
    | 264 | +  }
    | 265 | +  st->print_cr("most entries: %d.", longest_chain_length);
    | 266 | +  st->cr();
320 | 267 |
    | 268 | +  st->print_cr("Call stack depth distribution:");
    | 269 | +  for (int i = 0; i <= NMT_TrackingStackDepth; i ++) {
    | 270 | +    st->print_cr("\t%d: %d", i, stack_depth_distribution[i]);
    | 271 | +  }
    | 272 | +  st->cr();
    | 273 | +}
321 | 274 |
322 | 275 |  bool MallocSiteHashtableEntry::atomic_insert(MallocSiteHashtableEntry* entry) {
323 | 276 |    return Atomic::replace_if_null(&_next, entry);
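With the AccessLock gone, walk_malloc_site reduces to a straight delegation to walk(). Callers hand in a visitor object; assuming the MallocSiteWalker interface from mallocSiteTable.hpp declares a virtual do_malloc_site(const MallocSite*) hook (it is not shown in this diff), a hypothetical walker could look like:

    // Hypothetical visitor that counts sites still holding live memory;
    // the do_malloc_site signature and MallocSite::size() are assumed
    // from mallocSiteTable.hpp, which this diff does not include.
    class LiveSiteCounter : public MallocSiteWalker {
     public:
      int _live = 0;
      virtual bool do_malloc_site(const MallocSite* e) {
        if (e->size() > 0) {
          _live++;           // site has allocations that were never freed
        }
        return true;         // returning true continues the walk
      }
    };

    // Usage:
    //   LiveSiteCounter counter;
    //   MallocSiteTable::walk_malloc_site(&counter);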