8293167: Memory leak in JfrThreadSampler if stackdepth is larger than default (64)

Reviewed-by: jbachorik
Markus Grönlund committed Sep 5, 2022
1 parent 4067321 commit 48b3ab0
Showing 6 changed files with 60 additions and 30 deletions.
10 changes: 4 additions & 6 deletions src/hotspot/share/jfr/periodic/sampling/jfrThreadSampler.cpp
@@ -328,7 +328,6 @@ class JfrThreadSampler : public NonJavaThread {
   int64_t _java_period_millis;
   int64_t _native_period_millis;
   const size_t _min_size; // for enqueue buffer monitoring
-  const size_t _renew_size;
   int _cur_index;
   const u4 _max_frames;
   volatile bool _disenrolled;
@@ -405,8 +404,7 @@ JfrThreadSampler::JfrThreadSampler(int64_t java_period_millis, int64_t native_pe
   _last_thread_native(NULL),
   _java_period_millis(java_period_millis),
   _native_period_millis(native_period_millis),
-  _min_size(JfrOptionSet::stackdepth() * sizeof(intptr_t)),
-  _renew_size(_min_size * 2),
+  _min_size(max_frames * 2 * wordSize), // each frame tags at most 2 words, min size is a full stacktrace
  _cur_index(-1),
   _max_frames(max_frames),
   _disenrolled(true) {
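
For concreteness: the old code reserved one word per frame (stackdepth * sizeof(intptr_t)), while the new comment notes that a frame can tag up to two words. A standalone sketch of both formulas, assuming a 64-bit VM where wordSize == sizeof(intptr_t) == 8 (illustrative code, not part of the commit):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <initializer_list>

// Illustrative only: the old vs. new _min_size formulas, assuming a 64-bit
// VM where wordSize == sizeof(intptr_t) == 8.
int main() {
  const size_t wordSize = sizeof(intptr_t);
  for (size_t stackdepth : {64, 65, 128}) {
    const size_t old_min = stackdepth * sizeof(intptr_t); // one word per frame
    const size_t new_min = stackdepth * 2 * wordSize;     // two words per frame
    std::printf("stackdepth %4zu: old _min_size %5zu bytes, new _min_size %5zu bytes\n",
                stackdepth, old_min, new_min);
  }
  return 0;
}

At the default stackdepth of 64, the old _renew_size (_min_size * 2) came to exactly 1K, the default enqueue buffer size; for any larger stackdepth it exceeded the buffer, which is the regime the bug title describes.
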
@@ -553,13 +551,13 @@ void JfrThreadSampler::post_run() {
 }
 
 const JfrBuffer* JfrThreadSampler::get_enqueue_buffer() {
-  const JfrBuffer* buffer = JfrTraceIdLoadBarrier::get_enqueue_buffer(this);
-  return buffer != nullptr ? renew_if_full(buffer) : JfrTraceIdLoadBarrier::renew_enqueue_buffer(_renew_size, this);
+  const JfrBuffer* buffer = JfrTraceIdLoadBarrier::get_sampler_enqueue_buffer(this);
+  return buffer != nullptr ? renew_if_full(buffer) : JfrTraceIdLoadBarrier::renew_sampler_enqueue_buffer(this);
 }
 
 const JfrBuffer* JfrThreadSampler::renew_if_full(const JfrBuffer* enqueue_buffer) {
   assert(enqueue_buffer != nullptr, "invariant");
-  return enqueue_buffer->free_size() < _min_size ? JfrTraceIdLoadBarrier::renew_enqueue_buffer(_renew_size, this) : enqueue_buffer;
+  return enqueue_buffer->free_size() < _min_size ? JfrTraceIdLoadBarrier::renew_sampler_enqueue_buffer(this) : enqueue_buffer;
 }
 
 void JfrThreadSampler::task_stacktrace(JfrSampleType type, JavaThread** last_thread) {
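
The two functions above form a pre-flight check: fetch the sampler's thread-local enqueue buffer and renew it whenever free space falls below _min_size, one full stack trace. A self-contained model of the pattern, with a hypothetical Buffer type and renew() stand-in (illustrative, not HotSpot code):

#include <cassert>
#include <cstddef>
#include <cstdio>

// Hypothetical stand-ins for JfrBuffer and the load-barrier renew call.
struct Buffer {
  size_t capacity;
  size_t used;
  size_t free_size() const { return capacity - used; }
};

static Buffer fresh{2048, 0}; // stand-in for a preallocated replacement buffer

static const Buffer* renew() { return &fresh; }

// Mirrors JfrThreadSampler::renew_if_full: swap buffers while allocation is
// still possible, before free space drops below one full stack trace.
static const Buffer* renew_if_full(const Buffer* buf, size_t min_size) {
  assert(buf != nullptr);
  return buf->free_size() < min_size ? renew() : buf;
}

int main() {
  Buffer current{1024, 900};          // only 124 bytes left
  const size_t min_size = 64 * 2 * 8; // stackdepth 64, two words per frame, 64-bit
  const Buffer* result = renew_if_full(&current, min_size);
  std::printf("renewed: %s\n", result == &fresh ? "yes" : "no"); // prints "yes"
  return 0;
}
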
src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdKlassQueue.cpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -251,7 +251,7 @@ JfrBuffer* JfrTraceIdKlassQueue::get_enqueue_buffer(Thread* thread) {
   return _queue->thread_local_storage(thread);
 }
 
-JfrBuffer* JfrTraceIdKlassQueue::renew_enqueue_buffer(size_t size, Thread* thread) {
+JfrBuffer* JfrTraceIdKlassQueue::renew_enqueue_buffer(Thread* thread, size_t size /* 0 */) {
   return _queue->renew(size, thread);
 }

src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdKlassQueue.hpp

@@ -69,7 +69,7 @@ class JfrTraceIdKlassQueue : public JfrCHeapObj {
  private:
   JfrEpochQueue<JfrEpochQueueKlassPolicy>* _queue;
   JfrBuffer* get_enqueue_buffer(Thread* thread);
-  JfrBuffer* renew_enqueue_buffer(size_t size, Thread* thread);
+  JfrBuffer* renew_enqueue_buffer(Thread* thread, size_t size = 0);
  public:
   JfrTraceIdKlassQueue();
   ~JfrTraceIdKlassQueue();
src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdLoadBarrier.cpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,54 +25,86 @@
 #include "precompiled.hpp"
 #include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdLoadBarrier.inline.hpp"
 #include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdKlassQueue.hpp"
+#include "jfr/recorder/service/jfrOptionSet.hpp"
 #include "jfr/support/jfrThreadLocal.hpp"
 #include "jfr/utilities/jfrEpochQueue.inline.hpp"
 #include "runtime/javaThread.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "utilities/powerOfTwo.hpp"
 
-// The queue instance used by the load barrier to enqueue tagged Klass'es.
-static JfrTraceIdKlassQueue* _klass_queue = NULL;
+// The queue instances are used by the load barrier to enqueue tagged Klass'es.
+static JfrTraceIdKlassQueue* _klass_queue = nullptr; // Generic for all Java threads.
+static JfrTraceIdKlassQueue* _sampler_klass_queue = nullptr; // Specialized for the Jfr Thread Sampler using a larger buffer size.
 
 static JfrTraceIdKlassQueue& klass_queue() {
-  assert(_klass_queue != NULL, "invariant");
+  assert(_klass_queue != nullptr, "invariant");
   return *_klass_queue;
 }
 
-const size_t buffer_size_bytes = 1 * K; // min_elem_size of storage unit
-const size_t prealloc_count = 32;
+static JfrTraceIdKlassQueue& sampler_klass_queue() {
+  assert(_sampler_klass_queue != nullptr, "invariant");
+  return *_sampler_klass_queue;
+}
+
+const constexpr size_t buffer_size_bytes = 1 * K; // min_elem_size of storage unit
+const constexpr size_t prealloc_count = 32;
+const constexpr size_t sampler_prealloc_count = 2;
+
+// The sampler thread cannot renew a buffer in-flight because it cannot acquire the malloc lock.
+// It must therefore pre-allocate at least a full stack trace of buffer space before it can suspend a thread.
+// This pre-allocation implies the need for a larger buffer size compared to other threads, a size that is a function
+// of the stack depth parameter. For proper accommodation, there is a specialized queue only for the Jfr Sampler Thread.
+static size_t derive_sampler_buffer_size() {
+  size_t stackdepth_bytes = JfrOptionSet::stackdepth() * 2 * wordSize; // each frame tags at most 2 words
+  stackdepth_bytes = round_up_power_of_2(stackdepth_bytes * 2); // accommodate at least two full stacktraces
+  return MAX2(stackdepth_bytes, buffer_size_bytes);
+}
 
 bool JfrTraceIdLoadBarrier::initialize() {
-  assert(_klass_queue == NULL, "invariant");
+  assert(_klass_queue == nullptr, "invariant");
   _klass_queue = new JfrTraceIdKlassQueue();
-  return _klass_queue != NULL && _klass_queue->initialize(buffer_size_bytes, JFR_MSPACE_UNLIMITED_CACHE_SIZE, prealloc_count);
+  if (_klass_queue == nullptr || !_klass_queue->initialize(buffer_size_bytes, JFR_MSPACE_UNLIMITED_CACHE_SIZE, prealloc_count)) {
+    return false;
+  }
+  assert(_sampler_klass_queue == nullptr, "invariant");
+  const size_t sampler_buffer_size_bytes = derive_sampler_buffer_size();
+  assert(is_power_of_2(sampler_buffer_size_bytes), "invariant");
+  _sampler_klass_queue = new JfrTraceIdKlassQueue();
+  return _sampler_klass_queue != nullptr && _sampler_klass_queue->initialize(sampler_buffer_size_bytes, JFR_MSPACE_UNLIMITED_CACHE_SIZE, sampler_prealloc_count);
 }
 
 void JfrTraceIdLoadBarrier::clear() {
-  if (_klass_queue != NULL) {
+  if (_klass_queue != nullptr) {
     _klass_queue->clear();
   }
+  if (_sampler_klass_queue != nullptr) {
+    _sampler_klass_queue->clear();
+  }
 }
 
 void JfrTraceIdLoadBarrier::destroy() {
   delete _klass_queue;
-  _klass_queue = NULL;
+  _klass_queue = nullptr;
+  delete _sampler_klass_queue;
+  _sampler_klass_queue = nullptr;
 }
 
 void JfrTraceIdLoadBarrier::enqueue(const Klass* klass) {
-  assert(klass != NULL, "invariant");
+  assert(klass != nullptr, "invariant");
   assert(USED_THIS_EPOCH(klass), "invariant");
   klass_queue().enqueue(klass);
 }
 
-void JfrTraceIdLoadBarrier::do_klasses(klass_callback callback, bool previous_epoch) {
-  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
-  klass_queue().iterate(callback, previous_epoch);
+JfrBuffer* JfrTraceIdLoadBarrier::get_sampler_enqueue_buffer(Thread* thread) {
+  return sampler_klass_queue().get_enqueue_buffer(thread);
 }
 
-JfrBuffer* JfrTraceIdLoadBarrier::get_enqueue_buffer(Thread* thread) {
-  return klass_queue().get_enqueue_buffer(thread);
+JfrBuffer* JfrTraceIdLoadBarrier::renew_sampler_enqueue_buffer(Thread* thread) {
+  return sampler_klass_queue().renew_enqueue_buffer(thread);
 }
 
-JfrBuffer* JfrTraceIdLoadBarrier::renew_enqueue_buffer(size_t size, Thread* thread) {
-  return klass_queue().renew_enqueue_buffer(size, thread);
+void JfrTraceIdLoadBarrier::do_klasses(klass_callback callback, bool previous_epoch) {
+  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
+  klass_queue().iterate(callback, previous_epoch);
+  sampler_klass_queue().iterate(callback, previous_epoch);
 }
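
The derived size is easy to sanity-check with concrete numbers. Below is a standalone re-implementation of derive_sampler_buffer_size with local stand-ins for HotSpot's K, wordSize, MAX2 and round_up_power_of_2, assuming a 64-bit VM (illustrative, not part of the commit):

#include <cstddef>
#include <cstdio>
#include <initializer_list>

const size_t K = 1024;     // stand-in for HotSpot's K
const size_t wordSize = 8; // assumption: 64-bit VM

static size_t round_up_power_of_2(size_t v) {
  size_t p = 1;
  while (p < v) p <<= 1;
  return p;
}

// Mirrors derive_sampler_buffer_size() above, parameterized by stackdepth.
static size_t derive_sampler_buffer_size(size_t stackdepth) {
  size_t stackdepth_bytes = stackdepth * 2 * wordSize;          // two words per frame
  stackdepth_bytes = round_up_power_of_2(stackdepth_bytes * 2); // two full stacktraces
  return stackdepth_bytes > 1 * K ? stackdepth_bytes : 1 * K;   // MAX2(..., buffer_size_bytes)
}

int main() {
  for (size_t depth : {64, 65, 128, 2048}) {
    std::printf("stackdepth %4zu -> sampler buffer %6zu bytes\n",
                depth, derive_sampler_buffer_size(depth));
  }
  return 0;
}

This prints 2048, 4096, 4096 and 65536 bytes respectively. Note also that the sampler queue preallocates only sampler_prealloc_count (2) of these larger buffers, while the generic queue preallocates 32 of the 1K buffers.
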
src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdLoadBarrier.hpp

@@ -78,8 +78,8 @@ class JfrTraceIdLoadBarrier : AllStatic {
   static void destroy();
   static void enqueue(const Klass* klass);
   static void load_barrier(const Klass* klass);
-  static JfrBuffer* get_enqueue_buffer(Thread* thread);
-  static JfrBuffer* renew_enqueue_buffer(size_t size, Thread* thread);
+  static JfrBuffer* get_sampler_enqueue_buffer(Thread* thread);
+  static JfrBuffer* renew_sampler_enqueue_buffer(Thread* thread);
  public:
   static traceid load(const ClassLoaderData* cld);
   static traceid load(const Klass* klass);
src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.cpp

@@ -226,7 +226,7 @@ bool JfrStackTrace::record_async(JavaThread* jt, const frame& frame) {
   // We do this because if space becomes sparse, we cannot rely on the implicit allocation of a new buffer as part of the
   // regular tag mechanism. If the free list is empty, a malloc could result, and the problem with that is that the thread
   // we have suspended could be the holder of the malloc lock. If there is no more available space, the attempt is aborted.
-  const JfrBuffer* const enqueue_buffer = JfrTraceIdLoadBarrier::get_enqueue_buffer(current_thread);
+  const JfrBuffer* const enqueue_buffer = JfrTraceIdLoadBarrier::get_sampler_enqueue_buffer(current_thread);
   HandleMark hm(current_thread); // RegisterMap uses Handles to support continuations.
   JfrVframeStream vfs(jt, frame, false, true);
   u4 count = 0;
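
Together with the pre-allocation comment in jfrTraceIdLoadBarrier.cpp, the comment above describes an ordering protocol: secure enqueue-buffer space while the target thread is still running, then suspend and walk the stack without allocating, aborting the sample if no space is available. A minimal sketch of that protocol, with all types and helpers as hypothetical stand-ins (illustrative, not HotSpot code):

#include <cstddef>
#include <cstdio>

// Hypothetical stand-ins; only the ordering mirrors the real constraint.
struct Thread {};
struct Buffer { size_t free_bytes; };

static Buffer preallocated{4096};

// May allocate, so it must run while the target thread is still executing.
static Buffer* ensure_enqueue_space() { return &preallocated; }

static void suspend(Thread*) {}                     // target may hold the malloc lock
static void walk_stack_and_tag(Thread*, Buffer*) {} // must not allocate from here on
static void resume(Thread*) {}

static void sample_thread(Thread* target) {
  Buffer* buf = ensure_enqueue_space(); // 1. secure space up front
  if (buf == nullptr) {
    return;                             // 2. no space: abort rather than risk a malloc deadlock
  }
  suspend(target);                      // 3. target may now sit inside malloc
  walk_stack_and_tag(target, buf);      // 4. allocation-free stack walk
  resume(target);
}

int main() {
  Thread t;
  sample_thread(&t);
  std::printf("sampled without allocating while suspended\n");
  return 0;
}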
