
8258414: OldObjectSample events too expensive #70

Closed
wants to merge 1 commit into from
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -199,28 +199,23 @@ static bool stack_trace_precondition(const ObjectSample* sample) {

class StackTraceBlobInstaller {
private:
const JfrStackTraceRepository& _stack_trace_repo;
BlobCache _cache;
const JfrStackTrace* resolve(const ObjectSample* sample);
void install(ObjectSample* sample);
const JfrStackTrace* resolve(const ObjectSample* sample) const;
public:
StackTraceBlobInstaller(const JfrStackTraceRepository& stack_trace_repo);
StackTraceBlobInstaller() : _cache(JfrOptionSet::old_object_queue_size()) {
prepare_for_resolution();
}
~StackTraceBlobInstaller() {
JfrStackTraceRepository::clear_leak_profiler();
}
void sample_do(ObjectSample* sample) {
if (stack_trace_precondition(sample)) {
install(sample);
}
}
};

StackTraceBlobInstaller::StackTraceBlobInstaller(const JfrStackTraceRepository& stack_trace_repo) :
_stack_trace_repo(stack_trace_repo), _cache(JfrOptionSet::old_object_queue_size()) {
prepare_for_resolution();
}

const JfrStackTrace* StackTraceBlobInstaller::resolve(const ObjectSample* sample) {
return _stack_trace_repo.lookup(sample->stack_trace_hash(), sample->stack_trace_id());
}

#ifdef ASSERT
static void validate_stack_trace(const ObjectSample* sample, const JfrStackTrace* stack_trace) {
assert(!sample->has_stacktrace(), "invariant");
Expand All @@ -230,6 +225,10 @@ static void validate_stack_trace(const ObjectSample* sample, const JfrStackTrace
}
#endif

inline const JfrStackTrace* StackTraceBlobInstaller::resolve(const ObjectSample* sample) const {
return JfrStackTraceRepository::lookup_for_leak_profiler(sample->stack_trace_hash(), sample->stack_trace_id());
}

void StackTraceBlobInstaller::install(ObjectSample* sample) {
JfrBlobHandle blob = _cache.get(sample);
if (blob.valid()) {
Expand All @@ -242,23 +241,23 @@ void StackTraceBlobInstaller::install(ObjectSample* sample) {
writer.write_type(TYPE_STACKTRACE);
writer.write_count(1);
ObjectSampleCheckpoint::write_stacktrace(stack_trace, writer);
blob = writer.move();
blob = writer.copy();
_cache.put(sample, blob);
sample->set_stacktrace(blob);
}

static void install_stack_traces(const ObjectSampler* sampler, JfrStackTraceRepository& stack_trace_repo) {
static void install_stack_traces(const ObjectSampler* sampler) {
assert(sampler != NULL, "invariant");
const ObjectSample* const last = sampler->last();
if (last != sampler->last_resolved()) {
ResourceMark rm;
JfrKlassUnloading::sort();
StackTraceBlobInstaller installer(stack_trace_repo);
StackTraceBlobInstaller installer;
iterate_samples(installer);
}
}

void ObjectSampleCheckpoint::on_rotation(const ObjectSampler* sampler, JfrStackTraceRepository& stack_trace_repo) {
void ObjectSampleCheckpoint::on_rotation(const ObjectSampler* sampler) {
assert(sampler != NULL, "invariant");
assert(LeakProfiler::is_running(), "invariant");
Thread* const thread = Thread::current();
Expand All @@ -267,7 +266,7 @@ void ObjectSampleCheckpoint::on_rotation(const ObjectSampler* sampler, JfrStackT
ThreadInVMfromNative transition((JavaThread*)thread);
MutexLocker lock(ClassLoaderDataGraph_lock);
// the lock is needed to ensure the unload lists do not grow in the middle of inspection.
install_stack_traces(sampler, stack_trace_repo);
install_stack_traces(sampler);
}

static bool is_klass_unloaded(traceid klass_id) {
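As an aside, the install() path above is a straightforward memoization scheme: the blob cache is consulted first, and a new checkpoint blob is written and published only on a miss, so samples that share a stack trace share one serialized blob. A minimal standalone sketch of that pattern follows; the types and names are illustrative stand-ins, not the JFR API.

#include <cstdint>
#include <memory>
#include <unordered_map>
#include <vector>

// Illustrative stand-ins for ObjectSample, JfrBlobHandle and the blob cache.
using Blob = std::shared_ptr<const std::vector<uint8_t>>;

struct SampleSketch {
  uint64_t trace_id = 0;   // plays the role of sample->stack_trace_id()
  Blob stacktrace;         // plays the role of sample->set_stacktrace(blob)
};

class BlobCacheSketch {
  std::unordered_map<uint64_t, Blob> _cache;
 public:
  void install(SampleSketch& sample, const std::vector<uint8_t>& serialized_trace) {
    auto it = _cache.find(sample.trace_id);
    if (it != _cache.end()) {
      sample.stacktrace = it->second;        // cache hit: reuse the existing blob
      return;
    }
    Blob blob = std::make_shared<const std::vector<uint8_t>>(serialized_trace);
    _cache.emplace(sample.trace_id, blob);   // cache miss: serialize once, publish
    sample.stacktrace = blob;
  }
};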
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,7 +33,6 @@ class InstanceKlass;
class JavaThread;
class JfrCheckpointWriter;
class JfrStackTrace;
class JfrStackTraceRepository;
class Klass;
class ObjectSample;
class ObjectSampleMarker;
Expand All @@ -53,7 +52,7 @@ class ObjectSampleCheckpoint : AllStatic {
static void on_type_set(JfrCheckpointWriter& writer);
static void on_type_set_unload(JfrCheckpointWriter& writer);
static void on_thread_exit(JavaThread* jt);
static void on_rotation(const ObjectSampler* sampler, JfrStackTraceRepository& repo);
static void on_rotation(const ObjectSampler* sampler);
};

#endif // SHARE_JFR_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLECHECKPOINT_HPP
23 changes: 17 additions & 6 deletions src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp
@@ -120,12 +120,23 @@ static traceid get_thread_id(JavaThread* thread) {
return tl->thread_id();
}

static void record_stacktrace(JavaThread* thread) {
assert(thread != NULL, "invariant");
if (JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) {
JfrStackTraceRepository::record_and_cache(thread);
class RecordStackTrace {
private:
JavaThread* _jt;
bool _enabled;
public:
RecordStackTrace(JavaThread* jt) : _jt(jt),
_enabled(JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) {
if (_enabled) {
JfrStackTraceRepository::record_for_leak_profiler(jt);
}
}
}
~RecordStackTrace() {
if (_enabled) {
_jt->jfr_thread_local()->clear_cached_stack_trace();
}
}
};

void ObjectSampler::sample(HeapWord* obj, size_t allocated, JavaThread* thread) {
assert(thread != NULL, "invariant");
Expand All @@ -134,7 +145,7 @@ void ObjectSampler::sample(HeapWord* obj, size_t allocated, JavaThread* thread)
if (thread_id == 0) {
return;
}
record_stacktrace(thread);
RecordStackTrace rst(thread);
// try enter critical section
JfrTryLock tryLock(&_lock);
if (!tryLock.has_lock()) {
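The switch above from the record_stacktrace() helper to the RecordStackTrace class is a scope-guard (RAII) change: the stack trace is recorded into the leak-profiler repository on entry to sample(), and the thread-local cached trace is cleared automatically when the guard goes out of scope, including on the early returns around the try-lock. Below is a minimal standalone sketch of the idiom, with hypothetical names standing in for the JFR thread-local state.

// Hypothetical stand-ins; only the scope-guard shape mirrors RecordStackTrace.
struct ThreadLocalSketch {
  long cached_trace_id = 0;
  void clear_cached_stack_trace() { cached_trace_id = 0; }
};

class ScopedStackTrace {
  ThreadLocalSketch& _tl;
  bool _enabled;
 public:
  ScopedStackTrace(ThreadLocalSketch& tl, bool enabled) : _tl(tl), _enabled(enabled) {
    if (_enabled) {
      _tl.cached_trace_id = 42;               // record on construction
    }
  }
  ~ScopedStackTrace() {
    if (_enabled) {
      _tl.clear_cached_stack_trace();         // always cleared on destruction
    }
  }
};

void sample_sketch(ThreadLocalSketch& tl, bool lock_acquired) {
  ScopedStackTrace guard(tl, /* enabled */ true);
  if (!lock_acquired) {
    return;                                   // early return: guard still cleans up
  }
  // ... store the sample while the cached trace id is available ...
}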
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -100,7 +100,6 @@ class JfrTraceId : public AllStatic {
static traceid load_raw(jclass jc);
static traceid load_raw(const Thread* thread);
static traceid load_raw(const Method* method);
static traceid load_raw(const Klass* klass, const Method* method);
static traceid load_raw(const ModuleEntry* module);
static traceid load_raw(const PackageEntry* package);
static traceid load_raw(const ClassLoaderData* cld);
9 changes: 5 additions & 4 deletions src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -437,7 +437,7 @@ void JfrRecorderService::clear() {
void JfrRecorderService::pre_safepoint_clear() {
_string_pool.clear();
_storage.clear();
_stack_trace_repository.clear();
JfrStackTraceRepository::clear();
}

void JfrRecorderService::invoke_safepoint_clear() {
Expand All @@ -452,7 +452,7 @@ void JfrRecorderService::safepoint_clear() {
_string_pool.clear();
_storage.clear();
_chunkwriter.set_time_stamp();
_stack_trace_repository.clear();
JfrStackTraceRepository::clear();
_checkpoint_manager.end_epoch_shift();
}

@@ -539,7 +539,7 @@ void JfrRecorderService::pre_safepoint_write() {
if (LeakProfiler::is_running()) {
// Exclusive access to the object sampler instance.
// The sampler is released (unlocked) later in post_safepoint_write.
ObjectSampleCheckpoint::on_rotation(ObjectSampler::acquire(), _stack_trace_repository);
ObjectSampleCheckpoint::on_rotation(ObjectSampler::acquire());
}
if (_string_pool.is_modified()) {
write_stringpool(_string_pool, _chunkwriter);
Expand All @@ -560,6 +560,7 @@ void JfrRecorderService::invoke_safepoint_write() {
void JfrRecorderService::safepoint_write() {
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
_checkpoint_manager.begin_epoch_shift();
JfrStackTraceRepository::clear_leak_profiler();
if (_string_pool.is_modified()) {
write_stringpool(_string_pool, _chunkwriter);
}
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,18 +30,38 @@
#include "jfr/support/jfrThreadLocal.hpp"
#include "runtime/mutexLocker.hpp"

static JfrStackTraceRepository* _instance = NULL;
/*
* There are two separate repository instances.
* One instance is dedicated to stacktraces taken as part of the leak profiler subsystem.
* It is kept separate because at the point of insertion, it is unclear if a trace will be serialized,
* which is a decision postponed and taken during rotation.
*/

JfrStackTraceRepository::JfrStackTraceRepository() : _next_id(0), _entries(0) {
memset(_table, 0, sizeof(_table));
}
static JfrStackTraceRepository* _instance = NULL;
static JfrStackTraceRepository* _leak_profiler_instance = NULL;
static traceid _next_id = 0;

JfrStackTraceRepository& JfrStackTraceRepository::instance() {
assert(_instance != NULL, "invariant");
return *_instance;
}

static JfrStackTraceRepository& leak_profiler_instance() {
assert(_leak_profiler_instance != NULL, "invariant");
return *_leak_profiler_instance;
}

JfrStackTraceRepository::JfrStackTraceRepository() : _last_entries(0), _entries(0) {
memset(_table, 0, sizeof(_table));
}

JfrStackTraceRepository* JfrStackTraceRepository::create() {
assert(_instance == NULL, "invariant");
assert(_leak_profiler_instance == NULL, "invariant");
_leak_profiler_instance = new JfrStackTraceRepository();
if (_leak_profiler_instance == NULL) {
return NULL;
}
_instance = new JfrStackTraceRepository();
return _instance;
}
@@ -69,12 +89,12 @@ void JfrStackTraceRepository::destroy() {
assert(_instance != NULL, "invarinat");
delete _instance;
_instance = NULL;
delete _leak_profiler_instance;
_leak_profiler_instance = NULL;
}

static traceid last_id = 0;

bool JfrStackTraceRepository::is_modified() const {
return last_id != _next_id;
return _last_entries != _entries;
}

size_t JfrStackTraceRepository::write(JfrChunkWriter& sw, bool clear) {
@@ -102,26 +122,27 @@ size_t JfrStackTraceRepository::write(JfrChunkWriter& sw, bool clear) {
memset(_table, 0, sizeof(_table));
_entries = 0;
}
last_id = _next_id;
_last_entries = _entries;
return count;
}

size_t JfrStackTraceRepository::clear() {
size_t JfrStackTraceRepository::clear(JfrStackTraceRepository& repo) {
MutexLocker lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag);
if (_entries == 0) {
if (repo._entries == 0) {
return 0;
}
for (u4 i = 0; i < TABLE_SIZE; ++i) {
JfrStackTrace* stacktrace = _table[i];
JfrStackTrace* stacktrace = repo._table[i];
while (stacktrace != NULL) {
JfrStackTrace* next = const_cast<JfrStackTrace*>(stacktrace->next());
delete stacktrace;
stacktrace = next;
}
}
memset(_table, 0, sizeof(_table));
const size_t processed = _entries;
_entries = 0;
memset(repo._table, 0, sizeof(repo._table));
const size_t processed = repo._entries;
repo._entries = 0;
repo._last_entries = 0;
return processed;
}

Expand All @@ -147,20 +168,23 @@ traceid JfrStackTraceRepository::record(Thread* thread, int skip /* 0 */) {

traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, JfrStackFrame *frames, u4 max_frames) {
JfrStackTrace stacktrace(frames, max_frames);
return stacktrace.record_safe(thread, skip) ? add(stacktrace) : 0;
return stacktrace.record_safe(thread, skip) ? add(instance(), stacktrace) : 0;
}

traceid JfrStackTraceRepository::add(const JfrStackTrace& stacktrace) {
traceid tid = instance().add_trace(stacktrace);
traceid JfrStackTraceRepository::add(JfrStackTraceRepository& repo, const JfrStackTrace& stacktrace) {
traceid tid = repo.add_trace(stacktrace);
if (tid == 0) {
stacktrace.resolve_linenos();
tid = instance().add_trace(stacktrace);
tid = repo.add_trace(stacktrace);
}
assert(tid != 0, "invariant");
return tid;
}

void JfrStackTraceRepository::record_and_cache(JavaThread* thread, int skip /* 0 */) {
traceid JfrStackTraceRepository::add(const JfrStackTrace& stacktrace) {
return add(instance(), stacktrace);
}

void JfrStackTraceRepository::record_for_leak_profiler(JavaThread* thread, int skip /* 0 */) {
assert(thread != NULL, "invariant");
JfrThreadLocal* const tl = thread->jfr_thread_local();
assert(tl != NULL, "invariant");
Expand All @@ -169,7 +193,7 @@ void JfrStackTraceRepository::record_and_cache(JavaThread* thread, int skip /* 0
stacktrace.record_safe(thread, skip);
const unsigned int hash = stacktrace.hash();
if (hash != 0) {
tl->set_cached_stack_trace_id(instance().add(stacktrace), hash);
tl->set_cached_stack_trace_id(add(leak_profiler_instance(), stacktrace), hash);
}
}

Expand All @@ -196,9 +220,9 @@ traceid JfrStackTraceRepository::add_trace(const JfrStackTrace& stacktrace) {
}

// invariant is that the entry to be resolved actually exists in the table
const JfrStackTrace* JfrStackTraceRepository::lookup(unsigned int hash, traceid id) const {
const JfrStackTrace* JfrStackTraceRepository::lookup_for_leak_profiler(unsigned int hash, traceid id) {
const size_t index = (hash % TABLE_SIZE);
const JfrStackTrace* trace = _table[index];
const JfrStackTrace* trace = leak_profiler_instance()._table[index];
while (trace != NULL && trace->id() != id) {
trace = trace->next();
}
Expand All @@ -207,3 +231,12 @@ const JfrStackTrace* JfrStackTraceRepository::lookup(unsigned int hash, traceid
assert(trace->id() == id, "invariant");
return trace;
}

void JfrStackTraceRepository::clear_leak_profiler() {
clear(leak_profiler_instance());
}

size_t JfrStackTraceRepository::clear() {
clear_leak_profiler();
return clear(instance());
}
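To summarize the repository split in one place: sampling now records into a dedicated leak-profiler table, rotation resolves traces from that table via lookup_for_leak_profiler() and serializes them as blobs, and the table is then cleared, while the regular repository keeps serving ordinary events. The schematic sketch below mirrors that flow under simplified, assumed types; it is not the HotSpot code.

#include <cstddef>
#include <cstdint>
#include <unordered_map>

// Two independent trace tables, mirroring _instance / _leak_profiler_instance.
struct TraceTableSketch {
  std::unordered_map<uint64_t, int> traces;   // id -> stand-in for JfrStackTrace
  size_t clear() {
    const size_t n = traces.size();
    traces.clear();
    return n;
  }
};

static TraceTableSketch regular_repo;         // ordinary JFR events
static TraceTableSketch leak_profiler_repo;   // OldObjectSample traces, kept until rotation

// Sample time: the leak profiler records into its own table.
uint64_t record_for_leak_profiler_sketch(uint64_t id) {
  leak_profiler_repo.traces[id] = 0;
  return id;
}

// Rotation: only traces still referenced by live samples are looked up and serialized.
const int* lookup_for_leak_profiler_sketch(uint64_t id) {
  auto it = leak_profiler_repo.traces.find(id);
  return it == leak_profiler_repo.traces.end() ? nullptr : &it->second;
}

// Clearing mirrors the new static entry points: clear() drains both tables,
// clear_leak_profiler() only the leak profiler's.
size_t clear_leak_profiler_sketch() { return leak_profiler_repo.clear(); }
size_t clear_all_sketch() {
  clear_leak_profiler_sketch();
  return regular_repo.clear();
}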