8074101: Add verification that all tasks are actually claimed during roots processing

Reviewed-by: kbarrett, tschatzl
albertnetymk authored and Thomas Schatzl committed Jan 18, 2021
1 parent 917f7e9 commit e93f08e
Showing 8 changed files with 73 additions and 44 deletions.
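
The core of the change: SubTasksDone::all_tasks_completed() now takes the ids of any tasks the caller deliberately skips, and in debug builds it asserts that every other task was claimed. A minimal sketch of the intended claim/complete protocol, assuming a made-up task enum (ExampleTasks, EXAMPLE_TASK_A/B and example_worker are illustrative names, not part of this patch):

// Illustrative only: a stand-in for the real per-collector task enums
// (e.g. G1H_process_roots_tasks) touched by this patch.
enum ExampleTasks {
  EXAMPLE_TASK_A,
  EXAMPLE_TASK_B,
  EXAMPLE_NUM_TASKS
};

void example_worker(SubTasksDone& subtasks, uint n_workers) {
  if (subtasks.try_claim_task(EXAMPLE_TASK_A)) {
    // ... work that must run exactly once across all workers ...
  }
  // EXAMPLE_TASK_B is intentionally never claimed on this path, so it is
  // passed as a skipped task; in debug builds the call below asserts that
  // every task except B was claimed, and that B itself was not claimed.
  subtasks.all_tasks_completed(n_workers, EXAMPLE_TASK_B);
}
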
20 changes: 13 additions & 7 deletions src/hotspot/share/gc/g1/g1RootProcessor.cpp
@@ -73,7 +73,8 @@ void G1RootProcessor::evacuate_roots(G1ParScanThreadState* pss, uint worker_id)
     }
   }
 
-  _process_strong_tasks.all_tasks_completed(n_workers());
+  // CodeCache is already processed in java roots
+  _process_strong_tasks.all_tasks_completed(n_workers(), G1RP_PS_CodeCache_oops_do);
 }
 
 // Adaptor to pass the closures to the strong roots in the VM.
@@ -102,7 +103,11 @@ void G1RootProcessor::process_strong_roots(OopClosure* oops,
   process_java_roots(&closures, NULL, 0);
   process_vm_roots(&closures, NULL, 0);
 
-  _process_strong_tasks.all_tasks_completed(n_workers());
+  // CodeCache is already processed in java roots
+  // refProcessor is not needed since we are inside a safe point
+  _process_strong_tasks.all_tasks_completed(n_workers(),
+                                            G1RP_PS_CodeCache_oops_do,
+                                            G1RP_PS_refProcessor_oops_do);
 }
 
 // Adaptor to pass the closures to all the roots in the VM.
@@ -137,7 +142,8 @@ void G1RootProcessor::process_all_roots(OopClosure* oops,
 
   process_code_cache_roots(blobs, NULL, 0);
 
-  _process_strong_tasks.all_tasks_completed(n_workers());
+  // refProcessor is not needed since we are inside a safe point
+  _process_strong_tasks.all_tasks_completed(n_workers(), G1RP_PS_refProcessor_oops_do);
 }
 
 void G1RootProcessor::process_java_roots(G1RootClosures* closures,
@@ -181,10 +187,10 @@ void G1RootProcessor::process_vm_roots(G1RootClosures* closures,
   OopClosure* strong_roots = closures->strong_oops();
 
 #if INCLUDE_AOT
-  if (UseAOT) {
-    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::AOTCodeRoots, worker_id);
-    if (_process_strong_tasks.try_claim_task(G1RP_PS_aot_oops_do)) {
-      AOTLoader::oops_do(strong_roots);
+  if (_process_strong_tasks.try_claim_task(G1RP_PS_aot_oops_do)) {
+    if (UseAOT) {
+      G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::AOTCodeRoots, worker_id);
+      AOTLoader::oops_do(strong_roots);
     }
   }
 #endif
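
A pattern worth noting here (it recurs in genCollectedHeap.cpp and safepoint.cpp below): a task that is only conditionally executed is still claimed unconditionally, with the feature check moved inside the claim, so the task id never goes unclaimed and trips the new debug-build verification. A schematic before/after, using hypothetical names (feature_enabled, EXAMPLE_TASK and do_feature_work are placeholders, not from this patch):

// Before (sketch): if the feature is off, EXAMPLE_TASK is never claimed,
// which the new assertion in all_tasks_completed() would now flag.
if (feature_enabled && subtasks.try_claim_task(EXAMPLE_TASK)) {
  do_feature_work();
}

// After (sketch): claim first, then decide whether there is any work,
// so the task is always accounted for.
if (subtasks.try_claim_task(EXAMPLE_TASK)) {
  if (feature_enabled) {
    do_feature_work();
  }
}
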
3 changes: 0 additions & 3 deletions src/hotspot/share/gc/g1/g1RootProcessor.hpp
@@ -53,10 +53,7 @@ class G1RootProcessor : public StackObj {
   OopStorageSetStrongParState<false, false> _oop_storage_set_strong_par_state;
 
   enum G1H_process_roots_tasks {
-    G1RP_PS_Universe_oops_do,
-    G1RP_PS_Management_oops_do,
     G1RP_PS_ClassLoaderDataGraph_oops_do,
-    G1RP_PS_jvmti_oops_do,
     G1RP_PS_CodeCache_oops_do,
     AOT_ONLY(G1RP_PS_aot_oops_do COMMA)
     G1RP_PS_refProcessor_oops_do,
4 changes: 0 additions & 4 deletions src/hotspot/share/gc/serial/serialHeap.cpp
@@ -98,10 +98,6 @@ void SerialHeap::young_process_roots(StrongRootsScope* scope,
   process_roots(scope, SO_ScavengeCodeCache, root_closure,
                 cld_closure, cld_closure, &mark_code_closure);
 
-  if (_process_strong_tasks->try_claim_task(GCH_PS_younger_gens)) {
-
-  }
-
   rem_set()->at_younger_refs_iterate();
   old_gen()->younger_refs_iterate(old_gen_closure, scope->n_threads());
 
6 changes: 4 additions & 2 deletions src/hotspot/share/gc/shared/genCollectedHeap.cpp
@@ -819,8 +819,10 @@ void GenCollectedHeap::process_roots(StrongRootsScope* scope,
   Threads::possibly_parallel_oops_do(is_par, strong_roots, roots_from_code_p);
 
 #if INCLUDE_AOT
-  if (UseAOT && _process_strong_tasks->try_claim_task(GCH_PS_aot_oops_do)) {
-    AOTLoader::oops_do(strong_roots);
+  if (_process_strong_tasks->try_claim_task(GCH_PS_aot_oops_do)) {
+    if (UseAOT) {
+      AOTLoader::oops_do(strong_roots);
+    }
   }
 #endif
   if (_process_strong_tasks->try_claim_task(GCH_PS_OopStorageSet_oops_do)) {
1 change: 0 additions & 1 deletion src/hotspot/share/gc/shared/genCollectedHeap.hpp
@@ -109,7 +109,6 @@ class GenCollectedHeap : public CollectedHeap {
     GCH_PS_ClassLoaderDataGraph_oops_do,
     GCH_PS_CodeCache_oops_do,
     AOT_ONLY(GCH_PS_aot_oops_do COMMA)
-    GCH_PS_younger_gens,
     // Leave this one last.
     GCH_PS_NumElements
   };
48 changes: 31 additions & 17 deletions src/hotspot/share/gc/shared/workgroup.cpp
@@ -366,28 +366,32 @@ void SubTasksDone::clear() {
     _tasks[i] = 0;
   }
   _threads_completed = 0;
-#ifdef ASSERT
-  _claimed = 0;
-#endif
 }
 
-bool SubTasksDone::try_claim_task(uint t) {
-  assert(t < _n_tasks, "bad task id.");
-  uint old = _tasks[t];
-  if (old == 0) {
-    old = Atomic::cmpxchg(&_tasks[t], 0u, 1u);
-  }
-  bool res = old == 0;
+void SubTasksDone::all_tasks_completed_impl(uint n_threads,
+                                            uint skipped[],
+                                            size_t skipped_size) {
 #ifdef ASSERT
-  if (res) {
-    assert(_claimed < _n_tasks, "Too many tasks claimed; missing clear?");
-    Atomic::inc(&_claimed);
+  // all non-skipped tasks are claimed
+  for (uint i = 0; i < _n_tasks; ++i) {
+    if (_tasks[i] == 0) {
+      auto is_skipped = false;
+      for (size_t j = 0; j < skipped_size; ++j) {
+        if (i == skipped[j]) {
+          is_skipped = true;
+          break;
+        }
+      }
+      assert(is_skipped, "%d not claimed.", i);
+    }
   }
+  // all skipped tasks are *not* claimed
+  for (size_t i = 0; i < skipped_size; ++i) {
+    auto task_index = skipped[i];
+    assert(task_index < _n_tasks, "Array in range.");
+    assert(_tasks[task_index] == 0, "%d is both claimed and skipped.", task_index);
+  }
 #endif
-  return res;
-}
-
-void SubTasksDone::all_tasks_completed(uint n_threads) {
   uint observed = _threads_completed;
   uint old;
   do {
@@ -401,6 +405,16 @@ void SubTasksDone::all_tasks_completed(uint n_threads) {
   }
 }
 
+bool SubTasksDone::try_claim_task(uint t) {
+  assert(t < _n_tasks, "bad task id.");
+  uint old = _tasks[t];
+  if (old == 0) {
+    old = Atomic::cmpxchg(&_tasks[t], 0u, 1u);
+  }
+  bool res = old == 0;
+  return res;
+}
+
 
 SubTasksDone::~SubTasksDone() {
   FREE_C_HEAP_ARRAY(uint, _tasks);
25 changes: 19 additions & 6 deletions src/hotspot/share/gc/shared/workgroup.hpp
@@ -26,6 +26,8 @@
 #define SHARE_GC_SHARED_WORKGROUP_HPP
 
 #include "memory/allocation.hpp"
+#include "metaprogramming/enableIf.hpp"
+#include "metaprogramming/logical.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/thread.hpp"
 #include "gc/shared/gcId.hpp"
@@ -303,13 +305,12 @@ class SubTasksDone: public CHeapObj<mtInternal> {
   volatile uint* _tasks;
   uint _n_tasks;
   volatile uint _threads_completed;
-#ifdef ASSERT
-  volatile uint _claimed;
-#endif
 
   // Set all tasks to unclaimed.
   void clear();
 
+  void all_tasks_completed_impl(uint n_threads, uint skipped[], size_t skipped_size);
+
   NONCOPYABLE(SubTasksDone);
 
  public:
@@ -326,13 +327,25 @@ class SubTasksDone: public CHeapObj<mtInternal> {
   // to be within the range of "this".
   bool try_claim_task(uint t);
 
-  // The calling thread asserts that it has attempted to claim all the
-  // tasks that it will try to claim. Every thread in the parallel task
+  // The calling thread asserts that it has attempted to claim all the tasks
+  // that it will try to claim. Tasks that are meant to be skipped must be
+  // explicitly passed as extra arguments. Every thread in the parallel task
   // must execute this. (When the last thread does so, the task array is
   // cleared.)
   //
   // n_threads - Number of threads executing the sub-tasks.
-  void all_tasks_completed(uint n_threads);
+  void all_tasks_completed(uint n_threads) {
+    all_tasks_completed_impl(n_threads, nullptr, 0);
+  }
+
+  // Augmented by variadic args, each for a skipped task.
+  template<typename T0, typename... Ts,
+           ENABLE_IF(Conjunction<std::is_same<T0, Ts>...>::value)>
+  void all_tasks_completed(uint n_threads, T0 first_skipped, Ts... more_skipped) {
+    static_assert(std::is_convertible<T0, uint>::value, "not convertible");
+    uint skipped[] = { static_cast<uint>(first_skipped), static_cast<uint>(more_skipped)... };
+    all_tasks_completed_impl(n_threads, skipped, ARRAY_SIZE(skipped));
+  }
 
   // Destructor.
   ~SubTasksDone();
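
For reference, a sketch of how the two overloads above are used, mirroring the G1 call sites earlier in this diff (the G1RP_PS_* values come from g1RootProcessor.hpp; the surrounding function context is elided):

// No skipped tasks: in debug builds this asserts that every task was claimed.
_process_strong_tasks.all_tasks_completed(n_workers());

// Two deliberately skipped tasks: the variadic overload packs them into a
// uint array and forwards to all_tasks_completed_impl(), which asserts that
// everything else was claimed and that the skipped ids were not.
_process_strong_tasks.all_tasks_completed(n_workers(),
                                          G1RP_PS_CodeCache_oops_do,
                                          G1RP_PS_refProcessor_oops_do);

The ENABLE_IF constraint only admits the variadic overload when all skipped arguments share one type, and the static_assert requires that type to be convertible to uint.
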
10 changes: 6 additions & 4 deletions src/hotspot/share/runtime/safepoint.cpp
@@ -546,10 +546,12 @@ class ParallelSPCleanupTask : public AbstractGangTask {
       Universe::heap()->uses_stack_watermark_barrier()) {}
 
   void work(uint worker_id) {
-    if (_do_lazy_roots && _subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_LAZY_ROOT_PROCESSING)) {
-      Tracer t("lazy partial thread root processing");
-      ParallelSPCleanupThreadClosure cl;
-      Threads::threads_do(&cl);
+    if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_LAZY_ROOT_PROCESSING)) {
+      if (_do_lazy_roots) {
+        Tracer t("lazy partial thread root processing");
+        ParallelSPCleanupThreadClosure cl;
+        Threads::threads_do(&cl);
+      }
     }
 
     if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_UPDATE_INLINE_CACHES)) {
