Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

8264788: Make SequentialSubTasksDone use-once #3372

Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
5 changes: 1 addition & 4 deletions src/hotspot/share/gc/parallel/psParallelCompact.cpp
Expand Up @@ -2055,18 +2055,15 @@ class MarkFromRootsTask : public AbstractGangTask {
MarkFromRootsTask(uint active_workers) :
AbstractGangTask("MarkFromRootsTask"),
_strong_roots_scope(active_workers),
_subtasks(),
_subtasks(ParallelRootType::sentinel),
_terminator(active_workers, ParCompactionManager::oop_task_queues()),
_active_workers(active_workers) {
_subtasks.set_n_threads(active_workers);
_subtasks.set_n_tasks(ParallelRootType::sentinel);
}

virtual void work(uint worker_id) {
for (uint task = 0; _subtasks.try_claim_task(task); /*empty*/ ) {
mark_from_roots_work(static_cast<ParallelRootType::Value>(task), worker_id);
}
_subtasks.all_tasks_completed();

PCAddThreadRootsMarkingTaskClosure closure(worker_id);
Threads::possibly_parallel_threads_do(true /*parallel */, &closure);
Expand Down
5 changes: 1 addition & 4 deletions src/hotspot/share/gc/parallel/psScavenge.cpp
Expand Up @@ -303,14 +303,12 @@ class ScavengeRootsTask : public AbstractGangTask {
bool is_empty) :
AbstractGangTask("ScavengeRootsTask"),
_strong_roots_scope(active_workers),
_subtasks(),
_subtasks(ParallelRootType::sentinel),
_old_gen(old_gen),
_gen_top(gen_top),
_active_workers(active_workers),
_is_empty(is_empty),
_terminator(active_workers, PSPromotionManager::vm_thread_promotion_manager()->stack_array_depth()) {
_subtasks.set_n_threads(active_workers);
_subtasks.set_n_tasks(ParallelRootType::sentinel);
}

virtual void work(uint worker_id) {
Expand Down Expand Up @@ -345,7 +343,6 @@ class ScavengeRootsTask : public AbstractGangTask {
for (uint root_type = 0; _subtasks.try_claim_task(root_type); /* empty */ ) {
scavenge_roots_work(static_cast<ParallelRootType::Value>(root_type), worker_id);
}
_subtasks.all_tasks_completed();

PSThreadRootsTaskClosure closure(worker_id);
Threads::possibly_parallel_threads_do(true /*parallel */, &closure);
Expand Down
13 changes: 5 additions & 8 deletions src/hotspot/share/gc/shared/preservedMarks.cpp
Expand Up @@ -104,18 +104,15 @@ class ParRestoreTask : public AbstractGangTask {
while (_sub_tasks.try_claim_task(/* reference */ task_id)) {
_preserved_marks_set->get(task_id)->restore_and_increment(_total_size_addr);
}
_sub_tasks.all_tasks_completed();
}

ParRestoreTask(uint worker_num,
PreservedMarksSet* preserved_marks_set,
ParRestoreTask(PreservedMarksSet* preserved_marks_set,
volatile size_t* total_size_addr)
: AbstractGangTask("Parallel Preserved Mark Restoration"),
_preserved_marks_set(preserved_marks_set),
_sub_tasks(preserved_marks_set->num()),
_total_size_addr(total_size_addr) {
_sub_tasks.set_n_threads(worker_num);
_sub_tasks.set_n_tasks(preserved_marks_set->num());
}
}
};

void PreservedMarksSet::restore(WorkGang* workers) {
Expand All @@ -127,15 +124,15 @@ void PreservedMarksSet::restore(WorkGang* workers) {
for (uint i = 0; i < _num; i += 1) {
total_size_before += get(i)->size();
}
#endif // def ASSERT
#endif // ASSERT

if (workers == NULL) {
for (uint i = 0; i < num(); i += 1) {
total_size += get(i)->size();
get(i)->restore();
}
} else {
ParRestoreTask task(workers->active_workers(), this, &total_size);
ParRestoreTask task(this, &total_size);
workers->run_task(&task);
}

Expand Down
37 changes: 4 additions & 33 deletions src/hotspot/share/gc/shared/workgroup.cpp
Expand Up @@ -399,39 +399,10 @@ SubTasksDone::~SubTasksDone() {

// *** SequentialSubTasksDone

void SequentialSubTasksDone::clear() {
_n_tasks = _n_claimed = 0;
_n_threads = _n_completed = 0;
}

bool SequentialSubTasksDone::valid() {
return _n_threads > 0;
}

bool SequentialSubTasksDone::try_claim_task(uint& t) {
t = _n_claimed;
while (t < _n_tasks) {
uint res = Atomic::cmpxchg(&_n_claimed, t, t+1);
if (res == t) {
return true;
}
t = res;
}
return false;
}

bool SequentialSubTasksDone::all_tasks_completed() {
uint complete = _n_completed;
while (true) {
uint res = Atomic::cmpxchg(&_n_completed, complete, complete+1);
if (res == complete) {
break;
}
complete = res;
}
if (complete+1 == _n_threads) {
clear();
return true;
t = _num_claimed;
if (t < _num_tasks) {
t = Atomic::add(&_num_claimed, 1u) - 1;
}
return false;
return t < _num_tasks;
}
54 changes: 12 additions & 42 deletions src/hotspot/share/gc/shared/workgroup.hpp
Expand Up @@ -346,57 +346,27 @@ class SubTasksDone: public CHeapObj<mtInternal> {
// sub-tasks from a set (possibly an enumeration), claim sub-tasks
// in sequential order. This is ideal for claiming dynamically
// partitioned tasks (like striding in the parallel remembered
// set scanning). Note that unlike the above class this is
// a stack object - is there any reason for it not to be?
// set scanning).

class SequentialSubTasksDone : public StackObj {
protected:
uint _n_tasks; // Total number of tasks available.
volatile uint _n_claimed; // Number of tasks claimed.
// _n_threads is used to determine when a sub task is done.
// See comments on SubTasksDone::_n_threads
uint _n_threads; // Total number of parallel threads.
volatile uint _n_completed; // Number of completed threads.
class SequentialSubTasksDone : public CHeapObj<mtInternal> {

uint _num_tasks; // Total number of tasks available.
volatile uint _num_claimed; // Number of tasks claimed.

void clear();
NONCOPYABLE(SequentialSubTasksDone);

public:
SequentialSubTasksDone() {
clear();
SequentialSubTasksDone(uint num_tasks) : _num_tasks(num_tasks), _num_claimed(0) { }
~SequentialSubTasksDone() {
// Claiming may try to claim more tasks than there are.
assert(_num_claimed >= _num_tasks, "Claimed %u tasks of %u", _num_claimed, _num_tasks);
}
~SequentialSubTasksDone() {}

// True iff the object is in a valid state.
bool valid();

// number of tasks
uint n_tasks() const { return _n_tasks; }

// Get/set the number of parallel threads doing the tasks to t.
// Should be called before the task starts but it is safe
// to call this once a task is running provided that all
// threads agree on the number of threads.
uint n_threads() { return _n_threads; }
void set_n_threads(uint t) { _n_threads = t; }

// Set the number of tasks to be claimed to t. As above,
// should be called before the tasks start but it is safe
// to call this once a task is running provided all threads
// agree on the number of tasks.
void set_n_tasks(uint t) { _n_tasks = t; }

// Attempt to claim the next unclaimed task in the sequence,
// returning true if successful, with t set to the index of the
// claimed task. Returns false if there are no more unclaimed tasks
// in the sequence.
// claimed task. Returns false if there are no more unclaimed tasks
// in the sequence. In this case t is undefined.
bool try_claim_task(uint& t);

// The calling thread asserts that it has attempted to claim
// all the tasks it possibly can in the sequence. Every thread
claiming tasks must promise to call this. Returns true if this
// is the last thread to complete so that the thread can perform
// cleanup if necessary.
bool all_tasks_completed();
};

#endif // SHARE_GC_SHARED_WORKGROUP_HPP