8275035: Clean up worker thread infrastructure
Reviewed-by: stefank, ayang
pliden committed Oct 14, 2021
1 parent 3b0b6ad commit 54b887076612c0eaa410a849178f8ba0c4ed3eeb
Showing 103 changed files with 1,096 additions and 1,298 deletions.
@@ -27,6 +27,7 @@
#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "logging/log.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"

@@ -29,6 +29,7 @@
#include "gc/epsilon/epsilonThreadLocalData.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceUtils.hpp"
@@ -24,7 +24,7 @@

#include "precompiled.hpp"

#include "gc/g1/g1BatchedGangTask.hpp"
#include "gc/g1/g1BatchedTask.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1GCParPhaseTimesTracker.hpp"
#include "runtime/atomic.hpp"
@@ -40,30 +40,30 @@ const char* G1AbstractSubTask::name() const {
return g1h->phase_times()->phase_name(_tag);
}

-bool G1BatchedGangTask::try_claim_serial_task(int& task) {
+bool G1BatchedTask::try_claim_serial_task(int& task) {
task = Atomic::fetch_and_add(&_num_serial_tasks_done, 1);
return task < _serial_tasks.length();
}

-void G1BatchedGangTask::add_serial_task(G1AbstractSubTask* task) {
+void G1BatchedTask::add_serial_task(G1AbstractSubTask* task) {
assert(task != nullptr, "must be");
_serial_tasks.push(task);
}

-void G1BatchedGangTask::add_parallel_task(G1AbstractSubTask* task) {
+void G1BatchedTask::add_parallel_task(G1AbstractSubTask* task) {
assert(task != nullptr, "must be");
_parallel_tasks.push(task);
}

-G1BatchedGangTask::G1BatchedGangTask(const char* name, G1GCPhaseTimes* phase_times) :
-AbstractGangTask(name),
+G1BatchedTask::G1BatchedTask(const char* name, G1GCPhaseTimes* phase_times) :
+WorkerTask(name),
_num_serial_tasks_done(0),
_phase_times(phase_times),
_serial_tasks(),
_parallel_tasks() {
}

-uint G1BatchedGangTask::num_workers_estimate() const {
+uint G1BatchedTask::num_workers_estimate() const {
double sum = 0.0;
for (G1AbstractSubTask* task : _serial_tasks) {
sum += task->worker_cost();
@@ -74,7 +74,7 @@ uint G1BatchedGangTask::num_workers_estimate() const {
return ceil(sum);
}

-void G1BatchedGangTask::set_max_workers(uint max_workers) {
+void G1BatchedTask::set_max_workers(uint max_workers) {
for (G1AbstractSubTask* task : _serial_tasks) {
task->set_max_workers(max_workers);
}
@@ -83,7 +83,7 @@ void G1BatchedGangTask::set_max_workers(uint max_workers) {
}
}

-void G1BatchedGangTask::work(uint worker_id) {
+void G1BatchedTask::work(uint worker_id) {
int t = 0;
while (try_claim_serial_task(t)) {
G1AbstractSubTask* task = _serial_tasks.at(t);
@@ -96,7 +96,7 @@ void G1BatchedGangTask::work(uint worker_id) {
}
}

-G1BatchedGangTask::~G1BatchedGangTask() {
+G1BatchedTask::~G1BatchedTask() {
assert(Atomic::load(&_num_serial_tasks_done) >= _serial_tasks.length(),
"Only %d tasks of %d claimed", Atomic::load(&_num_serial_tasks_done), _serial_tasks.length());

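The serial-task claiming shown above is an atomic fetch-and-add counter: each worker grabs the next unclaimed index, and an index at or past the end of the list means nothing is left. The following standalone sketch (not part of this commit; it uses std::atomic and std::thread in place of HotSpot's Atomic and worker infrastructure) illustrates the same claim pattern:

// Illustrative sketch only -- not part of this commit.
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

static std::atomic<int> num_serial_tasks_done{0};
static std::vector<const char*> serial_tasks{"task A", "task B", "task C"};

// Mirrors G1BatchedTask::try_claim_serial_task: fetch-and-add hands each index
// to exactly one claimant; an index past the end means all serial tasks are taken.
static bool try_claim_serial_task(int& task) {
  task = num_serial_tasks_done.fetch_add(1);
  return task < (int)serial_tasks.size();
}

// Mirrors the serial-task loop in G1BatchedTask::work(worker_id).
static void worker(unsigned worker_id) {
  int t = 0;
  while (try_claim_serial_task(t)) {
    std::printf("worker %u runs serial %s\n", worker_id, serial_tasks[t]);
  }
}

int main() {
  std::thread a(worker, 0u);
  std::thread b(worker, 1u);
  a.join();
  b.join();
  return 0;
}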
@@ -22,18 +22,18 @@
*
*/

-#ifndef SHARE_GC_G1_G1BATCHEDGANGTASK_HPP
-#define SHARE_GC_G1_G1BATCHEDGANGTASK_HPP
+#ifndef SHARE_GC_G1_G1BATCHEDTASK_HPP
+#define SHARE_GC_G1_G1BATCHEDTASK_HPP

#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/shared/workgroup.hpp"
#include "gc/shared/workerThread.hpp"
#include "memory/allocation.hpp"

template <typename E, MEMFLAGS F>
class GrowableArrayCHeap;

// G1AbstractSubTask represents a task to be performed either within a
-// G1BatchedGangTask running on a single worker ("serially") or multiple workers
+// G1BatchedTask running on a single worker ("serially") or multiple workers
// ("in parallel"). A G1AbstractSubTask is always associated with a phase tag
// that is used to automatically store timing information.
//
@@ -46,7 +46,7 @@ class GrowableArrayCHeap;
// splits across the heap in some way. Current examples would be clearing the
// card table.
//
-// See G1BatchedGangTask for information on execution.
+// See G1BatchedTask for information on execution.
class G1AbstractSubTask : public CHeapObj<mtGC> {
G1GCPhaseTimes::GCParPhases _tag;

@@ -65,10 +65,10 @@ class G1AbstractSubTask : public CHeapObj<mtGC> {

// How many workers (threads) would this task be able to keep busy for at least
// as long as to amortize worker startup costs.
-// Called by G1BatchedGangTask to determine total number of workers.
+// Called by G1BatchedTask to determine total number of workers.
virtual double worker_cost() const = 0;

-// Called by G1BatchedGangTask to provide information about the the maximum
+// Called by G1BatchedTask to provide information about the maximum
// number of workers for all subtasks after it has been determined.
virtual void set_max_workers(uint max_workers) { }

@@ -81,7 +81,7 @@ class G1AbstractSubTask : public CHeapObj<mtGC> {
const char* name() const;
};

-// G1BatchedGangTask runs a set of G1AbstractSubTask using a work gang.
+// G1BatchedTask runs a set of G1AbstractSubTask using workers.
//
// Subclasses of this class add their G1AbstractSubTasks into either the list
// of "serial" or the list of "parallel" tasks. They are supposed to be the owners
@@ -94,7 +94,7 @@ class G1AbstractSubTask : public CHeapObj<mtGC> {
// add_parallel_task(new SomeOtherSubTask());
// [...]
//
-// During execution in the work gang, this class will make sure that the "serial"
+// During execution in workers, this class will make sure that the "serial"
// tasks are executed by a single worker exactly once, but different "serial"
// tasks may be executed in parallel using different workers. "Parallel" tasks'
// do_work() method may be called by different workers passing a different
@@ -119,13 +119,13 @@ class G1AbstractSubTask : public CHeapObj<mtGC> {
// 4) T::do_work() // potentially in parallel with any other registered G1AbstractSubTask
// 5) ~T()
//
-class G1BatchedGangTask : public AbstractGangTask {
+class G1BatchedTask : public WorkerTask {
volatile int _num_serial_tasks_done;
G1GCPhaseTimes* _phase_times;

bool try_claim_serial_task(int& task);

-NONCOPYABLE(G1BatchedGangTask);
+NONCOPYABLE(G1BatchedTask);

GrowableArrayCHeap<G1AbstractSubTask*, mtGC> _serial_tasks;
GrowableArrayCHeap<G1AbstractSubTask*, mtGC> _parallel_tasks;
@@ -134,19 +134,19 @@ class G1BatchedGangTask : public AbstractGangTask {
void add_serial_task(G1AbstractSubTask* task);
void add_parallel_task(G1AbstractSubTask* task);

-G1BatchedGangTask(const char* name, G1GCPhaseTimes* phase_times);
+G1BatchedTask(const char* name, G1GCPhaseTimes* phase_times);

public:
void work(uint worker_id) override;

-// How many workers can this gang task keep busy and should be started for
+// How many workers can this task keep busy and should be started for
// "optimal" performance.
uint num_workers_estimate() const;
// Informs the G1AbstractSubTasks about that we will start execution with the
// given number of workers.
void set_max_workers(uint max_workers);

-~G1BatchedGangTask();
+~G1BatchedTask();
};

-#endif // SHARE_GC_G1_G1BATCHEDGANGTASK_HPP
+#endif // SHARE_GC_G1_G1BATCHEDTASK_HPP
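To make the renamed API concrete, here is a minimal sketch of the subclassing pattern described by the header comments above. The class names, the tag parameter, and the do_work() body are hypothetical, and the sketch assumes the G1AbstractSubTask constructor takes the phase tag; it is not part of this commit:

// Illustrative sketch only -- hypothetical names, not part of this commit.
#include "gc/g1/g1BatchedTask.hpp"

class HypotheticalSubTask : public G1AbstractSubTask {
public:
  explicit HypotheticalSubTask(G1GCPhaseTimes::GCParPhases tag) :
    G1AbstractSubTask(tag) { }

  // Rough measure of how many workers this subtask can keep busy; used by
  // G1BatchedTask::num_workers_estimate().
  double worker_cost() const override { return 1.0; }

  // Step 4 of the protocol above: a parallel subtask's do_work() may be called
  // by several workers, each with its own worker_id.
  void do_work(uint worker_id) override { /* process this worker's share */ }
};

class HypotheticalBatchedTask : public G1BatchedTask {
public:
  HypotheticalBatchedTask(G1GCPhaseTimes* phase_times, G1GCPhaseTimes::GCParPhases tag) :
    G1BatchedTask("Hypothetical Batched Task", phase_times) {
    // Registered subtasks go through the lifecycle documented above,
    // ending in their destructor (step 5).
    add_parallel_task(new HypotheticalSubTask(tag));
  }
};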
@@ -32,7 +32,7 @@
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BatchedGangTask.hpp"
#include "gc/g1/g1BatchedTask.hpp"
#include "gc/g1/g1CardSetFreeMemoryTask.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
@@ -138,7 +138,7 @@ void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_region
reset_from_card_cache(start_idx, num_regions);
}

-void G1CollectedHeap::run_batch_task(G1BatchedGangTask* cl) {
+void G1CollectedHeap::run_batch_task(G1BatchedTask* cl) {
uint num_workers = MAX2(1u, MIN2(cl->num_workers_estimate(), workers()->active_workers()));
cl->set_max_workers(num_workers);
workers()->run_task(cl, num_workers);
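run_batch_task() is the driver for a G1BatchedTask: it clamps the task's own worker estimate to the range [1, active_workers], announces the final count to the subtasks via set_max_workers(), and hands the task to the workers. A hedged usage sketch, reusing the hypothetical classes sketched after the g1BatchedTask.hpp hunk above (not part of this commit):

// Illustrative sketch only -- not part of this commit.
static void run_hypothetical_phase(G1GCPhaseTimes::GCParPhases tag) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HypotheticalBatchedTask cl(g1h->phase_times(), tag);
  // Computes num_workers = MAX2(1, MIN2(cl.num_workers_estimate(), active_workers)),
  // calls cl.set_max_workers(num_workers), then runs cl on the WorkerThreads.
  g1h->run_batch_task(&cl);
}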
@@ -1273,7 +1273,7 @@ HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
return NULL;
}

-bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, double* expand_time_ms) {
+bool G1CollectedHeap::expand(size_t expand_bytes, WorkerThreads* pretouch_workers, double* expand_time_ms) {
size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
aligned_expand_bytes = align_up(aligned_expand_bytes,
HeapRegion::GrainBytes);
@@ -1682,7 +1682,7 @@ jint G1CollectedHeap::initialize() {
_humongous_reclaim_candidates.initialize(reserved(), granularity);
}

-_workers = new WorkGang("GC Thread", ParallelGCThreads);
+_workers = new WorkerThreads("GC Thread", ParallelGCThreads);
if (_workers == NULL) {
return JNI_ENOMEM;
}
@@ -64,7 +64,7 @@
// Forward declarations
class G1Allocator;
class G1ArchiveAllocator;
-class G1BatchedGangTask;
+class G1BatchedTask;
class G1CardTableEntryClosure;
class G1ConcurrentMark;
class G1ConcurrentMarkThread;
@@ -83,7 +83,7 @@ class MemoryPool;
class nmethod;
class ReferenceProcessor;
class STWGCTimer;
-class WorkGang;
+class WorkerThreads;

typedef OverflowTaskQueue<ScannerTask, mtGC> G1ScannerTasksQueue;
typedef GenericTaskQueueSet<G1ScannerTasksQueue, mtGC> G1ScannerTasksQueueSet;
@@ -145,7 +145,7 @@ class G1CollectedHeap : public CollectedHeap {
G1ServiceTask* _periodic_gc_task;
G1CardSetFreeMemoryTask* _free_card_set_memory_task;

-WorkGang* _workers;
+WorkerThreads* _workers;
G1CardTable* _card_table;

Ticks _collection_pause_end;
@@ -538,10 +538,10 @@ class G1CollectedHeap : public CollectedHeap {

G1ServiceThread* service_thread() const { return _service_thread; }

-WorkGang* workers() const { return _workers; }
+WorkerThreads* workers() const { return _workers; }

-// Run the given batch task using the work gang.
-void run_batch_task(G1BatchedGangTask* cl);
+// Run the given batch task using the workers.
+void run_batch_task(G1BatchedTask* cl);

G1Allocator* allocator() {
return _allocator;
@@ -572,7 +572,7 @@ class G1CollectedHeap : public CollectedHeap {
// Returns true if the heap was expanded by the requested amount;
// false otherwise.
// (Rounds up to a HeapRegion boundary.)
-bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);
+bool expand(size_t expand_bytes, WorkerThreads* pretouch_workers = NULL, double* expand_time_ms = NULL);
bool expand_single_region(uint node_index);

// Returns the PLAB statistics for a given destination.
@@ -1317,7 +1317,7 @@ class G1CollectedHeap : public CollectedHeap {
// WhiteBox testing support.
virtual bool supports_concurrent_gc_breakpoints() const;

-virtual WorkGang* safepoint_workers() { return _workers; }
+virtual WorkerThreads* safepoint_workers() { return _workers; }

virtual bool is_archived_object(oop object) const;

@@ -26,7 +26,7 @@
#define SHARE_GC_G1_G1COLLECTIONSETCANDIDATES_HPP

#include "gc/g1/g1CollectionSetCandidates.hpp"
#include "gc/shared/workgroup.hpp"
#include "gc/shared/workerThread.hpp"
#include "memory/allocation.hpp"
#include "runtime/globals.hpp"

@@ -69,7 +69,7 @@ static int order_regions(HeapRegion* hr1, HeapRegion* hr2) {
// put them into some work area unsorted. At the end the array is sorted and
// copied into the G1CollectionSetCandidates instance; the caller will be the new
// owner of this object.
-class G1BuildCandidateRegionsTask : public AbstractGangTask {
+class G1BuildCandidateRegionsTask : public WorkerTask {

// Work area for building the set of collection set candidates. Contains references
// to heap regions with their GC efficiencies calculated. To reduce contention
@@ -223,7 +223,7 @@ class G1BuildCandidateRegionsTask : public AbstractGangTask {

public:
G1BuildCandidateRegionsTask(uint max_num_regions, uint chunk_size, uint num_workers) :
AbstractGangTask("G1 Build Candidate Regions"),
WorkerTask("G1 Build Candidate Regions"),
_g1h(G1CollectedHeap::heap()),
_hrclaimer(num_workers),
_num_regions_added(0),
@@ -311,7 +311,7 @@ void G1CollectionSetChooser::prune(G1CollectionSetCandidates* candidates) {
}
}

-G1CollectionSetCandidates* G1CollectionSetChooser::build(WorkGang* workers, uint max_num_regions) {
+G1CollectionSetCandidates* G1CollectionSetChooser::build(WorkerThreads* workers, uint max_num_regions) {
uint num_workers = workers->active_workers();
uint chunk_size = calculate_work_chunk_size(num_workers, max_num_regions);

@@ -30,7 +30,7 @@
#include "runtime/globals.hpp"

class G1CollectionSetCandidates;
-class WorkGang;
+class WorkerThreads;

// Helper class to calculate collection set candidates, and containing some related
// methods.
@@ -59,7 +59,7 @@ class G1CollectionSetChooser : public AllStatic {

// Build and return set of collection set candidates sorted by decreasing gc
// efficiency.
-static G1CollectionSetCandidates* build(WorkGang* workers, uint max_num_regions);
+static G1CollectionSetCandidates* build(WorkerThreads* workers, uint max_num_regions);
};

#endif // SHARE_GC_G1_G1COLLECTIONSETCHOOSER_HPP
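The same substitution recurs in every file of the change: task classes derive from WorkerTask instead of AbstractGangTask, and they are run on a WorkerThreads instance instead of a WorkGang. A minimal sketch of that shape, assuming the WorkerTask/WorkerThreads API from gc/shared/workerThread.hpp as used above (the task name and body are hypothetical and not part of this commit):

// Illustrative sketch only -- not part of this commit.
#include "gc/shared/workerThread.hpp"

class HypotheticalRegionScanTask : public WorkerTask {
public:
  HypotheticalRegionScanTask() : WorkerTask("Hypothetical Region Scan") { }

  // Invoked once per started worker, each with its own worker_id.
  void work(uint worker_id) override {
    // claim and process this worker's share of the regions
  }
};

static void run_region_scan(WorkerThreads* workers) {
  HypotheticalRegionScanTask task;
  // Run on the currently active number of workers, as
  // G1CollectionSetChooser::build() does with its G1BuildCandidateRegionsTask.
  workers->run_task(&task, workers->active_workers());
}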
