Skip to content
Permalink
Browse files
8273545: Remove Thread::is_GC_task_thread()
Reviewed-by: stefank, coleenp, shade
  • Loading branch information
pliden committed Sep 9, 2021
1 parent dd1209e commit 185eacacdde9de12936520a1cda847f7e541c62f
Showing 17 changed files with 10 additions and 28 deletions.
@@ -1684,7 +1684,6 @@ jint G1CollectedHeap::initialize() {
}

_workers = new WorkGang("GC Thread", ParallelGCThreads,
true /* are_GC_task_threads */,
false /* are_ConcurrentGC_threads */);
if (_workers == NULL) {
return JNI_ENOMEM;
@@ -432,7 +432,7 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
_num_concurrent_workers = ConcGCThreads;
_max_concurrent_workers = _num_concurrent_workers;

_concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, false, true);
_concurrent_workers = new WorkGang("G1 Conc", _max_concurrent_workers, true);
_concurrent_workers->initialize_workers();

if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) {
@@ -120,7 +120,6 @@ class ParallelScavengeHeap : public CollectedHeap {
_old_pool(NULL),
_workers("GC Thread",
ParallelGCThreads,
true /* are_GC_task_threads */,
false /* are_ConcurrentGC_threads */) { }

// For use by VM operations
@@ -2145,7 +2145,7 @@ void PCAdjustPointerClosure::verify_cm(ParCompactionManager* cm) {
if (Thread::current()->is_VM_thread()) {
assert(cm == vmthread_cm, "VM threads should use ParCompactionManager from get_vmthread_cm()");
} else {
assert(Thread::current()->is_GC_task_thread(), "Must be a GC thread");
assert(Thread::current()->is_Worker_thread(), "Must be a GC thread");
assert(cm != vmthread_cm, "GC threads should use ParCompactionManager from gc_thread_compaction_manager()");
}
}
@@ -129,13 +129,12 @@ class GangTaskDispatcher : public CHeapObj<mtGC> {
};
// Definitions of WorkGang methods.

// Construct a gang of `workers` worker threads named `name`.
// Per JDK-8273545 the `are_GC_task_threads` flag is gone; the only
// remaining distinction is whether the workers are ConcurrentGC threads.
// Workers themselves are created lazily via initialize_workers().
WorkGang::WorkGang(const char* name, uint workers, bool are_ConcurrentGC_threads) :
  _workers(NULL),
  _total_workers(workers),
  // With dynamic GC thread counts, start with a single active worker and
  // let the policy scale up later; otherwise activate the full gang.
  _active_workers(UseDynamicNumberOfGCThreads ? 1U : workers),
  _created_workers(0),
  _name(name),
  _are_ConcurrentGC_threads(are_ConcurrentGC_threads),
  _dispatcher(new GangTaskDispatcher())
{ }
@@ -95,7 +95,6 @@ class WorkGang : public CHeapObj<mtInternal> {
const char* _name;

// Initialize only instance data.
const bool _are_GC_task_threads;
const bool _are_ConcurrentGC_threads;

// To get access to the GangTaskDispatcher instance.
@@ -116,14 +115,13 @@ class WorkGang : public CHeapObj<mtInternal> {
GangWorker* allocate_worker(uint which);

public:
WorkGang(const char* name, uint workers, bool are_GC_task_threads, bool are_ConcurrentGC_threads);
WorkGang(const char* name, uint workers, bool are_ConcurrentGC_threads);

~WorkGang();

// Initialize workers in the gang. Return true if initialization succeeded.
void initialize_workers();

bool are_GC_task_threads() const { return _are_GC_task_threads; }
bool are_ConcurrentGC_threads() const { return _are_ConcurrentGC_threads; }

uint total_workers() const { return _total_workers; }
@@ -217,7 +215,6 @@ class GangWorker: public WorkerThread {
GangWorker(WorkGang* gang, uint id);

// Predicate for Thread
bool is_GC_task_thread() const override { return gang()->are_GC_task_threads(); }
bool is_ConcurrentGC_thread() const override { return gang()->are_ConcurrentGC_threads(); }

// Printing
@@ -496,7 +496,6 @@ ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :

_max_workers = MAX2(_max_workers, 1U);
_workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
/* are_GC_task_threads */ true,
/* are_ConcurrentGC_threads */ true);
if (_workers == NULL) {
vm_exit_during_initialization("Failed necessary allocation.");
@@ -507,7 +506,6 @@ ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
if (ParallelGCThreads > 1) {
_safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
ParallelGCThreads,
/* are_GC_task_threads */ false,
/* are_ConcurrentGC_threads */ false);
_safepoint_workers->initialize_workers();
}
@@ -57,9 +57,8 @@ class ShenandoahWorkGang : public WorkGang {
public:
// Construct a Shenandoah work gang; simply forwards to WorkGang
// (post JDK-8273545 signature, without `are_GC_task_threads`) and
// defers GCLAB initialization until explicitly enabled.
ShenandoahWorkGang(const char* name,
                   uint workers,
                   bool are_ConcurrentGC_threads) :
  WorkGang(name, workers, are_ConcurrentGC_threads), _initialize_gclab(false) {
}

// Create a GC worker and install it into the work gang.
@@ -80,8 +80,7 @@ void ZCollectedHeap::initialize_serviceability() {
class ZStopConcurrentGCThreadClosure : public ThreadClosure {
public:
virtual void do_thread(Thread* thread) {
if (thread->is_ConcurrentGC_thread() &&
!thread->is_GC_task_thread()) {
if (thread->is_ConcurrentGC_thread() && !thread->is_Worker_thread()) {
static_cast<ConcurrentGCThread*>(thread)->stop();
}
}
@@ -60,7 +60,6 @@ class ZRuntimeWorkersInitializeTask : public AbstractGangTask {
ZRuntimeWorkers::ZRuntimeWorkers() :
_workers("RuntimeWorker",
ParallelGCThreads,
false /* are_GC_task_threads */,
false /* are_ConcurrentGC_threads */) {

log_info_p(gc, init)("Runtime Workers: %u", _workers.total_workers());
@@ -64,7 +64,6 @@ class ZWorkersInitializeTask : public AbstractGangTask {
ZWorkers::ZWorkers() :
_workers("ZWorker",
UseDynamicNumberOfGCThreads ? ConcGCThreads : MAX2(ConcGCThreads, ParallelGCThreads),
true /* are_GC_task_threads */,
true /* are_ConcurrentGC_threads */) {

if (UseDynamicNumberOfGCThreads) {
@@ -334,10 +334,6 @@ class Thread: public ThreadShadow {
virtual bool is_monitor_deflation_thread() const { return false; }
virtual bool is_hidden_from_external_view() const { return false; }
virtual bool is_jvmti_agent_thread() const { return false; }
// True iff the thread can perform GC operations at a safepoint.
// Generally will be true only of VM thread and parallel GC WorkGang
// threads.
virtual bool is_GC_task_thread() const { return false; }
virtual bool is_Watcher_thread() const { return false; }
virtual bool is_ConcurrentGC_thread() const { return false; }
virtual bool is_Named_thread() const { return false; }
@@ -33,7 +33,7 @@ class G1BatchedGangTaskWorkers : AllStatic {
static WorkGang* _work_gang;
static WorkGang* work_gang() {
if (_work_gang == nullptr) {
_work_gang = new WorkGang("G1 Small Workers", MaxWorkers, false, false);
_work_gang = new WorkGang("G1 Small Workers", MaxWorkers, false);
_work_gang->initialize_workers();
_work_gang->update_active_workers(MaxWorkers);
}
@@ -50,7 +50,7 @@ class G1CardSetTest : public ::testing::Test {
static WorkGang* workers() {
if (_workers == NULL) {
_max_workers = os::processor_count();
_workers = new WorkGang("G1CardSetTest Work Gang", _max_workers, false, false);
_workers = new WorkGang("G1CardSetTest Work Gang", _max_workers, false);
_workers->initialize_workers();
_workers->update_active_workers(_max_workers);
}
@@ -35,7 +35,7 @@ class G1MapperWorkers : AllStatic {
static WorkGang* _work_gang;
static WorkGang* work_gang() {
if (_work_gang == NULL) {
_work_gang = new WorkGang("G1 Small Workers", MaxWorkers, false, false);
_work_gang = new WorkGang("G1 Small Workers", MaxWorkers, false);
_work_gang->initialize_workers();
_work_gang->update_active_workers(MaxWorkers);
}
@@ -117,4 +117,4 @@ TEST_VM(G1RegionToSpaceMapper, largeStressAdjacent) {

G1TestCommitUncommit task(large_mapper);
G1MapperWorkers::run_task(&task);
}
}
@@ -880,7 +880,6 @@ WorkGang* OopStorageTestParIteration::workers() {
if (_workers == NULL) {
_workers = new WorkGang("OopStorageTestParIteration workers",
_max_workers,
false,
false);
_workers->initialize_workers();
_workers->update_active_workers(_max_workers);
@@ -78,7 +78,6 @@ WorkGang* OopStorageParIterPerf::workers() const {
if (_workers == NULL) {
WorkGang* wg = new WorkGang("OopStorageParIterPerf workers",
_num_workers,
false,
false);
wg->initialize_workers();
wg->update_active_workers(_num_workers);

1 comment on commit 185eaca

@openjdk-notifier
Copy link

@openjdk-notifier openjdk-notifier bot commented on 185eaca Sep 9, 2021

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please sign in to comment.