Skip to content
Permalink
Browse files
8260574: Remove parallel constructs in GenCollectedHeap::process_roots
Reviewed-by: tschatzl, kbarrett
  • Loading branch information
albertnetymk authored and Thomas Schatzl committed Jan 31, 2021
1 parent 0da9cad commit 8a9004da9b30c2ea3c616a0ef29be66f546faa78
@@ -575,13 +575,9 @@ void DefNewGeneration::collect(bool full,
"save marks have not been newly set.");

{
// DefNew needs to run with n_threads == 0, to make sure the serial
// version of the card table scanning code is used.
// See: CardTableRS::non_clean_card_iterate_possibly_parallel.
StrongRootsScope srs(0);

heap->young_process_roots(&srs,
&scan_closure,
heap->young_process_roots(&scan_closure,
&younger_gen_closure,
&cld_scan_closure);
}
@@ -182,10 +182,9 @@ void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
ClassLoaderDataGraph::clear_claimed_marks();

{
StrongRootsScope srs(1);
StrongRootsScope srs(0);

gch->full_process_roots(&srs,
false, // not the adjust phase
gch->full_process_roots(false, // not the adjust phase
GenCollectedHeap::SO_None,
ClassUnloading, // only strong roots if ClassUnloading
// is enabled
@@ -272,10 +271,9 @@ void GenMarkSweep::mark_sweep_phase3() {
ClassLoaderDataGraph::clear_claimed_marks();

{
StrongRootsScope srs(1);
StrongRootsScope srs(0);

gch->full_process_roots(&srs,
true, // this is the adjust phase
gch->full_process_roots(true, // this is the adjust phase
GenCollectedHeap::SO_AllCodeCache,
false, // all roots
&adjust_pointer_closure,
@@ -89,17 +89,14 @@ GrowableArray<MemoryPool*> SerialHeap::memory_pools() {
return memory_pools;
}

void SerialHeap::young_process_roots(StrongRootsScope* scope,
OopIterateClosure* root_closure,
void SerialHeap::young_process_roots(OopIterateClosure* root_closure,
OopIterateClosure* old_gen_closure,
CLDClosure* cld_closure) {
MarkingCodeBlobClosure mark_code_closure(root_closure, CodeBlobToOopClosure::FixRelocations);

process_roots(scope, SO_ScavengeCodeCache, root_closure,
process_roots(SO_ScavengeCodeCache, root_closure,
cld_closure, cld_closure, &mark_code_closure);

rem_set()->at_younger_refs_iterate();
old_gen()->younger_refs_iterate(old_gen_closure, scope->n_threads());

_process_strong_tasks->all_tasks_completed(scope->n_threads());
old_gen()->younger_refs_iterate(old_gen_closure, 0);
}
@@ -77,8 +77,7 @@ class SerialHeap : public GenCollectedHeap {
void oop_since_save_marks_iterate(OopClosureType1* cur,
OopClosureType2* older);

void young_process_roots(StrongRootsScope* scope,
OopIterateClosure* root_closure,
void young_process_roots(OopIterateClosure* root_closure,
OopIterateClosure* old_gen_closure,
CLDClosure* cld_closure);
};
@@ -99,7 +99,6 @@ GenCollectedHeap::GenCollectedHeap(Generation::Name young,
_gc_policy_counters(new GCPolicyCounters(policy_counters_name, 2, 2)),
_incremental_collection_failed(false),
_full_collections_completed(0),
_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
_young_manager(NULL),
_old_manager(NULL) {
}
@@ -546,9 +545,7 @@ void GenCollectedHeap::do_collection(bool full,
DEBUG_ONLY(Thread* my_thread = Thread::current();)

assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(my_thread->is_VM_thread() ||
my_thread->is_ConcurrentGC_thread(),
"incorrect thread type capability");
assert(my_thread->is_VM_thread(), "only VM thread");
assert(Heap_lock->is_locked(),
"the requesting thread should have the Heap_lock");
guarantee(!is_gc_active(), "collection is not reentrant");
@@ -795,72 +792,56 @@ class AssertNonScavengableClosure: public OopClosure {
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif

void GenCollectedHeap::process_roots(StrongRootsScope* scope,
ScanningOption so,
void GenCollectedHeap::process_roots(ScanningOption so,
OopClosure* strong_roots,
CLDClosure* strong_cld_closure,
CLDClosure* weak_cld_closure,
CodeBlobToOopClosure* code_roots) {
// General roots.
assert(code_roots != NULL, "code root closure should always be set");
// _n_termination for _process_strong_tasks should be set up stream
// in a method not running in a GC worker. Otherwise the GC worker
// could be trying to change the termination condition while the task
// is executing in another GC worker.

if (_process_strong_tasks->try_claim_task(GCH_PS_ClassLoaderDataGraph_oops_do)) {
ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
}
ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);

// Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
CodeBlobToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;

bool is_par = scope->n_threads() > 1;
Threads::possibly_parallel_oops_do(is_par, strong_roots, roots_from_code_p);
Threads::oops_do(strong_roots, roots_from_code_p);

#if INCLUDE_AOT
if (_process_strong_tasks->try_claim_task(GCH_PS_aot_oops_do)) {
if (UseAOT) {
AOTLoader::oops_do(strong_roots);
}
if (UseAOT) {
AOTLoader::oops_do(strong_roots);
}
#endif
if (_process_strong_tasks->try_claim_task(GCH_PS_OopStorageSet_oops_do)) {
OopStorageSet::strong_oops_do(strong_roots);
}
OopStorageSet::strong_oops_do(strong_roots);

if (_process_strong_tasks->try_claim_task(GCH_PS_CodeCache_oops_do)) {
if (so & SO_ScavengeCodeCache) {
assert(code_roots != NULL, "must supply closure for code cache");
if (so & SO_ScavengeCodeCache) {
assert(code_roots != NULL, "must supply closure for code cache");

// We only visit parts of the CodeCache when scavenging.
ScavengableNMethods::nmethods_do(code_roots);
}
if (so & SO_AllCodeCache) {
assert(code_roots != NULL, "must supply closure for code cache");
// We only visit parts of the CodeCache when scavenging.
ScavengableNMethods::nmethods_do(code_roots);
}
if (so & SO_AllCodeCache) {
assert(code_roots != NULL, "must supply closure for code cache");

// CMSCollector uses this to do intermediate-strength collections.
// We scan the entire code cache, since CodeCache::do_unloading is not called.
CodeCache::blobs_do(code_roots);
}
// Verify that the code cache contents are not subject to
// movement by a scavenging collection.
DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
DEBUG_ONLY(ScavengableNMethods::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
// CMSCollector uses this to do intermediate-strength collections.
// We scan the entire code cache, since CodeCache::do_unloading is not called.
CodeCache::blobs_do(code_roots);
}
// Verify that the code cache contents are not subject to
// movement by a scavenging collection.
DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
DEBUG_ONLY(ScavengableNMethods::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
}

void GenCollectedHeap::full_process_roots(StrongRootsScope* scope,
bool is_adjust_phase,
void GenCollectedHeap::full_process_roots(bool is_adjust_phase,
ScanningOption so,
bool only_strong_roots,
OopClosure* root_closure,
CLDClosure* cld_closure) {
MarkingCodeBlobClosure mark_code_closure(root_closure, is_adjust_phase);
CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;

process_roots(scope, so, root_closure, cld_closure, weak_cld_closure, &mark_code_closure);
_process_strong_tasks->all_tasks_completed(scope->n_threads());
process_roots(so, root_closure, cld_closure, weak_cld_closure, &mark_code_closure);
}

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
@@ -103,20 +103,6 @@ class GenCollectedHeap : public CollectedHeap {

protected:

// The set of potentially parallel tasks in root scanning.
enum GCH_strong_roots_tasks {
GCH_PS_OopStorageSet_oops_do,
GCH_PS_ClassLoaderDataGraph_oops_do,
GCH_PS_CodeCache_oops_do,
AOT_ONLY(GCH_PS_aot_oops_do COMMA)
// Leave this one last.
GCH_PS_NumElements
};

// Data structure for claiming the (potentially) parallel tasks in
// (gen-specific) roots processing.
SubTasksDone* _process_strong_tasks;

GCMemoryManager* _young_manager;
GCMemoryManager* _old_manager;

@@ -357,8 +343,7 @@ class GenCollectedHeap : public CollectedHeap {
};

protected:
void process_roots(StrongRootsScope* scope,
ScanningOption so,
void process_roots(ScanningOption so,
OopClosure* strong_roots,
CLDClosure* strong_cld_closure,
CLDClosure* weak_cld_closure,
@@ -368,8 +353,7 @@ class GenCollectedHeap : public CollectedHeap {
virtual void gc_epilogue(bool full);

public:
void full_process_roots(StrongRootsScope* scope,
bool is_adjust_phase,
void full_process_roots(bool is_adjust_phase,
ScanningOption so,
bool only_strong_roots,
OopClosure* root_closure,
@@ -37,9 +37,16 @@ MarkScope::~MarkScope() {
}

// Defect fixed: the scraped diff left the old unconditional
// Threads::change_thread_claim_token() call duplicated above the new guarded
// version. This is the coherent post-commit constructor: the thread claim
// token is only rotated when root processing may involve worker threads.
StrongRootsScope::StrongRootsScope(uint n_threads) : _n_threads(n_threads) {
  // No need for thread claim for statically-known sequential case (_n_threads == 0)
  // For positive values, clients of this class often unify sequential/parallel
  // cases, so they expect the thread claim token to be updated.
  if (_n_threads != 0) {
    Threads::change_thread_claim_token();
  }
}

// Defect fixed: the scraped diff left the old unconditional
// Threads::assert_all_threads_claimed() call duplicated above the guarded
// version. Post-commit destructor: the all-threads-claimed check only makes
// sense when the constructor rotated the claim token (_n_threads != 0).
StrongRootsScope::~StrongRootsScope() {
  if (_n_threads != 0) {
    Threads::assert_all_threads_claimed();
  }
}
@@ -33,10 +33,10 @@ class MarkScope : public StackObj {
~MarkScope();
};

// Sets up and tears down the required state for sequential/parallel root processing.
class StrongRootsScope : public MarkScope {
// Number of threads participating in the roots processing.
// 0 means statically-known sequential root processing; used only by Serial GC
const uint _n_threads;

public:

0 comments on commit 8a9004d

Please sign in to comment.