@@ -436,11 +436,11 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
436436 log_debug (gc)(" ConcGCThreads: %u offset %u" , ConcGCThreads, _worker_id_offset);
437437 log_debug (gc)(" ParallelGCThreads: %u" , ParallelGCThreads);
438438
439- _num_concurrent_workers = ConcGCThreads;
440- _max_concurrent_workers = _num_concurrent_workers;
439+ _max_concurrent_workers = ConcGCThreads;
441440
442441 _concurrent_workers = new WorkerThreads (" G1 Conc" , _max_concurrent_workers);
443442 _concurrent_workers->initialize_workers ();
443+ _num_concurrent_workers = _concurrent_workers->active_workers ();
444444
445445 if (!_global_mark_stack.initialize (MarkStackSize, MarkStackSizeMax)) {
446446 vm_exit_during_initialization (" Failed to allocate initial concurrent mark overflow mark stack." );
@@ -976,17 +976,14 @@ void G1ConcurrentMark::scan_root_regions() {
976976 if (root_regions ()->scan_in_progress ()) {
977977 assert (!has_aborted (), " Aborting before root region scanning is finished not supported." );
978978
979- _num_concurrent_workers = MIN2 (calc_active_marking_workers (),
980- // We distribute work on a per-region basis, so starting
981- // more threads than that is useless.
982- root_regions ()->num_root_regions ());
983- assert (_num_concurrent_workers <= _max_concurrent_workers,
984- " Maximum number of marking threads exceeded" );
979+ // Assign one worker to each root-region but subject to the max constraint.
980+ const uint num_workers = MIN2 (root_regions ()->num_root_regions (),
981+ _max_concurrent_workers);
985982
986983 G1CMRootRegionScanTask task (this );
987984 log_debug (gc, ergo)(" Running %s using %u workers for %u work units." ,
988- task.name (), _num_concurrent_workers , root_regions ()->num_root_regions ());
989- _concurrent_workers->run_task (&task, _num_concurrent_workers );
985+ task.name (), num_workers , root_regions ()->num_root_regions ());
986+ _concurrent_workers->run_task (&task, num_workers );
990987
991988 // It's possible that has_aborted() is true here without actually
992989 // aborting the survivor scan earlier. This is OK as it's
@@ -1046,16 +1043,16 @@ void G1ConcurrentMark::concurrent_cycle_end(bool mark_cycle_completed) {
10461043void G1ConcurrentMark::mark_from_roots () {
10471044 _restart_for_overflow = false ;
10481045
1049- _num_concurrent_workers = calc_active_marking_workers ();
1050-
1051- uint active_workers = MAX2 (1U , _num_concurrent_workers);
1046+ uint active_workers = calc_active_marking_workers ();
10521047
10531048 // Setting active workers is not guaranteed since fewer
10541049 // worker threads may currently exist and more may not be
10551050 // available.
10561051 active_workers = _concurrent_workers->set_active_workers (active_workers);
10571052 log_info (gc, task)(" Using %u workers of %u for marking" , active_workers, _concurrent_workers->max_workers ());
10581053
1054+ _num_concurrent_workers = active_workers;
1055+
10591056 // Parallel task terminator is set in "set_concurrency_and_phase()"
10601057 set_concurrency_and_phase (active_workers, true /* concurrent */ );
10611058