@@ -42,14 +42,12 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/debug.hpp"
-
-// ======= Concurrent Mark Thread ========
+#include "utilities/ticks.hpp"

 G1ConcurrentMarkThread::G1ConcurrentMarkThread(G1ConcurrentMark* cm) :
   ConcurrentGCThread(),
   _vtime_start(0.0),
   _vtime_accum(0.0),
-  _vtime_mark_accum(0.0),
   _cm(cm),
   _state(Idle)
 {
@@ -77,7 +75,7 @@ class CMCleanup : public VoidClosure {
   }
 };

-double G1ConcurrentMarkThread::mmu_delay_end(G1Policy* g1_policy, bool remark) {
+double G1ConcurrentMarkThread::mmu_delay_end(G1Policy* policy, bool remark) {
   // There are 3 reasons to use SuspendibleThreadSetJoiner.
   // 1. To avoid concurrency problem.
   //    - G1MMUTracker::add_pause(), when_sec() and when_max_gc_sec() can be called
@@ -88,29 +86,29 @@ double G1ConcurrentMarkThread::mmu_delay_end(G1Policy* g1_policy, bool remark) {
   // And then sleep for predicted amount of time by delay_to_keep_mmu().
   SuspendibleThreadSetJoiner sts_join;

-  const G1Analytics* analytics = g1_policy->analytics();
+  const G1Analytics* analytics = policy->analytics();
   double prediction_ms = remark ? analytics->predict_remark_time_ms()
                                 : analytics->predict_cleanup_time_ms();
   double prediction = prediction_ms / MILLIUNITS;
-  G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
+  G1MMUTracker *mmu_tracker = policy->mmu_tracker();
   double now = os::elapsedTime();
   return now + mmu_tracker->when_sec(now, prediction);
 }

-void G1ConcurrentMarkThread::delay_to_keep_mmu(G1Policy* g1_policy, bool remark) {
-  if (g1_policy->use_adaptive_young_list_length()) {
-    double delay_end_sec = mmu_delay_end(g1_policy, remark);
+void G1ConcurrentMarkThread::delay_to_keep_mmu(bool remark) {
+  G1Policy* policy = G1CollectedHeap::heap()->policy();
+
+  if (policy->use_adaptive_young_list_length()) {
+    double delay_end_sec = mmu_delay_end(policy, remark);
     // Wait for timeout or thread termination request.
     MonitorLocker ml(CGC_lock, Monitor::_no_safepoint_check_flag);
-    while (!_cm->has_aborted()) {
+    while (!_cm->has_aborted() && !should_terminate()) {
       double sleep_time_sec = (delay_end_sec - os::elapsedTime());
       jlong sleep_time_ms = ceil(sleep_time_sec * MILLIUNITS);
       if (sleep_time_ms <= 0) {
         break; // Passed end time.
       } else if (ml.wait(sleep_time_ms, Monitor::_no_safepoint_check_flag)) {
         break; // Timeout => reached end time.
-      } else if (should_terminate()) {
-        break; // Wakeup for pending termination request.
       }
       // Other (possibly spurious) wakeup. Retry with updated sleep time.
     }
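
Note on the MMU computation above: given the predicted Remark or Cleanup pause length, G1MMUTracker::when_sec(now, prediction) returns how many seconds the thread must still wait so that a pause of that length keeps the Minimum Mutator Utilization goal, and mmu_delay_end() turns that into an absolute deadline. A minimal sketch of the idea, simplified to a single previous pause (SimpleMmuTracker and its fields are illustrative, not the real G1MMUTracker interface):

// Simplified MMU wait computation; illustrative only, assumes a single
// previous pause and a fixed time window.
#include <algorithm>

struct SimpleMmuTracker {
  double _window_sec;      // MMU time window, e.g. GCPauseIntervalMillis / 1000.0
  double _max_gc_sec;      // GC time allowed inside any such window
  double _last_pause_end;  // when the previous pause finished (seconds)
  double _last_pause_sec;  // how long the previous pause lasted

  // Seconds to wait from 'now' before a pause of 'pause_sec' may start.
  double when_sec(double now, double pause_sec) const {
    if (_last_pause_sec + pause_sec <= _max_gc_sec) {
      return 0.0; // both pauses fit into one window's GC budget
    }
    // Otherwise start late enough that no window of length _window_sec
    // holds too much of the previous pause plus the entire new one.
    double earliest_start = _last_pause_end + _window_sec - _max_gc_sec;
    return std::max(0.0, earliest_start - now);
  }
};

delay_to_keep_mmu() then converts the returned deadline into a timed wait on CGC_lock, re-checking the remaining sleep time after every (possibly spurious) wakeup, as the hunk above shows.
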
@@ -136,144 +134,16 @@ class G1ConcPhaseTimer : public GCTraceConcTimeImpl<LogLevel::Info, LOG_TAGS(gc,
 void G1ConcurrentMarkThread::run_service() {
   _vtime_start = os::elapsedVTime();

-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  G1Policy* policy = g1h->policy();
-
-  while (!should_terminate()) {
-    // wait until started is set.
-    sleep_before_next_cycle();
-    if (should_terminate()) {
-      break;
-    }
+  while (wait_for_next_cycle()) {

     GCIdMark gc_id_mark;
-
-    _cm->concurrent_cycle_start();
-
     GCTraceConcTime(Info, gc) tt("Concurrent Cycle");
-    {
-      ResourceMark rm;
-
-      double cycle_start = os::elapsedVTime();
-
-      {
-        G1ConcPhaseTimer p(_cm, "Concurrent Clear Claimed Marks");
-        ClassLoaderDataGraph::clear_claimed_marks();
-      }
-
-      // We have to ensure that we finish scanning the root regions
-      // before the next GC takes place. To ensure this we have to
-      // make sure that we do not join the STS until the root regions
-      // have been scanned. If we did then it's possible that a
-      // subsequent GC could block us from joining the STS and proceed
-      // without the root regions have been scanned which would be a
-      // correctness issue.
-
-      {
-        G1ConcPhaseTimer p(_cm, "Concurrent Scan Root Regions");
-        _cm->scan_root_regions();
-      }
-
-      // Note: ConcurrentGCBreakpoints before here risk deadlock,
-      // because a young GC must wait for root region scanning.
-
-      // It would be nice to use the G1ConcPhaseTimer class here but
-      // the "end" logging is inside the loop and not at the end of
-      // a scope. Also, the timer doesn't support nesting.
-      // Mimicking the same log output instead.
-      jlong mark_start = os::elapsed_counter();
-      log_info(gc, marking)("Concurrent Mark (%.3fs)",
-                            TimeHelper::counter_to_seconds(mark_start));
-      for (uint iter = 1; !_cm->has_aborted(); ++iter) {
-        // Concurrent marking.
-        {
-          ConcurrentGCBreakpoints::at("AFTER MARKING STARTED");
-          G1ConcPhaseTimer p(_cm, "Concurrent Mark From Roots");
-          _cm->mark_from_roots();
-        }
-        if (_cm->has_aborted()) {
-          break;
-        }
-
-        if (G1UseReferencePrecleaning) {
-          G1ConcPhaseTimer p(_cm, "Concurrent Preclean");
-          _cm->preclean();
-        }
-        if (_cm->has_aborted()) {
-          break;
-        }
-
-        // Delay remark pause for MMU.
-        double mark_end_time = os::elapsedVTime();
-        jlong mark_end = os::elapsed_counter();
-        _vtime_mark_accum += (mark_end_time - cycle_start);
-        delay_to_keep_mmu(policy, true /* remark */);
-        if (_cm->has_aborted()) {
-          break;
-        }
-
-        // Pause Remark.
-        ConcurrentGCBreakpoints::at("BEFORE MARKING COMPLETED");
-        log_info(gc, marking)("Concurrent Mark (%.3fs, %.3fs) %.3fms",
-                              TimeHelper::counter_to_seconds(mark_start),
-                              TimeHelper::counter_to_seconds(mark_end),
-                              TimeHelper::counter_to_millis(mark_end - mark_start));
-        CMRemark cl(_cm);
-        VM_G1Concurrent op(&cl, "Pause Remark");
-        VMThread::execute(&op);
-        if (_cm->has_aborted()) {
-          break;
-        } else if (!_cm->restart_for_overflow()) {
-          break; // Exit loop if no restart requested.
-        } else {
-          // Loop to restart for overflow.
-          log_info(gc, marking)("Concurrent Mark Restart for Mark Stack Overflow (iteration #%u)",
-                                iter);
-        }
-      }
-
-      if (!_cm->has_aborted()) {
-        G1ConcPhaseTimer p(_cm, "Concurrent Rebuild Remembered Sets");
-        _cm->rebuild_rem_set_concurrently();
-      }
-
-      double end_time = os::elapsedVTime();
-      // Update the total virtual time before doing this, since it will try
-      // to measure it to get the vtime for this marking.
-      _vtime_accum = (end_time - _vtime_start);
-
-      if (!_cm->has_aborted()) {
-        delay_to_keep_mmu(policy, false /* cleanup */);
-      }
-
-      if (!_cm->has_aborted()) {
-        CMCleanup cl_cl(_cm);
-        VM_G1Concurrent op(&cl_cl, "Pause Cleanup");
-        VMThread::execute(&op);
-      }

-      // We now want to allow clearing of the marking bitmap to be
-      // suspended by a collection pause.
-      // We may have aborted just before the remark. Do not bother clearing the
-      // bitmap then, as it has been done during mark abort.
-      if (!_cm->has_aborted()) {
-        G1ConcPhaseTimer p(_cm, "Concurrent Cleanup for Next Mark");
-        _cm->cleanup_for_next_mark();
-      }
-    }
+    concurrent_cycle_start();
+    full_concurrent_cycle_do();
+    concurrent_cycle_end();

-    // Update the number of full collections that have been
-    // completed. This will also notify the G1OldGCCount_lock in case a
-    // Java thread is waiting for a full GC to happen (e.g., it
-    // called System.gc() with +ExplicitGCInvokesConcurrent).
-    {
-      SuspendibleThreadSetJoiner sts_join;
-      g1h->increment_old_marking_cycles_completed(true /* concurrent */,
-                                                  !_cm->has_aborted() /* liveness_completed */);
-
-      _cm->concurrent_cycle_end();
-      ConcurrentGCBreakpoints::notify_active_to_idle();
-    }
+    _vtime_accum = (os::elapsedVTime() - _vtime_start);
   }
   _cm->root_regions()->cancel_scan();
 }
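
The while loop body now reads as start / do / end. Every phase_*() and subphase_*() helper introduced in the final hunk returns _cm->has_aborted(), so full_concurrent_cycle_do() can chain phases and stop the cycle as soon as marking is aborted (for example by a Full GC). The skeleton of that pattern, with hypothetical phase names:

// Early-return phase chaining, reduced to a skeleton; names are placeholders.
class CycleDriver {
  bool _aborted = false;

  // Each phase performs its work, then reports whether the cycle was
  // aborted so the caller can skip all remaining phases.
  bool phase_a()     { /* ... work ... */ return _aborted; }
  bool phase_b()     { /* ... work ... */ return _aborted; }
  void phase_final() { /* ... work ... */ }

public:
  void cycle_do() {
    if (phase_a()) return;
    if (phase_b()) return;
    phase_final();
  }
};

Compared with the removed inline if (_cm->has_aborted()) break; checks, the abort test now sits in exactly one place per phase and run_service() stays flat.
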
@@ -283,10 +153,7 @@ void G1ConcurrentMarkThread::stop_service() {
   CGC_lock->notify_all();
 }

-
-void G1ConcurrentMarkThread::sleep_before_next_cycle() {
-  // We join here because we don't want to do the "shouldConcurrentMark()"
-  // below while the world is otherwise stopped.
+bool G1ConcurrentMarkThread::wait_for_next_cycle() {
   assert(!in_progress(), "should have been cleared");

   MonitorLocker ml(CGC_lock, Mutex::_no_safepoint_check_flag);
@@ -297,4 +164,160 @@ void G1ConcurrentMarkThread::sleep_before_next_cycle() {
   if (started()) {
     set_in_progress();
   }
+
+  return !should_terminate();
+}
+
+void G1ConcurrentMarkThread::phase_clear_cld_claimed_marks() {
+  G1ConcPhaseTimer p(_cm, "Concurrent Clear Claimed Marks");
+  ClassLoaderDataGraph::clear_claimed_marks();
+}
+
+bool G1ConcurrentMarkThread::phase_scan_root_regions() {
+  G1ConcPhaseTimer p(_cm, "Concurrent Scan Root Regions");
+  _cm->scan_root_regions();
+  return _cm->has_aborted();
+}
+
+bool G1ConcurrentMarkThread::phase_mark_loop() {
+  Ticks mark_start = Ticks::now();
+  log_info(gc, marking)("Concurrent Mark");
+
+  for (uint iter = 1; true; ++iter) {
+    // Subphase 1: Mark From Roots.
+    if (subphase_mark_from_roots()) return true;
+
+    // Subphase 2: Preclean (optional)
+    if (G1UseReferencePrecleaning) {
+      if (subphase_preclean()) return true;
+    }
+
+    // Subphase 3: Wait for Remark.
+    if (subphase_delay_to_keep_mmu_before_remark()) return true;
+
+    // Subphase 4: Remark pause
+    if (subphase_remark()) return true;
+
+    // Check if we need to restart the marking loop.
+    if (!mark_loop_needs_restart()) break;
+
+    log_info(gc, marking)("Concurrent Mark Restart for Mark Stack Overflow (iteration #%u)",
+                          iter);
+  }
+
+  log_info(gc, marking)("Concurrent Mark %.3fms",
+                        (Ticks::now() - mark_start).seconds() * 1000.0);
+
+  return false;
+}
+
+bool G1ConcurrentMarkThread::mark_loop_needs_restart() const {
+  return _cm->has_overflown();
+}
+
+bool G1ConcurrentMarkThread::subphase_mark_from_roots() {
+  ConcurrentGCBreakpoints::at("AFTER MARKING STARTED");
+  G1ConcPhaseTimer p(_cm, "Concurrent Mark From Roots");
+  _cm->mark_from_roots();
+  return _cm->has_aborted();
+}
+
+bool G1ConcurrentMarkThread::subphase_preclean() {
+  G1ConcPhaseTimer p(_cm, "Concurrent Preclean");
+  _cm->preclean();
+  return _cm->has_aborted();
+}
+
+bool G1ConcurrentMarkThread::subphase_delay_to_keep_mmu_before_remark() {
+  delay_to_keep_mmu(true /* remark */);
+  return _cm->has_aborted();
+}
+
+bool G1ConcurrentMarkThread::subphase_remark() {
+  ConcurrentGCBreakpoints::at("BEFORE MARKING COMPLETED");
+  CMRemark cl(_cm);
+  VM_G1Concurrent op(&cl, "Pause Remark");
+  VMThread::execute(&op);
+  return _cm->has_aborted();
+}
+
+bool G1ConcurrentMarkThread::phase_rebuild_remembered_sets() {
+  G1ConcPhaseTimer p(_cm, "Concurrent Rebuild Remembered Sets");
+  _cm->rebuild_rem_set_concurrently();
+  return _cm->has_aborted();
+}
+
+bool G1ConcurrentMarkThread::phase_delay_to_keep_mmu_before_cleanup() {
+  delay_to_keep_mmu(false /* cleanup */);
+  return _cm->has_aborted();
+}
+
+bool G1ConcurrentMarkThread::phase_cleanup() {
+  CMCleanup cl(_cm);
+  VM_G1Concurrent op(&cl, "Pause Cleanup");
+  VMThread::execute(&op);
+  return _cm->has_aborted();
+}
+
+bool G1ConcurrentMarkThread::phase_clear_bitmap_for_next_mark() {
+  G1ConcPhaseTimer p(_cm, "Concurrent Cleanup for Next Mark");
+  _cm->cleanup_for_next_mark();
+  return _cm->has_aborted();
+}
+
+void G1ConcurrentMarkThread::concurrent_cycle_start() {
+  _cm->concurrent_cycle_start();
+}
+
+void G1ConcurrentMarkThread::full_concurrent_cycle_do() {
+  HandleMark hm(Thread::current());
+  ResourceMark rm;
+
+  // Phase 1: Clear CLD claimed marks.
+  phase_clear_cld_claimed_marks();
+
+  // We have to ensure that we finish scanning the root regions
+  // before the next GC takes place. To ensure this we have to
+  // make sure that we do not join the STS until the root regions
+  // have been scanned. If we did then it's possible that a
+  // subsequent GC could block us from joining the STS and proceed
+  // without the root regions have been scanned which would be a
+  // correctness issue.
+  //
+  // So do not return before the scan root regions phase as a GC waits for a
+  // notification from it.
+  //
+  // For the same reason ConcurrentGCBreakpoints (in the phase methods) before
+  // here risk deadlock, because a young GC must wait for root region scanning.
+
+  // Phase 2: Scan root regions.
+  if (phase_scan_root_regions()) return;
+
+  // Phase 3: Actual mark loop.
+  if (phase_mark_loop()) return;
+
+  // Phase 4: Rebuild remembered sets.
+  if (phase_rebuild_remembered_sets()) return;
+
+  // Phase 5: Wait for Cleanup.
+  if (phase_delay_to_keep_mmu_before_cleanup()) return;
+
+  // Phase 6: Cleanup pause
+  if (phase_cleanup()) return;
+
+  // Phase 7: Clear bitmap for next mark.
+  phase_clear_bitmap_for_next_mark();
+}
+
+void G1ConcurrentMarkThread::concurrent_cycle_end() {
+  // Update the number of full collections that have been
+  // completed. This will also notify the G1OldGCCount_lock in case a
+  // Java thread is waiting for a full GC to happen (e.g., it
+  // called System.gc() with +ExplicitGCInvokesConcurrent).
+  SuspendibleThreadSetJoiner sts_join;
+  G1CollectedHeap::heap()->increment_old_marking_cycles_completed(true /* concurrent */,
+                                                                  !_cm->has_aborted());
+
+  _cm->concurrent_cycle_end();
+  ConcurrentGCBreakpoints::notify_active_to_idle();
 }
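
The new utilities/ticks.hpp include supports the timing change in phase_mark_loop(): the old os::elapsed_counter() / TimeHelper arithmetic is replaced by bracketing the whole mark with Ticks::now() and logging (Ticks::now() - mark_start).seconds() * 1000.0 milliseconds, so the start message no longer embeds absolute timestamps (unified logging decorations can supply those). A standalone approximation using std::chrono, since HotSpot's Ticks/Tickspan are internal types:

// Standalone approximation of the Ticks-based duration logging.
#include <chrono>
#include <cstdio>

int main() {
  // ~ Ticks mark_start = Ticks::now();
  auto mark_start = std::chrono::steady_clock::now();

  // ... concurrent marking work would run here ...

  // ~ (Ticks::now() - mark_start).seconds() * 1000.0
  std::chrono::duration<double> elapsed =
      std::chrono::steady_clock::now() - mark_start;
  std::printf("Concurrent Mark %.3fms\n", elapsed.count() * 1000.0);
  return 0;
}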