/
shenandoahControlThread.cpp
647 lines (534 loc) · 23.5 KB
/
shenandoahControlThread.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
/*
* Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
// Constructs and immediately starts the Shenandoah control thread.
// The control thread is the sole driver of GC cycles: it watches trigger
// flags set by mutators and decides which cycle mode to run.
ShenandoahControlThread::ShenandoahControlThread() :
  ConcurrentGCThread(),
  // Waiters blocked on allocation failure; notified when the failure GC completes.
  _alloc_failure_waiters_lock(Mutex::leaf, "ShenandoahAllocFailureGC_lock", true, Monitor::_safepoint_check_always),
  // Waiters blocked on an explicit/implicit GC request; notified when a full cycle has run.
  _gc_waiters_lock(Mutex::leaf, "ShenandoahRequestedGC_lock", true, Monitor::_safepoint_check_always),
  _periodic_task(this),
  _requested_gc_cause(GCCause::_no_cause_specified),
  _degen_point(ShenandoahHeap::_degenerated_outside_cycle),
  _allocs_seen(0) {
  // Start the internal GC id counter from zero before the thread can run.
  reset_gc_id();
  // Optionally elevate priority so GC decisions are not starved by mutators.
  create_and_start(ShenandoahCriticalControlThreadPriority ? CriticalPriority : NearMaxPriority);
  // Enroll periodic tasks: counters update, and SATB buffer flushing.
  _periodic_task.enroll();
  _periodic_satb_flush_task.enroll();
}
ShenandoahControlThread::~ShenandoahControlThread() {
  // This is here so that super is called.
}
// Periodic timer callback: pushes pending monitoring counter updates
// through the control thread, off the allocation fast path.
void ShenandoahPeriodicTask::task() {
  _thread->handle_force_counters_update();
  _thread->handle_counters_update();
}
// Periodic timer callback: forces all threads to flush their SATB buffers,
// so that concurrent marking observes updates from infrequently-allocating threads.
void ShenandoahPeriodicSATBFlushTask::task() {
  ShenandoahHeap::heap()->force_satb_flush_all_threads();
}
// Main loop of the control thread. Each iteration inspects pending triggers
// (allocation failure, explicit/implicit GC requests, heuristics), selects at
// most one GC mode to run, executes it, and then services bookkeeping:
// uncommit of stale regions, counter updates, and adaptive sleeping.
void ShenandoahControlThread::run_service() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  GCMode default_mode = concurrent_normal;
  GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc;
  int sleep = ShenandoahControlIntervalMin;

  double last_shrink_time = os::elapsedTime();
  double last_sleep_adjust_time = os::elapsedTime();

  // Shrink period avoids constantly polling regions for shrinking.
  // Having a period 10x lower than the delay would mean we hit the
  // shrinking with lag of less than 1/10-th of true delay.
  // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds.
  double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10;

  ShenandoahCollectorPolicy* policy = heap->shenandoah_policy();
  ShenandoahHeuristics* heuristics = heap->heuristics();
  while (!in_graceful_shutdown() && !should_terminate()) {
    // Figure out if we have pending requests.
    bool alloc_failure_pending = _alloc_failure_gc.is_set();
    bool explicit_gc_requested = _gc_requested.is_set() && is_explicit_gc(_requested_gc_cause);
    bool implicit_gc_requested = _gc_requested.is_set() && !is_explicit_gc(_requested_gc_cause);

    // This control loop iteration has seen this much allocations.
    size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0);

    // Choose which GC mode to run in. The block below should select a single mode.
    GCMode mode = none;
    GCCause::Cause cause = GCCause::_last_gc_cause;
    ShenandoahHeap::ShenandoahDegenPoint degen_point = ShenandoahHeap::_degenerated_unset;

    if (alloc_failure_pending) {
      // Allocation failure takes precedence: we have to deal with it first thing
      log_info(gc)("Trigger: Handle Allocation Failure");

      cause = GCCause::_allocation_failure;

      // Consume the degen point, and seed it with default value
      degen_point = _degen_point;
      _degen_point = ShenandoahHeap::_degenerated_outside_cycle;

      if (ShenandoahDegeneratedGC && heuristics->should_degenerate_cycle()) {
        // Prefer a (cheaper) Degenerated GC continuing from where the
        // concurrent cycle was interrupted.
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_degenerated(degen_point);
        mode = stw_degenerated;
      } else {
        // Heuristics say degeneration is futile (e.g. it keeps failing); do Full GC.
        heuristics->record_allocation_failure_gc();
        policy->record_alloc_failure_to_full();
        mode = stw_full;
      }
    } else if (explicit_gc_requested) {
      cause = _requested_gc_cause;
      log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ExplicitGCInvokesConcurrent) {
        policy->record_explicit_to_concurrent();
        mode = default_mode;
        // Unload and clean up everything
        heap->set_process_references(heuristics->can_process_references());
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_explicit_to_full();
        mode = stw_full;
      }
    } else if (implicit_gc_requested) {
      cause = _requested_gc_cause;
      log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause));

      heuristics->record_requested_gc();

      if (ShenandoahImplicitGCInvokesConcurrent) {
        policy->record_implicit_to_concurrent();
        mode = default_mode;
        // Unload and clean up everything
        heap->set_process_references(heuristics->can_process_references());
        heap->set_unload_classes(heuristics->can_unload_classes());
      } else {
        policy->record_implicit_to_full();
        mode = stw_full;
      }
    } else {
      // Potential normal cycle: ask heuristics if it wants to act
      if (heuristics->should_start_gc()) {
        mode = default_mode;
        cause = default_cause;
      }

      // Ask policy if this cycle wants to process references or unload classes
      heap->set_process_references(heuristics->should_process_references());
      heap->set_unload_classes(heuristics->should_unload_classes());
    }

    // Blow all soft references on this cycle, if handling allocation failure,
    // or we are requested to do so unconditionally.
    if (alloc_failure_pending || ShenandoahAlwaysClearSoftRefs) {
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(true);
    }

    bool gc_requested = (mode != none);
    assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");

    if (gc_requested) {
      // GC is starting, bump the internal ID
      update_gc_id();

      heap->reset_bytes_allocated_since_gc_start();

      // Use default constructor to snapshot the Metaspace state before GC.
      metaspace::MetaspaceSizesSnapshot meta_sizes;

      // If GC was requested, we are sampling the counters even without actual triggers
      // from allocation machinery. This captures GC phases more accurately.
      set_forced_counters_update(true);

      // If GC was requested, we better dump freeset data for performance debugging
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();
      }

      // Dispatch to the selected GC mode.
      switch (mode) {
        case concurrent_normal:
          service_concurrent_normal_cycle(cause);
          break;
        case stw_degenerated:
          service_stw_degenerated_cycle(cause, degen_point);
          break;
        case stw_full:
          service_stw_full_cycle(cause);
          break;
        default:
          ShouldNotReachHere();
      }

      // If this was the requested GC cycle, notify waiters about it
      if (explicit_gc_requested || implicit_gc_requested) {
        notify_gc_waiters();
      }

      // If this was the allocation failure GC cycle, notify waiters about it
      if (alloc_failure_pending) {
        notify_alloc_failure_waiters();
      }

      // Report current free set state at the end of cycle, whether
      // it is a normal completion, or the abort.
      {
        ShenandoahHeapLocker locker(heap->lock());
        heap->free_set()->log_status();

        // Notify Universe about new heap usage. This has implications for
        // global soft refs policy, and we better report it every time heap
        // usage goes down.
        Universe::update_heap_info_at_gc();
      }

      // Disable forced counters update, and update counters one more time
      // to capture the state at the end of GC session.
      handle_force_counters_update();
      set_forced_counters_update(false);

      // Retract forceful part of soft refs policy
      heap->soft_ref_policy()->set_should_clear_all_soft_refs(false);

      // Clear metaspace oom flag, if current cycle unloaded classes
      if (heap->unload_classes()) {
        heuristics->clear_metaspace_oom();
      }

      // Commit worker statistics to cycle data
      heap->phase_timings()->flush_par_workers_to_cycle();

      // Print GC stats for current cycle
      {
        LogTarget(Info, gc, stats) lt;
        if (lt.is_enabled()) {
          ResourceMark rm;
          LogStream ls(lt);
          heap->phase_timings()->print_cycle_on(&ls);
        }
      }

      // Commit statistics to globals
      heap->phase_timings()->flush_cycle_to_global();

      // Print Metaspace change following GC (if logging is enabled).
      MetaspaceUtils::print_metaspace_change(meta_sizes);

      // GC is over, we are at idle now
      if (ShenandoahPacing) {
        heap->pacer()->setup_for_idle();
      }
    } else {
      // Allow allocators to know we have seen this much regions
      if (ShenandoahPacing && (allocs_seen > 0)) {
        heap->pacer()->report_alloc(allocs_seen);
      }
    }

    double current = os::elapsedTime();

    if (ShenandoahUncommit && (explicit_gc_requested || (current - last_shrink_time > shrink_period))) {
      // Try to uncommit enough stale regions. Explicit GC tries to uncommit everything.
      // Regular paths uncommit only occasionally.
      double shrink_before = explicit_gc_requested ?
                             current :
                             current - (ShenandoahUncommitDelay / 1000.0);
      service_uncommit(shrink_before);
      heap->phase_timings()->flush_cycle_to_global();
      last_shrink_time = current;
    }

    // Wait before performing the next action. If allocation happened during this wait,
    // we exit sooner, to let heuristics re-evaluate new conditions. If we are at idle,
    // back off exponentially.
    if (_heap_changed.try_unset()) {
      sleep = ShenandoahControlIntervalMin;
    } else if ((current - last_sleep_adjust_time) * 1000 > ShenandoahControlIntervalAdjustPeriod) {
      sleep = MIN2<int>(ShenandoahControlIntervalMax, MAX2(1, sleep * 2));
      last_sleep_adjust_time = current;
    }
    os::naked_short_sleep(sleep);
  }

  // Wait for the actual stop(), can't leave run_service() earlier.
  while (!should_terminate()) {
    os::naked_short_sleep(ShenandoahControlIntervalMin);
  }
}
void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cause) {
  // Normal cycle goes via all concurrent phases. If allocation failure (af) happens during
  // any of the concurrent phases, it first degrades to Degenerated GC and completes GC there.
  // If second allocation failure happens during Degenerated GC cycle (for example, when GC
  // tries to evac something and no memory is available), cycle degrades to Full GC.
  //
  // There are also a shortcut through the normal cycle: immediate garbage shortcut, when
  // heuristics says there are no regions to compact, and all the collection comes from immediately
  // reclaimable regions.
  //
  // ................................................................................................
  //
  //                   (immediate garbage shortcut)                Concurrent GC
  //             /-------------------------------------------\
  //             |                                           |
  //             |                                           |
  //             |                                           |
  //             |                                           v
  // [START] ----> Conc Mark ----o----> Conc Evac --o--> Conc Update-Refs ---o----> [END]
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    |                 |              |      Degenerated GC
  //                   v                    v                 v              |
  //               STW Mark ----------> STW Evac ----> STW Update-Refs ----->o
  //                   |                    |                 |              ^
  //                   | (af)               | (af)            | (af)         |
  // ..................|....................|.................|..............|.......................
  //                   |                    |                 |              |
  //                   |                    v                 |              |      Full GC
  //                   \------------------->o<----------------/              |
  //                                        |                                |
  //                                        v                                |
  //                                      Full GC  --------------------------/
  //
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Bail out if GC was already cancelled before the cycle even started.
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_outside_cycle)) return;

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());

  // Reset for upcoming marking
  heap->entry_reset();

  // Start initial mark under STW
  heap->vmop_entry_init_mark();

  // Continue concurrent mark
  heap->entry_mark();
  if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_mark)) return;

  // If not cancelled, can try to concurrently pre-clean
  heap->entry_preclean();

  // Complete marking under STW, and start evacuation
  heap->vmop_entry_final_mark();

  // Process weak roots that might still point to regions that would be broken by cleanup
  if (heap->is_concurrent_weak_root_in_progress()) {
    heap->entry_weak_roots();
  }

  // Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
  // the space. This would be the last action if there is nothing to evacuate.
  heap->entry_cleanup_early();

  {
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->log_status();
  }

  // Perform concurrent class unloading
  if (heap->is_concurrent_weak_root_in_progress()) {
    heap->entry_class_unloading();
  }

  // Processing strong roots
  // This may be skipped if there is nothing to update/evacuate.
  // If so, strong_root_in_progress would be unset.
  if (heap->is_concurrent_strong_root_in_progress()) {
    heap->entry_strong_roots();
  }

  // Continue the cycle with evacuation and optional update-refs.
  // This may be skipped if there is nothing to evacuate.
  // If so, evac_in_progress would be unset by collection set preparation code.
  if (heap->is_evacuation_in_progress()) {
    // Concurrently evacuate
    heap->entry_evac();
    if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_evac)) return;

    // Perform update-refs phase.
    heap->vmop_entry_init_updaterefs();
    heap->entry_updaterefs();
    if (check_cancellation_or_degen(ShenandoahHeap::_degenerated_updaterefs)) return;

    heap->vmop_entry_final_updaterefs();

    // Update references freed up collection set, kick the cleanup to reclaim the space.
    heap->entry_cleanup_complete();
  }

  // Cycle is complete
  heap->heuristics()->record_success_concurrent();
  heap->shenandoah_policy()->record_success_concurrent();
}
// Checks whether the running concurrent cycle was cancelled. On cancellation
// (other than graceful VM shutdown), records 'point' as the place where the
// cycle was interrupted, so the follow-up Degenerated GC can resume from there.
// Returns true if the caller should abandon the current cycle.
bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahHeap::ShenandoahDegenPoint point) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->cancelled_gc()) {
    // Common case: cycle proceeds normally.
    return false;
  }

  assert (is_alloc_failure_gc() || in_graceful_shutdown(), "Cancel GC either for alloc failure GC, or gracefully exiting");

  if (!in_graceful_shutdown()) {
    // Remember where we stopped; run_service() consumes this to pick the
    // degenerated entry point. It must not have been set already.
    assert (_degen_point == ShenandoahHeap::_degenerated_outside_cycle,
            "Should not be set yet: %s", ShenandoahHeap::degen_point_to_string(_degen_point));
    _degen_point = point;
  }
  return true;
}
// ConcurrentGCThread hook invoked at stop(); run_service() already polls
// should_terminate(), so no extra wake-up is needed here.
void ShenandoahControlThread::stop_service() {
  // Nothing to do here.
}
// Runs a stop-the-world Full GC for the given cause, and records its success
// in heuristics and policy counters.
void ShenandoahControlThread::service_stw_full_cycle(GCCause::Cause cause) {
  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->vmop_entry_full(cause);

  heap->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();
}
// Runs a stop-the-world Degenerated GC, resuming the interrupted concurrent
// cycle from 'point', and records its success in heuristics and policy counters.
void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahHeap::ShenandoahDegenPoint point) {
  assert (point != ShenandoahHeap::_degenerated_unset, "Degenerated point should be set");

  GCIdMark gc_id_mark;
  ShenandoahGCSession session(cause);

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->vmop_degenerated(point);

  heap->heuristics()->record_success_degenerated();
  heap->shenandoah_policy()->record_success_degenerated();
}
void ShenandoahControlThread::service_uncommit(double shrink_before) {
ShenandoahHeap* heap = ShenandoahHeap::heap();
// Determine if there is work to do. This avoids taking heap lock if there is
// no work available, avoids spamming logs with superfluous logging messages,
// and minimises the amount of work while locks are taken.
if (heap->committed() <= heap->min_capacity()) return;
bool has_work = false;
for (size_t i = 0; i < heap->num_regions(); i++) {
ShenandoahHeapRegion *r = heap->get_region(i);
if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
has_work = true;
break;
}
}
if (has_work) {
heap->entry_uncommit(shrink_before);
}
}
// Returns true for "explicit" GC causes: those initiated by user code
// (System.gc()) or by serviceability machinery, as opposed to GCs the
// JVM triggers internally.
bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const {
  const bool user_requested = GCCause::is_user_requested_gc(cause);
  const bool serviceability_requested = GCCause::is_serviceability_requested_gc(cause);
  return user_requested || serviceability_requested;
}
// Entry point for externally requested GCs. Explicit requests can be
// suppressed wholesale with -XX:+DisableExplicitGC; JVM-internal requests
// (metadata, whitebox, stress causes) are always honored.
void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
  assert(GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause) ||
         cause == GCCause::_metadata_GC_clear_soft_refs ||
         cause == GCCause::_full_gc_alot ||
         cause == GCCause::_wb_full_gc ||
         cause == GCCause::_scavenge_alot,
         "only requested GCs here");

  const bool suppressed = is_explicit_gc(cause) && DisableExplicitGC;
  if (!suppressed) {
    handle_requested_gc(cause);
  }
}
// Blocks the requesting thread until at least one complete GC cycle has run
// after the request was made. Re-arms the request flag on every wakeup in
// case the control thread consumed it for a cycle that started too early.
void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  // Make sure we have at least one complete GC cycle before unblocking
  // from the explicit GC request.
  //
  // This is especially important for weak references cleanup and/or native
  // resources (e.g. DirectByteBuffers) machinery: when explicit GC request
  // comes very late in the already running cycle, it would miss lots of new
  // opportunities for cleanup that were made available before the caller
  // requested the GC.
  size_t required_gc_id = get_gc_id() + 1;

  MonitorLocker ml(&_gc_waiters_lock);
  while (get_gc_id() < required_gc_id) {
    // Keep the request visible across spurious wakeups and consumed cycles.
    _gc_requested.set();
    _requested_gc_cause = cause;
    ml.wait();
  }
}
// Called by a Java thread whose allocation request failed. Schedules an
// allocation-failure GC (first caller wins), cancels any concurrent cycle in
// flight, and blocks until the failure GC completes.
void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  assert(current()->is_Java_thread(), "expect Java thread here");

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
                 req.type_string(),
                 byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));

    // Now that alloc failure GC is scheduled, we can abort everything else
    heap->cancel_gc(GCCause::_allocation_failure);
  }

  // Block until the alloc-failure GC is done; notify_alloc_failure_waiters()
  // clears the flag and wakes us up.
  MonitorLocker ml(&_alloc_failure_waiters_lock);
  while (is_alloc_failure_gc()) {
    ml.wait();
  }
}
// Called when evacuation fails to allocate. Unlike handle_alloc_failure(),
// the caller (possibly a GC worker) does not block; it just records the
// failure and forcefully cancels the GC.
void ShenandoahControlThread::handle_alloc_failure_evac(size_t words) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (try_set_alloc_failure_gc()) {
    // Only report the first allocation failure
    log_info(gc)("Failed to allocate " SIZE_FORMAT "%s for evacuation",
                 byte_size_in_proper_unit(words * HeapWordSize), proper_unit_for_byte_size(words * HeapWordSize));
  }

  // Forcefully report allocation failure
  heap->cancel_gc(GCCause::_shenandoah_allocation_failure_evac);
}
// Wakes up all threads blocked in handle_alloc_failure(). The flag is
// cleared before taking the lock so waiters observe it down once notified.
void ShenandoahControlThread::notify_alloc_failure_waiters() {
  _alloc_failure_gc.unset();
  MonitorLocker ml(&_alloc_failure_waiters_lock);
  ml.notify_all();
}
// Atomically claims the alloc-failure-GC flag; returns true only for the
// first claimant, so the failure is reported and acted upon exactly once.
bool ShenandoahControlThread::try_set_alloc_failure_gc() {
  return _alloc_failure_gc.try_set();
}
// Returns true while an allocation-failure GC is pending or running.
bool ShenandoahControlThread::is_alloc_failure_gc() {
  return _alloc_failure_gc.is_set();
}
// Wakes up all threads blocked in handle_requested_gc() after a requested
// cycle completes. Clears the request flag before notifying.
void ShenandoahControlThread::notify_gc_waiters() {
  _gc_requested.unset();
  MonitorLocker ml(&_gc_waiters_lock);
  ml.notify_all();
}
// Pushes a monitoring counters update if the allocation path asked for one
// via notify_heap_changed(); otherwise does nothing.
void ShenandoahControlThread::handle_counters_update() {
  if (!_do_counters_update.is_set()) {
    return;
  }
  _do_counters_update.unset();
  ShenandoahHeap::heap()->monitoring_support()->update_counters();
}
// Unconditionally updates monitoring counters while forced updates are
// enabled (i.e. during a GC cycle), regardless of allocation activity.
void ShenandoahControlThread::handle_force_counters_update() {
  if (_force_counters_update.is_set()) {
    _do_counters_update.unset(); // reset these too, we do update now!
    ShenandoahHeap::heap()->monitoring_support()->update_counters();
  }
}
// Signals that heap occupancy changed (e.g. a new region was taken).
// Flags are only written when currently unset, to avoid repeated stores
// to shared state from the hot allocation path.
void ShenandoahControlThread::notify_heap_changed() {
  // This is called from allocation path, and thus should be fast.

  // Update monitoring counters when we took a new region. This amortizes the
  // update costs on slow path.
  if (_do_counters_update.is_unset()) {
    _do_counters_update.set();
  }
  // Notify that something had changed.
  if (_heap_changed.is_unset()) {
    _heap_changed.set();
  }
}
// Accumulates the number of words allocated since the control thread last
// looked; run_service() drains this atomically each iteration for the pacer.
void ShenandoahControlThread::pacing_notify_alloc(size_t words) {
  assert(ShenandoahPacing, "should only call when pacing is enabled");
  Atomic::add(&_allocs_seen, words);
}
// Enables/disables forced counter updates for the duration of a GC cycle.
void ShenandoahControlThread::set_forced_counters_update(bool value) {
  _force_counters_update.set_cond(value);
}
// Resets the internal GC cycle counter to zero (done once at construction).
void ShenandoahControlThread::reset_gc_id() {
  Atomic::store(&_gc_id, (size_t)0);
}
// Bumps the internal GC cycle counter; requested-GC waiters compare against
// this to know a full cycle has run after their request.
void ShenandoahControlThread::update_gc_id() {
  Atomic::inc(&_gc_id);
}
// Returns the current internal GC cycle counter.
size_t ShenandoahControlThread::get_gc_id() {
  return Atomic::load(&_gc_id);
}
// Prints this thread's description to the default tty stream.
void ShenandoahControlThread::print() const {
  print_on(tty);
}
// Prints a short identification line for this thread, then delegates to the
// generic Thread printer for the common details.
void ShenandoahControlThread::print_on(outputStream* st) const {
  st->print("Shenandoah Concurrent Thread");
  Thread::print_on(st);
  st->cr();
}
// Creates and starts the underlying OS thread at default priority.
void ShenandoahControlThread::start() {
  create_and_start();
}
// Asks the control loop to wind down: run_service() exits its main loop and
// idles until the final stop() arrives.
void ShenandoahControlThread::prepare_for_graceful_shutdown() {
  _graceful_shutdown.set();
}
// Returns true once graceful shutdown has been requested.
bool ShenandoahControlThread::in_graceful_shutdown() {
  return _graceful_shutdown.is_set();
}