g1CollectedHeap.cpp
/*
* Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "compiler/oopMap.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectionSetCandidates.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp"
#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1FullCollector.hpp"
#include "gc/g1/g1GCParPhaseTimesTracker.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1GCPauseType.hpp"
#include "gc/g1/g1HeapSizingPolicy.hpp"
#include "gc/g1/g1HeapTransition.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1InitLogger.hpp"
#include "gc/g1/g1MemoryPool.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParallelCleaning.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1PeriodicGCTask.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RedirtyCardsQueue.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1RootProcessor.hpp"
#include "gc/g1/g1SATBMarkQueueSet.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/g1Trace.hpp"
#include "gc/g1/g1ServiceThread.hpp"
#include "gc/g1/g1UncommitRegionTask.hpp"
#include "gc/g1/g1VMOperations.hpp"
#include "gc/g1/g1YoungGCPostEvacuateTasks.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/concurrentGCBreakpoints.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/oopStorageParState.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/referenceProcessor.inline.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
#include "memory/heapInspection.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/autoRestore.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"
size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
// INVARIANTS/NOTES
//
// All allocation activity covered by the G1CollectedHeap interface is
// serialized by acquiring the HeapLock. This happens in mem_allocate
// and allocate_new_tlab, which are the "entry" points to the
// allocation code from the rest of the JVM. (Note that this does not
// apply to TLAB allocation, which is not part of this interface: it
// is done by clients of this interface.)
void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
}
void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
// The from card cache is not the memory that is actually committed. So we cannot
// take advantage of the zero_filled parameter.
reset_from_card_cache(start_idx, num_regions);
}
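// Run the given task on all active workers and return the elapsed wall-clock time.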
Tickspan G1CollectedHeap::run_task_timed(AbstractGangTask* task) {
Ticks start = Ticks::now();
workers()->run_task(task);
return Ticks::now() - start;
}
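// Run a batched gang task with a worker count clamped to the task's own
// estimate and the number of currently active workers (but at least one).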
void G1CollectedHeap::run_batch_task(G1BatchedGangTask* cl) {
uint num_workers = MAX2(1u, MIN2(cl->num_workers_estimate(), workers()->active_workers()));
cl->set_max_workers(num_workers);
workers()->run_task(cl, num_workers);
}
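// Construct the HeapRegion object for the given region index and memory range,
// wiring it up to the heap's block offset table and card set configuration.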
HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
MemRegion mr) {
return new HeapRegion(hrs_index, bot(), mr, &_card_set_config);
}
// Private methods.
HeapRegion* G1CollectedHeap::new_region(size_t word_size,
HeapRegionType type,
bool do_expand,
uint node_index) {
assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
"the only time we use this to allocate a humongous region is "
"when we are allocating a single humongous region");
HeapRegion* res = _hrm.allocate_free_region(type, node_index);
if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
// Currently, only attempts to allocate GC alloc regions set
// do_expand to true. So, we should only reach here during a
// safepoint. If this assumption changes we might have to
// reconsider the use of _expand_heap_after_alloc_failure.
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
log_debug(gc, ergo, heap)("Attempt heap expansion (region allocation request failed). Allocation request: " SIZE_FORMAT "B",
word_size * HeapWordSize);
assert(word_size * HeapWordSize < HeapRegion::GrainBytes,
"This kind of expansion should never be more than one region. Size: " SIZE_FORMAT,
word_size * HeapWordSize);
if (expand_single_region(node_index)) {
// Given that expand_single_region() succeeded in expanding the heap, and we
// always expand the heap by an amount aligned to the heap
// region size, the free list should in theory not be empty.
// In either case allocate_free_region() will check for NULL.
res = _hrm.allocate_free_region(type, node_index);
} else {
_expand_heap_after_alloc_failure = false;
}
}
return res;
}
HeapWord*
G1CollectedHeap::humongous_obj_allocate_initialize_regions(HeapRegion* first_hr,
uint num_regions,
size_t word_size) {
assert(first_hr != NULL, "pre-condition");
assert(is_humongous(word_size), "word_size should be humongous");
assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
// Indexes of the first and last regions in the series.
uint first = first_hr->hrm_index();
uint last = first + num_regions - 1;
// We need to initialize the region(s) we just discovered. This is
// a bit tricky given that it can happen concurrently with
// refinement threads refining cards on these regions and
// potentially wanting to refine the BOT as they are scanning
// those cards (this can happen shortly after a cleanup; see CR
// 6991377). So we have to set up the region(s) carefully and in
// a specific order.
// The word size sum of all the regions we will allocate.
size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
assert(word_size <= word_size_sum, "sanity");
// The passed in hr will be the "starts humongous" region. The header
// of the new object will be placed at the bottom of this region.
HeapWord* new_obj = first_hr->bottom();
// This will be the new top of the new object.
HeapWord* obj_top = new_obj + word_size;
// First, we need to zero the header of the space that we will be
// allocating. When we update top further down, some refinement
// threads might try to scan the region. By zeroing the header we
// ensure that any thread that will try to scan the region will
// come across the zero klass word and bail out.
//
// NOTE: It would not have been correct to have used
// CollectedHeap::fill_with_object() and make the space look like
// an int array. The thread that is doing the allocation will
// later update the object header to a potentially different array
// type and, for a very short period of time, the klass and length
// fields will be inconsistent. This could cause a refinement
// thread to calculate the object size incorrectly.
Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
// Next, pad out the unused tail of the last region with filler
// objects, for improved usage accounting.
// How many words we use for filler objects.
size_t word_fill_size = word_size_sum - word_size;
// How many words of memory we "waste" because the remaining space is too small
// to hold a filler object.
size_t words_not_fillable = 0;
if (word_fill_size >= min_fill_size()) {
fill_with_objects(obj_top, word_fill_size);
} else if (word_fill_size > 0) {
// We have space to fill, but we cannot fit an object there.
words_not_fillable = word_fill_size;
word_fill_size = 0;
}
// We will set up the first region as "starts humongous". This
// will also update the BOT covering all the regions to reflect
// that there is a single object that starts at the bottom of the
// first region.
first_hr->set_starts_humongous(obj_top, word_fill_size);
_policy->remset_tracker()->update_at_allocate(first_hr);
// Then, if there are any, we will set up the "continues
// humongous" regions.
HeapRegion* hr = NULL;
for (uint i = first + 1; i <= last; ++i) {
hr = region_at(i);
hr->set_continues_humongous(first_hr);
_policy->remset_tracker()->update_at_allocate(hr);
}
// Up to this point no concurrent thread would have been able to
// do any scanning on any region in this series. All the top
// fields still point to bottom, so the intersection between
// [bottom,top] and [card_start,card_end] will be empty. Before we
// update the top fields, we'll do a storestore to make sure that
// no thread sees the update to top before the zeroing of the
// object header and the BOT initialization.
OrderAccess::storestore();
// Now, we will update the top fields of the "continues humongous"
// regions except the last one.
for (uint i = first; i < last; ++i) {
hr = region_at(i);
hr->set_top(hr->end());
}
hr = region_at(last);
// If we cannot fit a filler object, we must set top to the end
// of the humongous object, otherwise we cannot iterate the heap
// and the BOT will not be complete.
hr->set_top(hr->end() - words_not_fillable);
assert(hr->bottom() < obj_top && obj_top <= hr->end(),
"obj_top should be in last region");
_verifier->check_bitmaps("Humongous Region Allocation", first_hr);
assert(words_not_fillable == 0 ||
first_hr->bottom() + word_size_sum - words_not_fillable == hr->top(),
"Miscalculation in humongous allocation");
increase_used((word_size_sum - words_not_fillable) * HeapWordSize);
for (uint i = first; i <= last; ++i) {
hr = region_at(i);
_humongous_set.add(hr);
_hr_printer.alloc(hr);
}
return new_obj;
}
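// Number of regions needed to hold a humongous object of the given word size.
// For example, assuming a 1 MB region size (GrainWords == 131072 with 8-byte
// heap words), a 150000-word (~1.2 MB) object rounds up to two regions.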
size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) {
assert(is_humongous(word_size), "Object of size " SIZE_FORMAT " must be humongous here", word_size);
return align_up(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
}
// Humongous allocation policy: first try to satisfy the request from the
// currently free regions without expanding the heap; otherwise try to expand
// the heap; if that also fails, the caller may trigger a (defragmentation) GC.
HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
_verifier->verify_region_sets_optional();
uint obj_regions = (uint) humongous_obj_size_in_regions(word_size);
// Policy: First try to allocate a humongous object in the free list.
HeapRegion* humongous_start = _hrm.allocate_humongous(obj_regions);
if (humongous_start == NULL) {
// Policy: We could not find enough regions for the humongous object in the
// free list. Look through the heap to find a mix of free and uncommitted regions.
// If so, expand the heap and allocate the humongous object.
humongous_start = _hrm.expand_and_allocate_humongous(obj_regions);
if (humongous_start != NULL) {
// We managed to find a region by expanding the heap.
log_debug(gc, ergo, heap)("Heap expansion (humongous allocation request). Allocation request: " SIZE_FORMAT "B",
word_size * HeapWordSize);
policy()->record_new_heap_size(num_regions());
} else {
// Policy: Potentially trigger a defragmentation GC.
}
}
HeapWord* result = NULL;
if (humongous_start != NULL) {
result = humongous_obj_allocate_initialize_regions(humongous_start, obj_regions, word_size);
assert(result != NULL, "it should always return a valid result");
// A successful humongous object allocation changes the used space
// information of the old generation so we need to recalculate the
// sizes and update the jstat counters here.
g1mm()->update_sizes();
}
_verifier->verify_region_sets_optional();
return result;
}
HeapWord* G1CollectedHeap::allocate_new_tlab(size_t min_size,
size_t requested_size,
size_t* actual_size) {
assert_heap_not_locked_and_not_at_safepoint();
assert(!is_humongous(requested_size), "we do not allow humongous TLABs");
return attempt_allocation(min_size, requested_size, actual_size);
}
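// General mutator allocation entry point. Humongous requests are routed to the
// dedicated humongous path; everything else goes through attempt_allocation().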
HeapWord*
G1CollectedHeap::mem_allocate(size_t word_size,
bool* gc_overhead_limit_was_exceeded) {
assert_heap_not_locked_and_not_at_safepoint();
if (is_humongous(word_size)) {
return attempt_allocation_humongous(word_size);
}
size_t dummy = 0;
return attempt_allocation(word_size, word_size, &dummy);
}
HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
ResourceMark rm; // For retrieving the thread names in log messages.
// Make sure you read the note in attempt_allocation_humongous().
assert_heap_not_locked_and_not_at_safepoint();
assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
"be called for humongous allocation requests");
// We should only get here after the first-level allocation attempt
// (attempt_allocation()) failed to allocate.
// We will loop until a) we manage to successfully perform the
// allocation or b) we successfully schedule a collection which
// fails to perform the allocation. b) is the only case when we'll
// return NULL.
HeapWord* result = NULL;
for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
bool should_try_gc;
bool preventive_collection_required = false;
uint gc_count_before;
{
MutexLocker x(Heap_lock);
// Now that we have the lock, we first retry the allocation in case another
// thread changed the region while we were waiting to acquire the lock.
size_t actual_size;
result = _allocator->attempt_allocation(word_size, word_size, &actual_size);
if (result != NULL) {
return result;
}
preventive_collection_required = policy()->preventive_collection_required(1);
if (!preventive_collection_required) {
// We've already attempted a lock-free allocation above, so we don't want to
// do it again. Let's jump straight to replacing the active region.
result = _allocator->attempt_allocation_using_new_region(word_size);
if (result != NULL) {
return result;
}
// If the GCLocker is active and we are bound for a GC, try expanding young gen.
// This is different from the case where only GCLocker::needs_gc() is set: here we
// try to avoid a potentially long wait for the GCLocker-initiated GC by expanding
// the young generation instead.
if (GCLocker::is_active_and_needs_gc() && policy()->can_expand_young_list()) {
// No need for an ergo message here, can_expand_young_list() does this when
// it returns true.
result = _allocator->attempt_allocation_force(word_size);
if (result != NULL) {
return result;
}
}
}
// Only try a GC if the GCLocker does not signal the need for a GC. Otherwise wait
// until the GCLocker-initiated GC has been performed and then retry. This includes
// the case where the GCLocker is no longer active but its requested GC has not yet
// been performed.
should_try_gc = !GCLocker::needs_gc();
// Read the GC count while still holding the Heap_lock.
gc_count_before = total_collections();
}
if (should_try_gc) {
GCCause::Cause gc_cause = preventive_collection_required ? GCCause::_g1_preventive_collection
: GCCause::_g1_inc_collection_pause;
bool succeeded;
result = do_collection_pause(word_size, gc_count_before, &succeeded, gc_cause);
if (result != NULL) {
assert(succeeded, "only way to get back a non-NULL result");
log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
Thread::current()->name(), p2i(result));
return result;
}
if (succeeded) {
// We successfully scheduled a collection which failed to allocate. No
// point in trying to allocate further. We'll just return NULL.
log_trace(gc, alloc)("%s: Successfully scheduled collection failing to allocate "
SIZE_FORMAT " words", Thread::current()->name(), word_size);
return NULL;
}
log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT " words",
Thread::current()->name(), word_size);
} else {
// Failed to schedule a collection.
if (gclocker_retry_count > GCLockerRetryAllocationCount) {
log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating "
SIZE_FORMAT " words", Thread::current()->name(), word_size);
return NULL;
}
log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name());
// The GCLocker is either active or the GCLocker initiated
// GC has not yet been performed. Stall until it is and
// then retry the allocation.
GCLocker::stall_until_clear();
gclocker_retry_count += 1;
}
// We can reach here if we were unsuccessful in scheduling a
// collection (because another thread beat us to it) or if we were
// stalled due to the GC locker. In either case we should retry the
// allocation attempt in case another thread successfully
// performed a collection and reclaimed enough space. We do the
// first attempt (without holding the Heap_lock) here and the
// follow-on attempt will be at the start of the next loop
// iteration (after taking the Heap_lock).
size_t dummy = 0;
result = _allocator->attempt_allocation(word_size, word_size, &dummy);
if (result != NULL) {
return result;
}
// Give a warning if we seem to be looping forever.
if ((QueuedAllocationWarningCount > 0) &&
(try_count % QueuedAllocationWarningCount == 0)) {
log_warning(gc, alloc)("%s: Retried allocation %u times for " SIZE_FORMAT " words",
Thread::current()->name(), try_count, word_size);
}
}
ShouldNotReachHere();
return NULL;
}
void G1CollectedHeap::begin_archive_alloc_range(bool open) {
assert_at_safepoint_on_vm_thread();
if (_archive_allocator == NULL) {
_archive_allocator = G1ArchiveAllocator::create_allocator(this, open);
}
}
bool G1CollectedHeap::is_archive_alloc_too_large(size_t word_size) {
// Allocations in archive regions cannot be of a size that would be considered
// humongous even for a minimum-sized region, because G1 region sizes/boundaries
// may be different at archive-restore time.
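// For example, assuming a 1 MB minimum region size and the usual half-region
// humongous threshold, requests of 65536 words (512 KB) or more are rejected here.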
return word_size >= humongous_threshold_for(HeapRegion::min_region_size_in_words());
}
HeapWord* G1CollectedHeap::archive_mem_allocate(size_t word_size) {
assert_at_safepoint_on_vm_thread();
assert(_archive_allocator != NULL, "_archive_allocator not initialized");
if (is_archive_alloc_too_large(word_size)) {
return NULL;
}
return _archive_allocator->archive_mem_allocate(word_size);
}
void G1CollectedHeap::end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
size_t end_alignment_in_bytes) {
assert_at_safepoint_on_vm_thread();
assert(_archive_allocator != NULL, "_archive_allocator not initialized");
// Call complete_archive to do the real work, filling in the MemRegion
// array with the archive regions.
_archive_allocator->complete_archive(ranges, end_alignment_in_bytes);
delete _archive_allocator;
_archive_allocator = NULL;
}
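// Returns true iff every given MemRegion lies entirely within the reserved heap.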
bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided");
MemRegion reserved = _hrm.reserved();
for (size_t i = 0; i < count; i++) {
if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {
return false;
}
}
return true;
}
bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
size_t count,
bool open) {
assert(!is_init_completed(), "Expect to be called at JVM init time");
assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided");
MutexLocker x(Heap_lock);
MemRegion reserved = _hrm.reserved();
HeapWord* prev_last_addr = NULL;
HeapRegion* prev_last_region = NULL;
// Temporarily disable pretouching of heap pages. This interface is used
// when mmap'ing archived heap data in, so pre-touching is wasted.
FlagSetting fs(AlwaysPreTouch, false);
// For each specified MemRegion range, allocate the corresponding G1
// regions and mark them as archive regions. We expect the ranges
// in ascending starting address order, without overlap.
for (size_t i = 0; i < count; i++) {
MemRegion curr_range = ranges[i];
HeapWord* start_address = curr_range.start();
size_t word_size = curr_range.word_size();
HeapWord* last_address = curr_range.last();
size_t commits = 0;
guarantee(reserved.contains(start_address) && reserved.contains(last_address),
"MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
p2i(start_address), p2i(last_address));
guarantee(start_address > prev_last_addr,
"Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
p2i(start_address), p2i(prev_last_addr));
prev_last_addr = last_address;
// Check for ranges that start in the same G1 region in which the previous
// range ended, and adjust the start address so we don't try to allocate
// the same region again. If the current range is entirely within that
// region, skip it, just adjusting the recorded top.
HeapRegion* start_region = _hrm.addr_to_region(start_address);
if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
start_address = start_region->end();
if (start_address > last_address) {
increase_used(word_size * HeapWordSize);
start_region->set_top(last_address + 1);
continue;
}
start_region->set_top(start_address);
curr_range = MemRegion(start_address, last_address + 1);
start_region = _hrm.addr_to_region(start_address);
}
// Perform the actual region allocation, exiting if it fails.
// Then note how much new space we have allocated.
if (!_hrm.allocate_containing_regions(curr_range, &commits, workers())) {
return false;
}
increase_used(word_size * HeapWordSize);
if (commits != 0) {
log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",
HeapRegion::GrainWords * HeapWordSize * commits);
}
// Mark each G1 region touched by the range as archive, add it to
// the old set, and set top.
HeapRegion* curr_region = _hrm.addr_to_region(start_address);
HeapRegion* last_region = _hrm.addr_to_region(last_address);
prev_last_region = last_region;
while (curr_region != NULL) {
assert(curr_region->is_empty() && !curr_region->is_pinned(),
"Region already in use (index %u)", curr_region->hrm_index());
if (open) {
curr_region->set_open_archive();
} else {
curr_region->set_closed_archive();
}
_hr_printer.alloc(curr_region);
_archive_set.add(curr_region);
HeapWord* top;
HeapRegion* next_region;
if (curr_region != last_region) {
top = curr_region->end();
next_region = _hrm.next_region_in_heap(curr_region);
} else {
top = last_address + 1;
next_region = NULL;
}
curr_region->set_top(top);
curr_region = next_region;
}
}
return true;
}
void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
assert(!is_init_completed(), "Expect to be called at JVM init time");
assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided");
MemRegion reserved = _hrm.reserved();
HeapWord *prev_last_addr = NULL;
HeapRegion* prev_last_region = NULL;
// For each MemRegion, create filler objects, if needed, in the G1 regions
// that contain the address range. The address range actually within the
// MemRegion will not be modified. That is assumed to have been initialized
// elsewhere, probably via an mmap of archived heap data.
MutexLocker x(Heap_lock);
for (size_t i = 0; i < count; i++) {
HeapWord* start_address = ranges[i].start();
HeapWord* last_address = ranges[i].last();
assert(reserved.contains(start_address) && reserved.contains(last_address),
"MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
p2i(start_address), p2i(last_address));
assert(start_address > prev_last_addr,
"Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
p2i(start_address), p2i(prev_last_addr));
HeapRegion* start_region = _hrm.addr_to_region(start_address);
HeapRegion* last_region = _hrm.addr_to_region(last_address);
HeapWord* bottom_address = start_region->bottom();
// Check for a range beginning in the same region in which the
// previous one ended.
if (start_region == prev_last_region) {
bottom_address = prev_last_addr + 1;
}
// Verify that the regions were all marked as archive regions by
// alloc_archive_regions.
HeapRegion* curr_region = start_region;
while (curr_region != NULL) {
guarantee(curr_region->is_archive(),
"Expected archive region at index %u", curr_region->hrm_index());
if (curr_region != last_region) {
curr_region = _hrm.next_region_in_heap(curr_region);
} else {
curr_region = NULL;
}
}
prev_last_addr = last_address;
prev_last_region = last_region;
// Fill the memory below the allocated range with dummy object(s),
// if the region bottom does not match the range start, or if the previous
// range ended within the same G1 region, and there is a gap.
assert(start_address >= bottom_address, "bottom address should not be greater than start address");
if (start_address > bottom_address) {
size_t fill_size = pointer_delta(start_address, bottom_address);
G1CollectedHeap::fill_with_objects(bottom_address, fill_size);
increase_used(fill_size * HeapWordSize);
}
}
}
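// First-level allocation attempt: a lock-free allocation from the current
// mutator region, falling back to attempt_allocation_slow() on failure. On
// success, the newly allocated young block is dirtied via dirty_young_block().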
inline HeapWord* G1CollectedHeap::attempt_allocation(size_t min_word_size,
size_t desired_word_size,
size_t* actual_word_size) {
assert_heap_not_locked_and_not_at_safepoint();
assert(!is_humongous(desired_word_size), "attempt_allocation() should not "
"be called for humongous allocation requests");
HeapWord* result = _allocator->attempt_allocation(min_word_size, desired_word_size, actual_word_size);
if (result == NULL) {
*actual_word_size = desired_word_size;
result = attempt_allocation_slow(desired_word_size);
}
assert_heap_not_locked();
if (result != NULL) {
assert(*actual_word_size != 0, "Actual size must have been set here");
dirty_young_block(result, *actual_word_size);
} else {
*actual_word_size = 0;
}
return result;
}
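// Update the block offset table for every region from the first range's start
// region through the last range's end region.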
void G1CollectedHeap::populate_archive_regions_bot_part(MemRegion* ranges, size_t count) {
assert(!is_init_completed(), "Expect to be called at JVM init time");
assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided");
HeapWord* st = ranges[0].start();
HeapWord* last = ranges[count-1].last();
HeapRegion* hr_st = _hrm.addr_to_region(st);
HeapRegion* hr_last = _hrm.addr_to_region(last);
HeapRegion* hr_curr = hr_st;
while (hr_curr != NULL) {
hr_curr->update_bot();
if (hr_curr != hr_last) {
hr_curr = _hrm.next_region_in_heap(hr_curr);
} else {
hr_curr = NULL;
}
}
}
void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
assert(!is_init_completed(), "Expect to be called at JVM init time");
assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided");
MemRegion reserved = _hrm.reserved();
HeapWord* prev_last_addr = NULL;
HeapRegion* prev_last_region = NULL;
size_t size_used = 0;
uint shrink_count = 0;
// For each MemRegion, free the G1 regions that constitute it, and
// notify mark-sweep that the range is no longer to be considered 'archive.'
MutexLocker x(Heap_lock);
for (size_t i = 0; i < count; i++) {
HeapWord* start_address = ranges[i].start();
HeapWord* last_address = ranges[i].last();
assert(reserved.contains(start_address) && reserved.contains(last_address),
"MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
p2i(start_address), p2i(last_address));
assert(start_address > prev_last_addr,
"Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
p2i(start_address), p2i(prev_last_addr));
size_used += ranges[i].byte_size();
prev_last_addr = last_address;
HeapRegion* start_region = _hrm.addr_to_region(start_address);
HeapRegion* last_region = _hrm.addr_to_region(last_address);
// Check for ranges that start in the same G1 region in which the previous
// range ended, and adjust the start address so we don't try to free
// the same region again. If the current range is entirely within that
// region, skip it.
if (start_region == prev_last_region) {
start_address = start_region->end();
if (start_address > last_address) {
continue;
}
start_region = _hrm.addr_to_region(start_address);
}
prev_last_region = last_region;
// After verifying that each region was marked as an archive region by
// alloc_archive_regions, set it free and empty and uncommit it.
HeapRegion* curr_region = start_region;
while (curr_region != NULL) {
guarantee(curr_region->is_archive(),
"Expected archive region at index %u", curr_region->hrm_index());
uint curr_index = curr_region->hrm_index();
_archive_set.remove(curr_region);
curr_region->set_free();
curr_region->set_top(curr_region->bottom());
if (curr_region != last_region) {
curr_region = _hrm.next_region_in_heap(curr_region);
} else {
curr_region = NULL;
}
_hrm.shrink_at(curr_index, 1);
shrink_count++;
}
}
if (shrink_count != 0) {
log_debug(gc, ergo, heap)("Attempt heap shrinking (archive regions). Total size: " SIZE_FORMAT "B",
HeapRegion::GrainWords * HeapWordSize * shrink_count);
// Explicit uncommit.
uncommit_regions(shrink_count);
}
decrease_used(size_used);
}
HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
ResourceMark rm; // For retrieving the thread names in log messages.
// The structure of this method has a lot of similarities to
// attempt_allocation_slow(). The reason these two were not merged
// into a single one is that such a method would require several "if
// allocation is not humongous do this, otherwise do that"
// conditional paths which would obscure its flow. In fact, an early
// version of this code did use a unified method which was harder to
// follow and, as a result, it had subtle bugs that were hard to
// track down. So keeping these two methods separate allows each to
// be more readable. It will be good to keep these two in sync as
// much as possible.
assert_heap_not_locked_and_not_at_safepoint();
assert(is_humongous(word_size), "attempt_allocation_humongous() "
"should only be called for humongous allocations");
// Humongous objects can exhaust the heap quickly, so we should check if we
// need to start a marking cycle at each humongous object allocation. We do
// the check before we do the actual allocation. The reason for doing it
// before the allocation is that we avoid having to keep track of the newly
// allocated memory while we do a GC.
if (policy()->need_to_start_conc_mark("concurrent humongous allocation",
word_size)) {
collect(GCCause::_g1_humongous_allocation);
}
// We will loop until a) we manage to successfully perform the
// allocation or b) we successfully schedule a collection which
// fails to perform the allocation. b) is the only case when we'll
// return NULL.
HeapWord* result = NULL;
for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
bool should_try_gc;
bool preventive_collection_required = false;
uint gc_count_before;
{
MutexLocker x(Heap_lock);
size_t size_in_regions = humongous_obj_size_in_regions(word_size);
preventive_collection_required = policy()->preventive_collection_required((uint)size_in_regions);
if (!preventive_collection_required) {
// Given that humongous objects are not allocated in young
// regions, we'll first try to do the allocation without doing a
// collection hoping that there's enough space in the heap.
result = humongous_obj_allocate(word_size);
if (result != NULL) {
policy()->old_gen_alloc_tracker()->
add_allocated_humongous_bytes_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
return result;
}
}
// Only try a GC if the GCLocker does not signal the need for a GC. Otherwise wait
// until the GCLocker-initiated GC has been performed and then retry. This includes
// the case where the GCLocker is no longer active but its requested GC has not yet
// been performed.
should_try_gc = !GCLocker::needs_gc();
// Read the GC count while still holding the Heap_lock.
gc_count_before = total_collections();
}
if (should_try_gc) {
GCCause::Cause gc_cause = preventive_collection_required ? GCCause::_g1_preventive_collection
: GCCause::_g1_humongous_allocation;
bool succeeded;
result = do_collection_pause(word_size, gc_count_before, &succeeded, gc_cause);
if (result != NULL) {
assert(succeeded, "only way to get back a non-NULL result");
log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
Thread::current()->name(), p2i(result));
size_t size_in_regions = humongous_obj_size_in_regions(word_size);
policy()->old_gen_alloc_tracker()->
record_collection_pause_humongous_allocation(size_in_regions * HeapRegion::GrainBytes);
return result;
}
if (succeeded) {
// We successfully scheduled a collection which failed to allocate. No
// point in trying to allocate further. We'll just return NULL.
log_trace(gc, alloc)("%s: Successfully scheduled collection failing to allocate "
SIZE_FORMAT " words", Thread::current()->name(), word_size);
return NULL;
}
log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT "",
Thread::current()->name(), word_size);
} else {
// Failed to schedule a collection.
if (gclocker_retry_count > GCLockerRetryAllocationCount) {
log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating "
SIZE_FORMAT " words", Thread::current()->name(), word_size);
return NULL;
}
log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name());
// The GCLocker is either active or the GCLocker initiated
// GC has not yet been performed. Stall until it is and
// then retry the allocation.
GCLocker::stall_until_clear();
gclocker_retry_count += 1;
}
// We can reach here if we were unsuccessful in scheduling a
// collection (because another thread beat us to it) or if we were
// stalled due to the GC locker. In either case we should retry the
// allocation attempt in case another thread successfully
// performed a collection and reclaimed enough space.
// Humongous object allocation always needs a lock, so we wait for the retry
// in the next iteration of the loop, unlike for the regular iteration case.
// Give a warning if we seem to be looping forever.
if ((QueuedAllocationWarningCount > 0) &&
(try_count % QueuedAllocationWarningCount == 0)) {
log_warning(gc, alloc)("%s: Retried allocation %u times for " SIZE_FORMAT " words",
Thread::current()->name(), try_count, word_size);
}
}
ShouldNotReachHere();
return NULL;
}
HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
bool expect_null_mutator_alloc_region) {
assert_at_safepoint_on_vm_thread();
assert(!_allocator->has_mutator_alloc_region() || !expect_null_mutator_alloc_region,
"the current alloc region was unexpectedly found to be non-NULL");
if (!is_humongous(word_size)) {
return _allocator->attempt_allocation_locked(word_size);
} else {
HeapWord* result = humongous_obj_allocate(word_size);
if (result != NULL && policy()->need_to_start_conc_mark("STW humongous allocation")) {
collector_state()->set_initiate_conc_mark_if_possible(true);
}
return result;
}
ShouldNotReachHere();
}
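// Closure used by print_hrm_post_compaction() to print each (non-young) region
// after a full-heap compaction.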
class PostCompactionPrinterClosure: public HeapRegionClosure {
private:
G1HRPrinter* _hr_printer;
public:
bool do_heap_region(HeapRegion* hr) {
assert(!hr->is_young(), "not expecting to find young regions");
_hr_printer->post_compaction(hr);
return false;
}
PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
: _hr_printer(hr_printer) { }
};
void G1CollectedHeap::print_hrm_post_compaction() {
if (_hr_printer.is_active()) {
PostCompactionPrinterClosure cl(hr_printer());
heap_region_iterate(&cl);
}
}
void G1CollectedHeap::abort_concurrent_cycle() {
// If we start the compaction before the CM threads finish
// scanning the root regions we might trip them over as we'll
// be moving objects / updating references. So let's wait until
// they are done. By telling them to abort, they should complete
// early.
_cm->root_regions()->abort();
_cm->root_regions()->wait_until_scan_finished();
// Disable discovery and empty the discovered lists
// for the CM ref processor.