/*******************************************************************************
* Copyright (c) 1991, 2021 IBM Corp. and others
*
* This program and the accompanying materials are made available under
* the terms of the Eclipse Public License 2.0 which accompanies this
* distribution and is available at https://www.eclipse.org/legal/epl-2.0/
* or the Apache License, Version 2.0 which accompanies this distribution and
* is available at https://www.apache.org/licenses/LICENSE-2.0.
*
* This Source Code may also be made available under the following
* Secondary Licenses when the conditions for such availability set
* forth in the Eclipse Public License, v. 2.0 are satisfied: GNU
* General Public License, version 2 with the GNU Classpath
* Exception [1] and GNU General Public License, version 2 with the
* OpenJDK Assembly Exception [2].
*
* [1] https://www.gnu.org/software/classpath/license.html
* [2] http://openjdk.java.net/legal/assembly-exception.html
*
* SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 OR GPL-2.0 WITH Classpath-exception-2.0 OR LicenseRef-GPL-2.0 WITH Assembly-exception
*******************************************************************************/
#if !defined(GCEXTENSIONSBASE_HPP_)
#define GCEXTENSIONSBASE_HPP_
#include "omrcfg.h"
#include "omrcomp.h"
#include "mmomrhook_internal.h"
#include "mmprivatehook_internal.h"
#include "modronbase.h"
#include "omr.h"
#include "AllocationStats.hpp"
#include "ArrayObjectModel.hpp"
#include "BaseVirtual.hpp"
#include "ExcessiveGCStats.hpp"
#include "Forge.hpp"
#include "GlobalGCStats.hpp"
#include "GlobalVLHGCStats.hpp"
#include "LargeObjectAllocateStats.hpp"
#include "MemoryHandle.hpp"
#include "MixedObjectModel.hpp"
#include "NUMAManager.hpp"
#include "OMRVMThreadListIterator.hpp"
#include "ObjectModel.hpp"
#include "ScavengerCopyScanRatio.hpp"
#include "ScavengerStats.hpp"
#include "SublistPool.hpp"
class MM_CardTable;
class MM_ClassLoaderRememberedSet;
class MM_CollectorLanguageInterface;
class MM_CompactGroupPersistentStats;
class MM_CompressedCardTable;
class MM_Configuration;
class MM_EnvironmentBase;
class MM_FrequentObjectsStats;
class MM_GlobalAllocationManager;
class MM_GlobalCollector;
class MM_Heap;
class MM_HeapMap;
class MM_HeapRegionManager;
class MM_InterRegionRememberedSet;
class MM_MemoryManager;
class MM_MemorySubSpace;
class MM_ParallelDispatcher;
#if defined(OMR_GC_OBJECT_MAP)
class MM_ObjectMap;
#endif /* defined(OMR_GC_OBJECT_MAP) */
class MM_ReferenceChainWalkerMarkMap;
class MM_RememberedSetCardBucket;
#if defined(OMR_GC_REALTIME)
class MM_RememberedSetSATB;
#endif /* defined(OMR_GC_REALTIME) */
#if defined(OMR_GC_MODRON_SCAVENGER)
class MM_Scavenger;
#endif /* OMR_GC_MODRON_SCAVENGER */
class MM_SizeClasses;
class MM_SweepHeapSectioning;
class MM_SweepPoolManager;
class MM_SweepPoolManagerAddressOrderedList;
class MM_SweepPoolManagerAddressOrderedListBase;
class MM_RealtimeGC;
class MM_VerboseManagerBase;
struct J9Pool;
namespace OMR {
namespace GC {
#if defined(OMR_GC_VLHGC_CONCURRENT_COPY_FORWARD)
class HeapRegionStateTable;
#endif /* defined(OMR_GC_VLHGC_CONCURRENT_COPY_FORWARD) */
} // namespace GC
} // namespace OMR
#if defined(OMR_ENV_DATA64)
#define MINIMUM_TLH_SIZE 768
#else
#define MINIMUM_TLH_SIZE 512
#endif /* defined(OMR_ENV_DATA64) */
/* The amount of work (array elements) to split when splitting array scanning. */
#define DEFAULT_ARRAY_SPLIT_MINIMUM_SIZE 512
#define DEFAULT_ARRAY_SPLIT_MAXIMUM_SIZE 16384
#define DEFAULT_SCAN_CACHE_MAXIMUM_SIZE (128 * 1024)
#define DEFAULT_SCAN_CACHE_MINIMUM_SIZE (8 * 1024)
#define NO_ESTIMATE_FRAGMENTATION 0x0
#define LOCALGC_ESTIMATE_FRAGMENTATION 0x1
#define GLOBALGC_ESTIMATE_FRAGMENTATION 0x2
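/*
 * Illustrative note (added for clarity, not in the original source): these values
 * populate the estimateFragmentation field declared in MM_GCExtensionsBase below.
 * The local and global values are distinct bits, so a hypothetical options parser
 * could, for example, request both:
 *
 *   extensions->estimateFragmentation = LOCALGC_ESTIMATE_FRAGMENTATION | GLOBALGC_ESTIMATE_FRAGMENTATION;
 */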
enum ExcessiveLevel {
excessive_gc_normal = 0,
excessive_gc_aggressive,
excessive_gc_fatal,
excessive_gc_fatal_consumed
};
enum BackOutState {
backOutFlagCleared, /* Normal state, no backout pending or in progress */
backOutFlagRaised, /* Backout pending */
backOutStarted /* Backout started */
};
/* Note: These should be templates if DDR ever supports them (JAZZ 40487) */
class MM_UserSpecifiedParameterUDATA {
/* Data Members */
private:
protected:
public:
bool _wasSpecified; /**< True if this parameter was specified by the user, false means it is undefined */
uintptr_t _valueSpecified; /**< The value specified by the user; undefined if _wasSpecified is false */
/* Member Functions */
private:
protected:
public:
MM_UserSpecifiedParameterUDATA()
: _wasSpecified(false)
, _valueSpecified(0)
{
}
};
class MM_UserSpecifiedParameterBool {
/* Data Members */
private:
protected:
public:
bool _wasSpecified; /**< True if this parameter was specified by the user, false means it is undefined */
bool _valueSpecified; /**< The value specified by the user; undefined if _wasSpecified is false */
/* Member Functions */
private:
protected:
public:
MM_UserSpecifiedParameterBool()
: _wasSpecified(false)
, _valueSpecified(false)
{
}
};
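/*
 * Illustrative sketch (added for clarity; the names below are hypothetical and not
 * part of this header): option-parsing code would record a user-supplied value
 * through these wrappers, leaving _wasSpecified false for parameters the user
 * never set:
 *
 *   MM_UserSpecifiedParameterUDATA param;
 *   param._wasSpecified = true;
 *   param._valueSpecified = parsedValue;
 */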
class MM_ConfigurationOptions : public MM_BaseNonVirtual
{
private:
public:
MM_GCPolicy _gcPolicy; /**< gc policy (default or configured) */
bool _forceOptionScavenge; /**< true if Scavenge option is forced in command line */
bool _forceOptionConcurrentMark; /**< true if Concurrent Mark option is forced in command line */
bool _forceOptionConcurrentSweep; /**< true if Concurrent Sweep option is forced in command line */
bool _forceOptionLargeObjectArea; /**< true if Large Object Area option is forced in command line */
bool _forceOptionWriteBarrierSATB; /**< Set with -Xgc:snapshotAtTheBeginningBarrier */
MM_ConfigurationOptions()
: MM_BaseNonVirtual()
, _gcPolicy(gc_policy_undefined)
, _forceOptionScavenge(false)
, _forceOptionConcurrentMark(false)
, _forceOptionConcurrentSweep(false)
, _forceOptionLargeObjectArea(false)
, _forceOptionWriteBarrierSATB(false)
{
_typeId = __FUNCTION__;
}
};
class MM_GCExtensionsBase : public MM_BaseVirtual {
/* Data Members */
private:
#if defined(OMR_GC_COMPRESSED_POINTERS) && defined(OMR_GC_FULL_POINTERS) && !defined(OMR_OVERRIDE_COMPRESS_OBJECT_REFERENCES)
bool _compressObjectReferences;
#endif /* defined(OMR_GC_COMPRESSED_POINTERS) && defined(OMR_GC_FULL_POINTERS) && !defined(OMR_OVERRIDE_COMPRESS_OBJECT_REFERENCES) */
#if defined(OMR_GC_MODRON_SCAVENGER)
void* _guaranteedNurseryStart; /**< lowest address guaranteed to be in the nursery */
void* _guaranteedNurseryEnd; /**< highest address guaranteed to be in the nursery */
bool _isRememberedSetInOverflow;
volatile BackOutState _backOutState; /**< set if a thread is unable to copy an object due to lack of free space in both Survivor and Tenure */
volatile bool _concurrentGlobalGCInProgress; /**< set to true if concurrent Global GC is in progress */
#if defined(OMR_GC_CONCURRENT_SCAVENGER)
bool debugConcurrentScavengerPageAlignment; /**< if true allows debug output prints for Concurrent Scavenger Page Alignment logic */
uintptr_t concurrentScavengerPageSectionSize; /**< selected section size for Concurrent Scavenger Page */
void *concurrentScavengerPageStartAddress; /**< start address for Concurrent Scavenger Page, UDATA_MAX if it is not initialized */
#endif /* OMR_GC_CONCURRENT_SCAVENGER */
#endif /* OMR_GC_MODRON_SCAVENGER */
protected:
OMR_VM* _omrVM;
OMR::GC::Forge _forge;
MM_GlobalCollector* _globalCollector; /**< The global collector for the system */
uintptr_t lastGlobalGCFreeBytes; /**< records the free memory size from after Global GC cycle */
#if defined(OMR_GC_OBJECT_MAP)
MM_ObjectMap *_objectMap;
#endif /* defined(OMR_GC_OBJECT_MAP) */
public:
bool _lazyCollectorInit; /**< Are we initializing without a collector? */
MM_CollectorLanguageInterface* collectorLanguageInterface;
void* _tenureBase; /**< Start address of Old subspace */
uintptr_t _tenureSize; /**< Size of Old subspace in bytes */
GC_ObjectModel objectModel; /**< generic object model for mixed and indexable objects */
GC_MixedObjectModel mixedObjectModel; /**< object model for mixed objects */
GC_ArrayObjectModel indexableObjectModel; /**< object model for arrays */
#if defined(OMR_GC_MODRON_SCAVENGER)
MM_Scavenger *scavenger;
void *_mainThreadTenureTLHRemainderBase; /**< base and top pointers of the last unused tenure TLH copy cache, that will be loaded to thread env during main setup */
void *_mainThreadTenureTLHRemainderTop;
#endif /* OMR_GC_MODRON_SCAVENGER */
J9Pool* environments;
MM_ExcessiveGCStats excessiveGCStats;
#if defined(OMR_GC_MODRON_STANDARD) || defined(OMR_GC_REALTIME)
MM_GlobalGCStats globalGCStats;
#endif /* OMR_GC_MODRON_STANDARD || OMR_GC_REALTIME */
#if defined(OMR_GC_MODRON_SCAVENGER)
MM_ScavengerStats incrementScavengerStats; /**< scavengerStats for the current phase/increment; typically just used for reporting purposes */
MM_ScavengerStats scavengerStats; /**< cumulative scavengerStats for all phases/increments (STW and concurrent) within a single cycle; typically used for various heuristics at the end of GC */
MM_ScavengerCopyScanRatio copyScanRatio; /* Most recent estimate of ratio of aggregate slots copied to slots scanned in completeScan() */
#endif /* OMR_GC_MODRON_SCAVENGER */
#if defined(OMR_GC_VLHGC)
MM_GlobalVLHGCStats globalVLHGCStats; /**< Global summary of all GC activity for VLHGC */
#endif /* OMR_GC_VLHGC */
#if defined(OMR_GC_CONCURRENT_SWEEP)
/* Temporary move from the leaf implementation */
bool concurrentSweep;
#endif /* OMR_GC_CONCURRENT_SWEEP */
bool largePageWarnOnError;
bool largePageFailOnError;
bool largePageFailedToSatisfy;
#if defined(OMR_GC_DOUBLE_MAP_ARRAYLETS)
bool isArrayletDoubleMapRequested;
bool isArrayletDoubleMapAvailable;
#endif /* OMR_GC_DOUBLE_MAP_ARRAYLETS */
uintptr_t requestedPageSize;
uintptr_t requestedPageFlags;
uintptr_t gcmetadataPageSize;
uintptr_t gcmetadataPageFlags;
#if defined(OMR_GC_MODRON_SCAVENGER)
MM_SublistPool rememberedSet;
uintptr_t oldHeapSizeOnLastGlobalGC;
uintptr_t freeOldHeapSizeOnLastGlobalGC;
float concurrentKickoffTenuringHeadroom; /**< percentage of free memory remaining in tenure heap. Used in conjunction with free memory to determine concurrent mark kickoff */
float tenureBytesDeviationBoost; /**< boost factor for tenuring deviation used for concurrent mark kickoff math */
#endif /* OMR_GC_MODRON_SCAVENGER */
#if defined(OMR_GC_REALTIME)
MM_RememberedSetSATB* sATBBarrierRememberedSet; /**< The snapshot at the beginning barrier remembered set used for the write barrier */
#endif /* defined(OMR_GC_REALTIME) */
ModronLnrlOptions lnrlOptions;
MM_OMRHookInterface omrHookInterface;
MM_PrivateHookInterface privateHookInterface;
void* heapBaseForBarrierRange0;
uintptr_t heapSizeForBarrierRange0;
#if defined(OMR_ENV_DATA64) && defined(OMR_GC_FULL_POINTERS)
void* shadowHeapBase; /* Read Barrier Verifier shadow heap base address */
void* shadowHeapTop; /* Read Barrier Verifier shadow heap top address */
MM_MemoryHandle shadowHeapHandle; /* Read Barrier Verifier shadow heap Virtual Memory handle (descriptor) */
#endif /* defined(OMR_ENV_DATA64) && defined(OMR_GC_FULL_POINTERS) */
bool doOutOfLineAllocationTrace;
bool doFrequentObjectAllocationSampling; /**< Whether to track object allocations*/
uintptr_t oolObjectSamplingBytesGranularity; /**< How often (in bytes) we do allocation sampling as tracked by per thread's local _oolTraceAllocationBytes. */
uintptr_t objectSamplingBytesGranularity; /**< How often (in bytes) we do allocation sampling as tracked by per thread's local _traceAllocationBytes. */
uintptr_t frequentObjectAllocationSamplingRate; /**< # bytes to sample / # bytes allocated */
MM_FrequentObjectsStats* frequentObjectsStats;
uint32_t frequentObjectAllocationSamplingDepth; /**< # of frequent objects we'd like to report */
uint32_t estimateFragmentation; /**< Controls fragmentation estimation: NO_ESTIMATE_FRAGMENTATION, LOCALGC_ESTIMATE_FRAGMENTATION, GLOBALGC_ESTIMATE_FRAGMENTATION (default) */
bool processLargeAllocateStats; /**< Enable process LargeObjectAllocateStats */
uintptr_t largeObjectAllocationProfilingThreshold; /**< object size threshold above which the object is large enough for allocation profiling */
uintptr_t largeObjectAllocationProfilingVeryLargeObjectThreshold; /**< object size threshold above which the object is large enough for recording precise size in allocation profiling */
uintptr_t largeObjectAllocationProfilingVeryLargeObjectSizeClass; /**< index of sizeClass for minimum veryLargeEntry*/
uint32_t largeObjectAllocationProfilingSizeClassRatio; /**< ratio of lower and upper boundary of a size class in large object allocation profiling */
uint32_t largeObjectAllocationProfilingTopK; /**< number of most allocation size we want to track/report in large object allocation profiling */
MM_FreeEntrySizeClassStats freeEntrySizeClassStatsSimulated; /**< snapshot of free memory status used for simulated allocator for fragmentation estimation */
uintptr_t freeMemoryProfileMaxSizeClasses; /**< maximum number of sizeClass maintained for heap free memory profile (computed from SizeClassRatio) */
volatile OMR_VMThread* gcExclusiveAccessThreadId; /**< thread token that represents the current "winning" thread for performing garbage collection */
omrthread_monitor_t gcExclusiveAccessMutex; /**< Mutex used for acquiring GC privileges as well as for signalling waiting threads that GC has been completed */
J9Pool* _lightweightNonReentrantLockPool;
omrthread_monitor_t _lightweightNonReentrantLockPoolMutex;
#if defined(OMR_GC_COMBINATION_SPEC)
bool _isSegregatedHeap; /**< Are we using a segregated heap model */
bool _isVLHGC; /**< Is balanced GC policy */
bool _isMetronomeGC; /**< Is metronome GC policy */
bool _isStandardGC; /**< Is it one of standard GC policy */
#endif /* OMR_GC_COMBINATION_SPEC */
uintptr_t tlhMinimumSize;
uintptr_t tlhMaximumSize;
uintptr_t tlhInitialSize;
uintptr_t tlhIncrementSize;
uintptr_t tlhSurvivorDiscardThreshold; /**< below this size GC (Scavenger) will discard survivor copy cache TLH, if alloc not succeeded (otherwise we reuse memory for next TLH) */
uintptr_t tlhTenureDiscardThreshold; /**< below this size GC (Scavenger) will discard tenure copy cache TLH, if alloc not succeeded (otherwise we reuse memory for next TLH) */
MM_AllocationStats allocationStats; /**< Statistics for allocations. */
uintptr_t bytesAllocatedMost;
OMR_VMThread* vmThreadAllocatedMost;
const char* gcModeString;
uintptr_t splitFreeListSplitAmount;
uintptr_t splitFreeListNumberChunksPrepared; /**< Used in MPSAOL postProcess. Shared for all MPSAOLs. Do not overwrite during postProcess for any MPSAOL. */
bool enableHybridMemoryPool;
bool largeObjectArea;
#if defined(OMR_GC_LARGE_OBJECT_AREA)
typedef enum {
METER_BY_SOA = 0,
METER_BY_LOA,
METER_DYNAMIC
} ConcurrentMetering;
uintptr_t largeObjectMinimumSize;
double largeObjectAreaInitialRatio;
double largeObjectAreaMinimumRatio;
double largeObjectAreaMaximumRatio;
bool debugLOAFreelist;
bool debugLOAAllocate;
int loaFreeHistorySize; /**< max size of _loaFreeRatioHistory array */
uintptr_t lastGlobalGCFreeBytesLOA; /**< records the LOA free memory size from after Global GC cycle */
ConcurrentMetering concurrentMetering;
#endif /* OMR_GC_LARGE_OBJECT_AREA */
bool disableExplicitGC;
uintptr_t heapAlignment;
uintptr_t absoluteMinimumOldSubSpaceSize;
uintptr_t absoluteMinimumNewSubSpaceSize;
float darkMatterCompactThreshold; /**< Value used to trigger compaction when the dark matter ratio reaches this percentage of the memory pool's memory */
uintptr_t parSweepChunkSize;
uintptr_t heapExpansionMinimumSize;
uintptr_t heapExpansionMaximumSize;
uintptr_t heapFreeMinimumRatioDivisor;
uintptr_t heapFreeMinimumRatioMultiplier;
uintptr_t heapFreeMaximumRatioDivisor;
uintptr_t heapFreeMaximumRatioMultiplier;
uintptr_t heapExpansionGCTimeThreshold; /**< max percentage of time spent in gc before expansion */
uintptr_t heapContractionGCTimeThreshold; /**< min percentage of time spent in gc before contraction */
uintptr_t heapExpansionStabilizationCount; /**< GC count required before the heap is allowed to expand due to excessive time after last heap expansion */
uintptr_t heapContractionStabilizationCount; /**< GC count required before the heap is allowed to contract due to excessive time after last heap expansion */
float heapSizeStartupHintConservativeFactor; /**< Use only a fraction of hints stored in SC */
float heapSizeStartupHintWeightNewValue; /**< Learn slowly by historic averaging of stored hints */
bool useGCStartupHints; /**< Enabled/disable usage of heap sizing startup hints from Shared Cache */
uintptr_t workpacketCount; /**< this value is ONLY set if -Xgcworkpackets is specified - otherwise the workpacket count is determined heuristically */
uintptr_t packetListSplit; /**< the number of ways to split packet lists, set by -XXgc:packetListLockSplit=, or determined heuristically based on the number of GC threads */
uintptr_t markingArraySplitMaximumAmount; /**< maximum number of elements to split array scanning work in marking scheme */
uintptr_t markingArraySplitMinimumAmount; /**< minimum number of elements to split array scanning work in marking scheme */
bool rootScannerStatsEnabled; /**< Enable/disable recording of performance statistics for the root scanner. Defaults to false. */
bool rootScannerStatsUsed; /**< Flag that indicates whether rootScannerStats were used in the last increment (by any thread, for any of its roots) */
/* bools and counters for -Xgc:fvtest options */
bool fvtest_forceOldResize;
uintptr_t fvtest_oldResizeCounter;
#if defined(OMR_GC_MODRON_SCAVENGER) || defined(OMR_GC_VLHGC)
uintptr_t fvtest_scanCacheCount;
#if defined(OMR_GC_MODRON_SCAVENGER)
bool fvtest_forceScavengerBackout;
uintptr_t fvtest_backoutCounter;
bool fvtest_forcePoisonEvacuate; /**< if true poison Evacuate space with pattern at the end of scavenge */
bool fvtest_forceNurseryResize;
uintptr_t fvtest_nurseryResizeCounter;
#endif /* OMR_GC_MODRON_SCAVENGER */
#endif /* OMR_GC_MODRON_SCAVENGER || OMR_GC_VLHGC */
bool fvtest_alwaysApplyOverflowRounding; /**< always round down the allocated heap as if overflow rounding were required */
uintptr_t fvtest_forceExcessiveAllocFailureAfter; /**< force excessive GC to occur after this many global GCs */
void* fvtest_verifyHeapAbove; /**< if non-NULL, will force start-up failure if any part of the heap is below this value */
void* fvtest_verifyHeapBelow; /**< if non-NULL, will force start-up failure if any part of the heap is above this value */
#if defined(OMR_GC_VLHGC)
bool fvtest_tarokVerifyMarkMapClosure; /**< True if the collector should verify that the new mark map defines a consistent and closed object graph after a GMP finishes creating it */
#endif /* defined(OMR_GC_VLHGC) */
bool fvtest_disableInlineAllocation; /**< True if inline allocation should be disabled (i.e. force out-of-line paths) */
uintptr_t fvtest_forceSweepChunkArrayCommitFailure; /**< Force failure at Sweep Chunk Array commit operation */
uintptr_t fvtest_forceSweepChunkArrayCommitFailureCounter; /**< Force failure at Sweep Chunk Array commit operation counter */
#if defined(OMR_ENV_DATA64) && defined(OMR_GC_FULL_POINTERS)
uintptr_t fvtest_enableReadBarrierVerification; /**< Forces failure at all direct memory read sites */
uintptr_t fvtest_enableMonitorObjectsReadBarrierVerification; /**< Forces failure at all direct memory read sites for monitor slot objects */
uintptr_t fvtest_enableClassStaticsReadBarrierVerification; /**< Forces failure at all direct memory read sites for class statics */
uintptr_t fvtest_enableJNIGlobalWeakReadBarrierVerification; /**< Forces failure at all direct memory read sites for JNI Global weak references */
uintptr_t fvtest_enableHeapReadBarrierVerification; /**< Forces failure at all direct memory read sites for heap references */
#endif /* defined(OMR_ENV_DATA64) && defined(OMR_GC_FULL_POINTERS) */
uintptr_t fvtest_forceMarkMapCommitFailure; /**< Force failure at Mark Map commit operation */
uintptr_t fvtest_forceMarkMapCommitFailureCounter; /**< Force failure at Mark Map commit operation counter */
uintptr_t fvtest_forceMarkMapDecommitFailure; /**< Force failure at Mark Map decommit operation */
uintptr_t fvtest_forceMarkMapDecommitFailureCounter; /**< Force failure at Mark Map decommit operation counter */
uintptr_t fvtest_forceReferenceChainWalkerMarkMapCommitFailure; /**< Force failure at Reference Chain Walker Mark Map commit operation */
uintptr_t fvtest_forceReferenceChainWalkerMarkMapCommitFailureCounter; /**< Force failure at Reference Chain Walker Mark Map commit operation counter */
uintptr_t fvtest_forceCopyForwardHybridRatio; /**< Force CopyForward Hybrid mode; value = 1-100, the percentage of non-evacuated eden regions */
uintptr_t softMx; /**< set through -Xsoftmx, depending on GC policy this number might differ from available heap memory, use MM_Heap::getActualSoftMxSize for calculations */
#if defined(OMR_GC_BATCH_CLEAR_TLH)
uintptr_t batchClearTLH;
#endif /* OMR_GC_BATCH_CLEAR_TLH */
omrthread_monitor_t gcStatsMutex;
uintptr_t gcThreadCount; /**< Initial number of GC threads - chosen default or specified in java options*/
bool gcThreadCountForced; /**< true if the number of GC threads is specified in Java options. Currently we have a few ways to do this: -Xgcthreads, -Xthreads= (RT only), -XthreadCount= */
uintptr_t dispatcherHybridNotifyThreadBound; /**< Bound for determining hybrid notification type (individual notifies for count < MIN(bound, maxThreads/2), otherwise notify_all) */
#if defined(OMR_GC_MODRON_SCAVENGER) || defined(OMR_GC_VLHGC)
enum ScavengerScanOrdering {
OMR_GC_SCAVENGER_SCANORDERING_BREADTH_FIRST = 0,
OMR_GC_SCAVENGER_SCANORDERING_DYNAMIC_BREADTH_FIRST,
OMR_GC_SCAVENGER_SCANORDERING_HIERARCHICAL,
};
ScavengerScanOrdering scavengerScanOrdering; /**< scan ordering in Scavenger */
/* Start of options relating to dynamicBreadthFirstScanOrdering */
uintptr_t gcCountBetweenHotFieldSort;
uintptr_t gcCountBetweenHotFieldSortMax;
bool adaptiveGcCountBetweenHotFieldSort;
bool depthCopyTwoPaths;
bool depthCopyThreePaths;
bool alwaysDepthCopyFirstOffset;
bool allowPermanantHotFields;
bool hotFieldResettingEnabled;
uintptr_t maxConsecutiveHotFieldSelections;
uintptr_t gcCountBetweenHotFieldReset;
uintptr_t depthCopyMax;
uint32_t maxHotFieldListLength;
uintptr_t minCpuUtil;
/* End of options relating to dynamicBreadthFirstScanOrdering */
#if defined(OMR_GC_MODRON_SCAVENGER)
uintptr_t scvTenureRatioHigh;
uintptr_t scvTenureRatioLow;
uintptr_t scvTenureFixedTenureAge; /**< The tenure age to use for the Fixed scavenger tenure strategy. */
uintptr_t scvTenureAdaptiveTenureAge; /**< The tenure age to use for the Adaptive scavenger tenure strategy. */
double scvTenureStrategySurvivalThreshold; /**< The survival threshold (from 0.0 to 1.0) used for deciding to tenure particular ages. */
bool scvTenureStrategyFixed; /**< Flag for enabling the Fixed scavenger tenure strategy. */
bool scvTenureStrategyAdaptive; /**< Flag for enabling the Adaptive scavenger tenure strategy. */
bool scvTenureStrategyLookback; /**< Flag for enabling the Lookback scavenger tenure strategy. */
bool scvTenureStrategyHistory; /**< Flag for enabling the History scavenger tenure strategy. */
bool scavengerEnabled;
bool scavengerRsoScanUnsafe;
uintptr_t cacheListSplit; /**< the number of ways to split scanCache lists, set by -XXgc:cacheListLockSplit=, or determined heuristically based on the number of GC threads */
#if defined(OMR_GC_CONCURRENT_SCAVENGER)
bool softwareRangeCheckReadBarrier; /**< enable software read barrier instead of hardware guarded loads when running with CS */
bool concurrentScavenger; /**< CS enabled/disabled flag */
bool concurrentScavengerForced; /**< set to true if CS is requested (by cmdline option), but there are more checks to do before deciding whether the request is to be obeyed */
bool concurrentScavengerHWSupport; /**< set to true if CS runs with HW support */
uintptr_t concurrentScavengerBackgroundThreads; /**< number of background GC threads during concurrent phase of Scavenge */
bool concurrentScavengerBackgroundThreadsForced; /**< true if concurrentScavengerBackgroundThreads set via command line option */
uintptr_t concurrentScavengerSlack; /**< number of bytes added on top of average allocated bytes during the concurrent cycle, used in the calculation of survivor size */
float concurrentScavengerAllocDeviationBoost; /**< boost factor for allocation rate and its deviation, used for tilt calculation in Concurrent Scavenger */
bool concurrentScavengeExhaustiveTermination; /**< control flag to enable/disable the concurrent phase termination optimization involving async mutator callbacks */
#endif /* OMR_GC_CONCURRENT_SCAVENGER */
uintptr_t scavengerFailedTenureThreshold;
uintptr_t maxScavengeBeforeGlobal;
uintptr_t scvArraySplitMaximumAmount; /**< maximum number of elements to split array scanning work in the scavenger */
uintptr_t scvArraySplitMinimumAmount; /**< minimum number of elements to split array scanning work in the scavenger */
uintptr_t scavengerScanCacheMaximumSize; /**< maximum size of scan and copy caches before rounding, zero (default) means calculate them */
uintptr_t scavengerScanCacheMinimumSize; /**< minimum size of scan and copy caches before rounding, zero (default) means calculate them */
bool tiltedScavenge;
bool debugTiltedScavenge;
double survivorSpaceMinimumSizeRatio;
double survivorSpaceMaximumSizeRatio;
double tiltedScavengeMaximumIncrease;
double scavengerCollectorExpandRatio; /**< the ratio of _avgTenureBytes we use to expand when a collectorAllocate() fails */
uintptr_t scavengerMaximumCollectorExpandSize; /**< the maximum amount by which we will expand when a collectorAllocate() fails */
bool dynamicNewSpaceSizing;
bool debugDynamicNewSpaceSizing;
bool dnssAvoidMovingObjects;
double dnssExpectedTimeRatioMinimum;
double dnssExpectedTimeRatioMaximum;
double dnssWeightedTimeRatioFactorIncreaseSmall;
double dnssWeightedTimeRatioFactorIncreaseMedium;
double dnssWeightedTimeRatioFactorIncreaseLarge;
double dnssWeightedTimeRatioFactorDecrease;
double dnssMaximumExpansion;
double dnssMaximumContraction;
double dnssMinimumExpansion;
double dnssMinimumContraction;
bool enableSplitHeap; /**< true if we are using gencon with -Xgc:splitheap (we will fail to bootstrap if we can't allocate both ranges) */
double aliasInhibitingThresholdPercentage; /**< percentage of threads that can be blocked before copy cache aliasing is inhibited (set through aliasInhibitingThresholdPercentage=) */
enum HeapInitializationSplitHeapSection {
HEAP_INITIALIZATION_SPLIT_HEAP_UNKNOWN = 0,
HEAP_INITIALIZATION_SPLIT_HEAP_TENURE,
HEAP_INITIALIZATION_SPLIT_HEAP_NURSERY,
};
HeapInitializationSplitHeapSection splitHeapSection; /**< Split Heap section to be requested */
#endif /* OMR_GC_MODRON_SCAVENGER */
#endif /* OMR_GC_MODRON_SCAVENGER || OMR_GC_VLHGC */
double globalMaximumContraction; /**< maximum percentage of committed global heap which can contract in one GC cycle (set through -Xgc:globalMaximumContraction=) */
double globalMinimumContraction; /**< minimum percentage of committed global heap which can contract in one GC cycle (set through -Xgc:globalMinimumContraction=) */
/* global variables for excessiveGC functionality */
MM_UserSpecifiedParameterBool excessiveGCEnabled; /**< should we check for excessiveGC? (set through -XdisableExcessiveGC and -XenableExcessiveGC) */
bool isRecursiveGC; /**< is the current executing gc a result of another gc (ie: scavenger triggering a global collect) */
bool didGlobalGC; /**< has a global gc occurred in the current gc (possibly as a result of a recursive gc) */
ExcessiveLevel excessiveGCLevel;
float excessiveGCnewRatioWeight;
uintptr_t excessiveGCratio;
float excessiveGCFreeSizeRatio;
MM_Heap* heap;
MM_HeapRegionManager* heapRegionManager; /**< The heap region manager used to view the heap as regions of memory */
MM_MemoryManager* memoryManager; /**< memory manager used to access to virtual memory instances */
uintptr_t aggressive;
MM_SweepHeapSectioning* sweepHeapSectioning; /**< Reference to the SweepHeapSectioning so that Compact can share the backing store */
#if defined(OMR_GC_MODRON_COMPACTION)
uintptr_t compactOnGlobalGC;
uintptr_t noCompactOnGlobalGC;
uintptr_t compactOnSystemGC;
uintptr_t nocompactOnSystemGC;
bool compactToSatisfyAllocate;
#endif /* OMR_GC_MODRON_COMPACTION */
bool payAllocationTax;
#if defined(OMR_GC_MODRON_CONCURRENT_MARK)
bool concurrentMark;
bool concurrentKickoffEnabled;
double concurrentSlackFragmentationAdjustmentWeight; /**< weight (from 0.0 to 5.0) used for calculating free tenure space (what percentage of the fragmentation needs to be removed from freeBytes) */
bool debugConcurrentMark;
bool optimizeConcurrentWB;
bool dirtCardDuringRSScan;
uintptr_t concurrentLevel;
uintptr_t concurrentBackground;
uintptr_t concurrentSlack; /**< number of bytes to add to the concurrent kickoff threshold buffer */
uintptr_t cardCleanPass2Boost;
uintptr_t cardCleaningPasses;
UDATA fvtest_concurrentCardTablePreparationDelay; /**< Delay for concurrent card table preparation in milliseconds */
UDATA fvtest_forceConcurrentTLHMarkMapCommitFailure; /**< Force failure at Concurrent TLH Mark Map commit operation */
UDATA fvtest_forceConcurrentTLHMarkMapCommitFailureCounter; /**< Force failure at Concurrent TLH Mark Map commit operation counter */
UDATA fvtest_forceConcurrentTLHMarkMapDecommitFailure; /**< Force failure at Concurrent TLH Mark Map decommit operation */
UDATA fvtest_forceConcurrentTLHMarkMapDecommitFailureCounter; /**< Force failure at Concurrent TLH Mark Map decommit operation counter */
#endif /* OMR_GC_MODRON_CONCURRENT_MARK */
UDATA fvtest_forceCardTableCommitFailure; /**< Force failure at Card Table commit operation */
UDATA fvtest_forceCardTableCommitFailureCounter; /**< Force failure at Card Table commit operation counter */
UDATA fvtest_forceCardTableDecommitFailure; /**< Force failure at Card Table decommit operation */
UDATA fvtest_forceCardTableDecommitFailureCounter; /**< Force failure at Card Table decommit operation counter */
MM_ParallelDispatcher* dispatcher;
MM_CardTable* cardTable;
/* Begin command line options temporary home */
uintptr_t memoryMax;
uintptr_t initialMemorySize;
uintptr_t minNewSpaceSize;
uintptr_t newSpaceSize;
uintptr_t maxNewSpaceSize;
uintptr_t minOldSpaceSize;
uintptr_t oldSpaceSize;
uintptr_t maxOldSpaceSize;
uintptr_t allocationIncrement;
uintptr_t fixedAllocationIncrement;
uintptr_t lowMinimum;
uintptr_t allowMergedSpaces;
uintptr_t maxSizeDefaultMemorySpace;
bool allocationIncrementSetByUser;
/* End command line options temporary home */
uintptr_t overflowSafeAllocSize;
uint64_t usablePhysicalMemory; /**< Physical memory available to the process */
#if defined(OMR_GC_REALTIME)
/* Parameters */
uintptr_t RTC_Frequency;
uintptr_t itPeriodMicro;
uintptr_t hrtPeriodMicro;
uintptr_t debugWriteBarrier;
uintptr_t timeWindowMicro;
uintptr_t beatMicro;
bool overrideHiresTimerCheck; /**< ignore the values returned from clock_getres if this value is true */
uintptr_t targetUtilizationPercentage;
uintptr_t gcTrigger; // start gc when bytes used exceeds gcTrigger
uintptr_t gcInitialTrigger; // start gc when bytes used exceeds gcTrigger
uintptr_t headRoom; // at end of GC, reset gcTrigger to OMR_MAX(gcInitialTrigger, usedMemory + headRoom)
bool synchronousGCOnOOM;
bool extraYield;
/* Global variables */
MM_RealtimeGC* realtimeGC;
bool fixHeapForWalk; /**< configuration flag set by command line option or GC Check onload */
uintptr_t minArraySizeToSetAsScanned;
uintptr_t overflowCacheCount; /**< How many entries should there be in the environment's local overflow cache */
#endif /* OMR_GC_REALTIME */
#if defined(OMR_GC_REALTIME)
bool concurrentSweepingEnabled; /**< if this is set, the sweep phase of GC will be run concurrently */
bool concurrentTracingEnabled; /**< if this is set, tracing will run concurrently */
#endif /* defined(OMR_GC_REALTIME) */
bool instrumentableAllocateHookEnabled;
MM_HeapMap* previousMarkMap; /**< the previous valid mark map. This can be used to walk marked objects in regions which have _markMapUpToDate set to true */
MM_GlobalAllocationManager* globalAllocationManager; /**< Used for attaching threads to AllocationContexts */
#if defined(OMR_GC_REALTIME) || defined(OMR_GC_SEGREGATED_HEAP)
uintptr_t managedAllocationContextCount; /**< The number of allocation contexts which will be instantiated and managed by the GlobalAllocationManagerRealtime (currently 2*cpu_count) */
#endif /* OMR_GC_REALTIME || OMR_GC_SEGREGATED_HEAP */
#if defined(OMR_GC_SEGREGATED_HEAP)
MM_SizeClasses* defaultSizeClasses;
#endif /* defined(OMR_GC_SEGREGATED_HEAP) */
#if defined(OMR_GC_VLHGC_CONCURRENT_COPY_FORWARD)
OMR::GC::HeapRegionStateTable *heapRegionStateTable;
#endif /* defined(OMR_GC_VLHGC_CONCURRENT_COPY_FORWARD) */
/* OMR_GC_REALTIME (in for all -- see 82589) */
uint32_t distanceToYieldTimeCheck; /**< Number of condYield that can be skipped before actual checking for yield, when the quanta time has been relaxed */
uintptr_t traceCostToCheckYield; /**< tracing cost (in number of objects marked and pointers scanned) after we try to yield */
uintptr_t sweepCostToCheckYield; /**< weighted count of free chunks/marked objects before we check yield in sweep small loop */
uintptr_t splitAvailableListSplitAmount; /**< Number of split available lists per size class, per defragment bucket */
uint32_t newThreadAllocationColor;
uintptr_t minimumFreeEntrySize;
uintptr_t arrayletsPerRegion;
uintptr_t verbose; // Accessibility from mmparse
uintptr_t debug;
uintptr_t allocationTrackerMaxTotalError; /**< The total maximum desired error for the free bytes approximation, the larger the number, the lower the contention and vice versa */
uintptr_t allocationTrackerMaxThreshold; /**< The maximum threshold for a single allocation tracker */
uintptr_t allocationTrackerFlushThreshold; /**< The flush threshold to be used for all allocation trackers, this value is adjusted every time a new thread is created/destroyed */
/* TODO: These variables should also be used for TLHs */
uintptr_t allocationCacheMinimumSize;
uintptr_t allocationCacheMaximumSize;
uintptr_t allocationCacheInitialSize;
uintptr_t allocationCacheIncrementSize;
bool nonDeterministicSweep;
/* OMR_GC_REALTIME (in for all) */
MM_ConfigurationOptions configurationOptions; /**< holds the options struct, used during startup for selecting a Configuration */
MM_Configuration* configuration; /**< holds the Configuration selected during startup */
MM_VerboseManagerBase* verboseGCManager;
uintptr_t verbosegcCycleTime;
bool verboseExtensions;
bool verboseNewFormat; /**< a flag, enabled by -XXgc:verboseNewFormat, to enable the new verbose GC format */
bool bufferedLogging; /**< Enabled by -Xgc:bufferedLogging. Use buffered filestreams when writing logs (e.g. verbose:gc) to a file */
uintptr_t lowAllocationThreshold; /**< the lower bound of the allocation threshold range */
uintptr_t highAllocationThreshold; /**< the upper bound of the allocation threshold range */
bool disableInlineCacheForAllocationThreshold; /**< true if inline allocates fall within the allocation threshold*/
bool disableInlineAllocationForSamplingBytesGranularity; /**< true if inline allocation should be "disabled" for SamplingBytesGranularity */
uintptr_t heapCeiling; /**< the highest point in memory where objects can be addressed (used for the -Xgc:lowMemHeap option) */
enum HeapInitializationFailureReason {
HEAP_INITIALIZATION_FAILURE_REASON_NO_ERROR = 0,
HEAP_INITIALIZATION_FAILURE_REASON_CAN_NOT_INSTANTIATE_HEAP,
HEAP_INITIALIZATION_FAILURE_REASON_CAN_NOT_INSTANTIATE_SPLIT_HEAP_OLD_SPACE,
HEAP_INITIALIZATION_FAILURE_REASON_CAN_NOT_INSTANTIATE_SPLIT_HEAP_NEW_SPACE,
HEAP_INITIALIZATION_FAILURE_REASON_CAN_NOT_INSTANTIATE_SPLIT_HEAP_GEOMETRY,
HEAP_INITIALIZATION_FAILURE_REASON_CAN_NOT_ALLOCATE_LOW_MEMORY_RESERVE,
HEAP_INITIALIZATION_FAILURE_REASON_CAN_NOT_SATISFY_REQUESTED_PAGE_SIZE,
HEAP_INITIALIZATION_FAILURE_REASON_METRONOME_DOES_NOT_SUPPORT_4BIT_SHIFT,
};
HeapInitializationFailureReason heapInitializationFailureReason; /**< Error code providing additional information about heap initialization failure */
bool scavengerAlignHotFields; /**< True if the scavenger is to check the hot field description for an object in order to better cache align it when tenuring (enabled with the -Xgc:hotAlignment option) */
uintptr_t suballocatorInitialSize; /**< the initial chunk size in bytes for the J9Heap suballocator (enabled with the -Xgc:suballocatorInitialSize option) */
uintptr_t suballocatorCommitSize; /**< the commit size in bytes for the J9Heap suballocator (enabled with the -Xgc:suballocatorCommitSize option) */
#if defined(OMR_GC_COMPRESSED_POINTERS)
bool shouldAllowShiftingCompression; /**< temporary option to enable compressed reference scaling by shifting pointers */
bool shouldForceSpecifiedShiftingCompression; /**< temporary option to enable forcedShiftingCompressionAmount */
uintptr_t forcedShiftingCompressionAmount; /**< temporary option to force compressed reference scaling to use this as the shifted value (typically 0-3 in current usage) */
#endif /* defined (OMR_GC_COMPRESSED_POINTERS) */
uintptr_t preferredHeapBase; /**< the preferred heap base for heap allocated using virtual memory */
uintptr_t currentEnvironmentCount; /**< The current count of active environments, aka: running threads */
uintptr_t regionSize; /**< The size, in bytes, of a fixed-size table-backed region of the heap (does not apply to AUX regions) */
MM_NUMAManager _numaManager; /**< The object which abstracts the details of our NUMA support so that the GCExtensions and the callers don't need to duplicate the support to interpret our intention */
bool numaForced; /**< if true, NUMA was explicitly enabled or disabled by a command line option (the actual value is stored in the NUMA Manager) */
bool padToPageSize;
bool fvtest_disableExplictMainThread; /**< Test option to disable creation of explicit main GC thread */
#if defined(OMR_GC_VLHGC)
bool tarokDebugEnabled; /**< True if we want to perform additional checks and freed memory poisoning which aid in debugging Tarok problems */
uintptr_t tarokGlobalMarkIncrementTimeMillis; /**< The time period in millisecond a Global Mark increment is allowed to run (as set by the user, normally dynamic)*/
uintptr_t fvtest_tarokForceNUMANode; /**< The NUMA node to force the heap onto (UDATA_MAX => no force, 0 => interleaved, >0 => specific node) */
uintptr_t fvtest_tarokFirstContext; /**< The allocation context number to use first, when associating the first thread (assignments will proceed, as a round robin, from this number). Defaults to 0 */
bool tarokEnableScoreBasedAtomicCompact; /**< True if atomic compact does use score based compact region selection heuristic */
uintptr_t tarokIdealEdenMinimumBytes; /**< The ideal size of the eden space, in bytes, when the heap is at its -Xms size */
uintptr_t tarokIdealEdenMaximumBytes; /**< The ideal size of the eden space, in bytes, when the heap is at its -Xmx size */
bool tarokEnableIncrementalGMP; /**< True if we want to perform GMP work as a series of increments during the run. (set to false if we should rely on OOM global collections to perform the GMP) */
MM_UserSpecifiedParameterUDATA tarokNurseryMaxAge; /**< The maximum age that a region will be before it is excluded from a partial garbage collection */
uintptr_t tarokRememberedSetCardListMaxSize; /* The maximum size in entries of RememberedSetCardList per region */
uintptr_t tarokRememberedSetCardListSize; /* The average (allocated) size in entries of RememberedSetCardList per region */
uintptr_t tarokPGCtoGMPNumerator; /* The numerator of the PGC:GMP ratio */
uintptr_t tarokPGCtoGMPDenominator; /* The denominator of the PGC:GMP ratio */
uintptr_t tarokGMPIntermission; /** The delay between GMP cycles, specified as the number of GMP increments to skip */
bool tarokAutomaticGMPIntermission; /** Should the delay between GMP cycles be automatic, or as specified in tarokGMPIntermission? */
uintptr_t tarokRegionMaxAge; /**< Maximum age a region can be before it will no longer have its age incremented after a PGC (saturating age) */
uintptr_t tarokKickoffHeadroomInBytes; /**< extra bytes reserved for survivor set, in case of sudden changes of survivor rate. Used in calculation to predict GMP kickoff */
bool tarokForceKickoffHeadroomInBytes; /** true if user specifies tarokKickoffHeadroomInBytes via -XXgc:tarokKickoffHeadroomInBytes= */
uint32_t tarokKickoffHeadroomRegionRate; /**< used by calculating tarokKickoffHeadroomInBytes, the percentage of the free memory, range: 0(0%)<=the rate<=50(50%) , default=2 (2%) */
MM_RememberedSetCardBucket* rememberedSetCardBucketPool; /* GC thread local pools of RS Card Buckets for each Region (its Card List) */
bool tarokEnableDynamicCollectionSetSelection; /**< Enable dynamic selection of regions to include in the collection set that reside outside of the nursery */
uintptr_t tarokDynamicCollectionSetSelectionAbsoluteBudget; /**< Number of budgeted regions to dynamically select for PGC collection (outside of the required nursery set) */
double tarokDynamicCollectionSetSelectionPercentageBudget; /**< Percentage increase of nursery region count to use as dynamically selected regions for PGC */
uintptr_t tarokCoreSamplingAbsoluteBudget; /**< Number of budgeted regions to select for core sampling in a PGC collection (outside of the required nursery set) */
double tarokCoreSamplingPercentageBudget; /**< Percentage increase of nursery region count to use as core sampling selected regions for PGC */
void* tarokTgcSetSelectionDataTable; /**< (TGC USE ONLY!) Table containing all dynamic and core sampling set selection information */
bool tarokTgcEnableRememberedSetDuplicateDetection; /** (TGC USE ONLY!) True if we want to enable duplicate card stats reported by TGC for RSCL */
bool tarokPGCShouldCopyForward; /**< True if we want to allow PGC increments to reclaim memory using copy-forward (default is true) */
bool tarokPGCShouldMarkCompact; /**< True if we want to allow PGC increments to reclaim memory using compact (default is true) and require a corresponding mark operation */
MM_InterRegionRememberedSet* interRegionRememberedSet; /**< The remembered set abstraction to be used to track inter-region references found while processing this cycle */
bool tarokEnableStableRegionDetection; /**< Enable overflowing RSCSLs for stable regions */
double tarokDefragmentEmptinessThreshold; /**< Emptiness (freeAndDarkMatter/regionSize) for a region to be considered as a target for defragmentation (used for stable region detection and region de-fragmentation selection) */
bool tarokAttachedThreadsAreCommon; /**< True if we want to associate all common threads with the "common context" which is otherwise only reserved for regions which the collector has identified as common to all nodes ("common" regions are still moved to this context, no matter the value of this flag) */
double tarokCopyForwardFragmentationTarget; /**< The fraction of discarded space targeted in each copy-forward collection. The actual amount may be lower or higher than this fraction. */
bool tarokEnableCardScrubbing; /**< Set if card scrubbing in GMPs is enabled (default is true) */
bool tarokEnableConcurrentGMP; /**< Set if the GMP should attempt to accomplish work concurrently, where possible. False implies GMP work will only be done in the stop-the-world increments */
MM_CompactGroupPersistentStats* compactGroupPersistentStats; /**< The global persistent stats indexed by compact group number */
MM_ClassLoaderRememberedSet* classLoaderRememberedSet; /**< The remembered set abstraction to be used to track references from instances to the class loaders containing their defining class */
bool tarokEnableIncrementalClassGC; /**< Enable class unloading during partial garbage collection increments */
bool tarokEnableCompressedCardTable; /**< Enable usage of Compressed Card Table (Summary) */
MM_CompressedCardTable* compressedCardTable; /**< The pointer to Compressed Card Table */
bool tarokEnableLeafFirstCopying; /**< Enable copying of leaf children immediately after parent is copied in CopyForwardScheme */
uint64_t tarokMaximumAgeInBytes; /**< Maximum age in bytes for bytes-based-allocated aging system */
uint64_t tarokMaximumNurseryAgeInBytes; /**< Maximum Nursery Age in bytes for bytes-based-allocated aging system */
bool tarokAllocationAgeEnabled; /**< Enable Allocation-based aging system */
uintptr_t tarokAllocationAgeUnit; /**< base unit for allocation-based aging system */
double tarokAllocationAgeExponentBase; /**< allocation-based aging system exponent base */
bool tarokUseProjectedSurvivalCollectionSet; /**< True if we should use a collection set based on the projected survival rate of regions*/
uintptr_t tarokWorkSplittingPeriod; /**< The number of objects which must be scanned between each check that the depth-first copy-forward implementation makes to see if it should push work out to other threads */
MM_UserSpecifiedParameterUDATA tarokMinimumGMPWorkTargetBytes; /**< Minimum used for GMP work targets. This avoids the low-scan-rate -> low GMP work target -> low scan-rate feedback loop. */
double tarokConcurrentMarkingCostWeight; /**< How much we weigh concurrentMarking into our GMP scan time cost calculations */
bool tarokAutomaticDefragmentEmptinessThreshold; /**< Whether we should use the automatically derived value for tarokDefragmentEmptinessThreshold or not */
bool tarokEnableCopyForwardHybrid; /**< Enable CopyForward Hybrid mode */
enum ReserveRegions {
RESERVE_REGIONS_NO = 0,
RESERVE_REGIONS_MOST_ALLOCATABLE,
RESERVE_REGIONS_MOST_FREE
};
ReserveRegions tarokReserveRegionsFromCollectionSet;
bool tarokEnableRecoverRegionTailsAfterSweep; /**< Enable recovering region tail during post sweep of GMP */
#if defined(OMR_GC_VLHGC_CONCURRENT_COPY_FORWARD)
bool _isConcurrentCopyForward;
#endif
enum TarokRegionTailCondidateListSortOrder {
SORT_ORDER_NOORDER = 0,
SORT_ORDER_ASCENDING,
SORT_ORDER_DESCENDING
};
TarokRegionTailCondidateListSortOrder tarokTailCandidateListSortOrder;
#endif /* defined (OMR_GC_VLHGC) */
/* OMR_GC_VLHGC (in for all -- see 82589) */
bool tarokEnableExpensiveAssertions; /**< True if the collector should perform extra consistency verifications which are known to be very expensive or poorly parallelized */
bool tarokEnableAllocationPointerAssertion;
/* OMR_GC_VLHGC (in for all) */
MM_SweepPoolManagerAddressOrderedList* sweepPoolManagerAddressOrderedList; /**< Pointer to Sweep Pool Manager for MPAOL, used for LOA and nursery */
MM_SweepPoolManagerAddressOrderedListBase* sweepPoolManagerSmallObjectArea; /**< Pointer to Sweep Pool Manager for MPSAOL or Hybrid, used for SOA */
MM_SweepPoolManager* sweepPoolManagerBumpPointer; /**< Pointer to Sweep Pool Manager for MemoryPoolBumpPointer */
uint64_t _mainThreadCpuTimeNanos; /**< Total CPU time used by all main threads */
bool alwaysCallWriteBarrier; /**< was -Xgc:alwayscallwritebarrier specified? */
bool alwaysCallReadBarrier; /**< was -Xgc:alwaysCallReadBarrier specified? */
bool _holdRandomThreadBeforeHandlingWorkUnit; /**< Whether we should randomly hold up a thread entering MM_ParallelTask::handleNextWorkUnit() */
uintptr_t _holdRandomThreadBeforeHandlingWorkUnitPeriod; /**< How often (in terms of number of times MM_ParallelTask::handleNextWorkUnit() is called) to randomly hold up a thread entering MM_ParallelTask::handleNextWorkUnit() */
bool _forceRandomBackoutsAfterScan; /**< Whether we should force MM_Scavenger::completeScan() to randomly fail due to backout */
uintptr_t _forceRandomBackoutsAfterScanPeriod; /**< How often (in terms of number of times MM_Scavenger::completeScan() is called) to randomly have MM_Scavenger::completeScan() fail due to backout */
MM_ReferenceChainWalkerMarkMap* referenceChainWalkerMarkMap; /**< Reference to Reference Chain Walker mark map - will be created at first call and destroyed in Configuration tearDown*/
bool trackMutatorThreadCategory; /**< Whether we should switch thread categories for mutators doing GC work */
uintptr_t darkMatterSampleRate;/**< the weight of darkMatterSample for standard gc, default:32, if the weight = 0, disable darkMatterSampling */
bool pretouchHeapOnExpand; /**< True to pretouch memory during initial heap inflation or heap expansion */
#if defined(OMR_GC_IDLE_HEAP_MANAGER)
uintptr_t idleMinimumFree; /**< percentage of free heap to be retained as committed, default=0 for gencon, complete tenure free memory will be decommitted */
bool gcOnIdle; /**< Enables releasing free heap pages if true while systemGarbageCollect invoked with IDLE GC code, default is false */
bool compactOnIdle; /**< Forces compaction if global GC executed while VM Runtime State set to IDLE, default is false */
float gcOnIdleCompactThreshold; /**< Enables compaction when fragmented memory and dark matter exceed this limit. The larger this number, the more memory can be fragmented before compact is triggered **/
#endif
#if defined(OMR_VALGRIND_MEMCHECK)
uintptr_t valgrindMempoolAddr; /**< Memory pool's address for valgrind **/
J9HashTable *memcheckHashTable; /**< Hash table to store object addresses for valgrind */
MUTEX memcheckHashTableMutex;
#endif /* defined(OMR_VALGRIND_MEMCHECK) */
bool shouldForceLowMemoryHeapCeilingShiftIfPossible; /**< Whether we should force compressed reference shift to 3 **/
/* Function Members */
private:
/**
* Validate default page parameters
* Search passed pair (page size,page flags) in the arrays provided by Port Library (known by OS)
* Note: z/OS is the only OS that supports page flags; all other platforms fill the flags array with OMRPORT_VMEM_PAGE_FLAG_NOT_USED values.
* An example pair for z/OS: [1M, OMRPORT_VMEM_PAGE_FLAG_PAGEABLE]
* An example for AIX: [64K, OMRPORT_VMEM_PAGE_FLAG_NOT_USED]
*
* @param[in] pageSize page size to search
* @param[in] pageFlags page flags to search
* @param[in] pageSizesArray Page Sizes array (zero terminated)
* @param[in] pageFlagsArray Page Flags array (same size as the pageSizesArray array)
* @return true if requested pair is discovered
*/
static bool validateDefaultPageParameters(uintptr_t pageSize, uintptr_t pageFlags, uintptr_t *pageSizesArray, uintptr_t *pageFlagsArray);
protected:
virtual bool initialize(MM_EnvironmentBase* env);
virtual void tearDown(MM_EnvironmentBase* env);
virtual void computeDefaultMaxHeap(MM_EnvironmentBase* env);
public:
static MM_GCExtensionsBase* newInstance(MM_EnvironmentBase* env);
virtual void kill(MM_EnvironmentBase* env);
/**
* Gets a pointer to the base extensions.
* @return Pointer to the base extensions.
*/
MMINLINE static MM_GCExtensionsBase* getExtensions(OMR_VM* omrVM) { return (MM_GCExtensionsBase*)omrVM->_gcOmrVMExtensions; }
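/*
 * Illustrative usage (added for clarity, not in the original source): given an
 * OMR_VM pointer, callers retrieve the extensions like this:
 *
 *   MM_GCExtensionsBase *extensions = MM_GCExtensionsBase::getExtensions(omrVM);
 */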
MMINLINE OMR_VM* getOmrVM() { return _omrVM; }
MMINLINE void setOmrVM(OMR_VM* omrVM) { _omrVM = omrVM; }
/**
* Gets a pointer to the memory forge
* @return Pointer to the memory forge
*/
MMINLINE OMR::GC::Forge* getForge() { return &_forge; }
/**
* Return back true if object references are compressed
* @return true, if object references are compressed
*/
MMINLINE bool compressObjectReferences() {
#if defined(OMR_GC_COMPRESSED_POINTERS)
#if defined(OMR_GC_FULL_POINTERS)
#if defined(OMR_OVERRIDE_COMPRESS_OBJECT_REFERENCES)
return (bool)OMR_OVERRIDE_COMPRESS_OBJECT_REFERENCES;
#else /* defined(OMR_OVERRIDE_COMPRESS_OBJECT_REFERENCES) */
return _compressObjectReferences;
#endif /* defined(OMR_OVERRIDE_COMPRESS_OBJECT_REFERENCES) */
#else /* defined(OMR_GC_FULL_POINTERS) */
return true;
#endif /* defined(OMR_GC_FULL_POINTERS) */
#else /* defined(OMR_GC_COMPRESSED_POINTERS) */
return false;
#endif /* defined(OMR_GC_COMPRESSED_POINTERS) */
}
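/*
 * Summary of the build-flag logic above (descriptive comment added for clarity):
 * - compressed-pointers-only build: always returns true
 * - full-pointers-only build: always returns false
 * - mixed build with OMR_OVERRIDE_COMPRESS_OBJECT_REFERENCES defined: fixed at compile time
 * - mixed build without the override: decided per VM via _compressObjectReferences
 */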
MMINLINE uintptr_t getRememberedCount()
{
if (isStandardGC()) {
#if defined(OMR_GC_MODRON_SCAVENGER)
return static_cast<MM_SublistPool>(rememberedSet).countElements();
#else
return 0;
#endif /* OMR_GC_MODRON_SCAVENGER */
} else {
return 0;
}
}
MMINLINE MM_GlobalCollector* getGlobalCollector() { return _globalCollector; }
MMINLINE void setGlobalCollector(MM_GlobalCollector* collector) { _globalCollector = collector; }
MMINLINE uintptr_t getLastGlobalGCFreeBytes(){ return lastGlobalGCFreeBytes; }
MMINLINE void setLastGlobalGCFreeBytes(uintptr_t globalGCFreeBytes){ lastGlobalGCFreeBytes = globalGCFreeBytes;}
#if defined(OMR_GC_OBJECT_MAP)
MMINLINE MM_ObjectMap *getObjectMap() { return _objectMap; }
MMINLINE void setObjectMap(MM_ObjectMap *objectMap) { _objectMap = objectMap; }
#endif /* defined(OMR_GC_OBJECT_MAP) */
MMINLINE bool
isConcurrentScavengerEnabled()
{
#if defined(OMR_GC_CONCURRENT_SCAVENGER)
return concurrentScavenger;
#else
return false;
#endif /* defined(OMR_GC_CONCURRENT_SCAVENGER) */
}
MMINLINE bool
isConcurrentScavengerHWSupported()
{
#if defined(OMR_GC_CONCURRENT_SCAVENGER)
return concurrentScavengerHWSupport;
#else
return false;
#endif /* defined(OMR_GC_CONCURRENT_SCAVENGER) */
}
MMINLINE bool
isSoftwareRangeCheckReadBarrierEnabled()
{
#if defined(OMR_GC_CONCURRENT_SCAVENGER)
return softwareRangeCheckReadBarrier;
#else
return false;
#endif /* defined(OMR_GC_CONCURRENT_SCAVENGER) */
}
bool isConcurrentScavengerInProgress();
MMINLINE bool
isScavengerEnabled()
{
#if defined(OMR_GC_MODRON_SCAVENGER)
return scavengerEnabled;
#else
return false;
#endif /* defined(OMR_GC_MODRON_SCAVENGER) */
}
MMINLINE bool
isConcurrentMarkEnabled()
{
#if defined(OMR_GC_MODRON_CONCURRENT_MARK)
return concurrentMark;
#else
return false;
#endif /* defined(OMR_GC_MODRON_CONCURRENT_MARK) */
}
MMINLINE bool
isConcurrentSweepEnabled()
{
#if defined(OMR_GC_CONCURRENT_SWEEP)
return concurrentSweep;
#else
return false;
#endif /* defined(OMR_GC_CONCURRENT_SWEEP) */
}
MMINLINE bool
isSegregatedHeap()
{
#if defined(OMR_GC_COMBINATION_SPEC)
return _isSegregatedHeap;
#elif defined(OMR_GC_SEGREGATED_HEAP)
return true;
#else
return false;
#endif
}
MMINLINE void
setSegregatedHeap(bool value)
{
#if defined(OMR_GC_COMBINATION_SPEC)
_isSegregatedHeap = value;
#endif