codeCache.cpp
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "jvm_io.h"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/codeHeapState.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/dependencyContext.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/vmError.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif
// Helper class for printing in CodeCache
class CodeBlob_sizes {
private:
int count;
int total_size;
int header_size;
int code_size;
int stub_size;
int relocation_size;
int scopes_oop_size;
int scopes_metadata_size;
int scopes_data_size;
int scopes_pcs_size;
public:
CodeBlob_sizes() {
count = 0;
total_size = 0;
header_size = 0;
code_size = 0;
stub_size = 0;
relocation_size = 0;
scopes_oop_size = 0;
scopes_metadata_size = 0;
scopes_data_size = 0;
scopes_pcs_size = 0;
}
int total() const { return total_size; }
bool is_empty() const { return count == 0; }
void print(const char* title) const {
if (is_empty()) {
tty->print_cr(" #%d %s = %dK",
count,
title,
total() / (int)K);
} else {
tty->print_cr(" #%d %s = %dK (hdr %dK %d%%, loc %dK %d%%, code %dK %d%%, stub %dK %d%%, [oops %dK %d%%, metadata %dK %d%%, data %dK %d%%, pcs %dK %d%%])",
count,
title,
total() / (int)K,
header_size / (int)K,
header_size * 100 / total_size,
relocation_size / (int)K,
relocation_size * 100 / total_size,
code_size / (int)K,
code_size * 100 / total_size,
stub_size / (int)K,
stub_size * 100 / total_size,
scopes_oop_size / (int)K,
scopes_oop_size * 100 / total_size,
scopes_metadata_size / (int)K,
scopes_metadata_size * 100 / total_size,
scopes_data_size / (int)K,
scopes_data_size * 100 / total_size,
scopes_pcs_size / (int)K,
scopes_pcs_size * 100 / total_size);
}
}
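// Accumulate the sizes of one CodeBlob; nmethods additionally contribute a
// breakdown of their stub, oop, metadata and scope sections.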
void add(CodeBlob* cb) {
count++;
total_size += cb->size();
header_size += cb->header_size();
relocation_size += cb->relocation_size();
if (cb->is_nmethod()) {
nmethod* nm = cb->as_nmethod_or_null();
code_size += nm->insts_size();
stub_size += nm->stub_size();
scopes_oop_size += nm->oops_size();
scopes_metadata_size += nm->metadata_size();
scopes_data_size += nm->scopes_data_size();
scopes_pcs_size += nm->scopes_pcs_size();
} else {
code_size += cb->code_size();
}
}
};
// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
#define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap)
#define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)
// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb))
address CodeCache::_low_bound = 0;
address CodeCache::_high_bound = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
ExceptionCache* volatile CodeCache::_exception_cache_purge_list = NULL;
// Initialize arrays of CodeHeap subsets
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
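// Check that the code heap sizes given on the command line are consistent with ReservedCodeCacheSize.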
void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
// Prepare error message
const char* error = "Invalid code heap sizes";
err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)"
" + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K",
non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);
if (total_size > cache_size) {
// Some code heap sizes were explicitly set: total_size must be <= cache_size
message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
vm_exit_during_initialization(error, message);
} else if (all_set && total_size != cache_size) {
// All code heap sizes were explicitly set: total_size must equal cache_size
message.append(" is not equal to ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
vm_exit_during_initialization(error, message);
}
}
void CodeCache::initialize_heaps() {
bool non_nmethod_set = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
bool profiled_set = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
bool non_profiled_set = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
size_t min_size = os::vm_page_size();
size_t cache_size = ReservedCodeCacheSize;
size_t non_nmethod_size = NonNMethodCodeHeapSize;
size_t profiled_size = ProfiledCodeHeapSize;
size_t non_profiled_size = NonProfiledCodeHeapSize;
// Check if total size set via command line flags exceeds the reserved size
check_heap_sizes((non_nmethod_set ? non_nmethod_size : min_size),
(profiled_set ? profiled_size : min_size),
(non_profiled_set ? non_profiled_size : min_size),
cache_size,
non_nmethod_set && profiled_set && non_profiled_set);
// Determine size of compiler buffers
size_t code_buffers_size = 0;
#ifdef COMPILER1
// C1 temporary code buffers (see Compiler::init_buffer_blob())
const int c1_count = CompilationPolicy::c1_count();
code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
// C2 scratch buffers (see Compile::init_scratch_buffer_blob())
const int c2_count = CompilationPolicy::c2_count();
// Initial size of constant table (this may be increased if a compiled method needs more space)
code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif
// Increase default non_nmethod_size to account for compiler buffers
if (!non_nmethod_set) {
non_nmethod_size += code_buffers_size;
}
// Calculate default CodeHeap sizes if not set by user
if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
// Check if we have enough space for the non-nmethod code heap
if (cache_size > non_nmethod_size) {
// Use the default value for non_nmethod_size and one half of the
// remaining size for non-profiled and one half for profiled methods
size_t remaining_size = cache_size - non_nmethod_size;
profiled_size = remaining_size / 2;
non_profiled_size = remaining_size - profiled_size;
} else {
// Use all space for the non-nmethod heap and set other heaps to minimal size
non_nmethod_size = cache_size - 2 * min_size;
profiled_size = min_size;
non_profiled_size = min_size;
}
} else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
// The user explicitly set some code heap sizes. Increase or decrease the (default)
// sizes of the other code heaps accordingly. First adapt non-profiled and profiled
// code heap sizes and then only change non-nmethod code heap size if still necessary.
intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
if (non_profiled_set) {
if (!profiled_set) {
// Adapt size of profiled code heap
if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
// Not enough space available, set to minimum size
diff_size += profiled_size - min_size;
profiled_size = min_size;
} else {
profiled_size += diff_size;
diff_size = 0;
}
}
} else if (profiled_set) {
// Adapt size of non-profiled code heap
if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
// Not enough space available, set to minimum size
diff_size += non_profiled_size - min_size;
non_profiled_size = min_size;
} else {
non_profiled_size += diff_size;
diff_size = 0;
}
} else if (non_nmethod_set) {
// Distribute remaining size between profiled and non-profiled code heaps
diff_size = cache_size - non_nmethod_size;
profiled_size = diff_size / 2;
non_profiled_size = diff_size - profiled_size;
diff_size = 0;
}
if (diff_size != 0) {
// Use non-nmethod code heap for remaining space requirements
assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
non_nmethod_size += diff_size;
}
}
// We do not need the profiled CodeHeap; use all space for the non-profiled CodeHeap
if (!heap_available(CodeBlobType::MethodProfiled)) {
non_profiled_size += profiled_size;
profiled_size = 0;
}
// We do not need the non-profiled CodeHeap; use all space for the non-nmethod CodeHeap
if (!heap_available(CodeBlobType::MethodNonProfiled)) {
non_nmethod_size += non_profiled_size;
non_profiled_size = 0;
}
// Make sure we have enough space for VM internal code
uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
if (non_nmethod_size < min_code_cache_size) {
vm_exit_during_initialization(err_msg(
"Not enough space in non-nmethod code heap to run VM: " SIZE_FORMAT "K < " SIZE_FORMAT "K",
non_nmethod_size/K, min_code_cache_size/K));
}
// Verify sizes and update flag values
assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod_size);
FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled_size);
FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled_size);
// If large page support is enabled, align code heaps according to large
// page size to make sure that code cache is covered by large pages.
const size_t alignment = MAX2(page_size(false, 8), (size_t) os::vm_allocation_granularity());
non_nmethod_size = align_up(non_nmethod_size, alignment);
profiled_size = align_down(profiled_size, alignment);
non_profiled_size = align_down(non_profiled_size, alignment);
// Reserve one continuous chunk of memory for CodeHeaps and split it into
// parts for the individual heaps. The memory layout looks like this:
// ---------- high -----------
// Non-profiled nmethods
// Non-nmethods
// Profiled nmethods
// ---------- low ------------
ReservedCodeSpace rs = reserve_heap_memory(cache_size);
ReservedSpace profiled_space = rs.first_part(profiled_size);
ReservedSpace rest = rs.last_part(profiled_size);
ReservedSpace non_method_space = rest.first_part(non_nmethod_size);
ReservedSpace non_profiled_space = rest.last_part(non_nmethod_size);
// Non-nmethods (stubs, adapters, ...)
add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
// Tier 2 and tier 3 (profiled) methods
add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
// Tier 1 and tier 4 (non-profiled) methods and native methods
add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}
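// Determine the page size used for the code cache. Large pages are used when the OS
// can execute large-page memory; otherwise the regular VM page size is returned.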
size_t CodeCache::page_size(bool aligned, size_t min_pages) {
if (os::can_execute_large_page_memory()) {
if (InitialCodeCacheSize < ReservedCodeCacheSize) {
// Make sure that the page size allows for an incremental commit of the reserved space
min_pages = MAX2(min_pages, (size_t)8);
}
return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) :
os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages);
} else {
return os::vm_page_size();
}
}
ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
// Align and reserve space for code cache
const size_t rs_ps = page_size();
const size_t rs_align = MAX2(rs_ps, (size_t) os::vm_allocation_granularity());
const size_t rs_size = align_up(size, rs_align);
ReservedCodeSpace rs(rs_size, rs_align, rs_ps);
if (!rs.is_reserved()) {
vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)",
rs_size/K));
}
// Initialize bounds
_low_bound = (address)rs.base();
_high_bound = _low_bound + rs.size();
return rs;
}
// Heaps available for allocation
bool CodeCache::heap_available(CodeBlobType code_blob_type) {
if (!SegmentedCodeCache) {
// No segmentation: use a single code heap
return (code_blob_type == CodeBlobType::All);
} else if (CompilerConfig::is_interpreter_only()) {
// Interpreter only: we don't need any method code heaps
return (code_blob_type == CodeBlobType::NonNMethod);
} else if (CompilerConfig::is_c1_profiling()) {
// Tiered compilation: use all code heaps
return (code_blob_type < CodeBlobType::All);
} else {
// No TieredCompilation: we only need the non-nmethod and non-profiled code heap
return (code_blob_type == CodeBlobType::NonNMethod) ||
(code_blob_type == CodeBlobType::MethodNonProfiled);
}
}
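// Return the name of the command-line flag that sizes the code heap of the given type.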
const char* CodeCache::get_code_heap_flag_name(CodeBlobType code_blob_type) {
switch(code_blob_type) {
case CodeBlobType::NonNMethod:
return "NonNMethodCodeHeapSize";
break;
case CodeBlobType::MethodNonProfiled:
return "NonProfiledCodeHeapSize";
break;
case CodeBlobType::MethodProfiled:
return "ProfiledCodeHeapSize";
break;
default:
ShouldNotReachHere();
return NULL;
}
}
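// Comparator used to keep the CodeHeap arrays sorted: heaps are ordered by code blob type,
// and by pointer value within the same type.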
int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) {
if (lhs->code_blob_type() == rhs->code_blob_type()) {
return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0);
} else {
return static_cast<int>(lhs->code_blob_type()) - static_cast<int>(rhs->code_blob_type());
}
}
void CodeCache::add_heap(CodeHeap* heap) {
assert(!Universe::is_fully_initialized(), "late heap addition?");
_heaps->insert_sorted<code_heap_compare>(heap);
CodeBlobType type = heap->code_blob_type();
if (code_blob_type_accepts_compiled(type)) {
_compiled_heaps->insert_sorted<code_heap_compare>(heap);
}
if (code_blob_type_accepts_nmethod(type)) {
_nmethod_heaps->insert_sorted<code_heap_compare>(heap);
}
if (code_blob_type_accepts_allocable(type)) {
_allocable_heaps->insert_sorted<code_heap_compare>(heap);
}
}
void CodeCache::add_heap(ReservedSpace rs, const char* name, CodeBlobType code_blob_type) {
// Check if heap is needed
if (!heap_available(code_blob_type)) {
return;
}
// Create CodeHeap
CodeHeap* heap = new CodeHeap(name, code_blob_type);
add_heap(heap);
// Reserve Space
size_t size_initial = MIN2((size_t)InitialCodeCacheSize, rs.size());
size_initial = align_up(size_initial, os::vm_page_size());
if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) {
vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (" SIZE_FORMAT "K)",
heap->name(), size_initial/K));
}
// Register the CodeHeap
MemoryService::add_code_heap_memory_pool(heap, name);
}
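// Return the CodeHeap that contains the given address, or NULL if no heap contains it.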
CodeHeap* CodeCache::get_code_heap_containing(void* start) {
FOR_ALL_HEAPS(heap) {
if ((*heap)->contains(start)) {
return *heap;
}
}
return NULL;
}
CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
assert(cb != NULL, "CodeBlob is null");
FOR_ALL_HEAPS(heap) {
if ((*heap)->contains_blob(cb)) {
return *heap;
}
}
ShouldNotReachHere();
return NULL;
}
CodeHeap* CodeCache::get_code_heap(CodeBlobType code_blob_type) {
FOR_ALL_HEAPS(heap) {
if ((*heap)->accepts(code_blob_type)) {
return *heap;
}
}
return NULL;
}
CodeBlob* CodeCache::first_blob(CodeHeap* heap) {
assert_locked_or_safepoint(CodeCache_lock);
assert(heap != NULL, "heap is null");
return (CodeBlob*)heap->first();
}
CodeBlob* CodeCache::first_blob(CodeBlobType code_blob_type) {
if (heap_available(code_blob_type)) {
return first_blob(get_code_heap(code_blob_type));
} else {
return NULL;
}
}
CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
assert_locked_or_safepoint(CodeCache_lock);
assert(heap != NULL, "heap is null");
return (CodeBlob*)heap->next(cb);
}
/**
 * Do not seize the CodeCache lock here -- if the caller has not
 * already done so, we are going to lose bigtime, since the code
 * cache will contain a garbage CodeBlob until the caller can
 * run the constructor for the CodeBlob subclass it is busy
 * instantiating.
 */
CodeBlob* CodeCache::allocate(int size, CodeBlobType code_blob_type, bool handle_alloc_failure, CodeBlobType orig_code_blob_type) {
assert_locked_or_safepoint(CodeCache_lock);
assert(size > 0, "Code cache allocation request must be > 0 but is %d", size);
if (size <= 0) {
return NULL;
}
CodeBlob* cb = NULL;
// Get CodeHeap for the given CodeBlobType
CodeHeap* heap = get_code_heap(code_blob_type);
assert(heap != NULL, "heap is null");
while (true) {
cb = (CodeBlob*)heap->allocate(size);
if (cb != NULL) break;
if (!heap->expand_by(CodeCacheExpansionSize)) {
// Save original type for error reporting
if (orig_code_blob_type == CodeBlobType::All) {
orig_code_blob_type = code_blob_type;
}
// Expansion failed
if (SegmentedCodeCache) {
// Fallback solution: Try to store code in another code heap.
// NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled)
CodeBlobType type = code_blob_type;
switch (type) {
case CodeBlobType::NonNMethod:
type = CodeBlobType::MethodNonProfiled;
break;
case CodeBlobType::MethodNonProfiled:
type = CodeBlobType::MethodProfiled;
break;
case CodeBlobType::MethodProfiled:
// Avoid loop if we already tried that code heap
if (type == orig_code_blob_type) {
type = CodeBlobType::MethodNonProfiled;
}
break;
default:
break;
}
if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) {
if (PrintCodeCacheExtension) {
tty->print_cr("Extension of %s failed. Trying to allocate in %s.",
heap->name(), get_code_heap(type)->name());
}
return allocate(size, type, handle_alloc_failure, orig_code_blob_type);
}
}
if (handle_alloc_failure) {
MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CompileBroker::handle_full_code_cache(orig_code_blob_type);
}
return NULL;
}
if (PrintCodeCacheExtension) {
ResourceMark rm;
if (_nmethod_heaps->length() >= 1) {
tty->print("%s", heap->name());
} else {
tty->print("CodeCache");
}
tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
(intptr_t)heap->low_boundary(), (intptr_t)heap->high(),
(address)heap->high() - (address)heap->low_boundary());
}
}
print_trace("allocation", cb, size);
return cb;
}
void CodeCache::free(CodeBlob* cb) {
assert_locked_or_safepoint(CodeCache_lock);
CodeHeap* heap = get_code_heap(cb);
print_trace("free", cb);
if (cb->is_nmethod()) {
heap->set_nmethod_count(heap->nmethod_count() - 1);
if (((nmethod *)cb)->has_dependencies()) {
_number_of_nmethods_with_dependencies--;
}
}
if (cb->is_adapter_blob()) {
heap->set_adapter_count(heap->adapter_count() - 1);
}
// Get heap for given CodeBlob and deallocate
get_code_heap(cb)->deallocate(cb);
assert(heap->blob_count() >= 0, "sanity check");
}
void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) {
assert_locked_or_safepoint(CodeCache_lock);
guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!");
print_trace("free_unused_tail", cb);
// We also have to account for the extra space (i.e. header) used by the CodeBlob
// which provides the memory (see BufferBlob::create() in codeBlob.cpp).
used += CodeBlob::align_code_offset(cb->header_size());
// Get heap for given CodeBlob and deallocate its unused tail
get_code_heap(cb)->deallocate_tail(cb, used);
// Adjust the sizes of the CodeBlob
cb->adjust_size(used);
}
void CodeCache::commit(CodeBlob* cb) {
// this is called by nmethod::nmethod, which must already own CodeCache_lock
assert_locked_or_safepoint(CodeCache_lock);
CodeHeap* heap = get_code_heap(cb);
if (cb->is_nmethod()) {
heap->set_nmethod_count(heap->nmethod_count() + 1);
if (((nmethod *)cb)->has_dependencies()) {
_number_of_nmethods_with_dependencies++;
}
}
if (cb->is_adapter_blob()) {
heap->set_adapter_count(heap->adapter_count() + 1);
}
// flush the hardware I-cache
ICache::invalidate_range(cb->content_begin(), cb->content_size());
}
bool CodeCache::contains(void *p) {
// S390 uses contains() in current_frame(), which is used before
// code cache initialization if NativeMemoryTracking=detail is set.
S390_ONLY(if (_heaps == NULL) return false;)
// It should be ok to call contains without holding a lock.
FOR_ALL_HEAPS(heap) {
if ((*heap)->contains(p)) {
return true;
}
}
return false;
}
bool CodeCache::contains(nmethod *nm) {
return contains((void *)nm);
}
// This method is safe to call without holding the CodeCache_lock. It only depends on the _segmap containing
// valid indices, which it always does, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
// NMT can walk the stack before code cache is created
if (_heaps != NULL) {
CodeHeap* heap = get_code_heap_containing(start);
if (heap != NULL) {
return heap->find_blob(start);
}
}
return NULL;
}
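// Like find_blob(), but asserts that the blob found at 'start' is an nmethod.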
nmethod* CodeCache::find_nmethod(void* start) {
CodeBlob* cb = find_blob(start);
assert(cb->is_nmethod(), "did not find an nmethod");
return (nmethod*)cb;
}
void CodeCache::blobs_do(void f(CodeBlob* nm)) {
assert_locked_or_safepoint(CodeCache_lock);
FOR_ALL_HEAPS(heap) {
FOR_ALL_BLOBS(cb, *heap) {
f(cb);
}
}
}
void CodeCache::nmethods_do(void f(nmethod* nm)) {
assert_locked_or_safepoint(CodeCache_lock);
NMethodIterator iter(NMethodIterator::all_blobs);
while(iter.next()) {
f(iter.method());
}
}
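// Apply the given MetadataClosure to the metadata of every nmethod.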
void CodeCache::metadata_do(MetadataClosure* f) {
assert_locked_or_safepoint(CodeCache_lock);
NMethodIterator iter(NMethodIterator::all_blobs);
while(iter.next()) {
iter.method()->metadata_do(f);
}
}
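// Allocation alignment of CodeCache allocations, taken from the first CodeHeap.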
int CodeCache::alignment_unit() {
return (int)_heaps->first()->alignment_unit();
}
int CodeCache::alignment_offset() {
return (int)_heaps->first()->alignment_offset();
}
// Calculate the number of GCs after which an nmethod is expected to have been
// used in order to not be classed as cold.
void CodeCache::update_cold_gc_count() {
if (!MethodFlushing || !UseCodeCacheFlushing || NmethodSweepActivity == 0) {
// No aging
return;
}
size_t last_used = _last_unloading_used;
double last_time = _last_unloading_time;
double time = os::elapsedTime();
size_t free = unallocated_capacity();
size_t max = max_capacity();
size_t used = max - free;
double gc_interval = time - last_time;
_unloading_threshold_gc_requested = false;
_last_unloading_time = time;
_last_unloading_used = used;
if (last_time == 0.0) {
// The first GC doesn't have enough information to make good
// decisions, so just keep everything afloat
log_info(codecache)("Unknown code cache pressure; don't age code");
return;
}
if (gc_interval <= 0.0 || last_used >= used) {
// Dodge corner cases where there is no pressure or negative pressure
// on the code cache. Just don't unload when this happens.
_cold_gc_count = INT_MAX;
log_info(codecache)("No code cache pressure; don't age code");
return;
}
double allocation_rate = (used - last_used) / gc_interval;
_unloading_allocation_rates.add(allocation_rate);
_unloading_gc_intervals.add(gc_interval);
size_t aggressive_sweeping_free_threshold = StartAggressiveSweepingAt / 100.0 * max;
if (free < aggressive_sweeping_free_threshold) {
// We are already in the red zone; be very aggressive to avoid disaster
// But not more aggressive than 2. This ensures that an nmethod must
// have been unused at least between two GCs to be considered cold still.
_cold_gc_count = 2;
log_info(codecache)("Code cache critically low; use aggressive aging");
return;
}
// The code cache has an expected time for cold nmethods to "time out"
// when they have not been used. The time for nmethods to time out
// depends on how long we expect we can keep allocating code until
// aggressive sweeping starts, based on sampled allocation rates.
double average_gc_interval = _unloading_gc_intervals.avg();
double average_allocation_rate = _unloading_allocation_rates.avg();
double time_to_aggressive = ((double)(free - aggressive_sweeping_free_threshold)) / average_allocation_rate;
double cold_timeout = time_to_aggressive / NmethodSweepActivity;
// Convert time to GC cycles, and crop at INT_MAX. The reason for
// that is that the _cold_gc_count will be added to an epoch number
// and that addition must not overflow, or we can crash the VM.
// But not more aggressive than 2. This ensures that an nmethod must
// have been unused at least between two GCs to be considered cold still.
_cold_gc_count = MAX2(MIN2((uint64_t)(cold_timeout / average_gc_interval), (uint64_t)INT_MAX), (uint64_t)2);
double used_ratio = double(used) / double(max);
double last_used_ratio = double(last_used) / double(max);
log_info(codecache)("Allocation rate: %.3f KB/s, time to aggressive unloading: %.3f s, cold timeout: %.3f s, cold gc count: " UINT64_FORMAT
", used: %.3f MB (%.3f%%), last used: %.3f MB (%.3f%%), gc interval: %.3f s",
average_allocation_rate / K, time_to_aggressive, cold_timeout, _cold_gc_count,
double(used) / M, used_ratio * 100.0, double(last_used) / M, last_used_ratio * 100.0, average_gc_interval);
}
uint64_t CodeCache::cold_gc_count() {
return _cold_gc_count;
}
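// Called on code cache allocation: heuristically trigger a GC when the code cache is nearly
// exhausted, or when enough new code has been allocated since the last unloading cycle.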
void CodeCache::gc_on_allocation() {
if (!is_init_completed()) {
// Let's not heuristically trigger GCs before the JVM is ready for GCs, no matter what
return;
}
size_t free = unallocated_capacity();
size_t max = max_capacity();
size_t used = max - free;
double free_ratio = double(free) / double(max);
if (free_ratio <= StartAggressiveSweepingAt / 100.0) {
// In case the GC is concurrent, we make sure only one thread requests the GC.
if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
log_info(codecache)("Triggering aggressive GC due to having only %.3f%% free memory", free_ratio * 100.0);
Universe::heap()->collect(GCCause::_codecache_GC_aggressive);
}
return;
}
size_t last_used = _last_unloading_used;
if (last_used >= used) {
// No increase since last GC; no need to sweep yet
return;
}
size_t allocated_since_last = used - last_used;
double allocated_since_last_ratio = double(allocated_since_last) / double(max);
double threshold = SweeperThreshold / 100.0;
double used_ratio = double(used) / double(max);
double last_used_ratio = double(last_used) / double(max);
if (used_ratio > threshold) {
// After threshold is reached, scale it by free_ratio so that more aggressive
// GC is triggered as we approach code cache exhaustion
threshold *= free_ratio;
}
// If code has been allocated in the code cache without any GC happening at all, make sure
// a GC is eventually triggered to avoid trouble.
if (allocated_since_last_ratio > threshold) {
// In case the GC is concurrent, we make sure only one thread requests the GC.
if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) {
log_info(codecache)("Triggering threshold (%.3f%%) GC due to allocating %.3f%% since last unloading (%.3f%% used -> %.3f%% used)",
threshold * 100.0, allocated_since_last_ratio * 100.0, last_used_ratio * 100.0, used_ratio * 100.0);
Universe::heap()->collect(GCCause::_codecache_GC_threshold);
}
}
}
// We initialize _gc_epoch to 2 because previous_completed_gc_marking_cycle()
// subtracts up to 2 from it, and the type is unsigned. We don't want underflow.
//
// Odd values mean that marking is in progress, and even values mean that no
// marking is currently active.
uint64_t CodeCache::_gc_epoch = 2;
// After how many GCs of not being used do we consider an nmethod cold?
uint64_t CodeCache::_cold_gc_count = INT_MAX;
double CodeCache::_last_unloading_time = 0.0;
size_t CodeCache::_last_unloading_used = 0;
volatile bool CodeCache::_unloading_threshold_gc_requested = false;
TruncatedSeq CodeCache::_unloading_gc_intervals(10 /* samples */);
TruncatedSeq CodeCache::_unloading_allocation_rates(10 /* samples */);
uint64_t CodeCache::gc_epoch() {
return _gc_epoch;
}
bool CodeCache::is_gc_marking_cycle_active() {
// Odd means that marking is active
return (_gc_epoch % 2) == 1;
}
uint64_t CodeCache::previous_completed_gc_marking_cycle() {
if (is_gc_marking_cycle_active()) {
return _gc_epoch - 2;
} else {
return _gc_epoch - 1;
}
}
void CodeCache::on_gc_marking_cycle_start() {
assert(!is_gc_marking_cycle_active(), "Previous marking cycle never ended");
++_gc_epoch;
}
void CodeCache::on_gc_marking_cycle_finish() {
assert(is_gc_marking_cycle_active(), "Marking cycle started before last one finished");
++_gc_epoch;
update_cold_gc_count();
}
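// Arm the entry barrier of every nmethod, if the current GC provides an nmethod barrier set.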
void CodeCache::arm_all_nmethods() {
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm != NULL) {
bs_nm->arm_all_nmethods();
}
}
// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(bool unloading_occurred) {
assert_locked_or_safepoint(CodeCache_lock);
CompiledMethodIterator iter(CompiledMethodIterator::all_blobs);
while(iter.next()) {
iter.method()->do_unloading(unloading_occurred);
}
}
void CodeCache::blobs_do(CodeBlobClosure* f) {
assert_locked_or_safepoint(CodeCache_lock);
FOR_ALL_ALLOCABLE_HEAPS(heap) {
FOR_ALL_BLOBS(cb, *heap) {
f->do_code_blob(cb);
#ifdef ASSERT
if (cb->is_nmethod()) {
Universe::heap()->verify_nmethod((nmethod*)cb);
}
#endif //ASSERT
}
}
}
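// Debug-only: verify that the inline caches of all not-unloading nmethods are clean.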
void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
NMethodIterator iter(NMethodIterator::only_not_unloading);
while(iter.next()) {
nmethod* nm = iter.method();
nm->verify_clean_inline_caches();
nm->verify();
}
#endif
}
void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
// make sure that we aren't leaking icholders
int count = 0;
FOR_ALL_HEAPS(heap) {
FOR_ALL_BLOBS(cb, *heap) {
CompiledMethod *nm = cb->as_compiled_method_or_null();
if (nm != NULL) {
count += nm->verify_icholder_relocations();
}
}
}
assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
CompiledICHolder::live_count(), "must agree");
#endif
}
// Defer freeing of concurrently cleaned ExceptionCache entries until
// after a global handshake operation.
void CodeCache::release_exception_cache(ExceptionCache* entry) {
if (SafepointSynchronize::is_at_safepoint()) {
delete entry;
} else {
for (;;) {
ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list);
entry->set_purge_list_next(purge_list_head);
if (Atomic::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) {
break;
}
}
}
}
// Delete exception caches that have been concurrently unlinked,
// once a subsequent global handshake operation has completed.
void CodeCache::purge_exception_caches() {
ExceptionCache* curr = _exception_cache_purge_list;
while (curr != NULL) {
ExceptionCache* next = curr->purge_list_next();
delete curr;
curr = next;
}
_exception_cache_purge_list = NULL;
}
// Register an is_unloading nmethod to be flushed after unlinking
void CodeCache::register_unlinked(nmethod* nm) {
assert(nm->unlinked_next() == NULL, "Only register for unloading once");
for (;;) {
// Only need acquire when reading the head, when the next
// pointer is walked, which it is not here.
nmethod* head = Atomic::load(&_unlinked_head);
nmethod* next = head != NULL ? head : nm; // Self looped means end of list
nm->set_unlinked_next(next);
if (Atomic::cmpxchg(&_unlinked_head, head, nm) == head) {
break;
}
}
}
// Flush all the nmethods the GC unlinked
void CodeCache::flush_unlinked_nmethods() {
nmethod* nm = _unlinked_head;
_unlinked_head = NULL;
size_t freed_memory = 0;
while (nm != NULL) {
nmethod* next = nm->unlinked_next();
freed_memory += nm->total_size();
nm->flush();
if (next == nm) {
// Self looped means end of list
break;
}
nm = next;
}
// Try to start the compiler again if we freed any memory
if (!CompileBroker::should_compile_new_jobs() && freed_memory != 0) {
CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
log_info(codecache)("Restarting compiler");
EventJITRestart event;
event.set_freedMemory(freed_memory);
event.set_codeCacheMaxCapacity(CodeCache::max_capacity());
event.commit();
}
}
uint8_t CodeCache::_unloading_cycle = 1;