/*
* Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "jvm.h"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interp_masm_arm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "logging/log.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markWord.hpp"
#include "oops/method.hpp"
#include "oops/methodData.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/powerOfTwo.hpp"
//--------------------------------------------------------------------
// Implementation of InterpreterMacroAssembler
InterpreterMacroAssembler::InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code) {
}
void InterpreterMacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
#ifdef ASSERT
// Ensure that last_sp is not filled.
{ Label L;
ldr(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
cbz(Rtemp, L);
stop("InterpreterMacroAssembler::call_VM_helper: last_sp != NULL");
bind(L);
}
#endif // ASSERT
// Rbcp must be saved/restored since it may change due to GC.
save_bcp();
// super call
MacroAssembler::call_VM_helper(oop_result, entry_point, number_of_arguments, check_exceptions);
// Restore interpreter specific registers.
restore_bcp();
restore_method();
}
void InterpreterMacroAssembler::jump_to_entry(address entry) {
assert(entry, "Entry must have been generated by now");
b(entry);
}
void InterpreterMacroAssembler::check_and_handle_popframe() {
if (can_pop_frame()) {
Label L;
const Register popframe_cond = R2_tmp;
// Initiate popframe handling only if it is not already being processed. If the flag
// has the popframe_processing bit set, it means that this code is called *during* popframe
// handling - we don't want to reenter.
ldr_s32(popframe_cond, Address(Rthread, JavaThread::popframe_condition_offset()));
tbz(popframe_cond, exact_log2(JavaThread::popframe_pending_bit), L);
tbnz(popframe_cond, exact_log2(JavaThread::popframe_processing_bit), L);
// Call Interpreter::remove_activation_preserving_args_entry() to get the
// address of the same-named entrypoint in the generated interpreter code.
call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
// Call indirectly to avoid generation ordering problem.
jump(R0);
bind(L);
}
}
// Blows R2, Rtemp. Sets TOS cached value.
void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
const Register thread_state = R2_tmp;
ldr(thread_state, Address(Rthread, JavaThread::jvmti_thread_state_offset()));
const Address tos_addr(thread_state, JvmtiThreadState::earlyret_tos_offset());
const Address oop_addr(thread_state, JvmtiThreadState::earlyret_oop_offset());
const Address val_addr(thread_state, JvmtiThreadState::earlyret_value_offset());
const Address val_addr_hi(thread_state, JvmtiThreadState::earlyret_value_offset()
+ in_ByteSize(wordSize));
Register zero = zero_register(Rtemp);
switch (state) {
case atos: ldr(R0_tos, oop_addr);
str(zero, oop_addr);
interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
break;
case ltos: ldr(R1_tos_hi, val_addr_hi); // fall through
case btos: // fall through
case ztos: // fall through
case ctos: // fall through
case stos: // fall through
case itos: ldr_s32(R0_tos, val_addr); break;
#ifdef __SOFTFP__
case dtos: ldr(R1_tos_hi, val_addr_hi); // fall through
case ftos: ldr(R0_tos, val_addr); break;
#else
case ftos: ldr_float (S0_tos, val_addr); break;
case dtos: ldr_double(D0_tos, val_addr); break;
#endif // __SOFTFP__
case vtos: /* nothing to do */ break;
default : ShouldNotReachHere();
}
// Clean up tos value in the thread object
str(zero, val_addr);
str(zero, val_addr_hi);
mov(Rtemp, (int) ilgl);
str_32(Rtemp, tos_addr);
}
// Blows R2, Rtemp.
void InterpreterMacroAssembler::check_and_handle_earlyret() {
if (can_force_early_return()) {
Label L;
const Register thread_state = R2_tmp;
ldr(thread_state, Address(Rthread, JavaThread::jvmti_thread_state_offset()));
cbz(thread_state, L); // if (thread->jvmti_thread_state() == NULL) exit;
// Initiate earlyret handling only if it is not already being processed.
// If the flag has the earlyret_processing bit set, it means that this code
// is called *during* earlyret handling - we don't want to reenter.
ldr_s32(Rtemp, Address(thread_state, JvmtiThreadState::earlyret_state_offset()));
cmp(Rtemp, JvmtiThreadState::earlyret_pending);
b(L, ne);
// Call Interpreter::remove_activation_early_entry() to get the address of the
// same-named entrypoint in the generated interpreter code.
ldr_s32(R0, Address(thread_state, JvmtiThreadState::earlyret_tos_offset()));
call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), R0);
jump(R0);
bind(L);
}
}
// Sets reg. Blows Rtemp.
void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset) {
assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
assert(reg != Rtemp, "should be different registers");
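// A Java bytecode operand is big-endian; equivalently, in C:
//   reg = (u2)((Rbcp[bcp_offset] << 8) | Rbcp[bcp_offset + 1]);
// The two bytes are loaded separately since Rbcp + bcp_offset may be unaligned.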
ldrb(Rtemp, Address(Rbcp, bcp_offset));
ldrb(reg, Address(Rbcp, bcp_offset+1));
orr(reg, reg, AsmOperand(Rtemp, lsl, BitsPerByte));
}
void InterpreterMacroAssembler::get_index_at_bcp(Register index, int bcp_offset, Register tmp_reg, size_t index_size) {
assert_different_registers(index, tmp_reg);
if (index_size == sizeof(u2)) {
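// Note the byte order is reversed relative to get_unsigned_2_byte_index_at_bcp above:
// rewritten constant pool cache indices are stored in native (here little-endian) order,
// i.e. index = bcp[bcp_offset] | (bcp[bcp_offset + 1] << 8).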
// load bytes of index separately to avoid unaligned access
ldrb(index, Address(Rbcp, bcp_offset+1));
ldrb(tmp_reg, Address(Rbcp, bcp_offset));
orr(index, tmp_reg, AsmOperand(index, lsl, BitsPerByte));
} else if (index_size == sizeof(u4)) {
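// invokedynamic indices are native-order u4 values; the bytes are assembled
// LSB-first: index = b0 | (b1 << 8) | (b2 << 16) | (b3 << 24).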
ldrb(index, Address(Rbcp, bcp_offset+3));
ldrb(tmp_reg, Address(Rbcp, bcp_offset+2));
orr(index, tmp_reg, AsmOperand(index, lsl, BitsPerByte));
ldrb(tmp_reg, Address(Rbcp, bcp_offset+1));
orr(index, tmp_reg, AsmOperand(index, lsl, BitsPerByte));
ldrb(tmp_reg, Address(Rbcp, bcp_offset));
orr(index, tmp_reg, AsmOperand(index, lsl, BitsPerByte));
// Check if the secondary index definition is still ~x, otherwise
// we have to change the following assembler code to calculate the
// plain index.
assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
mvn_32(index, index); // convert to plain index
} else if (index_size == sizeof(u1)) {
ldrb(index, Address(Rbcp, bcp_offset));
} else {
ShouldNotReachHere();
}
}
// Sets cache, index.
void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset, size_t index_size) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
assert_different_registers(cache, index);
get_index_at_bcp(index, bcp_offset, cache, index_size);
// load constant pool cache pointer
ldr(cache, Address(FP, frame::interpreter_frame_cache_offset * wordSize));
// convert from field index to ConstantPoolCacheEntry index
assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
logical_shift_left(index, index, 2);
}
// Sets cache, index, bytecode.
void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache, Register index, Register bytecode, int byte_no, int bcp_offset, size_t index_size) {
get_cache_and_index_at_bcp(cache, index, bcp_offset, index_size);
// caution index and bytecode can be the same
add(bytecode, cache, AsmOperand(index, lsl, LogBytesPerWord));
ldrb(bytecode, Address(bytecode, (1 + byte_no) + in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset())));
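// The LoadLoad barrier keeps the bytecode load above ordered before any subsequent
// loads from the resolved cache entry, pairing with the store ordering used during
// resolution (where the bytecode is written last).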
TemplateTable::volatile_barrier(MacroAssembler::LoadLoad, noreg, true);
}
// Sets cache. Blows reg_tmp.
void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register reg_tmp, int bcp_offset, size_t index_size) {
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
assert_different_registers(cache, reg_tmp);
get_index_at_bcp(reg_tmp, bcp_offset, cache, index_size);
// load constant pool cache pointer
ldr(cache, Address(FP, frame::interpreter_frame_cache_offset * wordSize));
// skip past the header
add(cache, cache, in_bytes(ConstantPoolCache::base_offset()));
// convert from field index to ConstantPoolCacheEntry index
// and from word offset to byte offset
assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
add(cache, cache, AsmOperand(reg_tmp, lsl, 2 + LogBytesPerWord));
}
// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
Register result, Register index) {
assert_different_registers(result, index);
get_constant_pool(result);
Register cache = result;
// load pointer for resolved_references[] objArray
ldr(cache, Address(result, ConstantPool::cache_offset_in_bytes()));
ldr(cache, Address(result, ConstantPoolCache::resolved_references_offset_in_bytes()));
resolve_oop_handle(cache);
// Add in the index
// convert from field index to resolved_references() index and from
// word index to byte offset. Since this is a java object, it can be compressed
logical_shift_left(index, index, LogBytesPerHeapOop);
add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
load_heap_oop(result, Address(cache, index));
}
void InterpreterMacroAssembler::load_resolved_klass_at_offset(
Register Rcpool, Register Rindex, Register Rklass) {
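// A resolved-klass constant pool slot packs {name_index, resolved_klass_index} into
// one word; entries start right after the ConstantPool header, so the ldrh below
// picks up the resolved_klass_index halfword (the low half on this little-endian target).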
add(Rtemp, Rcpool, AsmOperand(Rindex, lsl, LogBytesPerWord));
ldrh(Rtemp, Address(Rtemp, sizeof(ConstantPool))); // Rtemp = resolved_klass_index
ldr(Rklass, Address(Rcpool, ConstantPool::resolved_klasses_offset_in_bytes())); // Rklass = cpool->_resolved_klasses
add(Rklass, Rklass, AsmOperand(Rtemp, lsl, LogBytesPerWord));
ldr(Rklass, Address(Rklass, Array<Klass*>::base_offset_in_bytes()));
}
// Generate a subtype check: branch to not_subtype if sub_klass is
// not a subtype of super_klass.
// Profiling code for the subtype check failure (profile_typecheck_failed)
// should be explicitly generated by the caller in the not_subtype case.
// Blows Rtemp, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
Register Rsuper_klass,
Label &not_subtype,
Register tmp1,
Register tmp2) {
assert_different_registers(Rsub_klass, Rsuper_klass, tmp1, tmp2, Rtemp);
Label ok_is_subtype, loop, update_cache;
const Register super_check_offset = tmp1;
const Register cached_super = tmp2;
// Profile the not-null value's klass.
profile_typecheck(tmp1, Rsub_klass);
// Load the super-klass's check offset into super_check_offset
ldr_u32(super_check_offset, Address(Rsuper_klass, Klass::super_check_offset_offset()));
// Check for self
cmp(Rsub_klass, Rsuper_klass);
// Load from the sub-klass's super-class display list, or a 1-word cache of
// the secondary superclass list, or a failing value with a sentinel offset
// if the super-klass is an interface or exceptionally deep in the Java
// hierarchy and we have to scan the secondary superclass list the hard way.
// See if we get an immediate positive hit
ldr(cached_super, Address(Rsub_klass, super_check_offset));
cond_cmp(Rsuper_klass, cached_super, ne);
b(ok_is_subtype, eq);
// Check for immediate negative hit
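// A miss above is final only when super_check_offset points into the primary supers
// display; if it instead equals secondary_super_cache_offset, the cache was merely
// cold and the secondary supers list must be scanned below.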
cmp(super_check_offset, in_bytes(Klass::secondary_super_cache_offset()));
b(not_subtype, ne);
// Now do a linear scan of the secondary super-klass chain.
const Register supers_arr = tmp1;
const Register supers_cnt = tmp2;
const Register cur_super = Rtemp;
// Load objArrayOop of secondary supers.
ldr(supers_arr, Address(Rsub_klass, Klass::secondary_supers_offset()));
ldr_u32(supers_cnt, Address(supers_arr, Array<Klass*>::length_offset_in_bytes())); // Load the array length
cmp(supers_cnt, 0);
// Skip to the start of array elements and prefetch the first super-klass.
ldr(cur_super, Address(supers_arr, Array<Klass*>::base_offset_in_bytes(), pre_indexed), ne);
b(not_subtype, eq);
bind(loop);
cmp(cur_super, Rsuper_klass);
b(update_cache, eq);
subs(supers_cnt, supers_cnt, 1);
ldr(cur_super, Address(supers_arr, wordSize, pre_indexed), ne);
b(loop, ne);
b(not_subtype);
bind(update_cache);
// Must be equal but missed in cache. Update cache.
str(Rsuper_klass, Address(Rsub_klass, Klass::secondary_super_cache_offset()));
bind(ok_is_subtype);
}
//////////////////////////////////////////////////////////////////////////////////
// Java Expression Stack
void InterpreterMacroAssembler::pop_ptr(Register r) {
assert(r != Rstack_top, "unpredictable instruction");
ldr(r, Address(Rstack_top, wordSize, post_indexed));
}
void InterpreterMacroAssembler::pop_i(Register r) {
assert(r != Rstack_top, "unpredictable instruction");
ldr_s32(r, Address(Rstack_top, wordSize, post_indexed));
zap_high_non_significant_bits(r);
}
void InterpreterMacroAssembler::pop_l(Register lo, Register hi) {
assert_different_registers(lo, hi);
assert(lo < hi, "lo must be < hi");
pop(RegisterSet(lo) | RegisterSet(hi));
}
void InterpreterMacroAssembler::pop_f(FloatRegister fd) {
fpops(fd);
}
void InterpreterMacroAssembler::pop_d(FloatRegister fd) {
fpopd(fd);
}
// Transition vtos -> state. Blows R0, R1. Sets TOS cached value.
void InterpreterMacroAssembler::pop(TosState state) {
switch (state) {
case atos: pop_ptr(R0_tos); break;
case btos: // fall through
case ztos: // fall through
case ctos: // fall through
case stos: // fall through
case itos: pop_i(R0_tos); break;
case ltos: pop_l(R0_tos_lo, R1_tos_hi); break;
#ifdef __SOFTFP__
case ftos: pop_i(R0_tos); break;
case dtos: pop_l(R0_tos_lo, R1_tos_hi); break;
#else
case ftos: pop_f(S0_tos); break;
case dtos: pop_d(D0_tos); break;
#endif // __SOFTFP__
case vtos: /* nothing to do */ break;
default : ShouldNotReachHere();
}
interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
}
void InterpreterMacroAssembler::push_ptr(Register r) {
assert(r != Rstack_top, "unpredictable instruction");
str(r, Address(Rstack_top, -wordSize, pre_indexed));
check_stack_top_on_expansion();
}
void InterpreterMacroAssembler::push_i(Register r) {
assert(r != Rstack_top, "unpredictable instruction");
str_32(r, Address(Rstack_top, -wordSize, pre_indexed));
check_stack_top_on_expansion();
}
void InterpreterMacroAssembler::push_l(Register lo, Register hi) {
assert_different_registers(lo, hi);
assert(lo < hi, "lo must be < hi");
push(RegisterSet(lo) | RegisterSet(hi));
}
void InterpreterMacroAssembler::push_f() {
fpushs(S0_tos);
}
void InterpreterMacroAssembler::push_d() {
fpushd(D0_tos);
}
// Transition state -> vtos. Blows Rtemp.
void InterpreterMacroAssembler::push(TosState state) {
interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
switch (state) {
case atos: push_ptr(R0_tos); break;
case btos: // fall through
case ztos: // fall through
case ctos: // fall through
case stos: // fall through
case itos: push_i(R0_tos); break;
case ltos: push_l(R0_tos_lo, R1_tos_hi); break;
#ifdef __SOFTFP__
case ftos: push_i(R0_tos); break;
case dtos: push_l(R0_tos_lo, R1_tos_hi); break;
#else
case ftos: push_f(); break;
case dtos: push_d(); break;
#endif // __SOFTFP__
case vtos: /* nothing to do */ break;
default : ShouldNotReachHere();
}
}
// Converts return value in R0/R1 (interpreter calling conventions) to TOS cached value.
void InterpreterMacroAssembler::convert_retval_to_tos(TosState state) {
#if (!defined __SOFTFP__ && !defined __ABI_HARD__)
// According to interpreter calling conventions, result is returned in R0/R1,
// but templates expect ftos in S0, and dtos in D0.
if (state == ftos) {
fmsr(S0_tos, R0);
} else if (state == dtos) {
fmdrr(D0_tos, R0, R1);
}
#endif // !__SOFTFP__ && !__ABI_HARD__
}
// Converts TOS cached value to return value in R0/R1 (according to interpreter calling conventions).
void InterpreterMacroAssembler::convert_tos_to_retval(TosState state) {
#if (!defined __SOFTFP__ && !defined __ABI_HARD__)
// According to interpreter calling conventions, result is returned in R0/R1,
// so ftos (S0) and dtos (D0) are moved to R0/R1.
if (state == ftos) {
fmrs(R0, S0_tos);
} else if (state == dtos) {
fmrrd(R0, R1, D0_tos);
}
#endif // !__SOFTFP__ && !__ABI_HARD__
}
// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
ldr(val, Address(Rstack_top, Interpreter::expr_offset_in_bytes(n)));
}
void InterpreterMacroAssembler::store_ptr(int n, Register val) {
str(val, Address(Rstack_top, Interpreter::expr_offset_in_bytes(n)));
}
void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() {
// set sender sp
mov(Rsender_sp, SP);
// record last_sp
str(Rsender_sp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
}
// Jump to the from_interpreted entry of a call, unless single stepping is possible
// in this thread, in which case we must call the i2i entry
void InterpreterMacroAssembler::jump_from_interpreted(Register method) {
assert_different_registers(method, Rtemp);
prepare_to_jump_from_interpreted();
if (can_post_interpreter_events()) {
// JVMTI events, such as single-stepping, are implemented partly by avoiding running
// compiled code in threads for which the event is enabled. Check here for
// interp_only_mode if these events CAN be enabled.
ldr_s32(Rtemp, Address(Rthread, JavaThread::interp_only_mode_offset()));
cmp(Rtemp, 0);
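// interp_only_mode != 0: the conditional ldr below loads PC with the interpreter
// (i2i) entry directly, bypassing any compiled entry point.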
ldr(PC, Address(method, Method::interpreter_entry_offset()), ne);
}
indirect_jump(Address(method, Method::from_interpreted_offset()), Rtemp);
}
void InterpreterMacroAssembler::restore_dispatch() {
mov_slow(RdispatchTable, (address)Interpreter::dispatch_table(vtos));
}
// The following two routines provide a hook so that an implementation
// can schedule the dispatch in two parts.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) {
// Nothing ARM-specific to be done here.
}
void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) {
dispatch_next(state, step);
}
void InterpreterMacroAssembler::dispatch_base(TosState state,
DispatchTableMode table_mode,
bool verifyoop, bool generate_poll) {
if (VerifyActivationFrameSize) {
Label L;
sub(Rtemp, FP, SP);
int min_frame_size = (frame::link_offset - frame::interpreter_frame_initial_sp_offset) * wordSize;
cmp(Rtemp, min_frame_size);
b(L, ge);
stop("broken stack frame");
bind(L);
}
if (verifyoop) {
interp_verify_oop(R0_tos, state, __FILE__, __LINE__);
}
Label safepoint;
address* const safepoint_table = Interpreter::safept_table(state);
address* const table = Interpreter::dispatch_table(state);
bool needs_thread_local_poll = generate_poll && table != safepoint_table;
if (needs_thread_local_poll) {
NOT_PRODUCT(block_comment("Thread-local Safepoint poll"));
ldr(Rtemp, Address(Rthread, Thread::polling_word_offset()));
tbnz(Rtemp, exact_log2(SafepointMechanism::poll_bit()), safepoint);
}
if ((state == itos) || (state == btos) || (state == ztos) || (state == ctos) || (state == stos)) {
zap_high_non_significant_bits(R0_tos);
}
#ifdef ASSERT
Label L;
mov_slow(Rtemp, (address)Interpreter::dispatch_table(vtos));
cmp(Rtemp, RdispatchTable);
b(L, eq);
stop("invalid RdispatchTable");
bind(L);
#endif
if (table_mode == DispatchDefault) {
if (state == vtos) {
indirect_jump(Address::indexed_ptr(RdispatchTable, R3_bytecode), Rtemp);
} else {
// On 32-bit ARM, deriving the state's table address from RdispatchTable with a
// single sub is faster than materializing the address with mov_slow (see below).
sub(Rtemp, RdispatchTable, (Interpreter::distance_from_dispatch_table(vtos) -
Interpreter::distance_from_dispatch_table(state)) * wordSize);
indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp);
}
} else {
assert(table_mode == DispatchNormal, "invalid dispatch table mode");
address table = (address) Interpreter::normal_table(state);
mov_slow(Rtemp, table);
indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp);
}
if (needs_thread_local_poll) {
bind(safepoint);
lea(Rtemp, ExternalAddress((address)safepoint_table));
indirect_jump(Address::indexed_ptr(Rtemp, R3_bytecode), Rtemp);
}
nop(); // to avoid filling CPU pipeline with invalid instructions
nop();
}
void InterpreterMacroAssembler::dispatch_only(TosState state, bool generate_poll) {
dispatch_base(state, DispatchDefault, true, generate_poll);
}
void InterpreterMacroAssembler::dispatch_only_normal(TosState state) {
dispatch_base(state, DispatchNormal);
}
void InterpreterMacroAssembler::dispatch_only_noverify(TosState state) {
dispatch_base(state, DispatchNormal, false);
}
void InterpreterMacroAssembler::dispatch_next(TosState state, int step, bool generate_poll) {
// load next bytecode and advance Rbcp
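// (pre-indexed addressing: Rbcp is advanced by 'step' first, then the byte at the new Rbcp is loaded)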
ldrb(R3_bytecode, Address(Rbcp, step, pre_indexed));
dispatch_base(state, DispatchDefault, true, generate_poll);
}
void InterpreterMacroAssembler::narrow(Register result) {
// mask integer result to narrower return type.
const Register Rtmp = R2;
// get method type
ldr(Rtmp, Address(Rmethod, Method::const_offset()));
ldrb(Rtmp, Address(Rtmp, ConstMethod::result_type_offset()));
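// Result narrowing by type: T_INT needs nothing, T_BOOLEAN masks to one bit,
// T_BYTE sign-extends 8 bits, T_CHAR zero-extends 16 bits, and the remaining
// case (T_SHORT) sign-extends 16 bits.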
Label notBool, notByte, notChar, done;
cmp(Rtmp, T_INT);
b(done, eq);
cmp(Rtmp, T_BOOLEAN);
b(notBool, ne);
and_32(result, result, 1);
b(done);
bind(notBool);
cmp(Rtmp, T_BYTE);
b(notByte, ne);
sign_extend(result, result, 8);
b(done);
bind(notByte);
cmp(Rtmp, T_CHAR);
b(notChar, ne);
zero_extend(result, result, 16);
b(done);
bind(notChar);
// cmp(Rtmp, T_SHORT);
// b(done, ne);
sign_extend(result, result, 16);
// Nothing to do
bind(done);
}
// remove activation
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
// If throw_monitor_exception
// throws IllegalMonitorStateException
// Else if install_monitor_exception
// installs IllegalMonitorStateException
// Else
// no error processing
void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_addr,
bool throw_monitor_exception,
bool install_monitor_exception,
bool notify_jvmdi) {
Label unlock, unlocked, no_unlock;
// Note: Registers R0, R1, S0 and D0 (TOS cached value) may be in use for the result.
const Address do_not_unlock_if_synchronized(Rthread,
JavaThread::do_not_unlock_if_synchronized_offset());
const Register Rflag = R2;
const Register Raccess_flags = R3;
restore_method();
ldrb(Rflag, do_not_unlock_if_synchronized);
// get method access flags
ldr_u32(Raccess_flags, Address(Rmethod, Method::access_flags_offset()));
strb(zero_register(Rtemp), do_not_unlock_if_synchronized); // reset the flag
// check if method is synchronized
tbz(Raccess_flags, JVM_ACC_SYNCHRONIZED_BIT, unlocked);
// Don't unlock anything if the _do_not_unlock_if_synchronized flag is set.
cbnz(Rflag, no_unlock);
// unlock monitor
push(state); // save result
// The BasicObjectLock will be first in the list, since this is a synchronized method. However, we
// need to check that the object has not been unlocked by an explicit monitorexit bytecode.
const Register Rmonitor = R0; // fixed in unlock_object()
const Register Robj = R2;
// address of first monitor
sub(Rmonitor, FP, - frame::interpreter_frame_monitor_block_bottom_offset * wordSize + (int)sizeof(BasicObjectLock));
ldr(Robj, Address(Rmonitor, BasicObjectLock::obj_offset_in_bytes()));
cbnz(Robj, unlock);
pop(state);
if (throw_monitor_exception) {
// Entry already unlocked, need to throw exception
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
should_not_reach_here();
} else {
// Monitor already unlocked during a stack unroll.
// If requested, install an illegal_monitor_state_exception.
// Continue with stack unrolling.
if (install_monitor_exception) {
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
}
b(unlocked);
}
// Exception case for the check that all monitors are unlocked.
const Register Rcur = R2;
Label restart_check_monitors_unlocked, exception_monitor_is_still_locked;
bind(exception_monitor_is_still_locked);
// Monitor entry is still locked, need to throw exception.
// Rcur: monitor entry.
if (throw_monitor_exception) {
// Throw exception
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
should_not_reach_here();
} else {
// Stack unrolling. Unlock object and install illegal_monitor_exception
// Unlock does not block, so don't have to worry about the frame
push(state);
mov(Rmonitor, Rcur);
unlock_object(Rmonitor);
if (install_monitor_exception) {
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
}
pop(state);
b(restart_check_monitors_unlocked);
}
bind(unlock);
unlock_object(Rmonitor);
pop(state);
// Check for block-structured locking (i.e., that all locked objects have been unlocked)
bind(unlocked);
// Check that all monitors are unlocked
{
Label loop;
const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
const Register Rbottom = R3;
const Register Rcur_obj = Rtemp;
bind(restart_check_monitors_unlocked);
ldr(Rcur, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
// points to current entry, starting with top-most entry
sub(Rbottom, FP, -frame::interpreter_frame_monitor_block_bottom_offset * wordSize);
// points to word before bottom of monitor block
cmp(Rcur, Rbottom); // check if there are no monitors
ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
// prefetch monitor's object
b(no_unlock, eq);
bind(loop);
// check if current entry is used
cbnz(Rcur_obj, exception_monitor_is_still_locked);
add(Rcur, Rcur, entry_size); // otherwise advance to next entry
cmp(Rcur, Rbottom); // check if bottom reached
ldr(Rcur_obj, Address(Rcur, BasicObjectLock::obj_offset_in_bytes()), ne);
// prefetch monitor's object
b(loop, ne); // if not at bottom then check this entry
}
bind(no_unlock);
// jvmti support
if (notify_jvmdi) {
notify_method_exit(state, NotifyJVMTI); // preserve TOSCA
} else {
notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
}
// remove activation
mov(Rtemp, FP);
ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
ldr(SP, Address(Rtemp, frame::interpreter_frame_sender_sp_offset * wordSize));
if (ret_addr != LR) {
mov(ret_addr, LR);
}
}
// At certain points in the method invocation the monitor of
// synchronized methods hasn't been entered yet.
// To correctly handle exceptions at these points, we set the thread local
// variable _do_not_unlock_if_synchronized to true. The remove_activation will
// check this flag.
void InterpreterMacroAssembler::set_do_not_unlock_if_synchronized(bool flag, Register tmp) {
const Address do_not_unlock_if_synchronized(Rthread,
JavaThread::do_not_unlock_if_synchronized_offset());
if (flag) {
mov(tmp, 1);
strb(tmp, do_not_unlock_if_synchronized);
} else {
strb(zero_register(tmp), do_not_unlock_if_synchronized);
}
}
// Lock object
//
// Argument: R1 : Points to BasicObjectLock to be used for locking.
// Must be initialized with object to lock.
// Blows volatile registers R0-R3, Rtemp, LR. Calls VM.
void InterpreterMacroAssembler::lock_object(Register Rlock) {
assert(Rlock == R1, "the second argument");
if (UseHeavyMonitors) {
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), Rlock);
} else {
Label done;
const Register Robj = R2;
const Register Rmark = R3;
assert_different_registers(Robj, Rmark, Rlock, R0, Rtemp);
const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
const int mark_offset = lock_offset + BasicLock::displaced_header_offset_in_bytes();
Label already_locked, slow_case;
// Load object pointer
ldr(Robj, Address(Rlock, obj_offset));
if (DiagnoseSyncOnValueBasedClasses != 0) {
load_klass(R0, Robj);
ldr_u32(R0, Address(R0, Klass::access_flags_offset()));
tst(R0, JVM_ACC_IS_VALUE_BASED_CLASS);
b(slow_case, ne);
}
if (UseBiasedLocking) {
biased_locking_enter(Robj, Rmark/*scratched*/, R0, false, Rtemp, done, slow_case);
}
// On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
// That would be acceptable as either the CAS or the slow case path is taken in that case.
// The exception is when the object is already locked by the calling thread; then the recursive test will
// pass (guaranteed, as loads are satisfied from the store queue if performed on the same processor).
assert(oopDesc::mark_offset_in_bytes() == 0, "must be");
ldr(Rmark, Address(Robj, oopDesc::mark_offset_in_bytes()));
// Test if object is already locked
tst(Rmark, markWord::unlocked_value);
b(already_locked, eq);
// Save old object->mark() into BasicLock's displaced header
str(Rmark, Address(Rlock, mark_offset));
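// Atomically swing the object's mark word from the saved unlocked value to a
// pointer to this lock record; on failure (contention) branch to slow_case.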
cas_for_lock_acquire(Rmark, Rlock, Robj, Rtemp, slow_case);
#ifndef PRODUCT
if (PrintBiasedLockingStatistics) {
cond_atomic_inc32(al, BiasedLocking::fast_path_entry_count_addr());
}
#endif //!PRODUCT
b(done);
// If we got here, the object is locked by either the calling thread or another thread.
bind(already_locked);
// Handling of locked objects: recursive locks and slow case.
// Fast check for recursive lock.
//
// Can apply the optimization only if this is a stack lock
// allocated in this thread. For efficiency, we can focus on
// recently allocated stack locks (instead of reading the stack
// base and checking whether 'mark' points inside the current
// thread stack):
// 1) (mark & 3) == 0
// 2) SP <= mark < SP + os::pagesize()
//
// Warning: SP + os::pagesize can overflow the stack base. We must
// neither apply the optimization for an inflated lock allocated
// just above the thread stack (this is why condition 1 matters)
// nor apply the optimization if the stack lock is inside the stack
// of another thread. The latter is avoided even in case of overflow
// because we have guard pages at the end of all stacks. Hence, if
// we go over the stack base and hit the stack of another thread,
// this should not be in a writeable area that could contain a
// stack lock allocated by that thread. As a consequence, a stack
// lock less than page size away from SP is guaranteed to be
// owned by the current thread.
//
// Note: assuming SP is aligned, we can check the low bits of
// (mark-SP) instead of the low bits of mark. In that case,
// assuming page size is a power of 2, we can merge the two
// conditions into a single test:
// => ((mark - SP) & (3 - os::pagesize())) == 0
// (3 - os::pagesize()) cannot be encoded as an ARM immediate operand.
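// Worked example, assuming a 4 KiB page: 3 - 4096 == 0xFFFFF003, so the merged test
// ((mark - SP) & 0xFFFFF003) == 0 holds exactly when the low two bits of (mark - SP)
// are clear and 0 <= (mark - SP) < 4096.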
// Check independently the low bits and the distance to SP.
// -1- test low 2 bits
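// lsl #30 moves mark bits [1:0] into bits [31:30]; movs sets Z exactly when both are zero.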
movs(R0, AsmOperand(Rmark, lsl, 30));
// -2- test (mark - SP) if the low two bits are 0
sub(R0, Rmark, SP, eq);
movs(R0, AsmOperand(R0, lsr, exact_log2(os::vm_page_size())), eq);
// If still 'eq' then recursive locking OK: store 0 into lock record
str(R0, Address(Rlock, mark_offset), eq);
#ifndef PRODUCT
if (PrintBiasedLockingStatistics) {
cond_atomic_inc32(eq, BiasedLocking::fast_path_entry_count_addr());
}
#endif // !PRODUCT
b(done, eq);
bind(slow_case);
// Call the runtime routine for slow case
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), Rlock);
bind(done);
}
}
// Unlocks an object. Used in monitorexit bytecode and remove_activation.
//
// Argument: R0: Points to BasicObjectLock structure for lock
// Throw an IllegalMonitorException if object is not locked by current thread
// Blows volatile registers R0-R3, Rtemp, LR. Calls VM.
void InterpreterMacroAssembler::unlock_object(Register Rlock) {
assert(Rlock == R0, "the first argument");
if (UseHeavyMonitors) {
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), Rlock);
} else {
Label done, slow_case;
const Register Robj = R2;
const Register Rmark = R3;
assert_different_registers(Robj, Rmark, Rlock, Rtemp);