forked from nodejs/node
-
Notifications
You must be signed in to change notification settings - Fork 5
/
macro-assembler-arm64.h
2116 lines (1869 loc) · 84.9 KB
/
macro-assembler-arm64.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
#error This header must be included via macro-assembler.h
#endif
#ifndef V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_H_
#define V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_H_
#include <vector>
#include "src/base/bits.h"
#include "src/codegen/arm64/assembler-arm64.h"
#include "src/codegen/bailout-reason.h"
#include "src/common/globals.h"
// Simulator specific helpers.
#if USE_SIMULATOR
#if DEBUG
#define ASM_LOCATION(message) __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
#define ASM_LOCATION_IN_ASSEMBLER(message) \
Debug("LOCATION: " message, __LINE__, NO_PARAM)
#else
#define ASM_LOCATION(message)
#define ASM_LOCATION_IN_ASSEMBLER(message)
#endif
#else
#define ASM_LOCATION(message)
#define ASM_LOCATION_IN_ASSEMBLER(message)
#endif
namespace v8 {
namespace internal {
#define LS_MACRO_LIST(V) \
V(Ldrb, Register&, rt, LDRB_w) \
V(Strb, Register&, rt, STRB_w) \
V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
V(Ldrh, Register&, rt, LDRH_w) \
V(Strh, Register&, rt, STRH_w) \
V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
V(Ldr, CPURegister&, rt, LoadOpFor(rt)) \
V(Str, CPURegister&, rt, StoreOpFor(rt)) \
V(Ldrsw, Register&, rt, LDRSW_x)
#define LSPAIR_MACRO_LIST(V) \
V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2)) \
V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)
#define LDA_STL_MACRO_LIST(V) \
V(Ldarb, ldarb) \
V(Ldarh, ldarh) \
V(Ldar, ldar) \
V(Ldaxrb, ldaxrb) \
V(Ldaxrh, ldaxrh) \
V(Ldaxr, ldaxr) \
V(Stlrb, stlrb) \
V(Stlrh, stlrh) \
V(Stlr, stlr)
#define STLX_MACRO_LIST(V) \
V(Stlxrb, stlxrb) \
V(Stlxrh, stlxrh) \
V(Stlxr, stlxr)
// ----------------------------------------------------------------------------
// Static helper functions
// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset);
// ----------------------------------------------------------------------------
// MacroAssembler
// Branch selector used by the macro-assembler branch helpers (see
// B(Label*, BranchType, ...)): covers plain conditional branches as well as
// the compare-and-branch (cbz/cbnz) and test-bit-and-branch (tbz/tbnz) forms.
enum BranchType {
  // Copies of architectural conditions.
  // The associated conditions can be used in place of those, the code will
  // take care of reinterpreting them with the correct type.
  integer_eq = eq,
  integer_ne = ne,
  integer_hs = hs,
  integer_lo = lo,
  integer_mi = mi,
  integer_pl = pl,
  integer_vs = vs,
  integer_vc = vc,
  integer_hi = hi,
  integer_ls = ls,
  integer_ge = ge,
  integer_lt = lt,
  integer_gt = gt,
  integer_le = le,
  integer_al = al,
  integer_nv = nv,

  // These two are *different* from the architectural codes al and nv.
  // 'always' is used to generate unconditional branches.
  // 'never' is used to not generate a branch (generally as the inverse
  // branch type of 'always').
  always,
  never,
  // cbz and cbnz.
  reg_zero,
  reg_not_zero,
  // tbz and tbnz.
  reg_bit_clear,
  reg_bit_set,

  // Aliases.
  kBranchTypeFirstCondition = eq,
  kBranchTypeLastCondition = nv,
  kBranchTypeFirstUsingReg = reg_zero,
  kBranchTypeFirstUsingBit = reg_bit_clear
};
// Return the branch type that fires exactly when |type| does not.
// Architectural conditions are inverted with NegateCondition. The remaining
// branch types are declared in adjacent true/false pairs (always/never,
// reg_zero/reg_not_zero, reg_bit_clear/reg_bit_set), so flipping the low bit
// maps each one onto its inverse.
inline BranchType InvertBranchType(BranchType type) {
  const bool is_architectural_condition =
      (kBranchTypeFirstCondition <= type) && (type <= kBranchTypeLastCondition);
  if (!is_architectural_condition) {
    return static_cast<BranchType>(type ^ 1);
  }
  return static_cast<BranchType>(NegateCondition(static_cast<Condition>(type)));
}
// Whether write-barrier helpers (e.g. CallRecordWriteStub) should also emit
// a remembered-set update, or omit it.
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
// Whether a helper should emit an inline smi check, or may omit it.
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
// Records whether the caller has already saved the link register.
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
// Controls whether Mov may elide a move when source and destination are the
// same W register (see Mov(const Register&, const Operand&, DiscardMoveMode)).
enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
// The macro assembler supports moving automatically pre-shifted immediates for
// arithmetic and logical instructions, and then applying a post shift in the
// instruction to undo the modification, in order to reduce the code emitted for
// an operation. For example:
//
//   Add(x0, x0, 0x1f7de) => movz x16, 0xfbef; add x0, x0, x16, lsl #1.
//
// This optimisation can be only partially applied when the stack pointer is an
// operand or destination, so this enumeration is used to control the shift.
// It is consumed by MoveImmediateForShiftedOp.
enum PreShiftImmMode {
  kNoShift,          // Don't pre-shift.
  kLimitShiftForSP,  // Limit pre-shift for add/sub extend use.
  kAnyShift          // Allow any pre-shift.
};
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
using TurboAssemblerBase::TurboAssemblerBase;
#if DEBUG
  // Debug-only flag checked (via DCHECK(allow_macro_instructions...)) by the
  // macro-instruction wrappers below before they emit code.
  void set_allow_macro_instructions(bool value) {
    allow_macro_instructions_ = value;
  }
  bool allow_macro_instructions() const { return allow_macro_instructions_; }
#endif
  // We should not use near calls or jumps for calls to external references,
  // since the code spaces are not guaranteed to be close to each other.
  // All other relocation modes are allowed to use the near forms.
  bool CanUseNearCallOrJump(RelocInfo::Mode rmode) {
    return rmode != RelocInfo::EXTERNAL_REFERENCE;
  }
static bool IsNearCallOffset(int64_t offset);
  // Activation support.
  void EnterFrame(StackFrame::Type type);
  // Overload kept for compatibility with ports that support an out-of-line
  // constant pool; it must never be called on arm64.
  void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
    // Out-of-line constant pool not implemented on arm64.
    UNREACHABLE();
  }
  void LeaveFrame(StackFrame::Type type);
inline void InitializeRootRegister();
  // Move an operand into rd. discard_mode controls whether a move between the
  // same W register may be elided entirely.
  void Mov(const Register& rd, const Operand& operand,
           DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
  void Mov(const Register& rd, uint64_t imm);
  // Vector lane to vector lane: vn[vn_index] -> vd[vd_index].
  void Mov(const VRegister& vd, int vd_index, const VRegister& vn,
           int vn_index) {
    DCHECK(allow_macro_instructions());
    mov(vd, vd_index, vn, vn_index);
  }
  void Mov(const Register& rd, Smi smi);
  // Vector lane to vector register: vn[index] -> vd.
  void Mov(const VRegister& vd, const VRegister& vn, int index) {
    DCHECK(allow_macro_instructions());
    mov(vd, vn, index);
  }
  // General-purpose register to vector lane: rn -> vd[vd_index].
  void Mov(const VRegister& vd, int vd_index, const Register& rn) {
    DCHECK(allow_macro_instructions());
    mov(vd, vd_index, rn);
  }
  // Vector lane to general-purpose register: vn[vn_index] -> rd.
  void Mov(const Register& rd, const VRegister& vn, int vn_index) {
    DCHECK(allow_macro_instructions());
    mov(rd, vn, vn_index);
  }
// This is required for compatibility with architecture independent code.
// Remove if not needed.
void Move(Register dst, Smi src);
// Move src0 to dst0 and src1 to dst1, handling possible overlaps.
void MovePair(Register dst0, Register src0, Register dst1, Register src1);
// Register swap. Note that the register operands should be distinct.
void Swap(Register lhs, Register rhs);
void Swap(VRegister lhs, VRegister rhs);
// NEON by element instructions.
#define NEON_BYELEMENT_MACRO_LIST(V) \
V(fmla, Fmla) \
V(fmls, Fmls) \
V(fmul, Fmul) \
V(fmulx, Fmulx) \
V(mul, Mul) \
V(mla, Mla) \
V(mls, Mls) \
V(sqdmulh, Sqdmulh) \
V(sqrdmulh, Sqrdmulh) \
V(sqdmull, Sqdmull) \
V(sqdmull2, Sqdmull2) \
V(sqdmlal, Sqdmlal) \
V(sqdmlal2, Sqdmlal2) \
V(sqdmlsl, Sqdmlsl) \
V(sqdmlsl2, Sqdmlsl2) \
V(smull, Smull) \
V(smull2, Smull2) \
V(smlal, Smlal) \
V(smlal2, Smlal2) \
V(smlsl, Smlsl) \
V(smlsl2, Smlsl2) \
V(umull, Umull) \
V(umull2, Umull2) \
V(umlal, Umlal) \
V(umlal2, Umlal2) \
V(umlsl, Umlsl) \
V(umlsl2, Umlsl2)
#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm, \
int vm_index) { \
DCHECK(allow_macro_instructions()); \
ASM(vd, vn, vm, vm_index); \
}
NEON_BYELEMENT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
// NEON 2 vector register instructions.
#define NEON_2VREG_MACRO_LIST(V) \
V(abs, Abs) \
V(addp, Addp) \
V(addv, Addv) \
V(cls, Cls) \
V(clz, Clz) \
V(cnt, Cnt) \
V(faddp, Faddp) \
V(fcvtas, Fcvtas) \
V(fcvtau, Fcvtau) \
V(fcvtms, Fcvtms) \
V(fcvtmu, Fcvtmu) \
V(fcvtns, Fcvtns) \
V(fcvtnu, Fcvtnu) \
V(fcvtps, Fcvtps) \
V(fcvtpu, Fcvtpu) \
V(fmaxnmp, Fmaxnmp) \
V(fmaxnmv, Fmaxnmv) \
V(fmaxp, Fmaxp) \
V(fmaxv, Fmaxv) \
V(fminnmp, Fminnmp) \
V(fminnmv, Fminnmv) \
V(fminp, Fminp) \
V(fminv, Fminv) \
V(fneg, Fneg) \
V(frecpe, Frecpe) \
V(frecpx, Frecpx) \
V(frinta, Frinta) \
V(frinti, Frinti) \
V(frintm, Frintm) \
V(frintn, Frintn) \
V(frintp, Frintp) \
V(frintx, Frintx) \
V(frintz, Frintz) \
V(frsqrte, Frsqrte) \
V(fsqrt, Fsqrt) \
V(mov, Mov) \
V(mvn, Mvn) \
V(neg, Neg) \
V(not_, Not) \
V(rbit, Rbit) \
V(rev16, Rev16) \
V(rev32, Rev32) \
V(rev64, Rev64) \
V(sadalp, Sadalp) \
V(saddlp, Saddlp) \
V(saddlv, Saddlv) \
V(smaxv, Smaxv) \
V(sminv, Sminv) \
V(sqabs, Sqabs) \
V(sqneg, Sqneg) \
V(sqxtn2, Sqxtn2) \
V(sqxtn, Sqxtn) \
V(sqxtun2, Sqxtun2) \
V(sqxtun, Sqxtun) \
V(suqadd, Suqadd) \
V(sxtl2, Sxtl2) \
V(sxtl, Sxtl) \
V(uadalp, Uadalp) \
V(uaddlp, Uaddlp) \
V(uaddlv, Uaddlv) \
V(umaxv, Umaxv) \
V(uminv, Uminv) \
V(uqxtn2, Uqxtn2) \
V(uqxtn, Uqxtn) \
V(urecpe, Urecpe) \
V(ursqrte, Ursqrte) \
V(usqadd, Usqadd) \
V(uxtl2, Uxtl2) \
V(uxtl, Uxtl) \
V(xtn2, Xtn2) \
V(xtn, Xtn)
#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
void MASM(const VRegister& vd, const VRegister& vn) { \
DCHECK(allow_macro_instructions()); \
ASM(vd, vn); \
}
NEON_2VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
#undef NEON_2VREG_MACRO_LIST
// NEON 2 vector register with immediate instructions.
#define NEON_2VREG_FPIMM_MACRO_LIST(V) \
V(fcmeq, Fcmeq) \
V(fcmge, Fcmge) \
V(fcmgt, Fcmgt) \
V(fcmle, Fcmle) \
V(fcmlt, Fcmlt)
#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
void MASM(const VRegister& vd, const VRegister& vn, double imm) { \
DCHECK(allow_macro_instructions()); \
ASM(vd, vn, imm); \
}
NEON_2VREG_FPIMM_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
// NEON 3 vector register instructions.
#define NEON_3VREG_MACRO_LIST(V) \
V(add, Add) \
V(addhn2, Addhn2) \
V(addhn, Addhn) \
V(addp, Addp) \
V(and_, And) \
V(bic, Bic) \
V(bif, Bif) \
V(bit, Bit) \
V(bsl, Bsl) \
V(cmeq, Cmeq) \
V(cmge, Cmge) \
V(cmgt, Cmgt) \
V(cmhi, Cmhi) \
V(cmhs, Cmhs) \
V(cmtst, Cmtst) \
V(eor, Eor) \
V(fabd, Fabd) \
V(facge, Facge) \
V(facgt, Facgt) \
V(faddp, Faddp) \
V(fcmeq, Fcmeq) \
V(fcmge, Fcmge) \
V(fcmgt, Fcmgt) \
V(fmaxnmp, Fmaxnmp) \
V(fmaxp, Fmaxp) \
V(fminnmp, Fminnmp) \
V(fminp, Fminp) \
V(fmla, Fmla) \
V(fmls, Fmls) \
V(fmulx, Fmulx) \
V(fnmul, Fnmul) \
V(frecps, Frecps) \
V(frsqrts, Frsqrts) \
V(mla, Mla) \
V(mls, Mls) \
V(mul, Mul) \
V(orn, Orn) \
V(orr, Orr) \
V(pmull2, Pmull2) \
V(pmull, Pmull) \
V(pmul, Pmul) \
V(raddhn2, Raddhn2) \
V(raddhn, Raddhn) \
V(rsubhn2, Rsubhn2) \
V(rsubhn, Rsubhn) \
V(sabal2, Sabal2) \
V(sabal, Sabal) \
V(saba, Saba) \
V(sabdl2, Sabdl2) \
V(sabdl, Sabdl) \
V(sabd, Sabd) \
V(saddl2, Saddl2) \
V(saddl, Saddl) \
V(saddw2, Saddw2) \
V(saddw, Saddw) \
V(shadd, Shadd) \
V(shsub, Shsub) \
V(smaxp, Smaxp) \
V(smax, Smax) \
V(sminp, Sminp) \
V(smin, Smin) \
V(smlal2, Smlal2) \
V(smlal, Smlal) \
V(smlsl2, Smlsl2) \
V(smlsl, Smlsl) \
V(smull2, Smull2) \
V(smull, Smull) \
V(sqadd, Sqadd) \
V(sqdmlal2, Sqdmlal2) \
V(sqdmlal, Sqdmlal) \
V(sqdmlsl2, Sqdmlsl2) \
V(sqdmlsl, Sqdmlsl) \
V(sqdmulh, Sqdmulh) \
V(sqdmull2, Sqdmull2) \
V(sqdmull, Sqdmull) \
V(sqrdmulh, Sqrdmulh) \
V(sqrshl, Sqrshl) \
V(sqshl, Sqshl) \
V(sqsub, Sqsub) \
V(srhadd, Srhadd) \
V(srshl, Srshl) \
V(sshl, Sshl) \
V(ssubl2, Ssubl2) \
V(ssubl, Ssubl) \
V(ssubw2, Ssubw2) \
V(ssubw, Ssubw) \
V(subhn2, Subhn2) \
V(subhn, Subhn) \
V(sub, Sub) \
V(trn1, Trn1) \
V(trn2, Trn2) \
V(uabal2, Uabal2) \
V(uabal, Uabal) \
V(uaba, Uaba) \
V(uabdl2, Uabdl2) \
V(uabdl, Uabdl) \
V(uabd, Uabd) \
V(uaddl2, Uaddl2) \
V(uaddl, Uaddl) \
V(uaddw2, Uaddw2) \
V(uaddw, Uaddw) \
V(uhadd, Uhadd) \
V(uhsub, Uhsub) \
V(umaxp, Umaxp) \
V(umax, Umax) \
V(uminp, Uminp) \
V(umin, Umin) \
V(umlal2, Umlal2) \
V(umlal, Umlal) \
V(umlsl2, Umlsl2) \
V(umlsl, Umlsl) \
V(umull2, Umull2) \
V(umull, Umull) \
V(uqadd, Uqadd) \
V(uqrshl, Uqrshl) \
V(uqshl, Uqshl) \
V(uqsub, Uqsub) \
V(urhadd, Urhadd) \
V(urshl, Urshl) \
V(ushl, Ushl) \
V(usubl2, Usubl2) \
V(usubl, Usubl) \
V(usubw2, Usubw2) \
V(usubw, Usubw) \
V(uzp1, Uzp1) \
V(uzp2, Uzp2) \
V(zip1, Zip1) \
V(zip2, Zip2)
#define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm) { \
DCHECK(allow_macro_instructions()); \
ASM(vd, vn, vm); \
}
NEON_3VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
  // Vector bitwise bit-clear with an immediate: clears in vd the bits set in
  // (imm8 << left_shift). See the A64 BIC (vector, immediate) instruction.
  void Bic(const VRegister& vd, const int imm8, const int left_shift = 0) {
    DCHECK(allow_macro_instructions());
    bic(vd, imm8, left_shift);
  }
// This is required for compatibility with architecture independent code.
inline void jmp(Label* L);
void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);
inline void B(Label* label);
inline void B(Condition cond, Label* label);
void B(Label* label, Condition cond);
void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
void Tbz(const Register& rt, unsigned bit_pos, Label* label);
void Cbnz(const Register& rt, Label* label);
void Cbz(const Register& rt, Label* label);
  // Pointer authentication (Armv8.3): sign the return address in lr, using sp
  // as the modifier.
  void Paciasp() {
    DCHECK(allow_macro_instructions_);
    paciasp();
  }
  // Authenticate the return address in lr (inverse of Paciasp).
  void Autiasp() {
    DCHECK(allow_macro_instructions_);
    autiasp();
  }
// The 1716 pac and aut instructions encourage people to use x16 and x17
// directly, perhaps without realising that this is forbidden. For example:
//
// UseScratchRegisterScope temps(&masm);
// Register temp = temps.AcquireX(); // temp will be x16
// __ Mov(x17, ptr);
// __ Mov(x16, modifier); // Will override temp!
// __ Pacia1716();
//
// To work around this issue, you must exclude x16 and x17 from the scratch
// register list. You may need to replace them with other registers:
//
// UseScratchRegisterScope temps(&masm);
// temps.Exclude(x16, x17);
// temps.Include(x10, x11);
// __ Mov(x17, ptr);
// __ Mov(x16, modifier);
// __ Pacia1716();
  void Pacia1716() {
    DCHECK(allow_macro_instructions_);
    // x16 and x17 are the implicit operands of pacia1716, so they must not be
    // handed out as scratch registers (see the comment above).
    DCHECK(!TmpList()->IncludesAliasOf(x16));
    DCHECK(!TmpList()->IncludesAliasOf(x17));
    pacia1716();
  }
  void Autia1716() {
    DCHECK(allow_macro_instructions_);
    // Same x16/x17 restriction as Pacia1716.
    DCHECK(!TmpList()->IncludesAliasOf(x16));
    DCHECK(!TmpList()->IncludesAliasOf(x17));
    autia1716();
  }
inline void Dmb(BarrierDomain domain, BarrierType type);
inline void Dsb(BarrierDomain domain, BarrierType type);
inline void Isb();
inline void Csdb();
// Removes current frame and its arguments from the stack preserving
// the arguments and a return address pushed to the stack for the next call.
// Both |callee_args_count| and |caller_args_count| do not include
// receiver. |callee_args_count| is not modified. |caller_args_count| is
// trashed.
void PrepareForTailCall(Register callee_args_count,
Register caller_args_count, Register scratch0,
Register scratch1);
inline void SmiUntag(Register dst, Register src);
inline void SmiUntag(Register dst, const MemOperand& src);
inline void SmiUntag(Register smi);
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cond, AbortReason reason);
// Like Assert(), but without condition.
// Use --debug_code to enable.
void AssertUnreachable(AbortReason reason);
void AssertSmi(Register object,
AbortReason reason = AbortReason::kOperandIsNotASmi);
// Like Assert(), but always enabled.
void Check(Condition cond, AbortReason reason);
inline void Debug(const char* message, uint32_t code, Instr params = BREAK);
void Trap() override;
void DebugBreak() override;
// Print a message to stderr and abort execution.
void Abort(AbortReason reason);
// Like printf, but print at run-time from generated code.
//
// The caller must ensure that arguments for floating-point placeholders
// (such as %e, %f or %g) are VRegisters, and that arguments for integer
// placeholders are Registers.
//
// Format placeholders that refer to more than one argument, or to a specific
// argument, are not supported. This includes formats like "%1$d" or "%.*d".
//
// This function automatically preserves caller-saved registers so that
// calling code can use Printf at any point without having to worry about
// corruption. The preservation mechanism generates a lot of code. If this is
// a problem, preserve the important registers manually and then call
// PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
// implicitly preserved.
void Printf(const char* format, CPURegister arg0 = NoCPUReg,
CPURegister arg1 = NoCPUReg, CPURegister arg2 = NoCPUReg,
CPURegister arg3 = NoCPUReg);
// Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
//
// The return code from the system printf call will be returned in x0.
void PrintfNoPreserve(const char* format, const CPURegister& arg0 = NoCPUReg,
const CPURegister& arg1 = NoCPUReg,
const CPURegister& arg2 = NoCPUReg,
const CPURegister& arg3 = NoCPUReg);
// Remaining instructions are simple pass-through calls to the assembler.
inline void Asr(const Register& rd, const Register& rn, unsigned shift);
inline void Asr(const Register& rd, const Register& rn, const Register& rm);
// Try to move an immediate into the destination register in a single
// instruction. Returns true for success, and updates the contents of dst.
// Returns false, otherwise.
bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);
inline void Bind(Label* label,
BranchTargetIdentifier id = BranchTargetIdentifier::kNone);
// Control-flow integrity:
// Define a function entrypoint.
inline void CodeEntry();
// Define an exception handler.
inline void ExceptionHandler();
// Define an exception handler and bind a label.
inline void BindExceptionHandler(Label* label);
// Control-flow integrity:
// Define a jump (BR) target.
inline void JumpTarget();
// Define a jump (BR) target and bind a label.
inline void BindJumpTarget(Label* label);
// Define a call (BLR) target. The target also allows tail calls (via BR)
// when the target is x16 or x17.
inline void CallTarget();
// Define a jump/call target.
inline void JumpOrCallTarget();
// Define a jump/call target and bind a label.
inline void BindJumpOrCallTarget(Label* label);
static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
  // Scratch register lists (integer and FP/SIMD). Mutable so that callers —
  // e.g. UseScratchRegisterScope, per the comment above — can include or
  // exclude registers.
  CPURegList* TmpList() { return &tmp_list_; }
  CPURegList* FPTmpList() { return &fptmp_list_; }
  static CPURegList DefaultTmpList();
  static CPURegList DefaultFPTmpList();
// Move macros.
inline void Mvn(const Register& rd, uint64_t imm);
void Mvn(const Register& rd, const Operand& operand);
static bool IsImmMovn(uint64_t imm, unsigned reg_size);
static bool IsImmMovz(uint64_t imm, unsigned reg_size);
void LogicalMacro(const Register& rd, const Register& rn,
const Operand& operand, LogicalOp op);
void AddSubMacro(const Register& rd, const Register& rn,
const Operand& operand, FlagsUpdate S, AddSubOp op);
inline void Orr(const Register& rd, const Register& rn,
const Operand& operand);
  // Vector bitwise OR with an immediate: sets in vd the bits set in
  // (imm8 << left_shift). See the A64 ORR (vector, immediate) instruction.
  void Orr(const VRegister& vd, const int imm8, const int left_shift = 0) {
    DCHECK(allow_macro_instructions());
    orr(vd, imm8, left_shift);
  }
inline void Orn(const Register& rd, const Register& rn,
const Operand& operand);
inline void Eor(const Register& rd, const Register& rn,
const Operand& operand);
inline void Eon(const Register& rd, const Register& rn,
const Operand& operand);
inline void And(const Register& rd, const Register& rn,
const Operand& operand);
inline void Ands(const Register& rd, const Register& rn,
const Operand& operand);
inline void Tst(const Register& rn, const Operand& operand);
inline void Bic(const Register& rd, const Register& rn,
const Operand& operand);
inline void Blr(const Register& xn);
inline void Cmp(const Register& rn, const Operand& operand);
inline void CmpTagged(const Register& rn, const Operand& operand);
inline void Subs(const Register& rd, const Register& rn,
const Operand& operand);
void Csel(const Register& rd, const Register& rn, const Operand& operand,
Condition cond);
// Emits a runtime assert that the stack pointer is aligned.
void AssertSpAligned();
// Copy slot_count stack slots from the stack offset specified by src to
// the stack offset specified by dst. The offsets and count are expressed in
// slot-sized units. Offset dst must be less than src, or the gap between
// them must be greater than or equal to slot_count, otherwise the result is
// unpredictable. The function may corrupt its register arguments. The
// registers must not alias each other.
void CopySlots(int dst, Register src, Register slot_count);
void CopySlots(Register dst, Register src, Register slot_count);
// Copy count double words from the address in register src to the address
// in register dst. There are three modes for this function:
// 1) Address dst must be less than src, or the gap between them must be
// greater than or equal to count double words, otherwise the result is
// unpredictable. This is the default mode.
// 2) Address src must be less than dst, or the gap between them must be
// greater than or equal to count double words, otherwise the result is
// unpredictable. In this mode, src and dst specify the last (highest)
// address of the regions to copy from and to.
// 3) The same as mode 1, but the words are copied in the reversed order.
// The case where src == dst is not supported.
// The function may corrupt its register arguments. The registers must not
// alias each other.
  enum CopyDoubleWordsMode {
    kDstLessThanSrc,           // Mode 1 in the comment above (the default).
    kSrcLessThanDst,           // Mode 2: src/dst are the highest addresses.
    kDstLessThanSrcAndReverse  // Mode 3: as mode 1, but copied in reverse.
  };
  void CopyDoubleWords(Register dst, Register src, Register count,
                       CopyDoubleWordsMode mode = kDstLessThanSrc);
// Calculate the address of a double word-sized slot at slot_offset from the
// stack pointer, and write it to dst. Positive slot_offsets are at addresses
// greater than sp, with slot zero at sp.
void SlotAddress(Register dst, int slot_offset);
void SlotAddress(Register dst, Register slot_offset);
// Load a literal from the inline constant pool.
inline void Ldr(const CPURegister& rt, const Operand& imm);
// Claim or drop stack space.
//
// On Windows, Claim will write a value every 4k, as is required by the stack
// expansion mechanism.
//
// The stack pointer must be aligned to 16 bytes and the size claimed or
// dropped must be a multiple of 16 bytes.
//
// Note that unit_size must be specified in bytes. For variants which take a
// Register count, the unit size must be a power of two.
inline void Claim(int64_t count, uint64_t unit_size = kXRegSize);
inline void Claim(const Register& count, uint64_t unit_size = kXRegSize);
inline void Drop(int64_t count, uint64_t unit_size = kXRegSize);
inline void Drop(const Register& count, uint64_t unit_size = kXRegSize);
// Drop 'count' arguments from the stack, rounded up to a multiple of two,
// without actually accessing memory.
// We assume the size of the arguments is the pointer size.
// An optional mode argument is passed, which can indicate we need to
// explicitly add the receiver to the count.
enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
inline void DropArguments(const Register& count,
ArgumentsCountMode mode = kCountIncludesReceiver);
inline void DropArguments(int64_t count,
ArgumentsCountMode mode = kCountIncludesReceiver);
// Drop 'count' slots from stack, rounded up to a multiple of two, without
// actually accessing memory.
inline void DropSlots(int64_t count);
// Push a single argument, with padding, to the stack.
inline void PushArgument(const Register& arg);
// Add and sub macros.
inline void Add(const Register& rd, const Register& rn,
const Operand& operand);
inline void Adds(const Register& rd, const Register& rn,
const Operand& operand);
inline void Sub(const Register& rd, const Register& rn,
const Operand& operand);
// Abort execution if argument is not a positive or zero integer, enabled via
// --debug-code.
void AssertPositiveOrZero(Register value);
#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
inline void FN(const REGTYPE REG, const MemOperand& addr);
LS_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION
// Push or pop up to 4 registers of the same width to or from the stack.
//
// If an argument register is 'NoReg', all further arguments are also assumed
// to be 'NoReg', and are thus not pushed or popped.
//
// Arguments are ordered such that "Push(a, b);" is functionally equivalent
// to "Push(a); Push(b);".
//
// It is valid to push the same register more than once, and there is no
// restriction on the order in which registers are specified.
//
// It is not valid to pop into the same register more than once in one
// operation, not even into the zero register.
//
// The stack pointer must be aligned to 16 bytes on entry and the total size
// of the specified registers must also be a multiple of 16 bytes.
//
// Other than the registers passed into Pop, the stack pointer, (possibly)
// the system stack pointer and (possibly) the link register, these methods
// do not modify any other registers.
//
// Some of the methods take an optional LoadLRMode or StoreLRMode template
// argument, which specifies whether we need to sign the link register at the
// start of the operation, or authenticate it at the end of the operation,
// when control flow integrity measures are enabled.
// When the mode is kDontLoadLR or kDontStoreLR, LR must not be passed as an
// argument to the operation.
enum LoadLRMode { kAuthLR, kDontLoadLR };
enum StoreLRMode { kSignLR, kDontStoreLR };
template <StoreLRMode lr_mode = kDontStoreLR>
void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
void Push(const CPURegister& src0, const CPURegister& src1,
const CPURegister& src2, const CPURegister& src3,
const CPURegister& src4, const CPURegister& src5 = NoReg,
const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg);
template <LoadLRMode lr_mode = kDontLoadLR>
void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
void Pop(const CPURegister& dst0, const CPURegister& dst1,
const CPURegister& dst2, const CPURegister& dst3,
const CPURegister& dst4, const CPURegister& dst5 = NoReg,
const CPURegister& dst6 = NoReg, const CPURegister& dst7 = NoReg);
template <StoreLRMode lr_mode = kDontStoreLR>
void Push(const Register& src0, const VRegister& src1);
// This is a convenience method for pushing a single Handle<Object>.
inline void Push(Handle<HeapObject> object);
inline void Push(Smi smi);
  // Aliases of Push and Pop, required for V8 compatibility. The lower-case
  // names match the architecture-independent assembler interface.
  inline void push(Register src) { Push(src); }
  inline void pop(Register dst) { Pop(dst); }
void SaveRegisters(RegList registers);
void RestoreRegisters(RegList registers);
void CallRecordWriteStub(Register object, Operand offset,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode);
void CallRecordWriteStub(Register object, Operand offset,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode, Address wasm_target);
void CallEphemeronKeyBarrier(Register object, Operand offset,
SaveFPRegsMode fp_mode);
// For a given |object| and |offset|:
// - Move |object| to |dst_object|.
// - Compute the address of the slot pointed to by |offset| in |object| and
// write it to |dst_slot|.
// This method makes sure |object| and |offset| are allowed to overlap with
// the destination registers.
void MoveObjectAndSlot(Register dst_object, Register dst_slot,
Register object, Operand offset);
// Alternative forms of Push and Pop, taking a RegList or CPURegList that
// specifies the registers that are to be pushed or popped. Higher-numbered
// registers are associated with higher memory addresses (as in the A32 push
// and pop instructions).
//
// (Push|Pop)SizeRegList allow you to specify the register size as a
// parameter. Only kXRegSizeInBits, kWRegSizeInBits, kDRegSizeInBits and
// kSRegSizeInBits are supported.
//
// Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
//
// The methods take an optional LoadLRMode or StoreLRMode template argument.
// When control flow integrity measures are enabled and the link register is
// included in 'registers', passing kSignLR to PushCPURegList will sign the
// link register before pushing the list, and passing kAuthLR to
// PopCPURegList will authenticate it after popping the list.
template <StoreLRMode lr_mode = kDontStoreLR>
void PushCPURegList(CPURegList registers);
template <LoadLRMode lr_mode = kDontLoadLR>
void PopCPURegList(CPURegList registers);
// Calculate how much stack space (in bytes) is required to store caller
// registers excluding those specified in the arguments.
int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion) const;
// Push caller saved registers on the stack, and return the number of bytes
// stack pointer is adjusted.
int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion = no_reg);
// Restore caller saved registers from the stack, and return the number of
// bytes stack pointer is adjusted.
int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion = no_reg);
// Move an immediate into register dst, and return an Operand object for use
// with a subsequent instruction that accepts a shift. The value moved into
// dst is not necessarily equal to imm; it may have had a shifting operation
// applied to it that will be subsequently undone by the shift applied in the
// Operand.
Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm,
                                  PreShiftImmMode mode);
// Test |mask| against the header flags of the memory page containing
// |object|, and branch to |condition_met| if condition |cc| holds.
// NOTE(review): flag layout is defined out of view — see the definition.
void CheckPageFlag(const Register& object, int mask, Condition cc,
                   Label* condition_met);
// Compare a register with an operand, and branch to label depending on the
// condition. May corrupt the status flags.
inline void CompareAndBranch(const Register& lhs, const Operand& rhs,
                             Condition cond, Label* label);
// As CompareAndBranch, but for tagged values. NOTE(review): presumably
// compares in the tagged (possibly compressed) representation — confirm
// against the definition.
inline void CompareTaggedAndBranch(const Register& lhs, const Operand& rhs,
                                   Condition cond, Label* label);
// Test the bits of register defined by bit_pattern, and branch if ANY of
// those bits are set. May corrupt the status flags.
inline void TestAndBranchIfAnySet(const Register& reg,
                                  const uint64_t bit_pattern, Label* label);
// Test the bits of register defined by bit_pattern, and branch if ALL of
// those bits are clear (i.e. not set). May corrupt the status flags.
inline void TestAndBranchIfAllClear(const Register& reg,
                                    const uint64_t bit_pattern, Label* label);
// Emit a BRK (software breakpoint) instruction with immediate |code|.
inline void Brk(int code);
// Branch to |smi_label| if |value| is a Smi; otherwise branch to
// |not_smi_label| when one is provided, else fall through.
inline void JumpIfSmi(Register value, Label* smi_label,
                      Label* not_smi_label = nullptr);
// Compare |x| against the 32-bit immediate |y| and branch to |dest| if the
// comparison holds (equality / signed less-than respectively).
inline void JumpIfEqual(Register x, int32_t y, Label* dest);
inline void JumpIfLessThan(Register x, int32_t y, Label* dest);
// FP/SIMD moves: register-to-register, and from a general-purpose register.
inline void Fmov(VRegister fd, VRegister fn);
inline void Fmov(VRegister fd, Register rn);
// Provide explicit double and float interfaces for FP immediate moves, rather
// than relying on implicit C++ casts. This allows signalling NaNs to be
// preserved when the immediate matches the format of fd. Most systems convert
// signalling NaNs to quiet NaNs when converting between float and double.
inline void Fmov(VRegister fd, double imm);
inline void Fmov(VRegister fd, float imm);
// Provide a template to allow other types to be converted automatically.
template <typename T>
void Fmov(VRegister fd, T imm) {
  DCHECK(allow_macro_instructions());
  // Route every other immediate type through the double overload above.
  Fmov(fd, static_cast<double>(imm));
}
// Move from an FP/SIMD register to a general-purpose register.
inline void Fmov(Register rd, VRegister fn);
// Move an immediate into vector register |vd|, optionally applying a shift;
// the (hi, lo) form sets the two 64-bit halves separately.
void Movi(const VRegister& vd, uint64_t imm, Shift shift = LSL,
          int shift_amount = 0);
void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);
// Overridden base-class helpers for constants-table and root-relative
// loads. NOTE(review): base-class contract is defined out of view.
void LoadFromConstantsTable(Register destination,
                            int constant_index) override;
void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
void LoadRootRelative(Register destination, int32_t offset) override;
// Jumps to the various target kinds, optionally conditional via |cond|.
void Jump(Register target, Condition cond = al);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Jump(const ExternalReference& reference) override;
// Calls to the various target kinds.
void Call(Register target);
void Call(Address target, RelocInfo::Mode rmode);
void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
void Call(ExternalReference target);
// Generate an indirect call (for when a direct call's range is not adequate).
void IndirectCall(Address target, RelocInfo::Mode rmode);
// Load the builtin given by the Smi in |builtin_index| into the same
// register.
void LoadEntryFromBuiltinIndex(Register builtin_index);
// Call a builtin, selected either by a register-held index or by a
// compile-time constant index.
void CallBuiltinByIndex(Register builtin_index) override;
void CallBuiltin(int builtin_index);
// Load the entry address of |code_object|, then call or tail-call through
// it.
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
void JumpCodeObject(Register code_object) override;
// Generates an instruction sequence s.t. the return address points to the
// instruction following the call.
// The return address on the stack is used by frame iteration.
void StoreReturnAddressAndCall(Register target);
// Emit the call that enters the deoptimizer at |target| for deopt
// |deopt_id| of kind |kind|; |exit| labels the call site.
// NOTE(review): inferred from the name/parameters — confirm in definition.
void CallForDeoptimization(Address target, int deopt_id, Label* exit,
                           DeoptimizeKind kind);
// Calls a C function, passing |num_reg_arguments| general-purpose (and,
// where given, |num_double_arguments| floating-point) arguments.
// The called function is not allowed to trigger a
// garbage collection, since that might move the code and invalidate the
// return address (unless this is somehow accounted for by the called
// function).
void CallCFunction(ExternalReference function, int num_reg_arguments);
void CallCFunction(ExternalReference function, int num_reg_arguments,
                   int num_double_arguments);
void CallCFunction(Register function, int num_reg_arguments,
                   int num_double_arguments);
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
// Exits with 'result' holding the answer.
void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,