# Copyright (C) 2011-2024 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# Crash course on the language that this is written in (which I just call
# "assembly" even though it's more than that); a short commented example
# follows this list:
#
# - Mostly gas-style operand ordering. The last operand tends to be the
# destination. So "a := b" is written as "mov b, a". But unlike gas,
# comparisons are in-order, so "if (a < b)" is written as
# "bilt a, b, ...".
#
# - "b" = byte, "h" = 16-bit word, "i" = 32-bit word, "q" = 64-bit word,
# "f" = float, "d" = double, "p" = pointer. For 32-bit, "i" and "p" are
# interchangeable except when an op supports one but not the other.
#
# - In general, valid operands for macro invocations and instructions are
# registers (eg "t0"), addresses (eg "4[t0]"), base-index addresses
# (eg "7[t0, t1, 2]"), absolute addresses (eg "0xa0000000[]"), or labels
# (eg "_foo" or ".foo"). Macro invocations can also take anonymous
# macros as operands. Instructions cannot take anonymous macros.
#
# - Labels must have names that begin with either "_" or ".". A "." label
# is local and gets renamed before code gen to minimize namespace
# pollution. A "_" label is an extern symbol (i.e. ".globl"). The "_"
# may or may not be removed during code gen depending on whether the asm
# conventions for C name mangling on the target platform mandate a "_"
# prefix.
#
# - A "macro" is a lambda expression, which may be either anonymous or
# named. But this has caveats. "macro" can take zero or more arguments,
# which may be macros or any valid operands, but it can only return
# code. But you can do Turing-complete things via continuation passing
# style: "macro foo (a, b) b(a, a) end foo(foo, foo)". Actually, don't do
# that, since you'll just crash the assembler.
#
# - An "if" is a conditional on settings. Any identifier supplied in the
# predicate of an "if" is assumed to be a #define that is available
# during code gen. So you can't use "if" for computation in a macro, but
# you can use it to select different pieces of code for different
# platforms.
#
# - Arguments to macros follow lexical scoping rather than dynamic scoping.
# Consts also follow lexical scoping and may override (hide) arguments
# or other consts. All variables (arguments and constants) can be bound
# to operands. Additionally, arguments (but not constants) can be bound
# to macros.
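#
# A short, hypothetical example of the syntax described above (not real
# LLInt code; t0 and t1 are the temporary registers documented below):
#
#     macro incrementAndCheck(value, limit, target)
#         addi 1, value                # value := value + 1 (dest comes last)
#         bilt value, limit, target    # if (value < limit) goto target
#     end
#
#     .top:
#         incrementAndCheck(t0, t1, .top)
#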
# The following general-purpose registers are available:
#
# - cfr and sp hold the call frame and (native) stack pointer respectively.
# They are callee-save registers, and guaranteed to be distinct from all other
# registers on all architectures.
#
# - lr is defined on non-X86 architectures (ARM64, ARM64E, ARMv7, and CLOOP)
# and holds the return PC.
#
# - t0, t1, t2, t3, t4, t5, and optionally t6 and t7 are temporary registers that can get trashed on
# calls, and are pairwise distinct registers. t4 holds the JS program counter, so use
# with caution in opcodes (actually, don't use it in opcodes at all, except as PC).
#
# - r0 and r1 are the platform's customary return registers, and thus are
# two distinct registers.
#
# - a0, a1, a2 and a3 are the platform's customary argument registers, and
# thus are pairwise distinct registers. Be mindful that:
#
# - The only registers guaranteed to be caller-saved are r0, r1, a0, a1 and a2, and
# you should be mindful of that in functions that are called directly from C.
# If you need more registers, you should push and pop them like a good
# assembly citizen, because any other register will be callee-saved on X86.
#
# You can additionally assume:
#
# - a3, t2, t3, t4 and t5 are never return registers; t0, t1, a0, a1 and a2
# can be return registers.
#
# - t3 can only be a3, t1 can only be a1; but t0 and t2 can be either a0 or a2.
#
# - There are callee-save registers named csr0, csr1, ... csrN.
# The last three csr registers are used to store the PC base and
# two special tag values (on 64-bit platforms only). Don't use them for anything else.
#
# Additional platform-specific details (you shouldn't rely on this remaining
# true):
#
# - For consistency with the baseline JIT, t0 is always r0 (and t1 is always
# r1 on 32 bits platforms). You should use the r version when you need return
# registers, and the t version otherwise: code using t0 (or t1) should still
# work if swapped with e.g. t3, while code using r0 (or r1) should not. There
# *may* be legacy code relying on this.
#
# - On all platforms, t0 can only be a0 and t2 can only be a2.
#
# - On all platforms other than X86_64, a2 is not a return register.
# a2 is r1 on X86_64 (because the ABI enforces it).
#
# The following floating-point registers are available:
#
# - ft0-ft5 are temporary floating-point registers that get trashed on calls,
# and are pairwise distinct.
#
# - fa0 and fa1 are the platform's customary floating-point argument
# registers, and are distinct from each other. On 64-bit platforms, fa2 and fa3
# are additional floating-point argument registers.
#
# - fr is the platform's customary floating-point return register.
#
# You can assume that ft1-ft5 or fa1-fa3 are never fr, and that ftX is never
# faY if X != Y.
# Do not put any code before this.
global _llintPCRangeStart
_llintPCRangeStart:
# This break instruction is needed so that the synthesized llintPCRangeStart label
# doesn't point to the exact same location as vmEntryToJavaScript which comes after it.
# Otherwise, libunwind will report vmEntryToJavaScript as llintPCRangeStart in
# stack traces.
break
# Work-around for the fact that the toolchain's awareness of armv7k / armv7s
# results in a separate slab in the fat binary, yet the offlineasm doesn't know
# to expect it.
if ARMv7k
end
if ARMv7s
end
# First come the common protocols that both interpreters use. Note that each
# of these must have an ASSERT() in LLIntData.cpp
# These declarations must match interpreter/JSStack.h.
const PtrSize = constexpr (sizeof(void*))
const MachineRegisterSize = constexpr (sizeof(CPURegister))
const SlotSize = constexpr (sizeof(Register))
const SeenMultipleCalleeObjects = 1
if JSVALUE64
const CallFrameHeaderSlots = 5
else
const CallFrameHeaderSlots = 4
const CallFrameAlignSlots = 1
end
const JSLexicalEnvironment_variables = (sizeof JSLexicalEnvironment + SlotSize - 1) & ~(SlotSize - 1)
const DirectArguments_storage = (sizeof DirectArguments + SlotSize - 1) & ~(SlotSize - 1)
const JSInternalFieldObjectImpl_internalFields = JSInternalFieldObjectImpl::m_internalFields
const StackAlignment = constexpr (stackAlignmentBytes())
const StackAlignmentSlots = constexpr (stackAlignmentRegisters())
const StackAlignmentMask = StackAlignment - 1
const CallerFrameAndPCSize = constexpr (sizeof(CallerFrameAndPC))
const PrologueStackPointerDelta = constexpr (prologueStackPointerDelta())
const CallerFrame = 0
const ReturnPC = CallerFrame + MachineRegisterSize
const CodeBlock = ReturnPC + MachineRegisterSize
const Callee = CodeBlock + SlotSize
const ArgumentCountIncludingThis = Callee + SlotSize
const ThisArgumentOffset = ArgumentCountIncludingThis + SlotSize
const FirstArgumentOffset = ThisArgumentOffset + SlotSize
const CallFrameHeaderSize = ThisArgumentOffset
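# For example, on a 64-bit platform (8-byte machine registers and slots) this
# lays the header out as CallerFrame at 0, ReturnPC at 8, CodeBlock at 16,
# Callee at 24, and ArgumentCountIncludingThis at 32; the 'this' argument
# starts at offset 40, matching the 5 CallFrameHeaderSlots declared above.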
const MetadataOffsetTable16Offset = 0
const MetadataOffsetTable32Offset = constexpr UnlinkedMetadataTable::s_offset16TableSize
const NumberOfJSOpcodeIDs = constexpr numOpcodeIDs
# Some value representation constants.
if JSVALUE64
const TagOther = constexpr JSValue::OtherTag
const TagBool = constexpr JSValue::BoolTag
const TagUndefined = constexpr JSValue::UndefinedTag
const ValueEmpty = constexpr JSValue::ValueEmpty
const ValueFalse = constexpr JSValue::ValueFalse
const ValueTrue = constexpr JSValue::ValueTrue
const ValueUndefined = constexpr JSValue::ValueUndefined
const ValueNull = constexpr JSValue::ValueNull
const TagNumber = constexpr JSValue::NumberTag
const NotCellMask = constexpr JSValue::NotCellMask
if BIGINT32
const TagBigInt32 = constexpr JSValue::BigInt32Tag
const MaskBigInt32 = constexpr JSValue::BigInt32Mask
end
const LowestOfHighBits = constexpr JSValue::LowestOfHighBits
else
const Int32Tag = constexpr JSValue::Int32Tag
const BooleanTag = constexpr JSValue::BooleanTag
const NullTag = constexpr JSValue::NullTag
const UndefinedTag = constexpr JSValue::UndefinedTag
const CellTag = constexpr JSValue::CellTag
const EmptyValueTag = constexpr JSValue::EmptyValueTag
const DeletedValueTag = constexpr JSValue::DeletedValueTag
const InvalidTag = constexpr JSValue::InvalidTag
const LowestTag = constexpr JSValue::LowestTag
end
if LARGE_TYPED_ARRAYS
const SmallTypedArrayMaxLength = constexpr ArrayProfile::s_smallTypedArrayMaxLength
end
const maxFrameExtentForSlowPathCall = constexpr maxFrameExtentForSlowPathCall
if X86_64 or ARM64 or ARM64E or RISCV64
const CalleeSaveSpaceAsVirtualRegisters = 4
elsif C_LOOP
const CalleeSaveSpaceAsVirtualRegisters = 1
elsif ARMv7
const CalleeSaveSpaceAsVirtualRegisters = 1
else
const CalleeSaveSpaceAsVirtualRegisters = 0
end
const CalleeSaveSpaceStackAligned = (CalleeSaveSpaceAsVirtualRegisters * SlotSize + StackAlignment - 1) & ~StackAlignmentMask
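# For example, with a 16-byte StackAlignment, the four 8-byte virtual
# registers used on 64-bit platforms occupy exactly 32 bytes, while the single
# register used on ARMv7 and C_LOOP rounds up from 8 to 16 bytes.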
# Watchpoint states
const ClearWatchpoint = constexpr ClearWatchpoint
const IsWatched = constexpr IsWatched
const IsInvalidated = constexpr IsInvalidated
# ShadowChicken data
const ShadowChickenTailMarker = constexpr ShadowChicken::Packet::tailMarkerValue
# UnaryArithProfile data
const ArithProfileInt = constexpr (UnaryArithProfile::observedIntBits())
const ArithProfileNumber = constexpr (UnaryArithProfile::observedNumberBits())
# BinaryArithProfile data
const ArithProfileIntInt = constexpr (BinaryArithProfile::observedIntIntBits())
const ArithProfileNumberInt = constexpr (BinaryArithProfile::observedNumberIntBits())
const ArithProfileIntNumber = constexpr (BinaryArithProfile::observedIntNumberBits())
const ArithProfileNumberNumber = constexpr (BinaryArithProfile::observedNumberNumberBits())
# Pointer Tags
const AddressDiversified = 1
const BytecodePtrTag = constexpr BytecodePtrTag
const CustomAccessorPtrTag = constexpr CustomAccessorPtrTag
const JSEntryPtrTag = constexpr JSEntryPtrTag
const HostFunctionPtrTag = constexpr HostFunctionPtrTag
const JSEntrySlowPathPtrTag = constexpr JSEntrySlowPathPtrTag
const NativeToJITGatePtrTag = constexpr NativeToJITGatePtrTag
const ExceptionHandlerPtrTag = constexpr ExceptionHandlerPtrTag
const YarrEntryPtrTag = constexpr YarrEntryPtrTag
const CSSSelectorPtrTag = constexpr CSSSelectorPtrTag
const LLIntToWasmEntryPtrTag = constexpr LLIntToWasmEntryPtrTag
const NoPtrTag = constexpr NoPtrTag
# VMTraps data
const VMTrapsAsyncEvents = constexpr VMTraps::AsyncEvents
# Some register conventions.
# - We use a pair of registers to represent the PC: one register for the
# base of the bytecodes, and one register for the index.
# - The PC base (or PB for short) must be stored in a callee-save register.
# - The metadata (PM / pointer to metadata) must be stored in a callee-save register.
# - C calls are still given the Instruction* rather than the PC index.
# This requires an add before the call, and a sub after.
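# A minimal sketch of that add/sub dance (the real helpers live in the
# per-platform interpreter files):
#
#     addp PB, PC        # PC index -> Instruction* before the C call
#     # ... make the C call ...
#     subp PB, PC        # Instruction* -> PC index once it returns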
if JSVALUE64
const PC = t4 # When changing this, make sure LLIntPC is up to date in LLIntPCRanges.h
if ARM64 or ARM64E or RISCV64
const metadataTable = csr6
const PB = csr7
const numberTag = csr8
const notCellMask = csr9
elsif X86_64
const metadataTable = csr1
const PB = csr2
const numberTag = csr3
const notCellMask = csr4
elsif C_LOOP
const PB = csr0
const numberTag = csr1
const notCellMask = csr2
const metadataTable = csr3
end
else
const PC = t4 # When changing this, make sure LLIntPC is up to date in LLIntPCRanges.h
if C_LOOP
const PB = csr0
const metadataTable = csr3
elsif ARMv7
const metadataTable = csr0
const PB = csr1
else
error
end
end
if GIGACAGE_ENABLED
const GigacagePrimitiveBasePtrOffset = constexpr Gigacage::offsetOfPrimitiveGigacageBasePtr
end
# Opcode offsets
const OpcodeIDNarrowSize = 1 # OpcodeID
const OpcodeIDWide16SizeJS = 2 # Wide16 Prefix + OpcodeID
const OpcodeIDWide32SizeJS = 2 # Wide32 Prefix + OpcodeID
const OpcodeIDWide16SizeWasm = 2 # Wide16 Prefix + OpcodeID(1 byte)
const OpcodeIDWide32SizeWasm = 2 # Wide32 Prefix + OpcodeID(1 byte)
const WTFConfig = _g_config + constexpr WTF::startOffsetOfWTFConfig
const GigacageConfig = _g_config + constexpr Gigacage::startOffsetOfGigacageConfig
const JSCConfigOffset = constexpr WTF::offsetOfWTFConfigExtension
const JSCConfigGateMapOffset = JSCConfigOffset + constexpr JSC::offsetOfJSCConfigGateMap
macro loadBoolJSCOption(name, reg)
leap _g_config, reg
loadb JSCConfigOffset + JSC::Config::options + OptionsStorage::%name%[reg], reg
end
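# The interpreter is token-threaded: each opcode implementation ends by
# loading the next opcode byte ([PB, PC, 1]) and jumping through a table of
# code pointers (_g_opcodeMap, or its wide16/wide32 variants), tagged with
# BytecodePtrTag on architectures with pointer authentication.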
macro nextInstruction()
loadb [PB, PC, 1], t0
leap _g_opcodeMap, t1
jmp [t1, t0, PtrSize], BytecodePtrTag, AddressDiversified
end
macro nextInstructionWide16()
loadb OpcodeIDNarrowSize[PB, PC, 1], t0
leap _g_opcodeMapWide16, t1
jmp [t1, t0, PtrSize], BytecodePtrTag, AddressDiversified
end
macro nextInstructionWide32()
loadb OpcodeIDNarrowSize[PB, PC, 1], t0
leap _g_opcodeMapWide32, t1
jmp [t1, t0, PtrSize], BytecodePtrTag, AddressDiversified
end
macro dispatch(advanceReg)
addp advanceReg, PC
nextInstruction()
end
macro dispatchIndirect(offsetReg)
dispatch(offsetReg)
end
macro genericDispatchOpJS(dispatch, size, opcodeName)
macro dispatchNarrow()
dispatch((constexpr %opcodeName%_length) * 1 + OpcodeIDNarrowSize)
end
macro dispatchWide16()
dispatch((constexpr %opcodeName%_length) * 2 + OpcodeIDWide16SizeJS)
end
macro dispatchWide32()
dispatch((constexpr %opcodeName%_length) * 4 + OpcodeIDWide32SizeJS)
end
size(dispatchNarrow, dispatchWide16, dispatchWide32, macro (dispatch) dispatch() end)
end
macro genericDispatchOpWasm(dispatch, size, opcodeName)
macro dispatchNarrow()
dispatch((constexpr %opcodeName%_length) * 1 + OpcodeIDNarrowSize)
end
macro dispatchWide16()
dispatch((constexpr %opcodeName%_length) * 2 + OpcodeIDWide16SizeWasm)
end
macro dispatchWide32()
dispatch((constexpr %opcodeName%_length) * 4 + OpcodeIDWide32SizeWasm)
end
size(dispatchNarrow, dispatchWide16, dispatchWide32, macro (dispatch) dispatch() end)
end
macro dispatchOp(size, opcodeName)
genericDispatchOpJS(dispatch, size, opcodeName)
end
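# For instance, an opcode whose length constant is 3 advances PC by
# 3 * 1 + 1 = 4 bytes when narrow, 3 * 2 + 2 = 8 bytes when wide16, and
# 3 * 4 + 2 = 14 bytes when wide32: the length is scaled by the operand
# width, and the opcode (plus, for wide forms, the prefix) bytes are added
# on top.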
macro superSamplerBegin(scratch)
leap _g_superSamplerCount, scratch
addi 1, [scratch]
end
macro superSamplerEnd(scratch)
leap _g_superSamplerCount, scratch
subi 1, [scratch]
end
macro getu(size, opcodeStruct, fieldName, dst)
size(getuOperandNarrow, getuOperandWide16JS, getuOperandWide32JS, macro (getu)
getu(opcodeStruct, fieldName, dst)
end)
end
macro get(size, opcodeStruct, fieldName, dst)
size(getOperandNarrow, getOperandWide16JS, getOperandWide32JS, macro (get)
get(opcodeStruct, fieldName, dst)
end)
end
macro narrow(narrowFn, wide16Fn, wide32Fn, k)
k(narrowFn)
end
macro wide16(narrowFn, wide16Fn, wide32Fn, k)
k(wide16Fn)
end
macro wide32(narrowFn, wide16Fn, wide32Fn, k)
k(wide32Fn)
end
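# narrow, wide16 and wide32 above are continuation-passing selectors: a
# "size" argument bound to one of them picks the matching variant and hands
# it to the continuation, e.g. size(a, b, c, macro (fn) fn() end).
#
# metadata() below materializes a pointer to the current opcode's Metadata
# entry: the per-opcode offset comes from a compact 16-bit table, falling
# back to a 32-bit table when the 16-bit entry is zero (i.e. the offset did
# not fit); m_metadataID * sizeof(Metadata) is then added, and the result is
# rounded up to alignof(Metadata).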
macro metadata(size, opcode, dst, scratch)
loadh (constexpr %opcode%::opcodeID * 2 + MetadataOffsetTable16Offset)[metadataTable], dst # offset = metadataTable<uint16_t*>[opcodeID]
btinz dst, .setUpOffset
loadi (constexpr %opcode%::opcodeID * 4 + MetadataOffsetTable32Offset)[metadataTable], dst # offset = metadataTable<uint32_t*>[opcodeID]
.setUpOffset:
getu(size, opcode, m_metadataID, scratch) # scratch = bytecode.m_metadataID
muli sizeof %opcode%::Metadata, scratch # scratch *= sizeof(Op::Metadata)
addi scratch, dst # offset += scratch
addp metadataTable, dst # return &metadataTable[offset]
# roundUpToMultipleOf(alignof(Metadata), dst)
const adder = (constexpr (alignof(%opcode%::Metadata))) - 1
const mask = ~adder
addp adder, dst
andp mask, dst
end
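# Jump targets are encoded as a bytecode offset in the instruction stream; a
# stored offset of zero is the sentinel for a displacement that did not fit,
# in which case the slow path below looks the real target up out of line.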
macro jumpImpl(dispatchIndirect, targetOffsetReg)
btiz targetOffsetReg, .outOfLineJumpTarget
dispatchIndirect(targetOffsetReg)
.outOfLineJumpTarget:
callSlowPath(_llint_slow_path_out_of_line_jump_target)
nextInstruction()
end
macro commonOp(label, prologue, fn)
_%label%:
prologue()
fn(narrow)
if ASSERT_ENABLED
break
break
end
_%label%_wide16:
prologue()
fn(wide16)
if ASSERT_ENABLED
break
break
end
_%label%_wide32:
prologue()
fn(wide32)
if ASSERT_ENABLED
break
break
end
end
macro op(l, fn)
commonOp(l, macro () end, macro (size)
size(fn, macro() break end, macro() break end, macro(gen) gen() end)
end)
end
macro llintOp(opcodeName, opcodeStruct, fn)
commonOp(llint_%opcodeName%, traceExecution, macro(size)
macro getImpl(fieldName, dst)
get(size, opcodeStruct, fieldName, dst)
end
macro dispatchImpl()
dispatchOp(size, opcodeName)
end
fn(size, getImpl, dispatchImpl)
end)
end
macro llintOpWithReturn(opcodeName, opcodeStruct, fn)
llintOp(opcodeName, opcodeStruct, macro(size, get, dispatch)
makeReturn(get, dispatch, macro (return)
fn(size, get, dispatch, return)
end)
end)
end
macro llintOpWithMetadata(opcodeName, opcodeStruct, fn)
llintOpWithReturn(opcodeName, opcodeStruct, macro (size, get, dispatch, return)
macro meta(dst, scratch)
metadata(size, opcodeStruct, dst, scratch)
end
fn(size, get, dispatch, meta, return)
end)
end
macro llintOpWithJump(opcodeName, opcodeStruct, impl)
llintOpWithMetadata(opcodeName, opcodeStruct, macro(size, get, dispatch, metadata, return)
macro jump(fieldName)
get(fieldName, t0)
jumpImpl(dispatchIndirect, t0)
end
impl(size, get, jump, dispatch)
end)
end
macro llintOpWithProfile(opcodeName, opcodeStruct, fn)
llintOpWithMetadata(opcodeName, opcodeStruct, macro(size, get, dispatch, metadata, return)
makeReturnProfiled(size, opcodeStruct, get, metadata, dispatch, macro (returnProfiled)
fn(size, get, dispatch, returnProfiled)
end)
end)
end
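# A hypothetical example of how the llintOp* wrappers are used (not a real
# opcode; for illustration only):
#
#     llintOpWithReturn(op_example, OpExample, macro (size, get, dispatch, return)
#         get(m_operand, t0)    # t0 := the operand's virtual register index
#         # ... compute a result JSValue into t1 ...
#         return(t1)            # store t1 to m_dst, then dispatch onward
#     end)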
# Constants for reasoning about value representation.
const TagOffset = constexpr TagOffset
const PayloadOffset = constexpr PayloadOffset
# Constant for reasoning about butterflies.
const IsArray = constexpr IsArray
const IndexingShapeMask = constexpr IndexingShapeMask
const IndexingTypeMask = constexpr IndexingTypeMask
const NoIndexingShape = constexpr NoIndexingShape
const Int32Shape = constexpr Int32Shape
const DoubleShape = constexpr DoubleShape
const ContiguousShape = constexpr ContiguousShape
const ArrayStorageShape = constexpr ArrayStorageShape
const SlowPutArrayStorageShape = constexpr SlowPutArrayStorageShape
const CopyOnWrite = constexpr CopyOnWrite
const ArrayWithUndecided = constexpr ArrayWithUndecided
# Type constants.
const StructureType = constexpr StructureType
const StringType = constexpr StringType
const SymbolType = constexpr SymbolType
const ObjectType = constexpr ObjectType
const FinalObjectType = constexpr FinalObjectType
const JSFunctionType = constexpr JSFunctionType
const InternalFunctionType = constexpr InternalFunctionType
const ArrayType = constexpr ArrayType
const DerivedArrayType = constexpr DerivedArrayType
const ProxyObjectType = constexpr ProxyObjectType
const HeapBigIntType = constexpr HeapBigIntType
const FunctionExecutableType = constexpr FunctionExecutableType
# The typed array types need to be numbered in a particular order because of the manually written
# switch statement in get_by_val and put_by_val.
const Int8ArrayType = constexpr Int8ArrayType
const Uint8ArrayType = constexpr Uint8ArrayType
const Uint8ClampedArrayType = constexpr Uint8ClampedArrayType
const Int16ArrayType = constexpr Int16ArrayType
const Uint16ArrayType = constexpr Uint16ArrayType
const Int32ArrayType = constexpr Int32ArrayType
const Uint32ArrayType = constexpr Uint32ArrayType
const Float16ArrayType = constexpr Float16ArrayType
const Float32ArrayType = constexpr Float32ArrayType
const Float64ArrayType = constexpr Float64ArrayType
const FirstTypedArrayType = constexpr FirstTypedArrayType
const NumberOfTypedArrayTypesExcludingDataView = constexpr NumberOfTypedArrayTypesExcludingDataView
const NumberOfTypedArrayTypesExcludingBigIntArraysAndDataView = constexpr NumberOfTypedArrayTypesExcludingBigIntArraysAndDataView
# Type flags constants.
const MasqueradesAsUndefined = constexpr MasqueradesAsUndefined
const ImplementsDefaultHasInstance = constexpr ImplementsDefaultHasInstance
const OverridesGetPrototypeOutOfLine = constexpr OverridesGetPrototypeOutOfLine
# Bytecode operand constants.
const FirstConstantRegisterIndexNarrow = constexpr FirstConstantRegisterIndex8
const FirstConstantRegisterIndexWide16 = constexpr FirstConstantRegisterIndex16
const FirstConstantRegisterIndexWide32 = constexpr FirstConstantRegisterIndex
# Code type constants.
const GlobalCode = constexpr GlobalCode
const EvalCode = constexpr EvalCode
const FunctionCode = constexpr FunctionCode
const ModuleCode = constexpr ModuleCode
# The interpreter steals the tag word of the argument count.
const CallSiteIndex = ArgumentCountIncludingThis + TagOffset
# String flags.
const isRopeInPointer = constexpr JSString::isRopeInPointer
const HashFlags8BitBuffer = constexpr StringImpl::s_hashFlag8BitBuffer
# Copied from PropertyOffset.h
const firstOutOfLineOffset = constexpr firstOutOfLineOffset
const knownPolyProtoOffset = constexpr knownPolyProtoOffset
# ResolveType
const GlobalProperty = constexpr GlobalProperty
const GlobalVar = constexpr GlobalVar
const GlobalLexicalVar = constexpr GlobalLexicalVar
const ClosureVar = constexpr ClosureVar
const ResolvedClosureVar = constexpr ResolvedClosureVar
const ModuleVar = constexpr ModuleVar
const GlobalPropertyWithVarInjectionChecks = constexpr GlobalPropertyWithVarInjectionChecks
const GlobalVarWithVarInjectionChecks = constexpr GlobalVarWithVarInjectionChecks
const GlobalLexicalVarWithVarInjectionChecks = constexpr GlobalLexicalVarWithVarInjectionChecks
const ClosureVarWithVarInjectionChecks = constexpr ClosureVarWithVarInjectionChecks
const ResolveTypeMask = constexpr GetPutInfo::typeBits
const InitializationModeMask = constexpr GetPutInfo::initializationBits
const InitializationModeShift = constexpr GetPutInfo::initializationShift
const NotInitialization = constexpr InitializationMode::NotInitialization
const MarkedBlockSize = constexpr MarkedBlock::blockSize
const MarkedBlockMask = ~(MarkedBlockSize - 1)
const MarkedBlockHeaderOffset = constexpr MarkedBlock::offsetOfHeader
const PreciseAllocationHeaderSize = constexpr (PreciseAllocation::headerSize())
const PreciseAllocationVMOffset = (PreciseAllocation::m_weakSet + WeakSet::m_vm - PreciseAllocationHeaderSize)
const BlackThreshold = constexpr blackThreshold
const VectorBufferOffset = Vector::m_buffer
const VectorSizeOffset = Vector::m_size
# Some common utilities.
macro crash()
if C_LOOP
cloopCrash
else
call _llint_crash
end
end
macro assert(assertion)
if ASSERT_ENABLED
assertion(.ok)
crash()
.ok:
end
end
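# Usage sketch (hypothetical): assert that t0 is non-zero. The assertion
# macro branches to the provided label when the condition holds; otherwise
# execution falls through to crash().
#
#     assert(macro (ok) btpnz t0, ok end)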
macro assert_with(assertion, crash)
if ASSERT_ENABLED
assertion(.ok)
crash()
.ok:
end
end
# The probe macro can be used to insert some debugging code without perturbing scalar
# registers. Presently, the probe macro only preserves scalar registers. Hence, the
# C probe callback function should not trash floating point registers.
#
# The macro you pass to probe() can pass whatever registers you like to your probe
# callback function. However, you need to be mindful of which of the registers are
# also used as argument registers, and ensure that you don't trash the register value
# before storing it in the probe callback argument register that you desire.
#
# Here's an example of how it's used:
#
# probe(
# macro()
# move cfr, a0 # pass the CallFrame* as arg0.
# move t0, a1 # pass the value of register t0 as arg1.
# call _cProbeCallbackFunction # to do whatever you want.
# end
# )
#
# LLIntSlowPaths.h:
# extern "C" __attribute__((__used__)) __attribute__((visibility("hidden"))) void cProbeCallbackFunction(uint64_t i);
# LLIntSlowPaths.cpp:
# extern "C" void cProbeCallbackFunction(uint64_t i) {}
#
if X86_64 or ARM64 or ARM64E or ARMv7
macro probe(action)
# save all the registers that the LLInt may use.
if ARM64 or ARM64E or ARMv7
push cfr, lr
end
push a0, a1
push a2, a3
push t0, t1
push t2, t3
push t4, t5
push t6, t7
push ws0, ws1
if ARM64 or ARM64E
push csr0, csr1
push csr2, csr3
push csr4, csr5
push csr6, csr7
push csr8, csr9
elsif ARMv7
push csr0, csr1
end
action()
# restore all the registers we saved previously.
if ARM64 or ARM64E
pop csr9, csr8
pop csr7, csr6
pop csr5, csr4
pop csr3, csr2
pop csr1, csr0
elsif ARMv7
pop csr1, csr0
end
pop ws1, ws0
pop t7, t6
pop t5, t4
pop t3, t2
pop t1, t0
pop a3, a2
pop a1, a0
if ARM64 or ARM64E or ARMv7
pop lr, cfr
end
end
else
macro probe(action)
end
end
macro checkStackPointerAlignment(tempReg, location)
if ASSERT_ENABLED
if ARM64 or ARM64E or C_LOOP
# ARM64 and ARM64E will check for us!
# C_LOOP does not need the alignment, and can use a little perf
# improvement from avoiding useless work.
else
if ARMv7
# ARM can't do logical ops with the sp as a source
move sp, tempReg
andp StackAlignmentMask, tempReg
else
andp sp, StackAlignmentMask, tempReg
end
btpz tempReg, .stackPointerOkay
move location, tempReg
break
.stackPointerOkay:
end
end
end
if C_LOOP or ARM64 or ARM64E or X86_64 or RISCV64
const CalleeSaveRegisterCount = 0
elsif ARMv7
const CalleeSaveRegisterCount = 5 + 2 * 2 // 5 32-bit GPRs + 2 64-bit FPRs
end
const CalleeRegisterSaveSize = CalleeSaveRegisterCount * MachineRegisterSize
# VMEntryTotalFrameSize includes the space for struct VMEntryRecord and the
# callee save registers rounded up to keep the stack aligned
const VMEntryTotalFrameSize = (CalleeRegisterSaveSize + sizeof VMEntryRecord + StackAlignment - 1) & ~StackAlignmentMask
macro pushCalleeSaves()
# Note: Only registers that are in RegisterSetBuilder::calleeSaveRegisters(),
# but are not in RegisterSetBuilder::vmCalleeSaveRegisters() need to be saved here,
# i.e.: only those registers that are callee save in the C ABI, but are not
# callee save in the JIT ABI.
if C_LOOP or ARM64 or ARM64E or X86_64 or RISCV64
elsif ARMv7
emit "vpush.64 {d14, d15}"
emit "push {r4-r6, r8-r9}"
end
end
macro popCalleeSaves()
if C_LOOP or ARM64 or ARM64E or X86_64 or RISCV64
elsif ARMv7
emit "pop {r4-r6, r8-r9}"
emit "vpop.64 {d14, d15}"
end
end
macro preserveCallerPCAndCFR()
if C_LOOP or ARMv7
push lr
push cfr
elsif X86_64
push cfr
elsif ARM64 or ARM64E or RISCV64
push cfr, lr
else
error
end
move sp, cfr
end
macro restoreCallerPCAndCFR()
move cfr, sp
if C_LOOP or ARMv7
pop cfr
pop lr
elsif X86_64
pop cfr
elsif ARM64 or ARM64E or RISCV64
pop lr, cfr
end
end
macro preserveCalleeSavesUsedByLLInt()
subp CalleeSaveSpaceStackAligned, sp
if C_LOOP
storep metadataTable, -PtrSize[cfr]
elsif ARMv7
storep PB, -4[cfr]
storep metadataTable, -8[cfr]
elsif ARM64 or ARM64E
storepairq csr8, csr9, -16[cfr]
storepairq csr6, csr7, -32[cfr]
elsif X86_64
storep csr4, -8[cfr]
storep csr3, -16[cfr]
storep csr2, -24[cfr]
storep csr1, -32[cfr]
elsif RISCV64
storep csr9, -8[cfr]
storep csr8, -16[cfr]
storep csr7, -24[cfr]
storep csr6, -32[cfr]
end
end
macro restoreCalleeSavesUsedByLLInt()
if C_LOOP
loadp -PtrSize[cfr], metadataTable
elsif ARMv7
loadp -4[cfr], PB
loadp -8[cfr], metadataTable
elsif ARM64 or ARM64E
loadpairq -32[cfr], csr6, csr7
loadpairq -16[cfr], csr8, csr9
elsif X86_64
loadp -32[cfr], csr1
loadp -24[cfr], csr2
loadp -16[cfr], csr3
loadp -8[cfr], csr4
elsif RISCV64
loadp -32[cfr], csr6
loadp -24[cfr], csr7
loadp -16[cfr], csr8
loadp -8[cfr], csr9
end
end
macro forEachGPCalleeSave(func)
if ARM64 or ARM64E
func(csr0, 0)
func(csr1, 1)
func(csr2, 2)
func(csr3, 3)
func(csr4, 4)
func(csr5, 5)
func(csr6, 6)
func(csr7, 7)
func(csr8, 8)
func(csr9, 9)
elsif X86_64
func(csr0, 0)
func(csr1, 1)
func(csr2, 2)
func(csr3, 3)
func(csr4, 4)
else
error
end
end
macro forEachFPCalleeSave(func)
if ARM64 or ARM64E
func(csfr0, 0)
func(csfr1, 1)
func(csfr2, 2)
func(csfr3, 3)
func(csfr4, 4)
func(csfr5, 5)
func(csfr6, 6)
func(csfr7, 7)
elsif X86_64
else
error
end
end
macro copyCalleeSavesToEntryFrameCalleeSavesBuffer(entryFrame)
if ARM64 or ARM64E or X86_64 or ARMv7 or RISCV64
vmEntryRecord(entryFrame, entryFrame)
leap VMEntryRecord::calleeSaveRegistersBuffer[entryFrame], entryFrame
if ARM64 or ARM64E
storepairq csr0, csr1, [entryFrame]
storepairq csr2, csr3, 16[entryFrame]
storepairq csr4, csr5, 32[entryFrame]
storepairq csr6, csr7, 48[entryFrame]
storepairq csr8, csr9, 64[entryFrame]
storepaird csfr0, csfr1, 80[entryFrame]
storepaird csfr2, csfr3, 96[entryFrame]
storepaird csfr4, csfr5, 112[entryFrame]
storepaird csfr6, csfr7, 128[entryFrame]
elsif X86_64
storeq csr0, [entryFrame]
storeq csr1, 8[entryFrame]
storeq csr2, 16[entryFrame]
storeq csr3, 24[entryFrame]
storeq csr4, 32[entryFrame]
elsif ARMv7
storep csr0, [entryFrame]
storep csr1, 4[entryFrame]
stored csfr0, 8[entryFrame]
stored csfr1, 16[entryFrame]
stored csfr2, 24[entryFrame]
stored csfr3, 32[entryFrame]
stored csfr4, 40[entryFrame]
stored csfr5, 48[entryFrame]
elsif RISCV64
storep csr0, [entryFrame]
storep csr1, 8[entryFrame]
storep csr2, 16[entryFrame]
storep csr3, 24[entryFrame]
storep csr4, 32[entryFrame]
storep csr5, 40[entryFrame]
storep csr6, 48[entryFrame]
storep csr7, 56[entryFrame]
storep csr8, 64[entryFrame]
storep csr9, 72[entryFrame]
storep csr10, 80[entryFrame]
stored csfr0, 88[entryFrame]
stored csfr1, 96[entryFrame]
stored csfr2, 104[entryFrame]
stored csfr3, 112[entryFrame]
stored csfr4, 120[entryFrame]
stored csfr5, 128[entryFrame]
stored csfr6, 136[entryFrame]
stored csfr7, 144[entryFrame]
stored csfr8, 152[entryFrame]
stored csfr9, 160[entryFrame]
stored csfr10, 168[entryFrame]
stored csfr11, 176[entryFrame]
end
end
end
macro copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(vm, temp)
if ARM64 or ARM64E or X86_64 or ARMv7 or RISCV64
loadp VM::topEntryFrame[vm], temp
copyCalleeSavesToEntryFrameCalleeSavesBuffer(temp)
end
end
macro restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(vm, temp)
if ARM64 or ARM64E or X86_64 or ARMv7 or RISCV64
loadp VM::topEntryFrame[vm], temp
vmEntryRecord(temp, temp)
leap VMEntryRecord::calleeSaveRegistersBuffer[temp], temp
if ARM64 or ARM64E
loadpairq [temp], csr0, csr1
loadpairq 16[temp], csr2, csr3
loadpairq 32[temp], csr4, csr5
loadpairq 48[temp], csr6, csr7
loadpairq 64[temp], csr8, csr9
loadpaird 80[temp], csfr0, csfr1
loadpaird 96[temp], csfr2, csfr3
loadpaird 112[temp], csfr4, csfr5
loadpaird 128[temp], csfr6, csfr7
elsif X86_64
loadq [temp], csr0
loadq 8[temp], csr1
loadq 16[temp], csr2
loadq 24[temp], csr3
loadq 32[temp], csr4
elsif ARMv7
loadp [temp], csr0
loadp 4[temp], csr1
loadd 8[temp], csfr0
loadd 16[temp], csfr1
loadd 24[temp], csfr2
loadd 32[temp], csfr3
loadd 40[temp], csfr4
loadd 48[temp], csfr5
elsif RISCV64
loadq [temp], csr0
loadq 8[temp], csr1
loadq 16[temp], csr2