This repository has been archived by the owner on Jan 23, 2023. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 2.7k
/
emitxarch.cpp
11398 lines (9637 loc) · 331 KB
/
emitxarch.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX emitX86.cpp XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#if defined(_TARGET_XARCH_)
/*****************************************************************************/
/*****************************************************************************/
#include "instr.h"
#include "emit.h"
#include "codegen.h"
// Returns true when 'ins' falls inside the contiguous SSE2 opcode range of
// the instruction table.
bool IsSSE2Instruction(instruction ins)
{
    const bool inSse2Range = (ins >= INS_FIRST_SSE2_INSTRUCTION) && (ins <= INS_LAST_SSE2_INSTRUCTION);
    return inSse2Range;
}
// Returns true when 'ins' is an SSE2 instruction or, on builds with AVX
// support, an AVX instruction (the two ranges are contiguous in the table).
bool IsSSEOrAVXInstruction(instruction ins)
{
#ifdef FEATURE_AVX_SUPPORT
    const bool inRange = (ins >= INS_FIRST_SSE2_INSTRUCTION) && (ins <= INS_LAST_AVX_INSTRUCTION);
    return inRange;
#else  // !FEATURE_AVX_SUPPORT
    // Without AVX support only the SSE2 range qualifies.
    return IsSSE2Instruction(ins);
#endif // !FEATURE_AVX_SUPPORT
}
// Returns true when AVX codegen is enabled for this compilation and 'ins' is
// in the SSE/AVX opcode range; always false on builds without AVX support.
bool emitter::IsAVXInstruction(instruction ins)
{
#ifdef FEATURE_AVX_SUPPORT
    if (!UseAVX())
    {
        return false;
    }
    return IsSSEOrAVXInstruction(ins);
#else
    return false;
#endif
}
#define REX_PREFIX_MASK 0xFF00000000LL
#ifdef FEATURE_AVX_SUPPORT
// Returns true if the AVX instruction is a binary operator that requires 3 operands.
// When we emit an instruction with only two operands, we will duplicate the destination
// as a source.
// TODO-XArch-Cleanup: This is a temporary solution for now. Eventually this needs to
// be formalized by adding an additional field to instruction table to
// to indicate whether a 3-operand instruction.
// Returns true if the AVX instruction is a binary operator that requires 3 operands.
// When we emit an instruction with only two operands, we will duplicate the destination
// as a source.
// TODO-XArch-Cleanup: This is a temporary solution for now. Eventually this needs to
// be formalized by adding an additional field to the instruction table to
// indicate whether it is a 3-operand instruction.
bool emitter::IsThreeOperandBinaryAVXInstruction(instruction ins)
{
    // Only AVX encodings carry the extra (vvvv) operand at all.
    if (!IsAVXInstruction(ins))
    {
        return false;
    }

    switch (ins)
    {
        // conversions
        case INS_cvtsi2ss:
        case INS_cvtsi2sd:
        case INS_cvtss2sd:
        case INS_cvtsd2ss:
        // scalar arithmetic
        case INS_addss:
        case INS_addsd:
        case INS_subss:
        case INS_subsd:
        case INS_mulss:
        case INS_mulsd:
        case INS_divss:
        case INS_divsd:
        // packed arithmetic
        case INS_addps:
        case INS_addpd:
        case INS_subps:
        case INS_subpd:
        case INS_mulps:
        case INS_mulpd:
        case INS_divps:
        case INS_divpd:
        // compares
        case INS_cmpps:
        case INS_cmppd:
        // packed logical
        case INS_andps:
        case INS_andpd:
        case INS_orps:
        case INS_orpd:
        case INS_xorps:
        case INS_xorpd:
        case INS_andnps:
        case INS_andnpd:
        // dot product / horizontal add
        case INS_dpps:
        case INS_dppd:
        case INS_haddpd:
        // integer logical
        case INS_por:
        case INS_pand:
        case INS_pandn:
        case INS_pxor:
        // integer compares
        case INS_pcmpeqb:
        case INS_pcmpeqw:
        case INS_pcmpeqd:
        case INS_pcmpeqq:
        case INS_pcmpgtb:
        case INS_pcmpgtw:
        case INS_pcmpgtd:
        case INS_pcmpgtq:
        // integer multiplies
        case INS_pmulld:
        case INS_pmullw:
        case INS_pmuludq:
        // shuffles / inserts
        case INS_shufps:
        case INS_shufpd:
        case INS_insertps:
        case INS_vinsertf128:
        case INS_punpckldq:
        // min / max
        case INS_minps:
        case INS_minss:
        case INS_minpd:
        case INS_minsd:
        case INS_maxps:
        case INS_maxpd:
        case INS_maxss:
        case INS_maxsd:
        case INS_pmaxub:
        case INS_pminub:
        case INS_pmaxsw:
        case INS_pminsw:
        // integer add / subtract
        case INS_paddb:
        case INS_paddw:
        case INS_paddd:
        case INS_paddq:
        case INS_psubb:
        case INS_psubw:
        case INS_psubd:
        case INS_psubq:
            return true;

        default:
            return false;
    }
}
// Returns true if the AVX instruction is a move operator that requires 3 operands.
// When we emit an instruction with only two operands, we will duplicate the source
// register in the vvvv field. This is because these merge sources into the dest.
// TODO-XArch-Cleanup: This is a temporary solution for now. Eventually this needs to
// be formalized by adding an additional field to instruction table to
// to indicate whether a 3-operand instruction.
// Returns true if the AVX instruction is a move operator that requires 3 operands.
// When we emit an instruction with only two operands, we will duplicate the source
// register in the vvvv field. This is because these merge sources into the dest.
// TODO-XArch-Cleanup: This is a temporary solution for now. Eventually this needs to
// be formalized by adding an additional field to the instruction table to
// indicate whether it is a 3-operand instruction.
bool emitter::IsThreeOperandMoveAVXInstruction(instruction ins)
{
    if (!IsAVXInstruction(ins))
    {
        return false;
    }

    switch (ins)
    {
        case INS_movlpd:
        case INS_movlps:
        case INS_movhpd:
        case INS_movhps:
        case INS_movss:
            return true;
        default:
            return false;
    }
}
#endif // FEATURE_AVX_SUPPORT
// Returns true if the AVX instruction is a 4-byte opcode.
// Note that this should be true for any of the instructions in instrsXArch.h
// that use the SSE38 or SSE3A macro.
// TODO-XArch-Cleanup: This is a temporary solution for now. Eventually this
// needs to be addressed by expanding instruction encodings.
// Returns true if the AVX instruction uses a 4-byte opcode.
// Note that this should be true for any of the instructions in instrsXArch.h
// that use the SSE38 or SSE3A macro.
// TODO-XArch-Cleanup: This is a temporary solution for now. Eventually this
// needs to be addressed by expanding instruction encodings.
bool Is4ByteAVXInstruction(instruction ins)
{
#ifdef FEATURE_AVX_SUPPORT
    switch (ins)
    {
        case INS_dpps:
        case INS_dppd:
        case INS_insertps:
        case INS_pcmpeqq:
        case INS_pcmpgtq:
        case INS_vbroadcastss:
        case INS_vbroadcastsd:
        case INS_vpbroadcastb:
        case INS_vpbroadcastw:
        case INS_vpbroadcastd:
        case INS_vpbroadcastq:
        case INS_vextractf128:
        case INS_vinsertf128:
        case INS_pmulld:
            return true;
        default:
            return false;
    }
#else
    return false;
#endif
}
#ifdef FEATURE_AVX_SUPPORT
// Returns true if this instruction requires a VEX prefix
// All AVX instructions require a VEX prefix
// Returns true if this instruction requires a (3-byte) VEX prefix.
// All AVX instructions do, with the single exception of vzeroupper, which is
// encoded with a 2-byte VEX prefix and is handled separately.
bool emitter::TakesVexPrefix(instruction ins)
{
    return (ins != INS_vzeroupper) && IsAVXInstruction(ins);
}
// Add base VEX prefix without setting W, R, X, or B bits
// L bit will be set based on emitter attr.
//
// 3-byte VEX prefix = C4 <R,X,B,m-mmmm> <W,vvvv,L,pp>
// - R, X, B, W - bits to express corresponding REX prefixes
// - m-mmmmm (5-bit)
// 0-00001 - implied leading 0F opcode byte
// 0-00010 - implied leading 0F 38 opcode bytes
// 0-00011 - implied leading 0F 3A opcode bytes
// Rest - reserved for future use; using them will result in an Undefined instruction exception
//
// - vvvv (4-bits) - register specifier in 1's complement form; must be 1111 if unused
// - L - scalar or AVX-128 bit operations (L=0), 256-bit operations (L=1)
// - pp (2-bits) - opcode extension providing equivalent functionality of a SIMD size prefix
// these prefixes are treated mandatory when used with escape opcode 0Fh for
// some SIMD instructions
// 00 - None (0F - packed float)
// 01 - 66 (66 0F - packed double)
// 10 - F3 (F3 0F - scalar float)
// 11 - F2 (F2 0F - scalar double)
//
// TODO-AMD64-CQ: for simplicity of implementation this routine always adds 3-byte VEX
// prefix. Based on 'attr' param we could add 2-byte VEX prefix in case of scalar
// and AVX-128 bit operations.
#define DEFAULT_3BYTE_VEX_PREFIX 0xC4E07800000000ULL
#define LBIT_IN_3BYTE_VEX_PREFIX 0X00000400000000ULL
// Folds the base 3-byte VEX prefix into 'code' for an AVX instruction.
// W, R, X and B are left clear (they are added later by the AddRex* helpers);
// the L bit is set when 'attr' indicates a 256-bit (YMM) operation.
size_t emitter::AddVexPrefix(instruction ins, size_t code, emitAttr attr)
{
    // VEX prefixes are only meaningful for AVX instructions.
    assert(IsAVXInstruction(ins));
    // A VEX prefix must not be applied twice.
    assert(!hasVexPrefix(code));

    code |= DEFAULT_3BYTE_VEX_PREFIX;
    return (attr == EA_32BYTE) ? (code | LBIT_IN_3BYTE_VEX_PREFIX) : code;
}
#endif // FEATURE_AVX_SUPPORT
// Returns true if this instruction, for the given EA_SIZE(attr), will require a REX.W prefix
// Returns true if this instruction, for the given EA_SIZE(attr), will require a REX.W prefix.
// On non-AMD64 targets this is always false.
bool TakesRexWPrefix(instruction ins, emitAttr attr)
{
#ifdef _TARGET_AMD64_
    // movsx should always sign extend out to 8 bytes just because we don't track
    // whether the dest should be 4 bytes or 8 bytes (attr indicates the size
    // of the source, not the dest).
    // A 4-byte movzx is equivalent to an 8 byte movzx, so it is not special
    // cased here.
    //
    // Rex_jmp = jmp with rex prefix always requires rex.w prefix.
    if (ins == INS_movsx || ins == INS_rex_jmp)
    {
        return true;
    }
    // Everything below only matters for 8-byte operations.
    if (EA_SIZE(attr) != EA_8BYTE)
    {
        return false;
    }
    if (IsSSEOrAVXInstruction(ins))
    {
        // Of the SSE/AVX set, only the instructions that move data between a
        // GPR and an XMM register care about REX.W (to select 64-bit GPRs).
        if (ins == INS_cvttsd2si || ins == INS_cvttss2si || ins == INS_cvtsd2si || ins == INS_cvtss2si ||
            ins == INS_cvtsi2sd || ins == INS_cvtsi2ss || ins == INS_mov_xmm2i || ins == INS_mov_i2xmm)
        {
            return true;
        }
        return false;
    }
    // TODO-XArch-Cleanup: Better way to not emit REX.W when we don't need it, than just testing all these
    // opcodes...
    // These are all the instructions that default to 8-byte operand without the REX.W bit
    // With 1 special case: movzx because the 4 byte version still zeros-out the hi 4 bytes
    // so we never need it
    if ((ins != INS_push) && (ins != INS_pop) && (ins != INS_movq) && (ins != INS_movzx) && (ins != INS_push_hide) &&
        (ins != INS_pop_hide) && (ins != INS_ret) && (ins != INS_call) && !((ins >= INS_i_jmp) && (ins <= INS_l_jg)))
    {
        return true;
    }
    else
    {
        return false;
    }
#else //!_TARGET_AMD64 = _TARGET_X86_
    return false;
#endif //!_TARGET_AMD64_
}
// Returns true if using this register will require a REX.* prefix.
// Since XMM registers overlap with YMM registers, this routine
// can also be used to know whether a YMM register is being used, if the
// instruction in question is AVX.
// Returns true if using this register will require a REX.* prefix.
// Since XMM registers overlap with YMM registers, this routine can also be
// used for YMM registers when the instruction in question is AVX.
bool IsExtendedReg(regNumber reg)
{
#ifdef _TARGET_AMD64_
    // R8-R15 and XMM8-XMM15 need the extra encoding bit a REX prefix provides.
    const bool isHighGpReg  = (reg >= REG_R8) && (reg <= REG_R15);
    const bool isHighXmmReg = (reg >= REG_XMM8) && (reg <= REG_XMM15);
    return isHighGpReg || isHighXmmReg;
#else
    // The x86 JIT operates in 32-bit mode where no extended registers exist.
    return false;
#endif
}
// Returns true if using this register, for the given EA_SIZE(attr), will require a REX.* prefix
// Returns true if using this register, for the given EA_SIZE(attr), will require a REX.* prefix.
bool IsExtendedReg(regNumber reg, emitAttr attr)
{
#ifdef _TARGET_AMD64_
    // Not a register at all, so no prefix is needed.
    if (reg > REG_XMM15)
    {
        return false;
    }

    // The opcode field only has 3 bits for the register; the high registers
    // need a 4th bit, which comes from the REX prefix (either REX.X, REX.R,
    // or REX.B).
    if (IsExtendedReg(reg))
    {
        return true;
    }

    // For non-byte-sized operands no further special cases apply.
    //
    // For byte-sized operands: there are 12 one-byte registers addressable
    // 'below' r8b: al, cl, dl, bl, ah, ch, dh, bh, spl, bpl, sil, dil.
    // The first 4 are always addressable; the last 8 split into 2 sets:
    //      ah, ch, dh, bh
    //          -- or --
    //      spl, bpl, sil, dil
    // Both sets are encoded exactly the same; the difference is the presence
    // of a REX prefix, even a REX prefix with no other bits set (0x40).
    // So in order to get to the second set we need a REX prefix (but no bits).
    //
    // TODO-AMD64-CQ: if we ever want to start using the first set, we'll need a different way of
    // encoding/tracking/encoding registers.
    return (EA_SIZE(attr) == EA_1BYTE) && (reg >= REG_RSP);
#else
    // The x86 JIT operates in 32-bit mode where no extended registers exist.
    return false;
#endif
}
// Since XMM registers overlap with YMM registers, this routine
// can also be used to identify a YMM register in the case of AVX instructions.
//
// Legacy X86: we have XMM0-XMM7 available but this routine cannot be used to
// determine whether a reg is XMM because they share the same reg numbers
// with integer registers. Hence always return false.
// Returns true when 'reg' is an XMM register. Since XMM registers overlap
// with YMM registers, this also answers the question for YMM registers when
// the instruction is AVX.
//
// Legacy x86: XMM0-XMM7 exist but share register numbers with the integer
// registers, so this question cannot be answered from the number alone and
// the routine always returns false there.
bool IsXMMReg(regNumber reg)
{
#ifndef LEGACY_BACKEND
#ifdef _TARGET_AMD64_
    // 64-bit target: XMM0..XMM15.
    return (REG_XMM0 <= reg) && (reg <= REG_XMM15);
#else  // !_TARGET_AMD64_
    // 32-bit target: XMM0..XMM7.
    return (REG_XMM0 <= reg) && (reg <= REG_XMM7);
#endif // !_TARGET_AMD64_
#else  // LEGACY_BACKEND
    return false;
#endif // LEGACY_BACKEND
}
// Returns bits to be encoded in instruction for the given register.
// Returns the low-3-bit hardware encoding for the given register.
regNumber RegEncoding(regNumber reg)
{
#ifndef LEGACY_BACKEND
    // XMM registers do not share reg numbers with the integer registers, but
    // their hardware encodings coincide, so rebase XMM numbers by XMMBASE
    // before masking down to the 3 encoding bits.
    unsigned rebased = IsXMMReg(reg) ? (reg - XMMBASE) : reg;
    return (regNumber)(rebased & 0x7);
#else  // LEGACY_BACKEND
    // Legacy x86: XMM registers already share reg numbers with the integer
    // registers, so the low bits are the encoding directly.
    return (regNumber)(reg & 0x7);
#endif // LEGACY_BACKEND
}
// Utility routines that abstract the logic of adding REX.W, REX.R, REX.X, REX.B and REX prefixes
// SSE2: separate 1-byte prefix gets added before opcode.
// AVX: specific bits within VEX prefix need to be set in bit-inverted form.
// Folds the REX.W bit into 'code'.
// SSE2: a separate 1-byte REX prefix (with W set) precedes the opcode.
// AVX:  the W bit lives inside the 3-byte VEX prefix and, unlike R/X/B, is
//       stored in non-inverted form.
size_t emitter::AddRexWPrefix(instruction ins, size_t code)
{
#ifdef _TARGET_AMD64_
    if (!UseAVX() || !IsAVXInstruction(ins))
    {
        // Plain REX encoding: 0x48 = REX prefix with the W bit set.
        return code | 0x4800000000ULL;
    }
    // W-bit is available only in the 3-byte VEX prefix that starts with byte C4.
    assert(hasVexPrefix(code));
    return code | 0x00008000000000ULL;
#else
    assert(!"UNREACHED");
    return code;
#endif
}
#ifdef _TARGET_AMD64_
// Folds the REX.R bit into 'code'. For AVX the R bit is stored bit-inverted
// inside the (3-byte) VEX prefix; otherwise 0x44 = REX prefix with R set.
size_t emitter::AddRexRPrefix(instruction ins, size_t code)
{
    if (!UseAVX() || !IsAVXInstruction(ins))
    {
        return code | 0x4400000000ULL;
    }
    // Right now only the 3-byte VEX prefix is supported.
    assert(hasVexPrefix(code));
    // R-bit is added in bit-inverted form (clear the bit to set R).
    return code & 0xFF7FFFFFFFFFFFULL;
}
// Folds the REX.X bit into 'code'. For AVX the X bit is stored bit-inverted
// inside the (3-byte) VEX prefix; otherwise 0x42 = REX prefix with X set.
size_t emitter::AddRexXPrefix(instruction ins, size_t code)
{
    if (!UseAVX() || !IsAVXInstruction(ins))
    {
        return code | 0x4200000000ULL;
    }
    // Right now only the 3-byte VEX prefix is supported.
    assert(hasVexPrefix(code));
    // X-bit is added in bit-inverted form (clear the bit to set X).
    return code & 0xFFBFFFFFFFFFFFULL;
}
// Folds the REX.B bit into 'code'. For AVX the B bit is stored bit-inverted
// inside the (3-byte) VEX prefix; otherwise 0x41 = REX prefix with B set.
size_t emitter::AddRexBPrefix(instruction ins, size_t code)
{
    if (!UseAVX() || !IsAVXInstruction(ins))
    {
        return code | 0x4100000000ULL;
    }
    // Right now only the 3-byte VEX prefix is supported.
    assert(hasVexPrefix(code));
    // B-bit is added in bit-inverted form (clear the bit to set B).
    return code & 0xFFDFFFFFFFFFFFULL;
}
// Adds REX prefix (0x40) without W, R, X or B bits set
size_t emitter::AddRexPrefix(instruction ins, size_t code)
{
    // AVX instructions never take a bare REX prefix; their REX bits ride in
    // the VEX prefix instead (see the AddRex[WRXB]Prefix helpers).
    assert(!UseAVX() || !IsAVXInstruction(ins));
    // 0x40 = REX prefix with no W, R, X or B bits set.
    return code | 0x4000000000ULL;
}
// Returns true when byte 'b' is one of the SSE operand-size prefixes that may
// legitimately appear embedded in our opcode encodings. All other prefix
// bytes are either unused by the emitter or emitted separately (asserted).
bool isPrefix(BYTE b)
{
    assert(b != 0);    // Caller should check this
    assert(b != 0x67); // We don't use the address size prefix
    assert(b != 0x65); // The GS segment override prefix is emitted separately
    assert(b != 0x64); // The FS segment override prefix is emitted separately
    assert(b != 0xF0); // The lock prefix is emitted separately
    assert(b != 0x2E); // We don't use the CS segment override prefix
    assert(b != 0x3E); // Or the DS segment override prefix
    assert(b != 0x26); // Or the ES segment override prefix
    assert(b != 0x36); // Or the SS segment override prefix

    switch (b)
    {
        case 0xF2: // scalar double
        case 0xF3: // scalar single
        case 0x66: // packed double
            return true;
        default:
            return false;
    }
}
#endif //_TARGET_AMD64_
// Outputs VEX prefix (in case of AVX instructions) and REX.R/X/W/B otherwise.
// Outputs the VEX prefix (in case of AVX instructions) or a REX.R/X/W/B prefix
// otherwise. The prefix bytes are written at 'dst' and stripped out of 'code',
// which is updated in place; the return value is the number of bytes emitted.
unsigned emitter::emitOutputRexOrVexPrefixIfNeeded(instruction ins, BYTE* dst, size_t& code)
{
#ifdef _TARGET_AMD64_ // TODO-x86: This needs to be enabled for AVX support on x86.
    if (hasVexPrefix(code))
    {
        // Only AVX instructions should have a VEX prefix
        assert(UseAVX() && IsAVXInstruction(ins));
        // The three VEX prefix bytes live in bits [55:32] of 'code'.
        size_t vexPrefix = (code >> 32) & 0x00FFFFFF;
        code &= 0x00000000FFFFFFFFLL;
        // Escape byte(s) (0F / 0F 38 / 0F 3A) found in the opcode; they get
        // folded into the VEX m-mmmm field below instead of being emitted.
        WORD leadingBytes = 0;
        BYTE check = (code >> 24) & 0xFF;
        if (check != 0)
        {
            // 3-byte opcode: with the bytes ordered as 0x2211RM33 or
            // 4-byte opcode: with the bytes ordered as 0x22114433
            // check for a prefix in the 11 position
            BYTE sizePrefix = (code >> 16) & 0xFF;
            if (sizePrefix != 0 && isPrefix(sizePrefix))
            {
                // 'pp' bits in byte2 of VEX prefix allows us to encode SIMD size prefixes as two bits
                //
                //   00  - None   (0F    - packed float)
                //   01  - 66     (66 0F - packed double)
                //   10  - F3     (F3 0F - scalar float)
                //   11  - F2     (F2 0F - scalar double)
                switch (sizePrefix)
                {
                    case 0x66:
                        vexPrefix |= 0x01;
                        break;
                    case 0xF3:
                        vexPrefix |= 0x02;
                        break;
                    case 0xF2:
                        vexPrefix |= 0x03;
                        break;
                    default:
                        assert(!"unrecognized SIMD size prefix");
                        unreached();
                }
                // Now the byte in the 22 position must be an escape byte 0F
                leadingBytes = check;
                assert(leadingBytes == 0x0F);
                // Get rid of both sizePrefix and escape byte
                code &= 0x0000FFFFLL;
                // Check the byte in the 33 position to see if it is 3A or 38.
                // In such a case escape bytes must be 0x0F3A or 0x0F38
                check = code & 0xFF;
                if (check == 0x3A || check == 0x38)
                {
                    leadingBytes = (leadingBytes << 8) | check;
                    code &= 0x0000FF00LL;
                }
            }
        }
        else
        {
            // 2-byte opcode with the bytes ordered as 0x0011RM22
            // the byte in position 11 must be an escape byte.
            leadingBytes = (code >> 16) & 0xFF;
            assert(leadingBytes == 0x0F || leadingBytes == 0x00);
            code &= 0xFFFF;
        }
        // If there is an escape byte it must be 0x0F or 0x0F3A or 0x0F38
        // m-mmmmm bits in byte 1 of VEX prefix allows us to encode these
        // implied leading bytes
        switch (leadingBytes)
        {
            case 0x00:
                // there is no leading byte
                break;
            case 0x0F:
                vexPrefix |= 0x0100;
                break;
            case 0x0F38:
                vexPrefix |= 0x0200;
                break;
            case 0x0F3A:
                vexPrefix |= 0x0300;
                break;
            default:
                assert(!"encountered unknown leading bytes");
                unreached();
        }
        // At this point
        //     VEX.2211RM33 got transformed as VEX.0000RM33
        //     VEX.0011RM22 got transformed as VEX.0000RM22
        //
        // Now output VEX prefix leaving the 4-byte opcode
        emitOutputByte(dst, ((vexPrefix >> 16) & 0xFF));
        emitOutputByte(dst + 1, ((vexPrefix >> 8) & 0xFF));
        emitOutputByte(dst + 2, vexPrefix & 0xFF);
        return 3;
    }
    else if (code > 0x00FFFFFFFFLL)
    {
        // A REX prefix byte is stashed in bits [39:32] of 'code'.
        BYTE prefix = (code >> 32) & 0xFF;
        noway_assert(prefix >= 0x40 && prefix <= 0x4F);
        code &= 0x00000000FFFFFFFFLL;
        // TODO-AMD64-Cleanup: when we remove the prefixes (just the SSE opcodes right now)
        // we can remove this code as well
        // The REX prefix is required to come after all other prefixes.
        // Some of our 'opcodes' actually include some prefixes, if that
        // is the case, shift them over and place the REX prefix after
        // the other prefixes, and emit any prefix that got moved out.
        BYTE check = (code >> 24) & 0xFF;
        if (check == 0)
        {
            // 3-byte opcode: with the bytes ordered as 0x00113322
            // check for a prefix in the 11 position
            check = (code >> 16) & 0xFF;
            if (check != 0 && isPrefix(check))
            {
                // Swap the rex prefix and whatever this prefix is
                code = (((DWORD)prefix << 16) | (code & 0x0000FFFFLL));
                // and then emit the other prefix
                return emitOutputByte(dst, check);
            }
        }
        else
        {
            // 4-byte opcode with the bytes ordered as 0x22114433
            // first check for a prefix in the 11 position
            BYTE check2 = (code >> 16) & 0xFF;
            if (isPrefix(check2))
            {
                assert(!isPrefix(check)); // We currently don't use this, so it is untested
                if (isPrefix(check))
                {
                    // 3 prefixes were rex = rr, check = c1, check2 = c2 encoded as 0xrrc1c2XXXX
                    // Change to c2rrc1XXXX, and emit check2 now
                    code = (((size_t)prefix << 24) | ((size_t)check << 16) | (code & 0x0000FFFFLL));
                }
                else
                {
                    // 2 prefixes were rex = rr, check2 = c2 encoded as 0xrrXXc2XXXX, (check is part of the opcode)
                    // Change to c2XXrrXXXX, and emit check2 now
                    code = (((size_t)check << 24) | ((size_t)prefix << 16) | (code & 0x0000FFFFLL));
                }
                return emitOutputByte(dst, check2);
            }
        }
        // No embedded prefixes to reorder around; just emit the REX byte.
        return emitOutputByte(dst, prefix);
    }
#endif // _TARGET_AMD64_
    // No prefix needed (or non-AMD64 target).
    return 0;
}
#ifdef _TARGET_AMD64_
/*****************************************************************************
* Is the last instruction emitted a call instruction?
*/
/*****************************************************************************
 * Is the last instruction emitted a call instruction?
 */
bool emitter::emitIsLastInsCall()
{
    return (emitLastIns != nullptr) && (emitLastIns->idIns() == INS_call);
}
/*****************************************************************************
* We're about to create an epilog. If the last instruction we output was a 'call',
* then we need to insert a NOP, to allow for proper exception-handling behavior.
*/
/*****************************************************************************
 * We're about to create an epilog. If the last instruction we output was a
 * 'call', insert a NOP so that proper exception-handling behavior is
 * preserved (the return address must not point into the epilog).
 */
void emitter::emitOutputPreEpilogNOP()
{
    if (!emitIsLastInsCall())
    {
        return;
    }
    emitIns(INS_nop);
}
#endif //_TARGET_AMD64_
// Size of rex prefix in bytes
// Returns the size in bytes a REX prefix adds to this instruction.
// For AVX instructions the REX bits are carried inside the VEX prefix and
// cost nothing extra; otherwise a REX prefix is one byte.
unsigned emitter::emitGetRexPrefixSize(instruction ins)
{
    return IsAVXInstruction(ins) ? 0 : 1;
}
// Size of vex prefix in bytes
// Returns the size in bytes of the VEX prefix for this instruction:
// 3 for AVX instructions, 0 otherwise.
// TODO-XArch-CQ: right now we default to the 3-byte VEX prefix. There is
// scope for a size win by using the 2-byte VEX prefix for some of the
// scalar, AVX-128 and most common AVX-256 instructions.
unsigned emitter::emitGetVexPrefixSize(instruction ins, emitAttr attr)
{
    return IsAVXInstruction(ins) ? 3 : 0;
}
// The VEX prefix encodes some bytes of the opcode and, as a result, the overall size of the instruction shrinks.
// Therefore, simply adding the VEX prefix size to the size of the instruction opcode bytes will always overestimate.
// Instead this routine will adjust the size of VEX prefix based on the number of bytes of opcode it encodes so that
// instruction size estimate will be accurate.
// Basically this function will decrease the vexPrefixSize,
// so that opcodeSize + vexPrefixAdjustedSize will be the right size.
// rightOpcodeSize + vexPrefixSize
//=(opcodeSize - ExtrabytesSize) + vexPrefixSize
//=opcodeSize + (vexPrefixSize - ExtrabytesSize)
//=opcodeSize + vexPrefixAdjustedSize
// The VEX prefix encodes some bytes of the opcode (escape and SIMD size
// prefixes), so summing the raw opcode size and the VEX prefix size would
// overestimate the instruction size. This routine returns the VEX prefix size
// reduced by the number of opcode bytes the prefix absorbs, so that
//     opcodeSize + vexPrefixAdjustedSize
// is an accurate instruction-size estimate. Returns 0 for non-AVX
// instructions (and on builds without AVX support).
unsigned emitter::emitGetVexPrefixAdjustedSize(instruction ins, emitAttr attr, size_t code)
{
#ifdef FEATURE_AVX_SUPPORT
    if (IsAVXInstruction(ins))
    {
        unsigned vexPrefixAdjustedSize = emitGetVexPrefixSize(ins, attr);
        // Currently the VEX prefix size is hard coded as 3 bytes;
        // in future we should support the 2-byte VEX prefix.
        assert(vexPrefixAdjustedSize == 3);
        // The opcode contains at least one escape-prefix byte that the VEX
        // prefix absorbs, so reduce the adjusted size by one.
        vexPrefixAdjustedSize -= 1;
        // Get the fourth byte in the opcode.
        // If this byte is non-zero, then we should check whether the opcode contains a SIMD prefix or not.
        BYTE check = (code >> 24) & 0xFF;
        if (check != 0)
        {
            // 3-byte opcode: with the bytes ordered as 0x2211RM33 or
            // 4-byte opcode: with the bytes ordered as 0x22114433
            // The SIMD prefix is at the first byte.
            BYTE sizePrefix = (code >> 16) & 0xFF;
            if (sizePrefix != 0 && isPrefix(sizePrefix))
            {
                // The SIMD size prefix is absorbed into the VEX 'pp' bits.
                vexPrefixAdjustedSize -= 1;
            }
            // If the opcode size is 4 bytes, then the second escape prefix is at the fourth byte in the opcode.
            // But in this case the opcode has not counted the R\M part.
            //     opcodeSize + VexPrefixAdjustedSize - ExtraEscapePrefixSize + ModR\MSize
            //   = opcodeSize + VexPrefixAdjustedSize - 1 + 1
            //   = opcodeSize + VexPrefixAdjustedSize
            // So although we may have a second escape-prefix byte, we won't decrease vexPrefixAdjustedSize.
        }
        return vexPrefixAdjustedSize;
    }
#endif // FEATURE_AVX_SUPPORT
    return 0;
}
// Get size of rex or vex prefix emitted in code
// Returns the size in bytes of the REX or VEX prefix already folded into
// 'code': 3 for a VEX prefix, 1 for a REX prefix, 0 for neither.
unsigned emitter::emitGetPrefixSize(size_t code)
{
#ifdef FEATURE_AVX_SUPPORT
    if ((code & VEX_PREFIX_MASK_3BYTE) != 0)
    {
        return 3;
    }
#endif
    if ((code & REX_PREFIX_MASK) != 0)
    {
        return 1;
    }
    return 0;
}
#ifdef _TARGET_X86_
/*****************************************************************************
*
* Record a non-empty stack
*/
/*****************************************************************************
 *
 * Record a non-empty stack level for the current instruction group.
 */
void emitter::emitMarkStackLvl(unsigned stackLevel)
{
    assert(int(stackLevel) >= 0);
    assert(emitCurStackLvl == 0);
    assert(emitCurIG->igStkLvl == 0);
    assert(emitCurIGfreeNext == emitCurIGfreeBase);
    assert(stackLevel && stackLevel % sizeof(int) == 0);

    // Record the level both on the emitter and on the current insGroup.
    emitCurIG->igStkLvl = stackLevel;
    emitCurStackLvl     = stackLevel;

    // Track the high-water mark of the stack depth.
    if (emitMaxStackDepth < emitCurStackLvl)
    {
        emitMaxStackDepth = emitCurStackLvl;
    }
}
#endif
/*****************************************************************************
*
* Get hold of the address mode displacement value for an indirect call.
*/
/*****************************************************************************
 *
 * Get hold of the address-mode displacement value for an indirect call.
 */
inline ssize_t emitter::emitGetInsCIdisp(instrDesc* id)
{
    // Large calls keep the displacement in the extended descriptor.
    if (id->idIsLargeCall())
    {
        return ((instrDescCGCA*)id)->idcDisp;
    }

    // Otherwise it must fit in the small descriptor's address-mode field.
    assert(!id->idIsLargeDsp());
    assert(!id->idIsLargeCns());
    return id->idAddr()->iiaAddrMode.amDisp;
}
/** ***************************************************************************
*
* The following table is used by the instIsFP()/instUse/DefFlags() helpers.
*/
#define INST_DEF_FL 0x20 // does the instruction set flags?
#define INST_USE_FL 0x40 // does the instruction use flags?
// clang-format off
// Per-instruction flag table generated from instrs.h: for each instruction it
// packs whether it reads the CPU flags (INST_USE_FL), writes them
// (INST_DEF_FL), and whether it is an x87 FP instruction (INST_FP).
// Used by the instIsFP()/instUse/DefFlags() helpers.
// clang-format off
const BYTE CodeGenInterface::instInfo[] =
{
#define INST0(id, nm, fp, um, rf, wf, mr ) (INST_USE_FL*rf|INST_DEF_FL*wf|INST_FP*fp),
#define INST1(id, nm, fp, um, rf, wf, mr ) (INST_USE_FL*rf|INST_DEF_FL*wf|INST_FP*fp),
#define INST2(id, nm, fp, um, rf, wf, mr, mi ) (INST_USE_FL*rf|INST_DEF_FL*wf|INST_FP*fp),
#define INST3(id, nm, fp, um, rf, wf, mr, mi, rm ) (INST_USE_FL*rf|INST_DEF_FL*wf|INST_FP*fp),
#define INST4(id, nm, fp, um, rf, wf, mr, mi, rm, a4 ) (INST_USE_FL*rf|INST_DEF_FL*wf|INST_FP*fp),
#define INST5(id, nm, fp, um, rf, wf, mr, mi, rm, a4, rr ) (INST_USE_FL*rf|INST_DEF_FL*wf|INST_FP*fp),
#include "instrs.h"
#undef INST0
#undef INST1
#undef INST2
#undef INST3
#undef INST4
#undef INST5
};
// clang-format on
// clang-format on
/*****************************************************************************
*
* Initialize the table used by emitInsModeFormat().
*/
// clang-format off
// Per-instruction update-mode table generated from instrs.h (the 'um' column:
// read / write / read-write), used by emitInsModeFormat() to bias the base
// insFormat.
// clang-format off
const BYTE emitter::emitInsModeFmtTab[] =
{
#define INST0(id, nm, fp, um, rf, wf, mr ) um,
#define INST1(id, nm, fp, um, rf, wf, mr ) um,
#define INST2(id, nm, fp, um, rf, wf, mr, mi ) um,
#define INST3(id, nm, fp, um, rf, wf, mr, mi, rm ) um,
#define INST4(id, nm, fp, um, rf, wf, mr, mi, rm, a4 ) um,
#define INST5(id, nm, fp, um, rf, wf, mr, mi, rm, a4, rr) um,
#include "instrs.h"
#undef INST0
#undef INST1
#undef INST2
#undef INST3
#undef INST4
#undef INST5
};
// clang-format on
// clang-format on
#ifdef DEBUG
// Number of entries in emitInsModeFmtTab[]; used for bounds checks in debug builds.
unsigned const emitter::emitInsModeFmtCnt = sizeof(emitInsModeFmtTab) / sizeof(emitInsModeFmtTab[0]);
#endif
/*****************************************************************************
*
* Combine the given base format with the update mode of the instruction.
*/
/*****************************************************************************
 *
 * Combine the given base format with the update mode of the instruction.
 */
inline emitter::insFormat emitter::emitInsModeFormat(instruction ins, insFormat base)
{
    // The insFormat layout guarantees the update mode is a simple additive
    // offset from the base format (RD -> +0, WR -> +1, RW -> +2).
    assert(IF_RRD + IUM_RD == IF_RRD);
    assert(IF_RRD + IUM_WR == IF_RWR);
    assert(IF_RRD + IUM_RW == IF_RRW);

    const int updateMode = emitInsUpdateMode(ins);
    return (insFormat)(base + updateMode);
}
/*****************************************************************************
*
* A version of scInsModeFormat() that handles X87 floating-point instructions.
*/
#if FEATURE_STACK_FP_X87
// A version of emitInsModeFormat() that also handles X87 floating-point
// instructions: non-FP instructions defer to the integer overload, FP
// instructions select the load/store/compare variant of the given formats.
emitter::insFormat emitter::emitInsModeFormat(instruction ins, insFormat base, insFormat FPld, insFormat FPst)
{
    if (!CodeGen::instIsFP(ins))
    {
        return emitInsModeFormat(ins, base);
    }

    // The FP format groups are laid out as RD, WR (+1), RW (+2).
    assert(IF_TRD_SRD + 1 == IF_TWR_SRD);
    assert(IF_TRD_SRD + 2 == IF_TRW_SRD);
    assert(IF_TRD_MRD + 1 == IF_TWR_MRD);
    assert(IF_TRD_MRD + 2 == IF_TRW_MRD);
    assert(IF_TRD_ARD + 1 == IF_TWR_ARD);
    assert(IF_TRD_ARD + 2 == IF_TRW_ARD);

    switch (ins)
    {
        // Stores (including store-and-pop forms) use the store format.
        case INS_fst:
        case INS_fstp:
        case INS_fistp:
        case INS_fistpl:
            return (insFormat)(FPst);

        // Loads use the write variant of the load format.
        case INS_fld:
        case INS_fild:
            return (insFormat)(FPld + 1);

        // Compares only read their operand.
        case INS_fcomp:
        case INS_fcompp:
        case INS_fcomip:
            return (insFormat)(FPld);

        // Everything else reads and writes.
        default:
            return (insFormat)(FPld + 2);
    }
}
#endif // FEATURE_STACK_FP_X87
// This is a helper we need due to Vs Whidbey #254016 in order to distinguish
// if we can not possibly be updating an integer register. This is not the best
// solution, but the other ones (see bug) are going to be much more complicated.
// The issue here is that on legacy x86, the XMM registers use the same register numbers
// as the general purpose registers, so we need to distinguish them.
// We really only need this for x86 where this issue exists.
// This is a helper we need due to Vs Whidbey #254016 in order to distinguish
// if we can not possibly be updating an integer register. This is not the best
// solution, but the other ones (see bug) are going to be much more complicated.
// The issue here is that on legacy x86, the XMM registers use the same register
// numbers as the general purpose registers, so we need to distinguish them.
// We really only need this for x86 where this issue exists.
bool emitter::emitInsCanOnlyWriteSSE2OrAVXReg(instrDesc* id)
{
    instruction ins = id->idIns();

    // Anything outside the SSE/AVX range may update an integer register.
    if (!IsSSEOrAVXInstruction(ins))
    {
        return false;
    }

    // These SSE/AVX instructions write to a general purpose integer register.
    if ((ins == INS_mov_xmm2i) || (ins == INS_cvttsd2si))
    {
        return false;
    }
#ifndef LEGACY_BACKEND
    if ((ins == INS_cvttss2si) || (ins == INS_cvtsd2si) || (ins == INS_cvtss2si))
    {
        return false;
    }
#endif // !LEGACY_BACKEND

    return true;
}
/*****************************************************************************
*
* Returns the base encoding of the given CPU instruction.
*/
/*****************************************************************************
 *
 * Returns the base ("[r/m]") encoding of the given CPU instruction, from the
 * 'mr' column of instrs.h. Asserts if the instruction has no such encoding.
 */
inline size_t insCode(instruction ins)
{
    // clang-format off
    const static
    size_t          insCodes[] =
    {
#define INST0(id, nm, fp, um, rf, wf, mr ) mr,
#define INST1(id, nm, fp, um, rf, wf, mr ) mr,
#define INST2(id, nm, fp, um, rf, wf, mr, mi ) mr,
#define INST3(id, nm, fp, um, rf, wf, mr, mi, rm ) mr,
#define INST4(id, nm, fp, um, rf, wf, mr, mi, rm, a4 ) mr,
#define INST5(id, nm, fp, um, rf, wf, mr, mi, rm, a4, rr) mr,
#include "instrs.h"
#undef INST0
#undef INST1
#undef INST2
#undef INST3
#undef INST4
#undef INST5
    };
    // clang-format on

    assert((unsigned)ins < sizeof(insCodes) / sizeof(insCodes[0]));
    assert((insCodes[ins] != BAD_CODE));
    return insCodes[ins];
}
/*****************************************************************************
*
* Returns the "[r/m], 32-bit icon" encoding of the given CPU instruction.
*/
/*****************************************************************************
 *
 * Returns the "[r/m], 32-bit icon" encoding of the given CPU instruction,
 * from the 'mi' column of instrs.h. Only INST2+ instructions have this form;
 * asserts if the instruction has no such encoding.
 */
inline size_t insCodeMI(instruction ins)
{
    // clang-format off
    const static
    size_t          insCodesMI[] =
    {
#define INST0(id, nm, fp, um, rf, wf, mr )
#define INST1(id, nm, fp, um, rf, wf, mr )
#define INST2(id, nm, fp, um, rf, wf, mr, mi ) mi,
#define INST3(id, nm, fp, um, rf, wf, mr, mi, rm ) mi,
#define INST4(id, nm, fp, um, rf, wf, mr, mi, rm, a4 ) mi,
#define INST5(id, nm, fp, um, rf, wf, mr, mi, rm, a4, rr) mi,
#include "instrs.h"
#undef INST0
#undef INST1
#undef INST2
#undef INST3
#undef INST4
#undef INST5
    };
    // clang-format on

    assert((unsigned)ins < sizeof(insCodesMI) / sizeof(insCodesMI[0]));
    assert((insCodesMI[ins] != BAD_CODE));
    return insCodesMI[ins];
}
/*****************************************************************************
*
* Returns the "reg, [r/m]" encoding of the given CPU instruction.
*/
/*****************************************************************************
 *
 * Returns the "reg, [r/m]" encoding of the given CPU instruction, from the
 * 'rm' column of instrs.h. Only INST3+ instructions have this form; asserts
 * if the instruction has no such encoding.
 */
inline size_t insCodeRM(instruction ins)
{
    // clang-format off
    const static
    size_t          insCodesRM[] =
    {
#define INST0(id, nm, fp, um, rf, wf, mr )
#define INST1(id, nm, fp, um, rf, wf, mr )
#define INST2(id, nm, fp, um, rf, wf, mr, mi )
#define INST3(id, nm, fp, um, rf, wf, mr, mi, rm ) rm,
#define INST4(id, nm, fp, um, rf, wf, mr, mi, rm, a4 ) rm,
#define INST5(id, nm, fp, um, rf, wf, mr, mi, rm, a4, rr) rm,
#include "instrs.h"
#undef INST0
#undef INST1
#undef INST2
#undef INST3
#undef INST4
#undef INST5
    };
    // clang-format on

    assert((unsigned)ins < sizeof(insCodesRM) / sizeof(insCodesRM[0]));
    assert((insCodesRM[ins] != BAD_CODE));
    return insCodesRM[ins];
}
/*****************************************************************************
*
* Returns the "AL/AX/EAX, imm" accumulator encoding of the given instruction.
*/
inline size_t insCodeACC(instruction ins)
{
// clang-format off
const static
size_t insCodesACC[] =
{
#define INST0(id, nm, fp, um, rf, wf, mr )
#define INST1(id, nm, fp, um, rf, wf, mr )
#define INST2(id, nm, fp, um, rf, wf, mr, mi )
#define INST3(id, nm, fp, um, rf, wf, mr, mi, rm )
#define INST4(id, nm, fp, um, rf, wf, mr, mi, rm, a4 ) a4,
#define INST5(id, nm, fp, um, rf, wf, mr, mi, rm, a4, rr) a4,
#include "instrs.h"
#undef INST0
#undef INST1
#undef INST2
#undef INST3
#undef INST4
#undef INST5
};
// clang-format on