@@ -3830,6 +3830,14 @@ void Assembler::evpermi2q(XMMRegister dst, XMMRegister nds, XMMRegister src, int
   emit_int16(0x76, (0xC0 | encode));
 }
 
+void Assembler::evpermt2b(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+  assert(VM_Version::supports_avx512_vbmi(), "");
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+  attributes.set_is_evex_instruction();
+  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+  emit_int16(0x7D, (0xC0 | encode));
+}
+
 void Assembler::pause() {
   emit_int16((unsigned char)0xF3, (unsigned char)0x90);
 }
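Side note on the new emitter (not part of the patch): VPERMT2B (AVX-512 VBMI) is a two-table byte permute. With the operand order used here, dst supplies the low half of the table and is overwritten with the result, nds supplies the byte indices, and src supplies the high half. A hypothetical use from a stub generator, assuming a 128-byte lookup table at r10 and indices already in xmm2, might look like:

    __ evmovdquq(xmm1, Address(r10, 0), Assembler::AVX_512bit);   // table bytes 0..63
    __ evmovdquq(xmm3, Address(r10, 64), Assembler::AVX_512bit);  // table bytes 64..127
    __ evpermt2b(xmm1, xmm2, xmm3, Assembler::AVX_512bit);        // xmm1[i] = table[xmm2[i] & 0x7f]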
@@ -4549,6 +4557,15 @@ void Assembler::vpmaddwd(XMMRegister dst, XMMRegister nds, XMMRegister src, int
   emit_int16((unsigned char)0xF5, (0xC0 | encode));
 }
 
+void Assembler::vpmaddubsw(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
+  assert(vector_len == AVX_128bit ? VM_Version::supports_avx()      :
+         vector_len == AVX_256bit ? VM_Version::supports_avx2()     :
+         vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : 0, "");
+  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+  int encode = simd_prefix_and_encode(dst, src1, src2, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+  emit_int16(0x04, (0xC0 | encode));
+}
+
 void Assembler::evpdpwssd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_evex(), "");
   assert(VM_Version::supports_avx512_vnni(), "must support vnni");
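For context (not part of the patch): VPMADDUBSW multiplies the unsigned bytes of the first source by the corresponding signed bytes of the second source and horizontally adds each adjacent pair of products into a saturated signed 16-bit lane. A hypothetical call with illustrative register choices:

    // dst word i = SatS16(u8(a[2i])*s8(b[2i]) + u8(a[2i+1])*s8(b[2i+1]))
    __ vpmaddubsw(xmm2, xmm0, xmm1, Assembler::AVX_256bit);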
@@ -4857,6 +4874,15 @@ void Assembler::vptest(XMMRegister dst, XMMRegister src, int vector_len) {
   emit_int16(0x17, (0xC0 | encode));
 }
 
+void Assembler::evptestmb(KRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+  assert(VM_Version::supports_avx512vlbw(), "");
+  // Encoding: EVEX.NDS.XXX.66.0F38.W0 26 /r
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+  attributes.set_is_evex_instruction();
+  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+  emit_int16((unsigned char)0x26, (0xC0 | encode));
+}
+
 void Assembler::punpcklbw(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
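For context (not part of the patch): VPTESTMB ANDs the byte lanes of the two vector sources and sets the corresponding opmask bit wherever the result is non-zero. A hypothetical scan over a 64-byte chunk, assuming the byte mask is already loaded in xmm1 and L_found is a local label:

    __ evptestmb(k2, xmm0, xmm1, Assembler::AVX_512bit);  // k2[i] = (xmm0[i] & xmm1[i]) != 0
    __ kortestql(k2, k2);                                 // ZF = 1 iff no lane matched
    __ jcc(Assembler::notZero, L_found);                  // some byte hit the mask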
@@ -9410,6 +9436,13 @@ void Assembler::shlxq(Register dst, Register src1, Register src2) {
   emit_int16((unsigned char)0xF7, (0xC0 | encode));
 }
 
+void Assembler::shrxl(Register dst, Register src1, Register src2) {
+  assert(VM_Version::supports_bmi2(), "");
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
+  int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
+  emit_int16((unsigned char)0xF7, (0xC0 | encode));
+}
+
 void Assembler::shrxq(Register dst, Register src1, Register src2) {
   assert(VM_Version::supports_bmi2(), "");
   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
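For context (not part of the patch): SHRX is the BMI2 flagless logical right shift; this adds the 32-bit form alongside the existing shrxq (vex_w 0 instead of 1). Note the operand roles in this emitter: src1 is the value to shift, src2 the count. A hypothetical call:

    __ shrxl(rax, rbx, rcx);  // rax = rbx >> (rcx & 31); EFLAGS left untouched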