@@ -3829,6 +3829,14 @@ void Assembler::evpermi2q(XMMRegister dst, XMMRegister nds, XMMRegister src, int
   emit_int16(0x76, (0xC0 | encode));
 }
 
+void Assembler::evpermt2b(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+  assert(VM_Version::supports_avx512_vbmi(), "");
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+  attributes.set_is_evex_instruction();
+  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+  emit_int16(0x7D, (0xC0 | encode));
+}
+
 void Assembler::pause() {
   emit_int16((unsigned char)0xF3, (unsigned char)0x90);
 }
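Reviewer note on the new evpermt2b: VPERMT2B (AVX-512 VBMI, opcode 0F38 7D) permutes bytes drawn from the concatenation of two tables, overwriting the first table; here dst supplies table A and receives the result, nds supplies the per-byte indices, and src supplies table B. A scalar sketch of the 512-bit semantics (illustrative C++, not part of the patch):

#include <cstdint>

// Scalar model of VPERMT2B zmm1, zmm2, zmm3 (512-bit form).
// On entry 'dst' holds table A; 'idx' holds the selectors; 'src' is table B.
void vpermt2b_model(uint8_t dst[64], const uint8_t idx[64], const uint8_t src[64]) {
  uint8_t table_a[64];
  for (int i = 0; i < 64; i++) table_a[i] = dst[i];      // snapshot original dst
  for (int i = 0; i < 64; i++) {
    int sel = idx[i] & 0x3F;                             // low 6 bits: byte slot
    dst[i] = (idx[i] & 0x40) ? src[sel] : table_a[sel];  // bit 6 picks the table
  }
}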
@@ -4548,6 +4556,15 @@ void Assembler::vpmaddwd(XMMRegister dst, XMMRegister nds, XMMRegister src, int
   emit_int16((unsigned char)0xF5, (0xC0 | encode));
 }
 
+void Assembler::vpmaddubsw(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
+  assert(vector_len == AVX_128bit ? VM_Version::supports_avx()      :
+         vector_len == AVX_256bit ? VM_Version::supports_avx2()     :
+         vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : 0, "");
+  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
+  int encode = simd_prefix_and_encode(dst, src1, src2, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+  emit_int16(0x04, (0xC0 | encode));
+}
+
 void Assembler::evpdpwssd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
   assert(VM_Version::supports_evex(), "");
   assert(VM_Version::supports_avx512_vnni(), "must support vnni");
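For reference, VPMADDUBSW (opcode 0F38 04) multiplies unsigned bytes from the first source by the corresponding signed bytes from the second, then adds adjacent byte products into signed 16-bit lanes with saturation. A one-lane scalar sketch (illustrative, not part of the patch):

#include <cstdint>

// One 16-bit output lane of VPMADDUBSW: a0/a1 are unsigned bytes from src1,
// b0/b1 the corresponding signed bytes from src2; the pair sum saturates.
int16_t vpmaddubsw_lane(uint8_t a0, uint8_t a1, int8_t b0, int8_t b1) {
  int32_t sum = (int32_t)a0 * b0 + (int32_t)a1 * b1;
  if (sum > INT16_MAX) return INT16_MAX;  // signed saturation
  if (sum < INT16_MIN) return INT16_MIN;
  return (int16_t)sum;
}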
@@ -4856,6 +4873,15 @@ void Assembler::vptest(XMMRegister dst, XMMRegister src, int vector_len) {
   emit_int16(0x17, (0xC0 | encode));
 }
 
+void Assembler::evptestmb(KRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
+  assert(VM_Version::supports_avx512vlbw(), "");
+  // Encoding: EVEX.NDS.XXX.66.0F38.W0 26 /r
+  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
+  attributes.set_is_evex_instruction();
+  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
+  emit_int16((unsigned char)0x26, (0xC0 | encode));
+}
+
 void Assembler::punpcklbw(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
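For reference, VPTESTMB (opcode 0F38 26) ANDs the two vector operands byte-wise and sets the destination mask bit for each nonzero result. A scalar sketch of the 512-bit form (illustrative, not part of the patch):

#include <cstdint>

// Scalar model of VPTESTMB k, zmm1, zmm2 (512-bit form): mask bit i is
// set iff the byte-wise AND of the operands is nonzero in byte i.
uint64_t vptestmb_model(const uint8_t a[64], const uint8_t b[64]) {
  uint64_t k = 0;
  for (int i = 0; i < 64; i++) {
    if ((a[i] & b[i]) != 0) k |= (uint64_t)1 << i;
  }
  return k;
}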
@@ -9403,6 +9429,13 @@ void Assembler::shlxq(Register dst, Register src1, Register src2) {
   emit_int16((unsigned char)0xF7, (0xC0 | encode));
 }
 
+void Assembler::shrxl(Register dst, Register src1, Register src2) {
+  assert(VM_Version::supports_bmi2(), "");
+  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
+  int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
+  emit_int16((unsigned char)0xF7, (0xC0 | encode));
+}
+
 void Assembler::shrxq(Register dst, Register src1, Register src2) {
   assert(VM_Version::supports_bmi2(), "");
   InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
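For reference, SHRX (BMI2) performs a logical right shift without touching the flags; per the operand order passed to vex_prefix_and_encode above, src1 supplies the value and src2 the count. A scalar sketch of the 32-bit form (illustrative, not part of the patch):

#include <cstdint>

// Scalar model of SHRXL dst, src1, src2: logical right shift of the 32-bit
// value by the count masked to 5 bits; unlike SHR, EFLAGS are untouched.
uint32_t shrxl_model(uint32_t value, uint32_t count) {
  return value >> (count & 31);  // count is taken modulo 32
}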