[X86] Improve variable 8-bit shifts on AVX512BW #164136
The existing implementation used three shifts by immediate amounts followed by selects. This commit changes the implementation to use two variable 16-bit shifts instead.
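As an illustration of the lane trick (a minimal scalar sketch, not code from this patch; the helper names are hypothetical and shift amounts are assumed to be in [0, 8)): each pair of adjacent bytes is treated as one 16-bit lane, two variable 16-bit shifts handle the even and odd byte independently, and a byte-granular merge with an odd-lane mask puts the halves back together, which is what the vpsllvw/vpsrlvw/vpsravw plus vmovdqu8 {%k1} sequences below do.

#include <cassert>
#include <cstdint>

// One 16-bit lane of the shl case.
static uint16_t shl_pair(uint8_t lo, uint8_t hi, unsigned amtLo, unsigned amtHi) {
  uint16_t w = uint16_t(lo) | uint16_t(hi) << 8;
  // Even (low) byte: a left shift only moves low-byte bits upward, so the
  // input needs no mask; keep just the low byte of the shifted word.
  uint8_t resLo = uint8_t(uint16_t(w << amtLo) & 0xFF);
  // Odd (high) byte: clear the low byte first so it cannot spill upward,
  // shift by the odd lane's amount, and keep the high byte.
  uint8_t resHi = uint8_t(uint16_t((w & 0xFF00u) << amtHi) >> 8);
  return uint16_t(resLo) | uint16_t(resHi) << 8; // the masked-merge step
}

// One 16-bit lane of the ashr case (assumes arithmetic >> on signed types,
// as on all mainstream compilers).
static uint16_t sra_pair(uint8_t lo, uint8_t hi, unsigned amtLo, unsigned amtHi) {
  uint16_t w = uint16_t(lo) | uint16_t(hi) << 8;
  // Even (low) byte: sign-extend it to 16 bits (the vpsllw $8 / vpsraw $8
  // pair), shift arithmetically, keep the low byte.
  int16_t lo16 = int16_t(int16_t(uint16_t(w << 8)) >> 8);
  uint8_t resLo = uint8_t(uint16_t(lo16 >> amtLo) & 0xFF);
  // Odd (high) byte: its sign bit is already the word's sign bit, so shift
  // the whole word arithmetically and keep the high byte.
  uint8_t resHi = uint8_t(uint16_t(int16_t(w) >> amtHi) >> 8);
  return uint16_t(resLo) | uint16_t(resHi) << 8;
}

int main() {
  for (unsigned lo = 0; lo < 256; ++lo)
    for (unsigned hi = 0; hi < 256; ++hi)
      for (unsigned a = 0; a < 8; ++a)
        for (unsigned b = 0; b < 8; ++b) {
          uint16_t s = shl_pair(uint8_t(lo), uint8_t(hi), a, b);
          assert(uint8_t(s) == uint8_t(lo << a) && uint8_t(s >> 8) == uint8_t(hi << b));
          uint16_t r = sra_pair(uint8_t(lo), uint8_t(hi), a, b);
          assert(uint8_t(r) == uint8_t(int8_t(lo) >> a) &&
                 uint8_t(r >> 8) == uint8_t(int8_t(hi) >> b));
        }
  return 0;
}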
@llvm/pr-subscribers-backend-x86
Author: Sp00ph (Sp00ph)
Changes
Previously, clang -march=znver5 -O3 would emit the following for shl, lshr and ashr <64 x i8>:
.LCPI0_2:
.byte 8
.byte 4
.byte 2
.byte 1
.byte 0
.byte 0
.byte 0
.byte 0
.LCPI0_3:
.byte 32
.byte 16
.byte 8
.byte 4
.byte 2
.byte 1
.byte 0
.byte 0
shl:
vpsllw zmm1, zmm1, 5
vpmovb2m k1, zmm1
vpaddb zmm1, zmm1, zmm1
vgf2p8affineqb zmm0 {k1}, zmm0, qword ptr [rip + .LCPI0_2]{1to8}, 0
vpmovb2m k1, zmm1
vpaddb zmm1, zmm1, zmm1
vgf2p8affineqb zmm0 {k1}, zmm0, qword ptr [rip + .LCPI0_3]{1to8}, 0
vpmovb2m k1, zmm1
vpaddb zmm0 {k1}, zmm0, zmm0
ret
.LCPI1_3:
.byte 0
.byte 0
.byte 0
.byte 0
.byte 128
.byte 64
.byte 32
.byte 16
.LCPI1_4:
.byte 0
.byte 0
.byte 128
.byte 64
.byte 32
.byte 16
.byte 8
.byte 4
.LCPI1_5:
.byte 0
.byte 128
.byte 64
.byte 32
.byte 16
.byte 8
.byte 4
.byte 2
lshr:
vpsllw zmm1, zmm1, 5
vpmovb2m k1, zmm1
vpaddb zmm1, zmm1, zmm1
vgf2p8affineqb zmm0 {k1}, zmm0, qword ptr [rip + .LCPI1_3]{1to8}, 0
vpmovb2m k1, zmm1
vpaddb zmm1, zmm1, zmm1
vgf2p8affineqb zmm0 {k1}, zmm0, qword ptr [rip + .LCPI1_4]{1to8}, 0
vpmovb2m k1, zmm1
vgf2p8affineqb zmm0 {k1}, zmm0, qword ptr [rip + .LCPI1_5]{1to8}, 0
ret
ashr:
vpsllw zmm1, zmm1, 5
vpunpckhbw zmm2, zmm0, zmm0
vpunpckhbw zmm4, zmm1, zmm1
vpsraw zmm3, zmm2, 4
vpunpcklbw zmm0, zmm0, zmm0
vpmovb2m k1, zmm4
vpaddw zmm4, zmm4, zmm4
vpunpcklbw zmm1, zmm1, zmm1
vmovdqu8 zmm2 {k1}, zmm3
vpmovb2m k1, zmm4
vpsraw zmm3, zmm2, 2
vpaddw zmm4, zmm4, zmm4
vmovdqu8 zmm2 {k1}, zmm3
vpsraw zmm3, zmm2, 1
vpmovb2m k1, zmm4
vmovdqu8 zmm2 {k1}, zmm3
vpmovb2m k1, zmm1
vpsraw zmm3, zmm0, 4
vpaddw zmm1, zmm1, zmm1
vpsrlw zmm2, zmm2, 8
vmovdqu8 zmm0 {k1}, zmm3
vpmovb2m k1, zmm1
vpsraw zmm3, zmm0, 2
vpaddw zmm1, zmm1, zmm1
vmovdqu8 zmm0 {k1}, zmm3
vpsraw zmm3, zmm0, 1
vpmovb2m k1, zmm1
vmovdqu8 zmm0 {k1}, zmm3
vpsrlw zmm0, zmm0, 8
vpackuswb zmm0, zmm0, zmm2
ret
With this commit, the generated assembly becomes this:
.LCPI0_2:
.byte 0
.byte 255
.byte 0
.byte 255
.LCPI0_3:
.byte 255
.byte 0
.byte 255
.byte 0
shl:
vpsrlw zmm2, zmm1, 8
vpandd zmm3, zmm0, dword ptr [rip + .LCPI0_2]{1to16}
vpandd zmm1, zmm1, dword ptr [rip + .LCPI0_3]{1to16}
movabs rax, -6148914691236517206
kmovq k1, rax
vpsllvw zmm2, zmm3, zmm2
vpsllvw zmm0, zmm0, zmm1
vmovdqu8 zmm0 {k1}, zmm2
ret
.LCPI1_0:
.byte 255
.byte 0
lshr:
vpbroadcastw zmm2, word ptr [rip + .LCPI1_0]
movabs rax, -6148914691236517206
kmovq k1, rax
vpandq zmm3, zmm1, zmm2
vpandq zmm2, zmm0, zmm2
vpsrlw zmm1, zmm1, 8
vpsrlvw zmm2, zmm2, zmm3
vpsrlvw zmm0, zmm0, zmm1
vmovdqu8 zmm2 {k1}, zmm0
vmovdqa64 zmm0, zmm2
ret
.LCPI2_1:
.byte 255
.byte 0
.byte 255
.byte 0
ashr:
vpsrlw zmm2, zmm1, 8
vpandd zmm1, zmm1, dword ptr [rip + .LCPI2_1]{1to16}
movabs rax, -6148914691236517206
vpsravw zmm2, zmm0, zmm2
vpsllw zmm0, zmm0, 8
kmovq k1, rax
vpsraw zmm0, zmm0, 8
vpsravw zmm0, zmm0, zmm1
vmovdqu8 zmm0 {k1}, zmm2
ret
While I don't have AVX512 hardware, llvm-mca suggests significant speedups, and I've done some simple correctness tests on random inputs using the Intel Software Development Emulator. I've also adjusted the affected codegen tests. I am, however, unsure about the test cases in gfni-shifts.ll, which now obviously don't use gfni anymore. Is it fine to just remove them?
Full diff: https://github.com/llvm/llvm-project/pull/164136.diff
5 Files Affected:
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index b05d7c7fd7da3..e829456a83c77 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -30968,6 +30968,76 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
return DAG.getNode(X86ISD::PACKUS, dl, VT, LoR, HiR);
}
+ if (VT == MVT::v64i8 && Subtarget.canExtendTo512BW()) {
+ // On AVX512BW, we can use variable 16-bit shifts to implement variable
+ // 8-bit shifts. For this, we split the input into two vectors, RLo and RHi.
+ // The i-th lane of RLo contains the (2*i)-th lane of R, and the i-th lane
+ // of RHi contains the (2*i+1)-th lane of R. After shifting, these vectors
+ // can efficiently be merged together using a masked move.
+ MVT ExtVT = MVT::v32i16;
+
+ // When used in a vectorshuffle, selects even-index lanes from the first
+ // vector and odd index lanes from the second vector.
+ SmallVector<int, 64> InterleaveIndices;
+ for (unsigned i = 0; i < 64; ++i) {
+ unsigned offset = (i % 2 == 0) ? 0 : 64;
+ InterleaveIndices.push_back(i + offset);
+ }
+
+ SDValue zero = DAG.getConstant(0, dl, VT);
+ SDValue eight = DAG.getTargetConstant(8, dl, MVT::i8);
+ SDValue RLo, RHi;
+
+ // Isolate lower and upper lanes of Amt by shuffling zeros into AmtLo and
+ // right shifting AmtHi.
+ SDValue AmtLo = DAG.getBitcast(
+ ExtVT, DAG.getVectorShuffle(VT, dl, Amt, zero, InterleaveIndices));
+ SDValue AmtHi = DAG.getNode(X86ISD::VSRLI, dl, ExtVT,
+ DAG.getBitcast(ExtVT, Amt), eight);
+ unsigned int ShiftOp;
+ switch (Opc) {
+ case ISD::SHL:
+ // Because we shift left, no bits from the high half can influence the low
+ // half, so we don't need to mask RLo. We do however need to mask RHi, to
+ // prevent high bits of an even lane overflowing into low bits of an odd
+ // lane.
+ RLo = DAG.getBitcast(ExtVT, R);
+ RHi = DAG.getBitcast(
+ ExtVT, DAG.getVectorShuffle(VT, dl, zero, R, InterleaveIndices));
+ ShiftOp = X86ISD::VSHLV;
+ break;
+ case ISD::SRL:
+ // Same idea as above, but this time we need to make sure no low bits of
+ // an odd lane can overflow into high bits of an even lane.
+ RLo = DAG.getBitcast(
+ ExtVT, DAG.getVectorShuffle(VT, dl, R, zero, InterleaveIndices));
+ RHi = DAG.getBitcast(ExtVT, R);
+ ShiftOp = X86ISD::VSRLV;
+ break;
+ case ISD::SRA:
+ // For arithmetic right shifts, we want to sign extend each even lane of R
+ // such that the upper half of the corresponding lane of RLo is 0 or -1
+ // depending on the sign bit of the original lane. We do this using 2
+ // immediate shifts.
+ RHi = DAG.getBitcast(ExtVT, R);
+ RLo = DAG.getNode(X86ISD::VSHLI, dl, ExtVT, RHi, eight);
+ RLo = DAG.getNode(X86ISD::VSRAI, dl, ExtVT, RLo, eight);
+ ShiftOp = X86ISD::VSRAV;
+ break;
+ default:
+ llvm_unreachable("Unexpected Shift Op");
+ return SDValue();
+ }
+
+ SDValue ShiftedLo =
+ DAG.getBitcast(VT, DAG.getNode(ShiftOp, dl, ExtVT, RLo, AmtLo));
+ SDValue ShiftedHi =
+ DAG.getBitcast(VT, DAG.getNode(ShiftOp, dl, ExtVT, RHi, AmtHi));
+
+ return DAG.getVectorShuffle(VT, dl, ShiftedLo, ShiftedHi,
+ InterleaveIndices);
+ }
+
if (VT == MVT::v16i8 ||
(VT == MVT::v32i8 && Subtarget.hasInt256() && !Subtarget.hasXOP()) ||
(VT == MVT::v64i8 && Subtarget.hasBWI())) {
diff --git a/llvm/test/CodeGen/X86/gfni-shifts.ll b/llvm/test/CodeGen/X86/gfni-shifts.ll
index feac3dcad243a..abcf7ce6aa098 100644
--- a/llvm/test/CodeGen/X86/gfni-shifts.ll
+++ b/llvm/test/CodeGen/X86/gfni-shifts.ll
@@ -1684,15 +1684,14 @@ define <64 x i8> @var_shl_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
;
; GFNIAVX512BW-LABEL: var_shl_v64i8:
; GFNIAVX512BW: # %bb.0:
-; GFNIAVX512BW-NEXT: vpsllw $5, %zmm1, %zmm1
-; GFNIAVX512BW-NEXT: vpmovb2m %zmm1, %k1
-; GFNIAVX512BW-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 {%k1}
-; GFNIAVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
-; GFNIAVX512BW-NEXT: vpmovb2m %zmm1, %k1
-; GFNIAVX512BW-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 {%k1}
-; GFNIAVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
-; GFNIAVX512BW-NEXT: vpmovb2m %zmm1, %k1
-; GFNIAVX512BW-NEXT: vpaddb %zmm0, %zmm0, %zmm0 {%k1}
+; GFNIAVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm2
+; GFNIAVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm3
+; GFNIAVX512BW-NEXT: vpsllvw %zmm2, %zmm3, %zmm2
+; GFNIAVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm1
+; GFNIAVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
+; GFNIAVX512BW-NEXT: movabsq $-6148914691236517206, %rax # imm = 0xAAAAAAAAAAAAAAAA
+; GFNIAVX512BW-NEXT: kmovq %rax, %k1
+; GFNIAVX512BW-NEXT: vmovdqu8 %zmm2, %zmm0 {%k1}
; GFNIAVX512BW-NEXT: retq
%shift = shl <64 x i8> %a, %b
ret <64 x i8> %shift
@@ -1876,15 +1875,16 @@ define <64 x i8> @var_lshr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
;
; GFNIAVX512BW-LABEL: var_lshr_v64i8:
; GFNIAVX512BW: # %bb.0:
-; GFNIAVX512BW-NEXT: vpsllw $5, %zmm1, %zmm1
-; GFNIAVX512BW-NEXT: vpmovb2m %zmm1, %k1
-; GFNIAVX512BW-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 {%k1}
-; GFNIAVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
-; GFNIAVX512BW-NEXT: vpmovb2m %zmm1, %k1
-; GFNIAVX512BW-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 {%k1}
-; GFNIAVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
-; GFNIAVX512BW-NEXT: vpmovb2m %zmm1, %k1
-; GFNIAVX512BW-NEXT: vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0 {%k1}
+; GFNIAVX512BW-NEXT: vpbroadcastw {{.*#+}} zmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; GFNIAVX512BW-NEXT: vpandq %zmm2, %zmm1, %zmm3
+; GFNIAVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm2
+; GFNIAVX512BW-NEXT: vpsrlvw %zmm3, %zmm2, %zmm2
+; GFNIAVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
+; GFNIAVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
+; GFNIAVX512BW-NEXT: movabsq $-6148914691236517206, %rax # imm = 0xAAAAAAAAAAAAAAAA
+; GFNIAVX512BW-NEXT: kmovq %rax, %k1
+; GFNIAVX512BW-NEXT: vmovdqu8 %zmm0, %zmm2 {%k1}
+; GFNIAVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
; GFNIAVX512BW-NEXT: retq
%shift = lshr <64 x i8> %a, %b
ret <64 x i8> %shift
@@ -2232,36 +2232,15 @@ define <64 x i8> @var_ashr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
;
; GFNIAVX512BW-LABEL: var_ashr_v64i8:
; GFNIAVX512BW: # %bb.0:
-; GFNIAVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
-; GFNIAVX512BW-NEXT: vpsraw $4, %zmm2, %zmm3
-; GFNIAVX512BW-NEXT: vpsllw $5, %zmm1, %zmm1
-; GFNIAVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm4 = zmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
-; GFNIAVX512BW-NEXT: vpmovb2m %zmm4, %k1
-; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1}
-; GFNIAVX512BW-NEXT: vpsraw $2, %zmm2, %zmm3
-; GFNIAVX512BW-NEXT: vpaddw %zmm4, %zmm4, %zmm5
-; GFNIAVX512BW-NEXT: vpmovb2m %zmm5, %k1
-; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1}
-; GFNIAVX512BW-NEXT: vpsraw $1, %zmm2, %zmm3
-; GFNIAVX512BW-NEXT: vpsllw $2, %zmm4, %zmm4
-; GFNIAVX512BW-NEXT: vpmovb2m %zmm4, %k1
-; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1}
-; GFNIAVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
-; GFNIAVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
-; GFNIAVX512BW-NEXT: vpsraw $4, %zmm0, %zmm3
-; GFNIAVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
-; GFNIAVX512BW-NEXT: vpmovb2m %zmm1, %k1
-; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1}
-; GFNIAVX512BW-NEXT: vpsraw $2, %zmm0, %zmm3
-; GFNIAVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm4
-; GFNIAVX512BW-NEXT: vpmovb2m %zmm4, %k1
-; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1}
-; GFNIAVX512BW-NEXT: vpsraw $1, %zmm0, %zmm3
-; GFNIAVX512BW-NEXT: vpsllw $2, %zmm1, %zmm1
-; GFNIAVX512BW-NEXT: vpmovb2m %zmm1, %k1
-; GFNIAVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1}
-; GFNIAVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0
-; GFNIAVX512BW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
+; GFNIAVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm2
+; GFNIAVX512BW-NEXT: vpsravw %zmm2, %zmm0, %zmm2
+; GFNIAVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm1
+; GFNIAVX512BW-NEXT: vpsllw $8, %zmm0, %zmm0
+; GFNIAVX512BW-NEXT: vpsraw $8, %zmm0, %zmm0
+; GFNIAVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
+; GFNIAVX512BW-NEXT: movabsq $-6148914691236517206, %rax # imm = 0xAAAAAAAAAAAAAAAA
+; GFNIAVX512BW-NEXT: kmovq %rax, %k1
+; GFNIAVX512BW-NEXT: vmovdqu8 %zmm2, %zmm0 {%k1}
; GFNIAVX512BW-NEXT: retq
%shift = ashr <64 x i8> %a, %b
ret <64 x i8> %shift
diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-512.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-512.ll
index 0fb0420bb2609..d4bb8835c5d9e 100644
--- a/llvm/test/CodeGen/X86/vector-shift-ashr-512.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-ashr-512.ll
@@ -106,36 +106,15 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
;
; AVX512BW-LABEL: var_shift_v64i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
-; AVX512BW-NEXT: vpsraw $4, %zmm2, %zmm3
-; AVX512BW-NEXT: vpsllw $5, %zmm1, %zmm1
-; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm4 = zmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
-; AVX512BW-NEXT: vpmovb2m %zmm4, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1}
-; AVX512BW-NEXT: vpsraw $2, %zmm2, %zmm3
-; AVX512BW-NEXT: vpaddw %zmm4, %zmm4, %zmm5
-; AVX512BW-NEXT: vpmovb2m %zmm5, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1}
-; AVX512BW-NEXT: vpsraw $1, %zmm2, %zmm3
-; AVX512BW-NEXT: vpsllw $2, %zmm4, %zmm4
-; AVX512BW-NEXT: vpmovb2m %zmm4, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm2 {%k1}
-; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
-; AVX512BW-NEXT: vpsraw $4, %zmm0, %zmm3
-; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
-; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1}
-; AVX512BW-NEXT: vpsraw $2, %zmm0, %zmm3
-; AVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm4
-; AVX512BW-NEXT: vpmovb2m %zmm4, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1}
-; AVX512BW-NEXT: vpsraw $1, %zmm0, %zmm3
-; AVX512BW-NEXT: vpsllw $2, %zmm1, %zmm1
-; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm3, %zmm0 {%k1}
-; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0
-; AVX512BW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm2
+; AVX512BW-NEXT: vpsravw %zmm2, %zmm0, %zmm2
+; AVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm1
+; AVX512BW-NEXT: vpsllw $8, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsraw $8, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: movabsq $-6148914691236517206, %rax # imm = 0xAAAAAAAAAAAAAAAA
+; AVX512BW-NEXT: kmovq %rax, %k1
+; AVX512BW-NEXT: vmovdqu8 %zmm2, %zmm0 {%k1}
; AVX512BW-NEXT: retq
%shift = ashr <64 x i8> %a, %b
ret <64 x i8> %shift
diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-512.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-512.ll
index 103d5702fb93a..38ac0f8ea6f8a 100644
--- a/llvm/test/CodeGen/X86/vector-shift-lshr-512.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-lshr-512.ll
@@ -85,21 +85,16 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
;
; AVX512BW-LABEL: var_shift_v64i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm2
-; AVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm2
-; AVX512BW-NEXT: vpsllw $5, %zmm1, %zmm1
-; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm2, %zmm0 {%k1}
-; AVX512BW-NEXT: vpsrlw $2, %zmm0, %zmm2
-; AVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm2
-; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
-; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm2, %zmm0 {%k1}
-; AVX512BW-NEXT: vpsrlw $1, %zmm0, %zmm2
-; AVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm2
-; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
-; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm2, %zmm0 {%k1}
+; AVX512BW-NEXT: vpbroadcastw {{.*#+}} zmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; AVX512BW-NEXT: vpandq %zmm2, %zmm1, %zmm3
+; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm2
+; AVX512BW-NEXT: vpsrlvw %zmm3, %zmm2, %zmm2
+; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
+; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: movabsq $-6148914691236517206, %rax # imm = 0xAAAAAAAAAAAAAAAA
+; AVX512BW-NEXT: kmovq %rax, %k1
+; AVX512BW-NEXT: vmovdqu8 %zmm0, %zmm2 {%k1}
+; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
; AVX512BW-NEXT: retq
%shift = lshr <64 x i8> %a, %b
ret <64 x i8> %shift
diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-512.ll b/llvm/test/CodeGen/X86/vector-shift-shl-512.ll
index efd742956ed09..1fca3a2682b21 100644
--- a/llvm/test/CodeGen/X86/vector-shift-shl-512.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-shl-512.ll
@@ -82,19 +82,14 @@ define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
;
; AVX512BW-LABEL: var_shift_v64i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm2
-; AVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm2
-; AVX512BW-NEXT: vpsllw $5, %zmm1, %zmm1
-; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm2, %zmm0 {%k1}
-; AVX512BW-NEXT: vpsllw $2, %zmm0, %zmm2
-; AVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm2
-; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
-; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
+; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm2
+; AVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm3
+; AVX512BW-NEXT: vpsllvw %zmm2, %zmm3, %zmm2
+; AVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm1
+; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: movabsq $-6148914691236517206, %rax # imm = 0xAAAAAAAAAAAAAAAA
+; AVX512BW-NEXT: kmovq %rax, %k1
; AVX512BW-NEXT: vmovdqu8 %zmm2, %zmm0 {%k1}
-; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm1
-; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
-; AVX512BW-NEXT: vpaddb %zmm0, %zmm0, %zmm0 {%k1}
; AVX512BW-NEXT: retq
%shift = shl <64 x i8> %a, %b
ret <64 x i8> %shift
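One note on the movabsq $-6148914691236517206 that shows up in every new sequence: that value is 0xAAAAAAAAAAAAAAAA, a 64-bit k-mask with every odd bit set, so the vmovdqu8 ... {%k1} merge takes the odd byte lanes from the vector shifted by the high half of each 16-bit amount and keeps the even byte lanes of the other. A standalone sanity check of the constant (hypothetical snippet, not part of the patch):

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t k = 0;
  for (int byteLane = 1; byteLane < 64; byteLane += 2)
    k |= uint64_t(1) << byteLane; // one mask bit per byte lane; odd lanes set
  // Prints 0xAAAAAAAAAAAAAAAA and its signed value -6148914691236517206.
  std::printf("0x%016llX %lld\n", (unsigned long long)k, (long long)k);
  return 0;
}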
thanks for this - a few comments
Ping 🙂
Ping |
break;
default:
llvm_unreachable("Unexpected Shift Op");
return SDValue();
(unnecessary) - drop the return
LGTM - cheers