[X86] Combine concat(shufps,shufps) -> shufps(concat,concat)
Now that rG18c19441d105 has improved VPERM2X128 handling, we can perform this fold to improve i64->i32 truncation without introducing poor cross-lane shuffles.

Someday combineX86ShufflesRecursively will handle this, but we're still really bad at dealing with different vector widths.
Committed by RKSimon on Mar 21, 2020
Parent: 7a62ea3, commit 4ceade0
Showing 13 changed files with 217 additions and 307 deletions.
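
To make the fold concrete, here is a minimal sketch of the kind of i64->i32 element truncation it targets. The function name, file name, and llc invocation are illustrative assumptions, not part of this commit:

; Assumed reproduction, e.g.: llc -mtriple=x86_64-unknown-unknown -mattr=+avx2 trunc-shufps.ll
; Illustrative function, not taken from the test suite.
define <8 x i32> @trunc_v8i64_v8i32(<8 x i64> %x) {
  %t = trunc <8 x i64> %x to <8 x i32>
  ret <8 x i32> %t
}

Before this change, the AVX1/AVX2 tests below truncated each 256-bit source separately (vextractf128 plus an xmm vshufps selecting the even f32 lanes) and re-joined the halves with vinsertf128; after it, the shuffle operands are concatenated first so a single 256-bit vshufps (fed by vperm2f128/vinsertf128) produces the result. Whether this exact sequence appears for a given input depends on subtarget shuffle preferences.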
18 changes: 18 additions & 0 deletions llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -46402,6 +46402,24 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
      })) {
    unsigned NumOps = Ops.size();
    switch (Op0.getOpcode()) {
    case X86ISD::SHUFP: {
      // Add SHUFPD support if/when necessary.
      if (!IsSplat && VT.getScalarType() == MVT::f32 &&
          llvm::all_of(Ops, [Op0](SDValue Op) {
            return Op.getOperand(2) == Op0.getOperand(2);
          })) {
        SmallVector<SDValue, 2> LHS, RHS;
        for (unsigned i = 0; i != NumOps; ++i) {
          LHS.push_back(Ops[i].getOperand(0));
          RHS.push_back(Ops[i].getOperand(1));
        }
        return DAG.getNode(Op0.getOpcode(), DL, VT,
                           DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LHS),
                           DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, RHS),
                           Op0.getOperand(2));
      }
      break;
    }
    case X86ISD::PSHUFHW:
    case X86ISD::PSHUFLW:
    case X86ISD::PSHUFD:
18 changes: 6 additions & 12 deletions llvm/test/CodeGen/X86/masked_store_trunc.ll
@@ -163,11 +163,9 @@ define void @truncstore_v8i64_v8i32(<8 x i64> %x, <8 x i32>* %p, <8 x i32> %mask
; AVX1-NEXT: vpcmpeqd %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpxor %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm0[2,3],ymm1[2,3]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm3[0,2],ymm0[4,6],ymm3[4,6]
; AVX1-NEXT: vmaskmovps %ymm0, %ymm2, (%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
@@ -178,11 +176,9 @@ define void @truncstore_v8i64_v8i32(<8 x i64> %x, <8 x i32>* %p, <8 x i32> %mask
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm0[2,3],ymm1[2,3]
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm3[0,2],ymm0[4,6],ymm3[4,6]
; AVX2-NEXT: vpmaskmovd %ymm0, %ymm2, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
@@ -457,11 +453,9 @@ define void @truncstore_v8i64_v8i16(<8 x i64> %x, <8 x i16>* %p, <8 x i32> %mask
; AVX2-LABEL: truncstore_v8i64_v8i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm4[0,2]
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm0[2,3],ymm1[2,3]
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm4[0,2],ymm0[4,6],ymm4[4,6]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm2, %ymm1
36 changes: 17 additions & 19 deletions llvm/test/CodeGen/X86/masked_store_trunc_ssat.ll
@@ -300,26 +300,26 @@ define void @truncstore_v8i64_v8i32(<8 x i64> %x, <8 x i32>* %p, <8 x i32> %mask
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [2147483647,2147483647]
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm9
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm4, %xmm6
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm7
; AVX1-NEXT: vpcmpgtq %xmm7, %xmm4, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm6, %xmm4, %xmm7
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm4, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm4, %xmm5
; AVX1-NEXT: vblendvpd %xmm5, %xmm0, %xmm4, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [18446744071562067968,18446744071562067968]
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm0, %xmm10
; AVX1-NEXT: vblendvpd %xmm2, %xmm7, %xmm4, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm2, %xmm7
; AVX1-NEXT: vblendvpd %xmm6, %xmm1, %xmm4, %xmm1
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm1, %xmm6
; AVX1-NEXT: vblendvpd %xmm2, %xmm1, %xmm4, %xmm1
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm1, %xmm2
; AVX1-NEXT: vblendvpd %xmm7, %xmm6, %xmm4, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm7
; AVX1-NEXT: vblendvpd %xmm9, %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm3, %xmm4
; AVX1-NEXT: vblendvpd %xmm4, %xmm3, %xmm5, %xmm3
; AVX1-NEXT: vblendvpd %xmm6, %xmm1, %xmm5, %xmm1
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
; AVX1-NEXT: vblendvpd %xmm7, %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vblendvpd %xmm7, %xmm6, %xmm5, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
; AVX1-NEXT: vblendvpd %xmm2, %xmm1, %xmm5, %xmm1
; AVX1-NEXT: vblendvpd %xmm10, %xmm0, %xmm5, %xmm0
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm3[0,2],ymm0[4,6],ymm3[4,6]
; AVX1-NEXT: vmaskmovps %ymm0, %ymm8, (%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
@@ -331,20 +331,18 @@ define void @truncstore_v8i64_v8i32(<8 x i64> %x, <8 x i32>* %p, <8 x i32> %mask
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [2147483647,2147483647,2147483647,2147483647]
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm3, %ymm4
; AVX2-NEXT: vblendvpd %ymm4, %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm3, %ymm4
; AVX2-NEXT: vblendvpd %ymm4, %ymm0, %ymm3, %ymm0
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm3, %ymm4
; AVX2-NEXT: vblendvpd %ymm4, %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [18446744071562067968,18446744071562067968,18446744071562067968,18446744071562067968]
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm0, %ymm4
; AVX2-NEXT: vblendvpd %ymm4, %ymm0, %ymm3, %ymm0
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm1, %ymm4
; AVX2-NEXT: vblendvpd %ymm4, %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm0, %ymm4
; AVX2-NEXT: vblendvpd %ymm4, %ymm0, %ymm3, %ymm0
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm0[2,3],ymm1[2,3]
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm3[0,2],ymm0[4,6],ymm3[4,6]
; AVX2-NEXT: vpmaskmovd %ymm0, %ymm2, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
30 changes: 14 additions & 16 deletions llvm/test/CodeGen/X86/masked_store_trunc_usat.ll
@@ -240,22 +240,22 @@ define void @truncstore_v8i64_v8i32(<8 x i64> %x, <8 x i32>* %p, <8 x i32> %mask
; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [9223372041149743103,9223372041149743103]
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm9
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
; AVX1-NEXT: vpxor %xmm3, %xmm6, %xmm7
; AVX1-NEXT: vpcmpgtq %xmm7, %xmm5, %xmm7
; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm6, %xmm5, %xmm6
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm7
; AVX1-NEXT: vpxor %xmm3, %xmm7, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vpxor %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm5, %xmm3
; AVX1-NEXT: vmovapd {{.*#+}} xmm5 = [4294967295,4294967295]
; AVX1-NEXT: vblendvpd %xmm3, %xmm4, %xmm5, %xmm3
; AVX1-NEXT: vblendvpd %xmm2, %xmm1, %xmm5, %xmm1
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
; AVX1-NEXT: vblendvpd %xmm7, %xmm6, %xmm5, %xmm2
; AVX1-NEXT: vblendvpd %xmm2, %xmm7, %xmm5, %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT: vblendvpd %xmm6, %xmm1, %xmm5, %xmm1
; AVX1-NEXT: vblendvpd %xmm9, %xmm0, %xmm5, %xmm0
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX1-NEXT: vmaskmovps %ymm0, %ymm8, (%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
@@ -268,18 +268,16 @@ define void @truncstore_v8i64_v8i32(<8 x i64> %x, <8 x i32>* %p, <8 x i32> %mask
; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm3 = [4294967295,4294967295,4294967295,4294967295]
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm4 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %ymm4, %ymm0, %ymm5
; AVX2-NEXT: vpxor %ymm4, %ymm1, %ymm5
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm6 = [9223372041149743103,9223372041149743103,9223372041149743103,9223372041149743103]
; AVX2-NEXT: vpcmpgtq %ymm5, %ymm6, %ymm5
; AVX2-NEXT: vblendvpd %ymm5, %ymm0, %ymm3, %ymm0
; AVX2-NEXT: vpxor %ymm4, %ymm1, %ymm4
; AVX2-NEXT: vblendvpd %ymm5, %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vpxor %ymm4, %ymm0, %ymm4
; AVX2-NEXT: vpcmpgtq %ymm4, %ymm6, %ymm4
; AVX2-NEXT: vblendvpd %ymm4, %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
; AVX2-NEXT: vblendvpd %ymm4, %ymm0, %ymm3, %ymm0
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm0[2,3],ymm1[2,3]
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm3[0,2],ymm0[4,6],ymm3[4,6]
; AVX2-NEXT: vpmaskmovd %ymm0, %ymm2, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
6 changes: 2 additions & 4 deletions llvm/test/CodeGen/X86/pr40891.ll
@@ -8,11 +8,9 @@ define <8 x i32> @foo(<8 x i64> %x, <4 x i64> %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vandps %ymm2, %ymm0, %ymm0
; CHECK-NEXT: vandps {{\.LCPI.*}}, %ymm1, %ymm1
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2
; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
; CHECK-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; CHECK-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; CHECK-NEXT: retl
%a = shufflevector <4 x i64> %y, <4 x i64> <i64 12345, i64 67890, i64 13579, i64 24680>, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%b = and <8 x i64> %x, %a
6 changes: 2 additions & 4 deletions llvm/test/CodeGen/X86/vector-reduce-and-bool.ll
@@ -566,11 +566,9 @@ define i1 @trunc_v8i64_v8i1(<8 x i64>) {
;
; AVX2-LABEL: trunc_v8i64_v8i1:
; AVX2: # %bb.0:
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpsllw $15, %xmm0, %xmm0
6 changes: 2 additions & 4 deletions llvm/test/CodeGen/X86/vector-reduce-or-bool.ll
@@ -560,11 +560,9 @@ define i1 @trunc_v8i64_v8i1(<8 x i64>) {
;
; AVX2-LABEL: trunc_v8i64_v8i1:
; AVX2: # %bb.0:
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpsllw $15, %xmm0, %xmm0
6 changes: 2 additions & 4 deletions llvm/test/CodeGen/X86/vector-reduce-xor-bool.ll
@@ -611,11 +611,9 @@ define i1 @trunc_v8i64_v8i1(<8 x i64>) {
;
; AVX2-LABEL: trunc_v8i64_v8i1:
; AVX2: # %bb.0:
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpsllw $15, %xmm0, %xmm0
