Commit 6878be5

[X86][SSE] Attempt to merge single-op hops for slow targets.
For slow-hop targets, see if any single-op hops are duplicating work already done on another (dual-op) hop, which can sometimes occur as isHorizontalBinOp tries to find potential duplicates (but can't merge them itself). If so, reuse the other hop and shuffle the result.
1 parent 40d8e4d · commit 6878be5
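The equivalence behind this combine is easy to check with intrinsics. Below is a minimal, illustrative C++ sketch, not code from this commit (assumes SSE3, e.g. built with -msse3): a single-op hop HADD(x, x) yields the same lanes as broadcasting the low half of an existing dual-op hop HADD(x, y) with shuffle mask {0,1,0,1}.

#include <pmmintrin.h> // SSE3: _mm_hadd_ps
#include <cstdio>

int main() {
  __m128 x = _mm_setr_ps(1.0f, 2.0f, 4.0f, 8.0f);
  __m128 y = _mm_setr_ps(1.0f, 3.0f, 5.0f, 7.0f);

  // Dual-op hop, assumed to already exist: {x0+x1, x2+x3, y0+y1, y2+y3}.
  __m128 dual = _mm_hadd_ps(x, y);

  // Single-op hop: {x0+x1, x2+x3, x0+x1, x2+x3} -- a second slow hadd.
  __m128 single = _mm_hadd_ps(x, x);

  // Reuse instead: broadcast the dual hop's low half (shuffle mask {0,1,0,1}).
  __m128 reused = _mm_shuffle_ps(dual, dual, _MM_SHUFFLE(1, 0, 1, 0));

  float s[4], r[4];
  _mm_storeu_ps(s, single);
  _mm_storeu_ps(r, reused);
  for (int i = 0; i != 4; ++i)
    printf("lane %d: single=%g reused=%g\n", i, s[i], r[i]); // lanes match
  return 0;
}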

File tree: 2 files changed (+59 lines, −37 lines)


llvm/lib/Target/X86/X86ISelLowering.cpp

Lines changed: 29 additions & 0 deletions
@@ -43379,6 +43379,35 @@ static SDValue combineVectorHADDSUB(SDNode *N, SelectionDAG &DAG,
           X86ISD::HSUB == N->getOpcode() || X86ISD::FHSUB == N->getOpcode()) &&
          "Unexpected horizontal add/sub opcode");
 
+  // For slow-hop targets, if we have a hop with a single op, see if we already
+  // have another user that we can reuse and shuffle the result.
+  if (!shouldUseHorizontalOp(true, DAG, Subtarget)) {
+    MVT VT = N->getSimpleValueType(0);
+    SDValue LHS = N->getOperand(0);
+    SDValue RHS = N->getOperand(1);
+    if (VT.is128BitVector() && LHS == RHS) {
+      for (SDNode *User : LHS->uses()) {
+        if (User != N && User->getOpcode() == N->getOpcode()) {
+          MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f32 : MVT::v4i32;
+          if (User->getOperand(0) == LHS && !User->getOperand(1).isUndef()) {
+            return DAG.getBitcast(
+                VT,
+                DAG.getVectorShuffle(ShufVT, SDLoc(N),
+                                     DAG.getBitcast(ShufVT, SDValue(User, 0)),
+                                     DAG.getUNDEF(ShufVT), {0, 1, 0, 1}));
+          }
+          if (User->getOperand(1) == LHS && !User->getOperand(0).isUndef()) {
+            return DAG.getBitcast(
+                VT,
+                DAG.getVectorShuffle(ShufVT, SDLoc(N),
+                                     DAG.getBitcast(ShufVT, SDValue(User, 0)),
+                                     DAG.getUNDEF(ShufVT), {2, 3, 2, 3}));
+          }
+        }
+      }
+    }
+  }
+
   // Try to fold HOP(SHUFFLE(),SHUFFLE()) -> SHUFFLE(HOP()).
   if (SDValue V = combineHorizOpWithShuffle(N, DAG, Subtarget))
     return V;
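As a companion to the {0,1,0,1} case, here is the integer branch as another illustrative sketch, again not code from this commit (assumes SSSE3, e.g. built with -mssse3): when the shared operand sits in the dual hop's second slot, the combine broadcasts the high half with shuffle mask {2,3,2,3} instead, which is the pshufd pattern visible in the test diff below.

#include <tmmintrin.h> // SSSE3: _mm_hadd_epi32
#include <cstdio>

int main() {
  __m128i x = _mm_setr_epi32(1, 2, 4, 8);
  __m128i y = _mm_setr_epi32(10, 20, 40, 80);

  // Dual-op hop with x as the *second* operand: {y0+y1, y2+y3, x0+x1, x2+x3}.
  __m128i dual = _mm_hadd_epi32(y, x);

  // Single-op hop HADD(x, x) duplicates the dual hop's high half...
  __m128i single = _mm_hadd_epi32(x, x);

  // ...so reuse it via pshufd with mask {2, 3, 2, 3}.
  __m128i reused = _mm_shuffle_epi32(dual, _MM_SHUFFLE(3, 2, 3, 2));

  int s[4], r[4];
  _mm_storeu_si128((__m128i *)s, single);
  _mm_storeu_si128((__m128i *)r, reused);
  for (int i = 0; i != 4; ++i)
    printf("lane %d: single=%d reused=%d\n", i, s[i], r[i]); // lanes match
  return 0;
}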

llvm/test/CodeGen/X86/horizontal-sum.ll

Lines changed: 30 additions & 37 deletions
@@ -270,16 +270,13 @@ define <8 x float> @pair_sum_v8f32_v4f32(<4 x float> %0, <4 x float> %1, <4 x fl
 ; AVX1-SLOW-NEXT:    vaddps %xmm0, %xmm1, %xmm0
 ; AVX1-SLOW-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm8[0],xmm0[0],xmm8[1],xmm0[1]
 ; AVX1-SLOW-NEXT:    vhaddps %xmm3, %xmm2, %xmm1
-; AVX1-SLOW-NEXT:    vhaddps %xmm2, %xmm2, %xmm2
-; AVX1-SLOW-NEXT:    vhaddps %xmm3, %xmm3, %xmm3
-; AVX1-SLOW-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[1],xmm3[1],zero,zero
-; AVX1-SLOW-NEXT:    vhaddps %xmm4, %xmm4, %xmm3
-; AVX1-SLOW-NEXT:    vhaddps %xmm5, %xmm5, %xmm4
-; AVX1-SLOW-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,1]
-; AVX1-SLOW-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
-; AVX1-SLOW-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[0,1],xmm3[1,3]
-; AVX1-SLOW-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[1]
-; AVX1-SLOW-NEXT:    vaddps %xmm2, %xmm1, %xmm1
+; AVX1-SLOW-NEXT:    vhaddps %xmm4, %xmm4, %xmm2
+; AVX1-SLOW-NEXT:    vhaddps %xmm5, %xmm5, %xmm3
+; AVX1-SLOW-NEXT:    vshufps {{.*#+}} xmm4 = xmm1[0,2],xmm2[0,1]
+; AVX1-SLOW-NEXT:    vinsertps {{.*#+}} xmm4 = xmm4[0,1,2],xmm3[0]
+; AVX1-SLOW-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[1,3],xmm2[1,1]
+; AVX1-SLOW-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[1]
+; AVX1-SLOW-NEXT:    vaddps %xmm1, %xmm4, %xmm1
 ; AVX1-SLOW-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX1-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
 ; AVX1-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
@@ -423,15 +420,14 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ; SSSE3-SLOW-NEXT:    paddd %xmm1, %xmm2
 ; SSSE3-SLOW-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
 ; SSSE3-SLOW-NEXT:    phaddd %xmm3, %xmm8
-; SSSE3-SLOW-NEXT:    movdqa %xmm5, %xmm1
 ; SSSE3-SLOW-NEXT:    phaddd %xmm4, %xmm5
-; SSSE3-SLOW-NEXT:    phaddd %xmm4, %xmm4
-; SSSE3-SLOW-NEXT:    phaddd %xmm1, %xmm1
-; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm1 = xmm1[1,1],xmm4[1,1]
-; SSSE3-SLOW-NEXT:    movdqa %xmm8, %xmm2
-; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm5[2,0]
-; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm8 = xmm8[1,3],xmm1[2,0]
-; SSSE3-SLOW-NEXT:    paddd %xmm2, %xmm8
+; SSSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm1 = xmm5[2,3,2,3]
+; SSSE3-SLOW-NEXT:    pshufd {{.*#+}} xmm2 = xmm5[0,1,0,1]
+; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[1,1]
+; SSSE3-SLOW-NEXT:    movdqa %xmm8, %xmm1
+; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[2,0]
+; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm8 = xmm8[1,3],xmm2[2,0]
+; SSSE3-SLOW-NEXT:    paddd %xmm1, %xmm8
 ; SSSE3-SLOW-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm8[0]
 ; SSSE3-SLOW-NEXT:    phaddd %xmm6, %xmm6
 ; SSSE3-SLOW-NEXT:    phaddd %xmm7, %xmm7
@@ -470,30 +466,27 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ; AVX1-SLOW-NEXT:    vphaddd %xmm1, %xmm1, %xmm1
 ; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
 ; AVX1-SLOW-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
-; AVX1-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1]
+; AVX1-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm8[0],xmm0[0],xmm8[1],xmm0[1]
 ; AVX1-SLOW-NEXT:    vphaddd %xmm3, %xmm2, %xmm1
-; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
-; AVX1-SLOW-NEXT:    vphaddd %xmm2, %xmm2, %xmm2
-; AVX1-SLOW-NEXT:    vphaddd %xmm3, %xmm3, %xmm3
-; AVX1-SLOW-NEXT:    vphaddd %xmm4, %xmm4, %xmm4
-; AVX1-SLOW-NEXT:    vphaddd %xmm5, %xmm5, %xmm5
-; AVX1-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
-; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm5[0,0,0,0]
-; AVX1-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
-; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[1,1,1,1]
-; AVX1-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5,6,7]
-; AVX1-SLOW-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm4[1],zero
-; AVX1-SLOW-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm5[1]
-; AVX1-SLOW-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm8[0],xmm0[0]
-; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; AVX1-SLOW-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[0,2,1,3]
+; AVX1-SLOW-NEXT:    vphaddd %xmm4, %xmm4, %xmm3
+; AVX1-SLOW-NEXT:    vphaddd %xmm5, %xmm5, %xmm4
+; AVX1-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm4[0,0,0,0]
+; AVX1-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5],xmm5[6,7]
+; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,3,1,1]
+; AVX1-SLOW-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[1],zero
+; AVX1-SLOW-NEXT:    vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[1]
+; AVX1-SLOW-NEXT:    vpaddd %xmm1, %xmm2, %xmm1
+; AVX1-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX1-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
 ; AVX1-SLOW-NEXT:    vphaddd %xmm6, %xmm6, %xmm2
 ; AVX1-SLOW-NEXT:    vphaddd %xmm7, %xmm7, %xmm3
 ; AVX1-SLOW-NEXT:    vphaddd %xmm3, %xmm2, %xmm2
 ; AVX1-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; AVX1-SLOW-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX1-SLOW-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[2]
+; AVX1-SLOW-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-SLOW-NEXT:    vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[2]
 ; AVX1-SLOW-NEXT:    retq
 ;
 ; AVX1-FAST-LABEL: pair_sum_v8i32_v4i32:
