llvm/test/CodeGen/X86/horizontal-sum.ll: 198 changes (91 additions, 107 deletions)
@@ -98,11 +98,9 @@ define <4 x i32> @pair_sum_v4i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm0
; SSSE3-SLOW-NEXT: phaddd %xmm2, %xmm3
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,2,3]
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,1,0,1]
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[1,1]
; SSSE3-SLOW-NEXT: paddd %xmm3, %xmm2
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,0]
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
; SSSE3-SLOW-NEXT: paddd %xmm3, %xmm1
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
; SSSE3-SLOW-NEXT: retq
;
; SSSE3-FAST-LABEL: pair_sum_v4i32_v4i32:
@@ -138,9 +136,7 @@ define <4 x i32> @pair_sum_v4i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
; AVX2-SLOW-LABEL: pair_sum_v4i32_v4i32:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
; AVX2-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-SLOW-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX2-SLOW-NEXT: vphaddd %xmm2, %xmm2, %xmm1
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
@@ -187,50 +183,43 @@ define <8 x float> @pair_sum_v8f32_v4f32(<4 x float> %0, <4 x float> %1, <4 x fl
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm0[1,3]
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSSE3-SLOW-NEXT: addps %xmm1, %xmm0
; SSSE3-SLOW-NEXT: movaps %xmm3, %xmm1
; SSSE3-SLOW-NEXT: haddps %xmm2, %xmm1
; SSSE3-SLOW-NEXT: haddps %xmm4, %xmm5
; SSSE3-SLOW-NEXT: haddps %xmm3, %xmm2
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm5[2,0]
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm5[3,1]
; SSSE3-SLOW-NEXT: addps %xmm2, %xmm1
; SSSE3-SLOW-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-SLOW-NEXT: movaps %xmm5, %xmm1
; SSSE3-SLOW-NEXT: haddps %xmm4, %xmm1
; SSSE3-SLOW-NEXT: haddps %xmm1, %xmm2
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1,3,2]
; SSSE3-SLOW-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSSE3-SLOW-NEXT: haddps %xmm7, %xmm6
; SSSE3-SLOW-NEXT: haddps %xmm6, %xmm6
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,3],xmm6[0,3]
; SSSE3-SLOW-NEXT: haddps %xmm5, %xmm4
; SSSE3-SLOW-NEXT: haddps %xmm6, %xmm4
; SSSE3-SLOW-NEXT: movaps %xmm4, %xmm1
; SSSE3-SLOW-NEXT: retq
;
; SSSE3-FAST-LABEL: pair_sum_v8f32_v4f32:
; SSSE3-FAST: # %bb.0:
; SSSE3-FAST-NEXT: haddps %xmm1, %xmm0
; SSSE3-FAST-NEXT: haddps %xmm0, %xmm0
; SSSE3-FAST-NEXT: movaps %xmm3, %xmm1
; SSSE3-FAST-NEXT: haddps %xmm2, %xmm1
; SSSE3-FAST-NEXT: haddps %xmm4, %xmm5
; SSSE3-FAST-NEXT: haddps %xmm3, %xmm2
; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm5[2,0]
; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm5[3,1]
; SSSE3-FAST-NEXT: addps %xmm2, %xmm1
; SSSE3-FAST-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-FAST-NEXT: haddps %xmm6, %xmm6
; SSSE3-FAST-NEXT: haddps %xmm7, %xmm7
; SSSE3-FAST-NEXT: haddps %xmm5, %xmm4
; SSSE3-FAST-NEXT: haddps %xmm4, %xmm2
; SSSE3-FAST-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSSE3-FAST-NEXT: haddps %xmm7, %xmm6
; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,3],xmm6[0,2]
; SSSE3-FAST-NEXT: haddps %xmm6, %xmm4
; SSSE3-FAST-NEXT: movaps %xmm4, %xmm1
; SSSE3-FAST-NEXT: retq
;
; AVX1-SLOW-LABEL: pair_sum_v8f32_v4f32:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm0[1,3],xmm1[0,1]
; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,3,1,3]
; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vhaddps %xmm4, %xmm4, %xmm1
; AVX1-SLOW-NEXT: vhaddps %xmm5, %xmm5, %xmm4
; AVX1-SLOW-NEXT: vhaddps %xmm3, %xmm2, %xmm2
; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm3 = xmm2[0,2],xmm1[0,1]
; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,1]
; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,3]
; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[1]
; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm3, %xmm1
; AVX1-SLOW-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -245,32 +234,28 @@ define <8 x float> @pair_sum_v8f32_v4f32(<4 x float> %0, <4 x float> %1, <4 x fl
; AVX1-FAST-LABEL: pair_sum_v8f32_v4f32:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm8
; AVX1-FAST-NEXT: vhaddps %xmm2, %xmm2, %xmm1
; AVX1-FAST-NEXT: vhaddps %xmm3, %xmm3, %xmm0
; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[1],xmm0[1],zero,zero
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vhaddps %xmm4, %xmm4, %xmm1
; AVX1-FAST-NEXT: vhaddps %xmm5, %xmm5, %xmm4
; AVX1-FAST-NEXT: vhaddps %xmm3, %xmm2, %xmm2
; AVX1-FAST-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,1]
; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[0]
; AVX1-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1,3]
; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[1]
; AVX1-FAST-NEXT: vaddps %xmm0, %xmm2, %xmm0
; AVX1-FAST-NEXT: vmovlhps {{.*#+}} xmm1 = xmm8[0],xmm0[0]
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-FAST-NEXT: vshufps {{.*#+}} xmm3 = xmm2[0,2],xmm1[0,1]
; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
; AVX1-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,3]
; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[1]
; AVX1-FAST-NEXT: vaddps %xmm1, %xmm3, %xmm1
; AVX1-FAST-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX1-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX1-FAST-NEXT: vhaddps %xmm7, %xmm6, %xmm2
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm2, %xmm2
; AVX1-FAST-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-FAST-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[2]
; AVX1-FAST-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-FAST-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[2]
; AVX1-FAST-NEXT: retq
;
; AVX2-SLOW-LABEL: pair_sum_v8f32_v4f32:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX2-SLOW-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm0[1,3],xmm1[0,1]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,3,1,3]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX2-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX2-SLOW-NEXT: vhaddps %xmm4, %xmm4, %xmm1
@@ -365,37 +350,24 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,1,3]
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm0
; SSSE3-SLOW-NEXT: phaddd %xmm4, %xmm5
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm5[2,3,2,3]
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,1,0,1]
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm1[1,1]
; SSSE3-SLOW-NEXT: phaddd %xmm3, %xmm2
; SSSE3-SLOW-NEXT: movdqa %xmm2, %xmm1
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[2,0]
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm4[2,0]
; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm2
; SSSE3-SLOW-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSSE3-SLOW-NEXT: phaddd %xmm4, %xmm5
; SSSE3-SLOW-NEXT: phaddd %xmm5, %xmm2
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,1,3,2]
; SSSE3-SLOW-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-SLOW-NEXT: phaddd %xmm7, %xmm6
; SSSE3-SLOW-NEXT: phaddd %xmm6, %xmm6
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm6[0,1,1,1]
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,3],xmm1[0,2]
; SSSE3-SLOW-NEXT: movaps %xmm2, %xmm1
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm2 = xmm6[0,1,1,1]
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,3],xmm2[0,2]
; SSSE3-SLOW-NEXT: retq
;
; SSSE3-FAST-LABEL: pair_sum_v8i32_v4i32:
; SSSE3-FAST: # %bb.0:
; SSSE3-FAST-NEXT: phaddd %xmm1, %xmm0
; SSSE3-FAST-NEXT: phaddd %xmm0, %xmm0
; SSSE3-FAST-NEXT: movdqa %xmm5, %xmm1
; SSSE3-FAST-NEXT: phaddd %xmm4, %xmm5
; SSSE3-FAST-NEXT: phaddd %xmm4, %xmm4
; SSSE3-FAST-NEXT: phaddd %xmm1, %xmm1
; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm4[1,1]
; SSSE3-FAST-NEXT: phaddd %xmm3, %xmm2
; SSSE3-FAST-NEXT: movdqa %xmm2, %xmm3
; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm5[2,0]
; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm1[2,0]
; SSSE3-FAST-NEXT: paddd %xmm3, %xmm2
; SSSE3-FAST-NEXT: phaddd %xmm5, %xmm4
; SSSE3-FAST-NEXT: phaddd %xmm4, %xmm2
; SSSE3-FAST-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSSE3-FAST-NEXT: phaddd %xmm6, %xmm6
; SSSE3-FAST-NEXT: phaddd %xmm7, %xmm7
@@ -407,18 +379,17 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
; AVX1-SLOW-LABEL: pair_sum_v8i32_v4i32:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,3,1,1]
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,3,1,3]
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX1-SLOW-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vphaddd %xmm4, %xmm4, %xmm1
; AVX1-SLOW-NEXT: vphaddd %xmm5, %xmm5, %xmm4
; AVX1-SLOW-NEXT: vphaddd %xmm3, %xmm2, %xmm2
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,2,1,3]
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,2,2,3]
; AVX1-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0]
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[0,0,0,0]
; AVX1-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5],xmm5[6,7]
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,3,1,1]
; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[1],zero
; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,3]
; AVX1-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3]
; AVX1-SLOW-NEXT: vpaddd %xmm1, %xmm3, %xmm1
; AVX1-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -433,34 +404,30 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
; AVX1-FAST-LABEL: pair_sum_v8i32_v4i32:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm8
; AVX1-FAST-NEXT: vphaddd %xmm2, %xmm2, %xmm1
; AVX1-FAST-NEXT: vphaddd %xmm3, %xmm3, %xmm0
; AVX1-FAST-NEXT: vphaddd %xmm4, %xmm4, %xmm4
; AVX1-FAST-NEXT: vphaddd %xmm5, %xmm5, %xmm5
; AVX1-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vphaddd %xmm4, %xmm4, %xmm1
; AVX1-FAST-NEXT: vphaddd %xmm5, %xmm5, %xmm4
; AVX1-FAST-NEXT: vphaddd %xmm3, %xmm2, %xmm2
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,1,3]
; AVX1-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm5[0,0,0,0]
; AVX1-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5],xmm3[6,7]
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
; AVX1-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm4[1],zero
; AVX1-FAST-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm5[3]
; AVX1-FAST-NEXT: vpaddd %xmm0, %xmm2, %xmm0
; AVX1-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm8[0],xmm0[0]
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX1-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,2,2,3]
; AVX1-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0]
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[0,0,0,0]
; AVX1-FAST-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5],xmm5[6,7]
; AVX1-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,3]
; AVX1-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3]
; AVX1-FAST-NEXT: vpaddd %xmm1, %xmm3, %xmm1
; AVX1-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX1-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX1-FAST-NEXT: vphaddd %xmm7, %xmm6, %xmm2
; AVX1-FAST-NEXT: vphaddd %xmm0, %xmm2, %xmm2
; AVX1-FAST-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-FAST-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[2]
; AVX1-FAST-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-FAST-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[2]
; AVX1-FAST-NEXT: retq
;
; AVX2-SLOW-LABEL: pair_sum_v8i32_v4i32:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,3,1,1]
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,3,1,3]
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX2-SLOW-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX2-SLOW-NEXT: vphaddd %xmm4, %xmm4, %xmm1
@@ -1138,22 +1105,39 @@ define <4 x i32> @reduction_sum_v4i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32
; AVX-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; AVX-SLOW-NEXT: retq
;
; AVX-FAST-LABEL: reduction_sum_v4i32_v4i32:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
; AVX-FAST-NEXT: vpaddd %xmm4, %xmm0, %xmm0
; AVX-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
; AVX-FAST-NEXT: vpaddd %xmm4, %xmm1, %xmm1
; AVX-FAST-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
; AVX-FAST-NEXT: vpaddd %xmm1, %xmm2, %xmm1
; AVX-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
; AVX-FAST-NEXT: vpaddd %xmm2, %xmm3, %xmm2
; AVX-FAST-NEXT: vphaddd %xmm2, %xmm1, %xmm1
; AVX-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
; AVX-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-FAST-NEXT: retq
; AVX1-FAST-LABEL: reduction_sum_v4i32_v4i32:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
; AVX1-FAST-NEXT: vpaddd %xmm4, %xmm0, %xmm0
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
; AVX1-FAST-NEXT: vpaddd %xmm4, %xmm1, %xmm1
; AVX1-FAST-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
; AVX1-FAST-NEXT: vpaddd %xmm1, %xmm2, %xmm1
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
; AVX1-FAST-NEXT: vpaddd %xmm2, %xmm3, %xmm2
; AVX1-FAST-NEXT: vphaddd %xmm2, %xmm1, %xmm1
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,0,2]
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX1-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-FAST-NEXT: retq
;
; AVX2-FAST-LABEL: reduction_sum_v4i32_v4i32:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
; AVX2-FAST-NEXT: vpaddd %xmm4, %xmm0, %xmm0
; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
; AVX2-FAST-NEXT: vpaddd %xmm4, %xmm1, %xmm1
; AVX2-FAST-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
; AVX2-FAST-NEXT: vpaddd %xmm1, %xmm2, %xmm1
; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
; AVX2-FAST-NEXT: vpaddd %xmm2, %xmm3, %xmm2
; AVX2-FAST-NEXT: vphaddd %xmm2, %xmm1, %xmm1
; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,0,2]
; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX2-FAST-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX2-FAST-NEXT: retq
%5 = call i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32> %0)
%6 = call i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32> %1)
%7 = call i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32> %2)