@@ -270,16 +270,13 @@ define <8 x float> @pair_sum_v8f32_v4f32(<4 x float> %0, <4 x float> %1, <4 x fl
; AVX1-SLOW-NEXT: vaddps %xmm0, %xmm1, %xmm0
; AVX1-SLOW-NEXT: vunpcklps {{.*#+}} xmm0 = xmm8[0],xmm0[0],xmm8[1],xmm0[1]
; AVX1-SLOW-NEXT: vhaddps %xmm3, %xmm2, %xmm1
- ; AVX1-SLOW-NEXT: vhaddps %xmm2, %xmm2, %xmm2
- ; AVX1-SLOW-NEXT: vhaddps %xmm3, %xmm3, %xmm3
- ; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[1],xmm3[1],zero,zero
- ; AVX1-SLOW-NEXT: vhaddps %xmm4, %xmm4, %xmm3
- ; AVX1-SLOW-NEXT: vhaddps %xmm5, %xmm5, %xmm4
- ; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,1]
- ; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
- ; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,1],xmm3[1,3]
- ; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[1]
- ; AVX1-SLOW-NEXT: vaddps %xmm2, %xmm1, %xmm1
+ ; AVX1-SLOW-NEXT: vhaddps %xmm4, %xmm4, %xmm2
+ ; AVX1-SLOW-NEXT: vhaddps %xmm5, %xmm5, %xmm3
+ ; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm4 = xmm1[0,2],xmm2[0,1]
+ ; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1,2],xmm3[0]
+ ; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,3],xmm2[1,1]
+ ; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[1]
+ ; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm4, %xmm1
; AVX1-SLOW-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX1-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
@@ -423,15 +420,14 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm2
; SSSE3-SLOW-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSSE3-SLOW-NEXT: phaddd %xmm3, %xmm8
- ; SSSE3-SLOW-NEXT: movdqa %xmm5, %xmm1
; SSSE3-SLOW-NEXT: phaddd %xmm4, %xmm5
- ; SSSE3-SLOW-NEXT: phaddd %xmm4, %xmm4
- ; SSSE3-SLOW-NEXT: phaddd %xmm1, %xmm1
- ; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm4[1,1]
- ; SSSE3-SLOW-NEXT: movdqa %xmm8, %xmm2
- ; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm5[2,0]
- ; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm8 = xmm8[1,3],xmm1[2,0]
- ; SSSE3-SLOW-NEXT: paddd %xmm2, %xmm8
+ ; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm5[2,3,2,3]
+ ; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm2 = xmm5[0,1,0,1]
+ ; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[1,1]
+ ; SSSE3-SLOW-NEXT: movdqa %xmm8, %xmm1
+ ; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[2,0]
+ ; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm8 = xmm8[1,3],xmm2[2,0]
+ ; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm8
; SSSE3-SLOW-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm8[0]
; SSSE3-SLOW-NEXT: phaddd %xmm6, %xmm6
; SSSE3-SLOW-NEXT: phaddd %xmm7, %xmm7
@@ -470,30 +466,27 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
; AVX1-SLOW-NEXT: vphaddd %xmm1, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; AVX1-SLOW-NEXT: vpaddd %xmm0, %xmm1, %xmm0
- ; AVX1-SLOW-NEXT: vpunpckldq {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1]
+ ; AVX1-SLOW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm8[0],xmm0[0],xmm8[1],xmm0[1]
; AVX1-SLOW-NEXT: vphaddd %xmm3, %xmm2, %xmm1
- ; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
- ; AVX1-SLOW-NEXT: vphaddd %xmm2, %xmm2, %xmm2
- ; AVX1-SLOW-NEXT: vphaddd %xmm3, %xmm3, %xmm3
- ; AVX1-SLOW-NEXT: vphaddd %xmm4, %xmm4, %xmm4
- ; AVX1-SLOW-NEXT: vphaddd %xmm5, %xmm5, %xmm5
- ; AVX1-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
- ; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm5[0,0,0,0]
- ; AVX1-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
- ; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[1,1,1,1]
- ; AVX1-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5,6,7]
- ; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm4[1],zero
- ; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm5[1]
- ; AVX1-SLOW-NEXT: vpaddd %xmm1, %xmm0, %xmm0
- ; AVX1-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm8[0],xmm0[0]
- ; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
- ; AVX1-SLOW-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+ ; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[0,2,1,3]
+ ; AVX1-SLOW-NEXT: vphaddd %xmm4, %xmm4, %xmm3
+ ; AVX1-SLOW-NEXT: vphaddd %xmm5, %xmm5, %xmm4
+ ; AVX1-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+ ; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[0,0,0,0]
+ ; AVX1-SLOW-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5],xmm5[6,7]
+ ; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,3,1,1]
+ ; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[1],zero
+ ; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[1]
+ ; AVX1-SLOW-NEXT: vpaddd %xmm1, %xmm2, %xmm1
+ ; AVX1-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+ ; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+ ; AVX1-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX1-SLOW-NEXT: vphaddd %xmm6, %xmm6, %xmm2
; AVX1-SLOW-NEXT: vphaddd %xmm7, %xmm7, %xmm3
; AVX1-SLOW-NEXT: vphaddd %xmm3, %xmm2, %xmm2
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
- ; AVX1-SLOW-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
- ; AVX1-SLOW-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[2]
+ ; AVX1-SLOW-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+ ; AVX1-SLOW-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[2]
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: pair_sum_v8i32_v4i32: