@@ -113,7 +113,7 @@ define float @test_v3f32(<3 x float> %a0) {
; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm3
; AVX-NEXT: vblendvps %xmm3, %xmm1, %xmm2, %xmm1
; AVX-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
-; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vmaxss %xmm1, %xmm0, %xmm1
; AVX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
@@ -125,7 +125,7 @@ define float @test_v3f32(<3 x float> %a0) {
; AVX512-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
; AVX512-NEXT: vcmpunordss %xmm2, %xmm2, %k1
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX512-NEXT: vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vmaxss %xmm2, %xmm1, %xmm0
; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; AVX512-NEXT: retq
@@ -198,7 +198,7 @@ define float @test_v4f32(<4 x float> %a0) {
; AVX-LABEL: test_v4f32:
; AVX: # %bb.0:
; AVX-NEXT: vshufps {{.*#+}} xmm1 = xmm0[3,3,3,3]
-; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX-NEXT: vshufpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX-NEXT: vmaxss %xmm0, %xmm3, %xmm4
; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm0
@@ -214,7 +214,7 @@ define float @test_v4f32(<4 x float> %a0) {
; AVX512-LABEL: test_v4f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vshufps {{.*#+}} xmm1 = xmm0[3,3,3,3]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX512-NEXT: vshufpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX512-NEXT: vmaxss %xmm0, %xmm3, %xmm4
; AVX512-NEXT: vcmpunordss %xmm0, %xmm0, %k1
@@ -303,10 +303,10 @@ define float @test_v8f32(<8 x float> %a0) {
; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm1[3,3,3,3]
-; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
+; AVX-NEXT: vshufpd {{.*#+}} xmm3 = xmm1[1,0]
; AVX-NEXT: vmovshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
; AVX-NEXT: vshufps {{.*#+}} xmm5 = xmm0[3,3,3,3]
-; AVX-NEXT: vpermilpd {{.*#+}} xmm6 = xmm0[1,0]
+; AVX-NEXT: vshufpd {{.*#+}} xmm6 = xmm0[1,0]
; AVX-NEXT: vmovshdup {{.*#+}} xmm7 = xmm0[1,1,3,3]
; AVX-NEXT: vmaxss %xmm0, %xmm7, %xmm8
; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm0
@@ -336,10 +336,10 @@ define float @test_v8f32(<8 x float> %a0) {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX512BW-NEXT: vshufps {{.*#+}} xmm1 = xmm3[3,3,3,3]
-; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm3[1,0]
+; AVX512BW-NEXT: vshufpd {{.*#+}} xmm2 = xmm3[1,0]
; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm4 = xmm3[1,1,3,3]
; AVX512BW-NEXT: vshufps {{.*#+}} xmm5 = xmm0[3,3,3,3]
-; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm6 = xmm0[1,0]
+; AVX512BW-NEXT: vshufpd {{.*#+}} xmm6 = xmm0[1,0]
; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm7 = xmm0[1,1,3,3]
; AVX512BW-NEXT: vmaxss %xmm0, %xmm7, %xmm8
; AVX512BW-NEXT: vcmpunordss %xmm0, %xmm0, %k1
@@ -369,10 +369,10 @@ define float @test_v8f32(<8 x float> %a0) {
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512VL-NEXT: vshufps {{.*#+}} xmm2 = xmm1[3,3,3,3]
-; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
+; AVX512VL-NEXT: vshufpd {{.*#+}} xmm3 = xmm1[1,0]
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
; AVX512VL-NEXT: vshufps {{.*#+}} xmm5 = xmm0[3,3,3,3]
-; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm6 = xmm0[1,0]
+; AVX512VL-NEXT: vshufpd {{.*#+}} xmm6 = xmm0[1,0]
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm7 = xmm0[1,1,3,3]
; AVX512VL-NEXT: vmaxss %xmm0, %xmm7, %xmm8
; AVX512VL-NEXT: vcmpunordss %xmm0, %xmm0, %k1
@@ -502,7 +502,7 @@ define float @test_v16f32(<16 x float> %a0) {
; AVX-NEXT: vcmpunordss %xmm0, %xmm0, %xmm3
; AVX-NEXT: vblendvps %xmm3, %xmm1, %xmm2, %xmm1
; AVX-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
-; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; AVX-NEXT: vshufpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX-NEXT: vmaxss %xmm1, %xmm3, %xmm1
; AVX-NEXT: vblendvps %xmm2, %xmm3, %xmm1, %xmm1
; AVX-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
@@ -518,7 +518,7 @@ define float @test_v16f32(<16 x float> %a0) {
; AVX-NEXT: vmaxss %xmm1, %xmm3, %xmm1
; AVX-NEXT: vblendvps %xmm2, %xmm3, %xmm1, %xmm1
; AVX-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
-; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; AVX-NEXT: vshufpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX-NEXT: vmaxss %xmm1, %xmm3, %xmm1
; AVX-NEXT: vblendvps %xmm2, %xmm3, %xmm1, %xmm1
; AVX-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
@@ -535,7 +535,7 @@ define float @test_v16f32(<16 x float> %a0) {
; AVX512BW-NEXT: vmaxss %xmm0, %xmm2, %xmm3
; AVX512BW-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512BW-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1}
-; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX512BW-NEXT: vshufpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512BW-NEXT: vcmpunordss %xmm3, %xmm3, %k1
; AVX512BW-NEXT: vmaxss %xmm3, %xmm2, %xmm3
; AVX512BW-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1}
@@ -550,7 +550,7 @@ define float @test_v16f32(<16 x float> %a0) {
; AVX512BW-NEXT: vcmpunordss %xmm3, %xmm3, %k1
; AVX512BW-NEXT: vmaxss %xmm3, %xmm2, %xmm3
; AVX512BW-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1}
-; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
+; AVX512BW-NEXT: vshufpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512BW-NEXT: vcmpunordss %xmm3, %xmm3, %k1
; AVX512BW-NEXT: vmaxss %xmm3, %xmm2, %xmm3
; AVX512BW-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1}
@@ -566,7 +566,7 @@ define float @test_v16f32(<16 x float> %a0) {
; AVX512BW-NEXT: vcmpunordss %xmm3, %xmm3, %k1
; AVX512BW-NEXT: vmaxss %xmm3, %xmm1, %xmm3
; AVX512BW-NEXT: vmovss %xmm1, %xmm3, %xmm3 {%k1}
-; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
+; AVX512BW-NEXT: vshufpd {{.*#+}} xmm1 = xmm2[1,0]
; AVX512BW-NEXT: vcmpunordss %xmm3, %xmm3, %k1
; AVX512BW-NEXT: vmaxss %xmm3, %xmm1, %xmm3
; AVX512BW-NEXT: vmovss %xmm1, %xmm3, %xmm3 {%k1}
@@ -582,7 +582,7 @@ define float @test_v16f32(<16 x float> %a0) {
; AVX512BW-NEXT: vcmpunordss %xmm2, %xmm2, %k1
; AVX512BW-NEXT: vmaxss %xmm2, %xmm1, %xmm2
; AVX512BW-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
-; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX512BW-NEXT: vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512BW-NEXT: vcmpunordss %xmm2, %xmm2, %k1
; AVX512BW-NEXT: vmaxss %xmm2, %xmm1, %xmm2
; AVX512BW-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
@@ -597,18 +597,18 @@ define float @test_v16f32(<16 x float> %a0) {
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextractf32x4 $3, %zmm0, %xmm3
; AVX512VL-NEXT: vshufps {{.*#+}} xmm1 = xmm3[3,3,3,3]
-; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm2 = xmm3[1,0]
+; AVX512VL-NEXT: vshufpd {{.*#+}} xmm2 = xmm3[1,0]
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm4 = xmm3[1,1,3,3]
; AVX512VL-NEXT: vextractf32x4 $2, %zmm0, %xmm6
; AVX512VL-NEXT: vshufps {{.*#+}} xmm5 = xmm6[3,3,3,3]
-; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm7 = xmm6[1,0]
+; AVX512VL-NEXT: vshufpd {{.*#+}} xmm7 = xmm6[1,0]
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm8 = xmm6[1,1,3,3]
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm9
; AVX512VL-NEXT: vshufps {{.*#+}} xmm10 = xmm9[3,3,3,3]
-; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm11 = xmm9[1,0]
+; AVX512VL-NEXT: vshufpd {{.*#+}} xmm11 = xmm9[1,0]
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm12 = xmm9[1,1,3,3]
; AVX512VL-NEXT: vshufps {{.*#+}} xmm13 = xmm0[3,3,3,3]
-; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm14 = xmm0[1,0]
+; AVX512VL-NEXT: vshufpd {{.*#+}} xmm14 = xmm0[1,0]
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm15 = xmm0[1,1,3,3]
; AVX512VL-NEXT: vmaxss %xmm0, %xmm15, %xmm16
; AVX512VL-NEXT: vcmpunordss %xmm0, %xmm0, %k1
@@ -682,15 +682,15 @@ define double @test_v2f64(<2 x double> %a0) {
;
; AVX-LABEL: test_v2f64:
; AVX: # %bb.0:
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT: vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT: vmaxsd %xmm0, %xmm1, %xmm2
; AVX-NEXT: vcmpunordsd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm1, %xmm2, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v2f64:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX512-NEXT: vshufpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vmaxsd %xmm0, %xmm2, %xmm1
; AVX512-NEXT: vcmpunordsd %xmm0, %xmm0, %k1
; AVX512-NEXT: vmovsd %xmm2, %xmm1, %xmm1 {%k1}
@@ -740,8 +740,8 @@ define double @test_v4f64(<4 x double> %a0) {
; AVX-LABEL: test_v4f64:
; AVX: # %bb.0:
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
-; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; AVX-NEXT: vshufpd {{.*#+}} xmm2 = xmm1[1,0]
+; AVX-NEXT: vshufpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX-NEXT: vmaxsd %xmm0, %xmm3, %xmm4
; AVX-NEXT: vcmpunordsd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm3, %xmm4, %xmm0
@@ -757,8 +757,8 @@ define double @test_v4f64(<4 x double> %a0) {
; AVX512-LABEL: test_v4f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
+; AVX512-NEXT: vshufpd {{.*#+}} xmm2 = xmm1[1,0]
+; AVX512-NEXT: vshufpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX512-NEXT: vmaxsd %xmm0, %xmm3, %xmm4
; AVX512-NEXT: vcmpunordsd %xmm0, %xmm0, %k1
; AVX512-NEXT: vmovsd %xmm3, %xmm4, %xmm4 {%k1}
@@ -838,7 +838,7 @@ define double @test_v8f64(<8 x double> %a0) {
; AVX-NEXT: vmaxpd %ymm0, %ymm1, %ymm2
; AVX-NEXT: vcmpunordpd %ymm0, %ymm0, %ymm0
; AVX-NEXT: vblendvpd %ymm0, %ymm1, %ymm2, %ymm0
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT: vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT: vmaxsd %xmm0, %xmm1, %xmm2
; AVX-NEXT: vcmpunordsd %xmm0, %xmm0, %xmm3
; AVX-NEXT: vblendvpd %xmm3, %xmm1, %xmm2, %xmm1
@@ -847,7 +847,7 @@ define double @test_v8f64(<8 x double> %a0) {
; AVX-NEXT: vmaxsd %xmm1, %xmm0, %xmm1
; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
; AVX-NEXT: vcmpunordsd %xmm1, %xmm1, %xmm2
-; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vmaxsd %xmm1, %xmm0, %xmm1
; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: vzeroupper
@@ -856,12 +856,12 @@ define double @test_v8f64(<8 x double> %a0) {
; AVX512BW-LABEL: test_v8f64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vextractf32x4 $3, %zmm0, %xmm2
-; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
+; AVX512BW-NEXT: vshufpd {{.*#+}} xmm1 = xmm2[1,0]
; AVX512BW-NEXT: vextractf32x4 $2, %zmm0, %xmm3
-; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm4 = xmm3[1,0]
+; AVX512BW-NEXT: vshufpd {{.*#+}} xmm4 = xmm3[1,0]
; AVX512BW-NEXT: vextractf128 $1, %ymm0, %xmm5
-; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm6 = xmm5[1,0]
-; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm7 = xmm0[1,0]
+; AVX512BW-NEXT: vshufpd {{.*#+}} xmm6 = xmm5[1,0]
+; AVX512BW-NEXT: vshufpd {{.*#+}} xmm7 = xmm0[1,0]
; AVX512BW-NEXT: vmaxsd %xmm0, %xmm7, %xmm8
; AVX512BW-NEXT: vcmpunordsd %xmm0, %xmm0, %k1
; AVX512BW-NEXT: vmovsd %xmm7, %xmm8, %xmm8 {%k1}
@@ -889,12 +889,12 @@ define double @test_v8f64(<8 x double> %a0) {
; AVX512VL-LABEL: test_v8f64:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextractf32x4 $3, %zmm0, %xmm1
-; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
+; AVX512VL-NEXT: vshufpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512VL-NEXT: vextractf32x4 $2, %zmm0, %xmm3
-; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm4 = xmm3[1,0]
+; AVX512VL-NEXT: vshufpd {{.*#+}} xmm4 = xmm3[1,0]
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm5
-; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm6 = xmm5[1,0]
-; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm7 = xmm0[1,0]
+; AVX512VL-NEXT: vshufpd {{.*#+}} xmm6 = xmm5[1,0]
+; AVX512VL-NEXT: vshufpd {{.*#+}} xmm7 = xmm0[1,0]
; AVX512VL-NEXT: vmaxsd %xmm0, %xmm7, %xmm8
; AVX512VL-NEXT: vcmpunordsd %xmm0, %xmm0, %k1
; AVX512VL-NEXT: vmovsd %xmm7, %xmm8, %xmm8 {%k1}
@@ -1036,7 +1036,7 @@ define double @test_v16f64(<16 x double> %a0) {
; AVX-NEXT: vmaxpd %ymm0, %ymm1, %ymm2
; AVX-NEXT: vcmpunordpd %ymm0, %ymm0, %ymm0
; AVX-NEXT: vblendvpd %ymm0, %ymm1, %ymm2, %ymm0
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-NEXT: vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT: vmaxsd %xmm0, %xmm1, %xmm2
; AVX-NEXT: vcmpunordsd %xmm0, %xmm0, %xmm3
; AVX-NEXT: vblendvpd %xmm3, %xmm1, %xmm2, %xmm1
@@ -1045,7 +1045,7 @@ define double @test_v16f64(<16 x double> %a0) {
; AVX-NEXT: vmaxsd %xmm1, %xmm0, %xmm1
; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
; AVX-NEXT: vcmpunordsd %xmm1, %xmm1, %xmm2
-; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vmaxsd %xmm1, %xmm0, %xmm1
; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: vzeroupper
@@ -1056,7 +1056,7 @@ define double @test_v16f64(<16 x double> %a0) {
; AVX512-NEXT: vmaxpd %zmm0, %zmm1, %zmm2
; AVX512-NEXT: vcmpunordpd %zmm0, %zmm0, %k1
; AVX512-NEXT: vmovapd %zmm1, %zmm2 {%k1}
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm2[1,0]
+; AVX512-NEXT: vshufpd {{.*#+}} xmm0 = xmm2[1,0]
; AVX512-NEXT: vmaxsd %xmm2, %xmm0, %xmm1
; AVX512-NEXT: vcmpunordsd %xmm2, %xmm2, %k1
; AVX512-NEXT: vmovsd %xmm0, %xmm1, %xmm1 {%k1}
@@ -1065,23 +1065,23 @@ define double @test_v16f64(<16 x double> %a0) {
; AVX512-NEXT: vmaxsd %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovsd %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vcmpunordsd %xmm1, %xmm1, %k1
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vmaxsd %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovsd %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vcmpunordsd %xmm1, %xmm1, %k1
; AVX512-NEXT: vextractf32x4 $2, %zmm2, %xmm0
; AVX512-NEXT: vmaxsd %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovsd %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vcmpunordsd %xmm1, %xmm1, %k1
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vmaxsd %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovsd %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vcmpunordsd %xmm1, %xmm1, %k1
; AVX512-NEXT: vextractf32x4 $3, %zmm2, %xmm0
; AVX512-NEXT: vmaxsd %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovsd %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vcmpunordsd %xmm1, %xmm1, %k1
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX512-NEXT: vshufpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vmaxsd %xmm1, %xmm2, %xmm0
; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm0 {%k1}
; AVX512-NEXT: vzeroupper