34 changes: 17 additions & 17 deletions llvm/test/CodeGen/X86/vec_umulo.ll
@@ -113,7 +113,7 @@ define <2 x i32> @umulo_v2i32(<2 x i32> %a0, <2 x i32> %a1, ptr %p2) nounwind {
; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vpternlogq {{.*#+}} xmm0 = ~xmm0
; AVX512-NEXT: vmovq %xmm1, (%rdi)
; AVX512-NEXT: retq
%t = call {<2 x i32>, <2 x i1>} @llvm.umul.with.overflow.v2i32(<2 x i32> %a0, <2 x i32> %a1)
@@ -1028,7 +1028,7 @@ define <16 x i32> @umulo_v16i32(<16 x i32> %a0, <16 x i32> %a1, ptr %p2) nounwin
; AVX512-NEXT: vpermi2d %zmm3, %zmm2, %zmm4
; AVX512-NEXT: vptestmd %zmm4, %zmm4, %k1
; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm1
; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512-NEXT: vpternlogd {{.*#+}} zmm0 {%k1} {z} = -1
; AVX512-NEXT: vmovdqa64 %zmm1, (%rdi)
; AVX512-NEXT: retq
%t = call {<16 x i32>, <16 x i1>} @llvm.umul.with.overflow.v16i32(<16 x i32> %a0, <16 x i32> %a1)
@@ -1218,7 +1218,7 @@ define <16 x i32> @umulo_v16i8(<16 x i8> %a0, <16 x i8> %a1, ptr %p2) nounwind {
; AVX512F-NEXT: vpsrlw $8, %ymm1, %ymm0
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpternlogd {{.*#+}} zmm0 {%k1} {z} = -1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512F-NEXT: vpmovdb %zmm1, (%rdi)
; AVX512F-NEXT: retq
@@ -1230,7 +1230,7 @@ define <16 x i32> @umulo_v16i8(<16 x i8> %a0, <16 x i8> %a1, ptr %p2) nounwind {
; AVX512BW-NEXT: vpmullw %ymm1, %ymm0, %ymm1
; AVX512BW-NEXT: vpsrlw $8, %ymm1, %ymm0
; AVX512BW-NEXT: vptestmw %ymm0, %ymm0, %k1
; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm0 {%k1} {z} = -1
; AVX512BW-NEXT: vpmovwb %ymm1, (%rdi)
; AVX512BW-NEXT: retq
%t = call {<16 x i8>, <16 x i1>} @llvm.umul.with.overflow.v16i8(<16 x i8> %a0, <16 x i8> %a1)
@@ -1589,8 +1589,8 @@ define <32 x i32> @umulo_v32i8(<32 x i8> %a0, <32 x i8> %a1, ptr %p2) nounwind {
; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm0
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k2
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; AVX512F-NEXT: vpternlogd {{.*#+}} zmm0 {%k2} {z} = -1
; AVX512F-NEXT: vpternlogd {{.*#+}} zmm1 {%k1} {z} = -1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512F-NEXT: vpmovdb %zmm2, 16(%rdi)
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
@@ -1604,9 +1604,9 @@ define <32 x i32> @umulo_v32i8(<32 x i8> %a0, <32 x i8> %a1, ptr %p2) nounwind {
; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm0
; AVX512BW-NEXT: vptestmw %zmm0, %zmm0, %k1
; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm0 {%k1} {z} = -1
; AVX512BW-NEXT: kshiftrd $16, %k1, %k1
; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm1 {%k1} {z} = -1
; AVX512BW-NEXT: vpmovwb %zmm2, (%rdi)
; AVX512BW-NEXT: retq
%t = call {<32 x i8>, <32 x i1>} @llvm.umul.with.overflow.v32i8(<32 x i8> %a0, <32 x i8> %a1)
@@ -2297,10 +2297,10 @@ define <64 x i32> @umulo_v64i8(<64 x i8> %a0, <64 x i8> %a1, ptr %p2) nounwind {
; AVX512F-NEXT: vpsrlw $8, %ymm7, %ymm0
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k4
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k4} {z}
; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k3} {z}
; AVX512F-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k2} {z}
; AVX512F-NEXT: vpternlogd $255, %zmm3, %zmm3, %zmm3 {%k1} {z}
; AVX512F-NEXT: vpternlogd {{.*#+}} zmm0 {%k4} {z} = -1
; AVX512F-NEXT: vpternlogd {{.*#+}} zmm1 {%k3} {z} = -1
; AVX512F-NEXT: vpternlogd {{.*#+}} zmm2 {%k2} {z} = -1
; AVX512F-NEXT: vpternlogd {{.*#+}} zmm3 {%k1} {z} = -1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero
; AVX512F-NEXT: vpmovdb %zmm4, 48(%rdi)
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm4 = ymm5[0],zero,ymm5[1],zero,ymm5[2],zero,ymm5[3],zero,ymm5[4],zero,ymm5[5],zero,ymm5[6],zero,ymm5[7],zero,ymm5[8],zero,ymm5[9],zero,ymm5[10],zero,ymm5[11],zero,ymm5[12],zero,ymm5[13],zero,ymm5[14],zero,ymm5[15],zero
@@ -2328,13 +2328,13 @@ define <64 x i32> @umulo_v64i8(<64 x i8> %a0, <64 x i8> %a1, ptr %p2) nounwind {
; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0
; AVX512BW-NEXT: vpackuswb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vptestmb %zmm0, %zmm0, %k1
; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm0 {%k1} {z} = -1
; AVX512BW-NEXT: kshiftrd $16, %k1, %k2
; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm1 {%k2} {z} = -1
; AVX512BW-NEXT: kshiftrq $32, %k1, %k1
; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm2 {%k1} {z} = -1
; AVX512BW-NEXT: kshiftrd $16, %k1, %k1
; AVX512BW-NEXT: vpternlogd $255, %zmm3, %zmm3, %zmm3 {%k1} {z}
; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm3 {%k1} {z} = -1
; AVX512BW-NEXT: vmovdqa64 %zmm4, (%rdi)
; AVX512BW-NEXT: retq
%t = call {<64 x i8>, <64 x i1>} @llvm.umul.with.overflow.v64i8(<64 x i8> %a0, <64 x i8> %a1)
@@ -2428,7 +2428,7 @@ define <8 x i32> @umulo_v8i16(<8 x i16> %a0, <8 x i16> %a1, ptr %p2) nounwind {
; AVX512F-NEXT: vpmulhuw %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vpternlogq $15, %xmm0, %xmm0, %xmm0
; AVX512F-NEXT: vpternlogq {{.*#+}} xmm0 = ~xmm0
; AVX512F-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512F-NEXT: vptestmd %ymm0, %ymm0, %k1
; AVX512F-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
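Note on the regenerated checks above: the new lines match the comment form of each vpternlog instruction (via the {{.*#+}} comment matcher) instead of its raw immediate and register operands. As a rough reference only, here is a minimal C++ sketch (illustrative, not code from this patch; the ternlog() helper is assumed) of how an 8-bit ternlog immediate maps to those printed expressions: 0xFF (255) yields all-ones (-1), 0x0F (15) yields ~src1, and 0xCA (202) is the bitwise select src1 ? src2 : src3.

// Illustrative sketch: evaluate VPTERNLOG bitwise for one 8-bit immediate.
// For every bit position, the result bit is imm[(a << 2) | (b << 1) | c],
// where a, b, c are the corresponding bits of src1 (the destination), src2, src3.
#include <cstdint>
#include <cstdio>

static uint64_t ternlog(uint8_t imm, uint64_t a, uint64_t b, uint64_t c) {
  uint64_t result = 0;
  for (int bit = 0; bit < 64; ++bit) {
    unsigned idx = (((a >> bit) & 1) << 2) | (((b >> bit) & 1) << 1) | ((c >> bit) & 1);
    result |= (uint64_t)((imm >> idx) & 1) << bit;
  }
  return result;
}

int main() {
  uint64_t a = 0x00FF00FF00FF00FF, b = 0x0F0F0F0F0F0F0F0F, c = 0x3333333333333333;
  // imm 0xFF (255): every table entry is 1, so each lane becomes all-ones,
  // which the regenerated checks print as "zmm1 = -1".
  printf("%016llx\n", (unsigned long long)ternlog(0xFF, a, b, c));
  // imm 0x0F (15): 1 only where the src1 bit is 0, i.e. "xmm0 = ~xmm0".
  printf("%016llx\n", (unsigned long long)ternlog(0x0F, a, b, c));
  // imm 0xCA (202): bitwise select a ? b : c, printed as "c ^ (a & (b ^ c))".
  printf("%016llx\n", (unsigned long long)ternlog(0xCA, a, b, c));
}

The masked forms in these tests ({%k1} {z}) only change which lanes receive the result; the per-bit mapping is the same.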
168 changes: 84 additions & 84 deletions llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll

Large diffs are not rendered by default.

24 changes: 12 additions & 12 deletions llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-3.ll
@@ -858,7 +858,7 @@ define void @load_i8_stride3_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10]
; AVX512-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7,8,9,10]
; AVX512-NEXT: vpalignr {{.*#+}} xmm2 = xmm3[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10]
; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX512-NEXT: vpternlogq {{.*#+}} xmm1 = xmm1 ^ (mem & (xmm1 ^ xmm0))
; AVX512-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7,8,9,10]
; AVX512-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
; AVX512-NEXT: vmovdqa %xmm0, (%rsi)
@@ -879,7 +879,7 @@ define void @load_i8_stride3_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10]
; AVX512-FCP-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7,8,9,10]
; AVX512-FCP-NEXT: vpalignr {{.*#+}} xmm2 = xmm3[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10]
; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} xmm1 = xmm1 ^ (mem & (xmm1 ^ xmm0))
; AVX512-FCP-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7,8,9,10]
; AVX512-FCP-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
; AVX512-FCP-NEXT: vmovdqa %xmm0, (%rsi)
@@ -900,7 +900,7 @@ define void @load_i8_stride3_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10]
; AVX512DQ-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7,8,9,10]
; AVX512DQ-NEXT: vpalignr {{.*#+}} xmm2 = xmm3[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10]
; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX512DQ-NEXT: vpternlogq {{.*#+}} xmm1 = xmm1 ^ (mem & (xmm1 ^ xmm0))
; AVX512DQ-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7,8,9,10]
; AVX512DQ-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
; AVX512DQ-NEXT: vmovdqa %xmm0, (%rsi)
@@ -921,7 +921,7 @@ define void @load_i8_stride3_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10]
; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7,8,9,10]
; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} xmm2 = xmm3[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10]
; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} xmm1 = xmm1 ^ (mem & (xmm1 ^ xmm0))
; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7,8,9,10]
; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
; AVX512DQ-FCP-NEXT: vmovdqa %xmm0, (%rsi)
@@ -1412,7 +1412,7 @@ define void @load_i8_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26]
; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
; AVX512-NEXT: # ymm4 = mem[0,1,0,1]
; AVX512-NEXT: vpternlogq $202, %ymm1, %ymm0, %ymm4
; AVX512-NEXT: vpternlogq {{.*#+}} ymm4 = ymm1 ^ (ymm4 & (ymm0 ^ ymm1))
; AVX512-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm3[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm3[16,17,18,19,20,21,22,23,24,25,26]
; AVX512-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
; AVX512-NEXT: vmovdqa %ymm0, (%rsi)
@@ -1440,7 +1440,7 @@ define void @load_i8_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26]
; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
; AVX512-FCP-NEXT: # ymm4 = mem[0,1,0,1]
; AVX512-FCP-NEXT: vpternlogq $202, %ymm1, %ymm0, %ymm4
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} ymm4 = ymm1 ^ (ymm4 & (ymm0 ^ ymm1))
; AVX512-FCP-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm3[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm3[16,17,18,19,20,21,22,23,24,25,26]
; AVX512-FCP-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
; AVX512-FCP-NEXT: vmovdqa %ymm0, (%rsi)
@@ -1468,7 +1468,7 @@ define void @load_i8_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26]
; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
; AVX512DQ-NEXT: # ymm4 = mem[0,1,0,1]
; AVX512DQ-NEXT: vpternlogq $202, %ymm1, %ymm0, %ymm4
; AVX512DQ-NEXT: vpternlogq {{.*#+}} ymm4 = ymm1 ^ (ymm4 & (ymm0 ^ ymm1))
; AVX512DQ-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm3[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm3[16,17,18,19,20,21,22,23,24,25,26]
; AVX512DQ-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
; AVX512DQ-NEXT: vmovdqa %ymm0, (%rsi)
@@ -1496,7 +1496,7 @@ define void @load_i8_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26]
; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
; AVX512DQ-FCP-NEXT: # ymm4 = mem[0,1,0,1]
; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm1, %ymm0, %ymm4
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} ymm4 = ymm1 ^ (ymm4 & (ymm0 ^ ymm1))
; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm3[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm3[16,17,18,19,20,21,22,23,24,25,26]
; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, (%rsi)
@@ -2436,7 +2436,7 @@ define void @load_i8_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-NEXT: vpalignr {{.*#+}} ymm4 = ymm6[11,12,13,14,15],ymm4[0,1,2,3,4,5,6,7,8,9,10],ymm6[27,28,29,30,31],ymm4[16,17,18,19,20,21,22,23,24,25,26]
; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm5 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
; AVX512-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-NEXT: vpternlogq $202, %zmm2, %zmm8, %zmm5
; AVX512-NEXT: vpternlogq {{.*#+}} zmm5 = zmm2 ^ (zmm5 & (zmm8 ^ zmm2))
; AVX512-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23,24,25,26]
; AVX512-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
; AVX512-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23,24,25,26]
@@ -2483,7 +2483,7 @@ define void @load_i8_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512-FCP-NEXT: vpalignr {{.*#+}} ymm4 = ymm6[11,12,13,14,15],ymm4[0,1,2,3,4,5,6,7,8,9,10],ymm6[27,28,29,30,31],ymm4[16,17,18,19,20,21,22,23,24,25,26]
; AVX512-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm5 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
; AVX512-FCP-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-FCP-NEXT: vpternlogq $202, %zmm2, %zmm8, %zmm5
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm5 = zmm2 ^ (zmm5 & (zmm8 ^ zmm2))
; AVX512-FCP-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23,24,25,26]
; AVX512-FCP-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
; AVX512-FCP-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23,24,25,26]
@@ -2530,7 +2530,7 @@ define void @load_i8_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-NEXT: vpalignr {{.*#+}} ymm4 = ymm6[11,12,13,14,15],ymm4[0,1,2,3,4,5,6,7,8,9,10],ymm6[27,28,29,30,31],ymm4[16,17,18,19,20,21,22,23,24,25,26]
; AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} zmm5 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
; AVX512DQ-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-NEXT: vpternlogq $202, %zmm2, %zmm8, %zmm5
; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm5 = zmm2 ^ (zmm5 & (zmm8 ^ zmm2))
; AVX512DQ-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23,24,25,26]
; AVX512DQ-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
; AVX512DQ-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23,24,25,26]
@@ -2577,7 +2577,7 @@ define void @load_i8_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} ymm4 = ymm6[11,12,13,14,15],ymm4[0,1,2,3,4,5,6,7,8,9,10],ymm6[27,28,29,30,31],ymm4[16,17,18,19,20,21,22,23,24,25,26]
; AVX512DQ-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm5 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
; AVX512DQ-FCP-NEXT: # zmm5 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-FCP-NEXT: vpternlogq $202, %zmm2, %zmm8, %zmm5
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm5 = zmm2 ^ (zmm5 & (zmm8 ^ zmm2))
; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23,24,25,26]
; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
; AVX512DQ-FCP-NEXT: vpalignr {{.*#+}} ymm2 = ymm3[11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23,24,25,26]
568 changes: 284 additions & 284 deletions llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll

Large diffs are not rendered by default.

1,024 changes: 512 additions & 512 deletions llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll

Large diffs are not rendered by default.

96 changes: 48 additions & 48 deletions llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll

Large diffs are not rendered by default.

184 changes: 92 additions & 92 deletions llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll

Large diffs are not rendered by default.

216 changes: 108 additions & 108 deletions llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll

Large diffs are not rendered by default.

320 changes: 160 additions & 160 deletions llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll

Large diffs are not rendered by default.

16 changes: 8 additions & 8 deletions llvm/test/CodeGen/X86/vector-lzcnt-512.ll
@@ -34,7 +34,7 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; AVX512BW-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2
; AVX512BW-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
; AVX512BW-NEXT: vpternlogq {{.*#+}} zmm0 = ~zmm0
; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0
@@ -59,7 +59,7 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpbroadcastb {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512DQ-NEXT: vpandn %ymm1, %ymm0, %ymm2
; AVX512DQ-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm0 = ~zmm0
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512DQ-NEXT: vpand %ymm1, %ymm3, %ymm4
; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -113,7 +113,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; AVX512BW-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2
; AVX512BW-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
; AVX512BW-NEXT: vpternlogq {{.*#+}} zmm0 = ~zmm0
; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0
@@ -138,7 +138,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpbroadcastb {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512DQ-NEXT: vpandn %ymm1, %ymm0, %ymm2
; AVX512DQ-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm0 = ~zmm0
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm0, %ymm3
; AVX512DQ-NEXT: vpand %ymm1, %ymm3, %ymm4
; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -190,7 +190,7 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; AVX512BW-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2
; AVX512BW-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
; AVX512BW-NEXT: vpternlogq {{.*#+}} zmm0 = ~zmm0
; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0
@@ -217,7 +217,7 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
; AVX512DQ-NEXT: vpord %zmm1, %zmm0, %zmm1
; AVX512DQ-NEXT: vpbroadcastb {{.*#+}} ymm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512DQ-NEXT: vpandn %ymm0, %ymm1, %ymm2
; AVX512DQ-NEXT: vpternlogq $15, %zmm1, %zmm1, %zmm1
; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm1 = ~zmm1
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm1, %ymm3
; AVX512DQ-NEXT: vpand %ymm0, %ymm3, %ymm4
; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -277,7 +277,7 @@ define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
; AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
; AVX512BW-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-NEXT: vpshufb %zmm2, %zmm3, %zmm2
; AVX512BW-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
; AVX512BW-NEXT: vpternlogq {{.*#+}} zmm0 = ~zmm0
; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpshufb %zmm0, %zmm3, %zmm0
@@ -304,7 +304,7 @@ define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
; AVX512DQ-NEXT: vpord %zmm1, %zmm0, %zmm1
; AVX512DQ-NEXT: vpbroadcastb {{.*#+}} ymm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512DQ-NEXT: vpandn %ymm0, %ymm1, %ymm2
; AVX512DQ-NEXT: vpternlogq $15, %zmm1, %zmm1, %zmm1
; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm1 = ~zmm1
; AVX512DQ-NEXT: vextracti64x4 $1, %zmm1, %ymm3
; AVX512DQ-NEXT: vpand %ymm0, %ymm3, %ymm4
; AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
64 changes: 32 additions & 32 deletions llvm/test/CodeGen/X86/vector-tzcnt-512.ll
@@ -8,7 +8,7 @@
define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
; AVX512CD-LABEL: testv8i64:
; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; AVX512CD-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512CD-NEXT: vpaddq %zmm1, %zmm0, %zmm1
; AVX512CD-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
@@ -18,7 +18,7 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
;
; AVX512CDBW-LABEL: testv8i64:
; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512CDBW-NEXT: vpaddq %zmm1, %zmm0, %zmm1
; AVX512CDBW-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512CDBW-NEXT: vplzcntq %zmm0, %zmm0
@@ -28,7 +28,7 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
;
; AVX512BW-LABEL: testv8i64:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512BW-NEXT: vpaddq %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpbroadcastb {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -46,15 +46,15 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
;
; AVX512VPOPCNTDQ-LABEL: testv8i64:
; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512VPOPCNTDQ-NEXT: vpaddq %zmm1, %zmm0, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG-LABEL: testv8i64:
; BITALG: # %bb.0:
; BITALG-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; BITALG-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; BITALG-NEXT: vpaddq %zmm1, %zmm0, %zmm1
; BITALG-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; BITALG-NEXT: vpopcntb %zmm0, %zmm0
@@ -68,7 +68,7 @@ define <8 x i64> @testv8i64(<8 x i64> %in) nounwind {
define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
; AVX512CD-LABEL: testv8i64u:
; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; AVX512CD-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512CD-NEXT: vpaddq %zmm1, %zmm0, %zmm1
; AVX512CD-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
@@ -78,7 +78,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
;
; AVX512CDBW-LABEL: testv8i64u:
; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512CDBW-NEXT: vpaddq %zmm1, %zmm0, %zmm1
; AVX512CDBW-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512CDBW-NEXT: vplzcntq %zmm0, %zmm0
@@ -88,7 +88,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
;
; AVX512BW-LABEL: testv8i64u:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512BW-NEXT: vpaddq %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpbroadcastb {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -106,15 +106,15 @@ define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
;
; AVX512VPOPCNTDQ-LABEL: testv8i64u:
; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512VPOPCNTDQ-NEXT: vpaddq %zmm1, %zmm0, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG-LABEL: testv8i64u:
; BITALG: # %bb.0:
; BITALG-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; BITALG-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; BITALG-NEXT: vpaddq %zmm1, %zmm0, %zmm1
; BITALG-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; BITALG-NEXT: vpopcntb %zmm0, %zmm0
@@ -128,7 +128,7 @@ define <8 x i64> @testv8i64u(<8 x i64> %in) nounwind {
define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
; AVX512CD-LABEL: testv16i32:
; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; AVX512CD-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512CD-NEXT: vpaddd %zmm1, %zmm0, %zmm1
; AVX512CD-NEXT: vpandnd %zmm1, %zmm0, %zmm0
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
@@ -138,7 +138,7 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
;
; AVX512CDBW-LABEL: testv16i32:
; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512CDBW-NEXT: vpaddd %zmm1, %zmm0, %zmm1
; AVX512CDBW-NEXT: vpandnd %zmm1, %zmm0, %zmm0
; AVX512CDBW-NEXT: vplzcntd %zmm0, %zmm0
@@ -148,7 +148,7 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
;
; AVX512BW-LABEL: testv16i32:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512BW-NEXT: vpaddd %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpandnd %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpbroadcastb {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -170,15 +170,15 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
;
; AVX512VPOPCNTDQ-LABEL: testv16i32:
; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512VPOPCNTDQ-NEXT: vpaddd %zmm1, %zmm0, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpandnd %zmm1, %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG-LABEL: testv16i32:
; BITALG: # %bb.0:
; BITALG-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; BITALG-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; BITALG-NEXT: vpaddd %zmm1, %zmm0, %zmm1
; BITALG-NEXT: vpandnd %zmm1, %zmm0, %zmm0
; BITALG-NEXT: vpopcntb %zmm0, %zmm0
@@ -196,7 +196,7 @@ define <16 x i32> @testv16i32(<16 x i32> %in) nounwind {
define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
; AVX512CD-LABEL: testv16i32u:
; AVX512CD: # %bb.0:
; AVX512CD-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; AVX512CD-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512CD-NEXT: vpaddd %zmm1, %zmm0, %zmm1
; AVX512CD-NEXT: vpandnd %zmm1, %zmm0, %zmm0
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
@@ -206,7 +206,7 @@ define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
;
; AVX512CDBW-LABEL: testv16i32u:
; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512CDBW-NEXT: vpaddd %zmm1, %zmm0, %zmm1
; AVX512CDBW-NEXT: vpandnd %zmm1, %zmm0, %zmm0
; AVX512CDBW-NEXT: vplzcntd %zmm0, %zmm0
@@ -216,7 +216,7 @@ define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
;
; AVX512BW-LABEL: testv16i32u:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512BW-NEXT: vpaddd %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpandnd %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpbroadcastb {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -238,15 +238,15 @@ define <16 x i32> @testv16i32u(<16 x i32> %in) nounwind {
;
; AVX512VPOPCNTDQ-LABEL: testv16i32u:
; AVX512VPOPCNTDQ: # %bb.0:
; AVX512VPOPCNTDQ-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512VPOPCNTDQ-NEXT: vpaddd %zmm1, %zmm0, %zmm1
; AVX512VPOPCNTDQ-NEXT: vpandnd %zmm1, %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG-LABEL: testv16i32u:
; BITALG: # %bb.0:
; BITALG-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; BITALG-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; BITALG-NEXT: vpaddd %zmm1, %zmm0, %zmm1
; BITALG-NEXT: vpandnd %zmm1, %zmm0, %zmm0
; BITALG-NEXT: vpopcntb %zmm0, %zmm0
@@ -296,7 +296,7 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
;
; AVX512CDBW-LABEL: testv32i16:
; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512CDBW-NEXT: vpaddw %zmm1, %zmm0, %zmm1
; AVX512CDBW-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512CDBW-NEXT: vpbroadcastb {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -315,7 +315,7 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
;
; AVX512BW-LABEL: testv32i16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512BW-NEXT: vpaddw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpbroadcastb {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -351,7 +351,7 @@ define <32 x i16> @testv32i16(<32 x i16> %in) nounwind {
;
; BITALG-LABEL: testv32i16:
; BITALG: # %bb.0:
; BITALG-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; BITALG-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; BITALG-NEXT: vpaddw %zmm1, %zmm0, %zmm1
; BITALG-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; BITALG-NEXT: vpopcntw %zmm0, %zmm0
@@ -395,7 +395,7 @@ define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
;
; AVX512CDBW-LABEL: testv32i16u:
; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512CDBW-NEXT: vpaddw %zmm1, %zmm0, %zmm1
; AVX512CDBW-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512CDBW-NEXT: vpbroadcastb {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -414,7 +414,7 @@ define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
;
; AVX512BW-LABEL: testv32i16u:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512BW-NEXT: vpaddw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpbroadcastb {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -450,7 +450,7 @@ define <32 x i16> @testv32i16u(<32 x i16> %in) nounwind {
;
; BITALG-LABEL: testv32i16u:
; BITALG: # %bb.0:
; BITALG-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; BITALG-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; BITALG-NEXT: vpaddw %zmm1, %zmm0, %zmm1
; BITALG-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; BITALG-NEXT: vpopcntw %zmm0, %zmm0
@@ -488,7 +488,7 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
;
; AVX512CDBW-LABEL: testv64i8:
; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512CDBW-NEXT: vpaddb %zmm1, %zmm0, %zmm1
; AVX512CDBW-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512CDBW-NEXT: vpbroadcastb {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -504,7 +504,7 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
;
; AVX512BW-LABEL: testv64i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpbroadcastb {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -546,7 +546,7 @@ define <64 x i8> @testv64i8(<64 x i8> %in) nounwind {
;
; BITALG-LABEL: testv64i8:
; BITALG: # %bb.0:
; BITALG-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; BITALG-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; BITALG-NEXT: vpaddb %zmm1, %zmm0, %zmm1
; BITALG-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; BITALG-NEXT: vpopcntb %zmm0, %zmm0
@@ -584,7 +584,7 @@ define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
;
; AVX512CDBW-LABEL: testv64i8u:
; AVX512CDBW: # %bb.0:
; AVX512CDBW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; AVX512CDBW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512CDBW-NEXT: vpaddb %zmm1, %zmm0, %zmm1
; AVX512CDBW-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512CDBW-NEXT: vpbroadcastb {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -600,7 +600,7 @@ define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
;
; AVX512BW-LABEL: testv64i8u:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpbroadcastb {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
@@ -642,7 +642,7 @@ define <64 x i8> @testv64i8u(<64 x i8> %in) nounwind {
;
; BITALG-LABEL: testv64i8u:
; BITALG: # %bb.0:
; BITALG-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; BITALG-NEXT: vpternlogd {{.*#+}} zmm1 = -1
; BITALG-NEXT: vpaddb %zmm1, %zmm0, %zmm1
; BITALG-NEXT: vpandnq %zmm1, %zmm0, %zmm0
; BITALG-NEXT: vpopcntb %zmm0, %zmm0