diff --git a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
index 423f2c49e70e5..9e1686e19ce1b 100644
--- a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
+++ b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
@@ -654,3 +654,113 @@ define <64 x i8> @ext_i64_64i8(i64 %a0) {
   %2 = sext <64 x i1> %1 to <64 x i8>
   ret <64 x i8> %2
 }
+
+define <8 x i32> @PR157382(ptr %p0, ptr %p1, ptr %p2) {
+; SSE2-SSSE3-LABEL: PR157382:
+; SSE2-SSSE3:       # %bb.0:
+; SSE2-SSSE3-NEXT:    movdqu (%rdi), %xmm3
+; SSE2-SSSE3-NEXT:    movdqu 16(%rdi), %xmm2
+; SSE2-SSSE3-NEXT:    movdqu (%rsi), %xmm0
+; SSE2-SSSE3-NEXT:    movdqu 16(%rsi), %xmm4
+; SSE2-SSSE3-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
+; SSE2-SSSE3-NEXT:    pxor %xmm5, %xmm5
+; SSE2-SSSE3-NEXT:    pxor %xmm6, %xmm6
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm3, %xmm6
+; SSE2-SSSE3-NEXT:    pcmpeqd %xmm7, %xmm7
+; SSE2-SSSE3-NEXT:    pxor %xmm7, %xmm6
+; SSE2-SSSE3-NEXT:    pxor %xmm8, %xmm8
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm2, %xmm8
+; SSE2-SSSE3-NEXT:    pxor %xmm7, %xmm8
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm5, %xmm0
+; SSE2-SSSE3-NEXT:    por %xmm6, %xmm0
+; SSE2-SSSE3-NEXT:    pcmpgtd %xmm5, %xmm4
+; SSE2-SSSE3-NEXT:    por %xmm8, %xmm4
+; SSE2-SSSE3-NEXT:    packssdw %xmm4, %xmm0
+; SSE2-SSSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-SSSE3-NEXT:    pcmpeqb %xmm5, %xmm1
+; SSE2-SSSE3-NEXT:    pxor %xmm7, %xmm1
+; SSE2-SSSE3-NEXT:    por %xmm0, %xmm1
+; SSE2-SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-SSSE3-NEXT:    psrad $16, %xmm0
+; SSE2-SSSE3-NEXT:    pand %xmm3, %xmm0
+; SSE2-SSSE3-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
+; SSE2-SSSE3-NEXT:    pslld $31, %xmm1
+; SSE2-SSSE3-NEXT:    psrad $31, %xmm1
+; SSE2-SSSE3-NEXT:    pand %xmm2, %xmm1
+; SSE2-SSSE3-NEXT:    retq
+;
+; AVX1-LABEL: PR157382:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqu (%rdi), %ymm0
+; AVX1-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpcmpgtd %xmm0, %xmm2, %xmm3
+; AVX1-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpcmpgtd %xmm5, %xmm2, %xmm5
+; AVX1-NEXT:    vpxor %xmm4, %xmm5, %xmm5
+; AVX1-NEXT:    vmovdqu (%rsi), %xmm6
+; AVX1-NEXT:    vmovdqu 16(%rsi), %xmm7
+; AVX1-NEXT:    vpcmpgtd %xmm2, %xmm6, %xmm6
+; AVX1-NEXT:    vpor %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpgtd %xmm2, %xmm7, %xmm6
+; AVX1-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vpackssdw %xmm5, %xmm3, %xmm3
+; AVX1-NEXT:    vpcmpeqb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpmovsxbw %xmm1, %xmm1
+; AVX1-NEXT:    vpor %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpmovsxwd %xmm1, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX1-NEXT:    vpmovsxwd %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT:    vandps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: PR157382:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqu (%rdi), %ymm0
+; AVX2-NEXT:    vmovdqu (%rsi), %ymm1
+; AVX2-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpgtd %ymm0, %ymm3, %ymm4
+; AVX2-NEXT:    vpcmpeqd %ymm5, %ymm5, %ymm5
+; AVX2-NEXT:    vpxor %ymm5, %ymm4, %ymm4
+; AVX2-NEXT:    vpcmpgtd %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    vpor %ymm1, %ymm4, %ymm1
+; AVX2-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpcmpeqb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX2-NEXT:    vpxor %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vpmovsxbw %xmm2, %xmm2
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT:    vpackssdw %xmm3, %xmm1, %xmm1
+; AVX2-NEXT:    vpor %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpmovsxwd %xmm1, %ymm1
+; AVX2-NEXT:    vpand %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: PR157382:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovdqu (%rdi), %ymm0
+; AVX512-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vpcmpnltd %ymm2, %ymm0, %k0
+; AVX512-NEXT:    vpcmpltd (%rsi), %ymm2, %k1
+; AVX512-NEXT:    vptestmb %xmm1, %xmm1, %k2
+; AVX512-NEXT:    korw %k1, %k0, %k0
+; AVX512-NEXT:    korw %k2, %k0, %k1
+; AVX512-NEXT:    vmovdqa32 %ymm0, %ymm0 {%k1} {z}
+; AVX512-NEXT:    retq
+  %ld0 = load <8 x i32>, ptr %p0, align 1
+  %ld1 = load <8 x i32>, ptr %p1, align 1
+  %ld2 = load <8 x i8>, ptr %p2, align 1
+  %cmp0 = icmp sge <8 x i32> %ld0, zeroinitializer
+  %cmp1 = icmp sgt <8 x i32> %ld1, zeroinitializer
+  %cmp2 = icmp ne <8 x i8> %ld2, zeroinitializer
+  %cmp01 = or <8 x i1> %cmp0, %cmp1
+  %cmp012 = or <8 x i1> %cmp01, %cmp2
+  %res = select <8 x i1> %cmp012, <8 x i32> %ld0, <8 x i32> zeroinitializer
+  ret <8 x i32> %res
+}