diff --git a/llvm/test/CodeGen/X86/combine-ptest.ll b/llvm/test/CodeGen/X86/combine-ptest.ll
index d23277f627680..2928023c7fc2a 100644
--- a/llvm/test/CodeGen/X86/combine-ptest.ll
+++ b/llvm/test/CodeGen/X86/combine-ptest.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX2
 ;
 ; testz(~X,Y) -> testc(X,Y)
 ;
@@ -297,6 +298,102 @@ start:
   ret i1 %6
 }
 
+;
+; TODO: testz(ashr(X,bw-1),-1) -> movmsk(X)
+;
+
+define i32 @ptestz_v2i64_signbits(<2 x i64> %c, i32 %a, i32 %b) {
+; CHECK-LABEL: ptestz_v2i64_signbits:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; CHECK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm0
+; CHECK-NEXT:    vptest %xmm0, %xmm0
+; CHECK-NEXT:    cmovnel %esi, %eax
+; CHECK-NEXT:    retq
+  %t1 = ashr <2 x i64> %c, <i64 63, i64 63>
+  %t2 = call i32 @llvm.x86.sse41.ptestz(<2 x i64> %t1, <2 x i64> <i64 -1, i64 -1>)
+  %t3 = icmp ne i32 %t2, 0
+  %t4 = select i1 %t3, i32 %a, i32 %b
+  ret i32 %t4
+}
+
+define i32 @ptestz_v8i32_signbits(<8 x i32> %c, i32 %a, i32 %b) {
+; AVX1-LABEL: ptestz_v8i32_signbits:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    movl %edi, %eax
+; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpsrad $31, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT:    vptest %ymm0, %ymm0
+; AVX1-NEXT:    cmovnel %esi, %eax
+; AVX1-NEXT:    vzeroupper
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: ptestz_v8i32_signbits:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    movl %edi, %eax
+; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
+; AVX2-NEXT:    vptest %ymm0, %ymm0
+; AVX2-NEXT:    cmovnel %esi, %eax
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+  %t1 = ashr <8 x i32> %c, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
+  %t2 = bitcast <8 x i32> %t1 to <4 x i64>
+  %t3 = call i32 @llvm.x86.avx.ptestz.256(<4 x i64> %t2, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>)
+  %t4 = icmp ne i32 %t3, 0
+  %t5 = select i1 %t4, i32 %a, i32 %b
+  ret i32 %t5
+}
+
+define i32 @ptestz_v8i16_signbits(<8 x i16> %c, i32 %a, i32 %b) {
+; CHECK-LABEL: ptestz_v8i16_signbits:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    vpsraw $15, %xmm0, %xmm0
+; CHECK-NEXT:    vptest %xmm0, %xmm0
+; CHECK-NEXT:    cmovnel %esi, %eax
+; CHECK-NEXT:    retq
+  %t1 = ashr <8 x i16> %c, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
+  %t2 = bitcast <8 x i16> %t1 to <2 x i64>
+  %t3 = call i32 @llvm.x86.sse41.ptestz(<2 x i64> %t2, <2 x i64> <i64 -1, i64 -1>)
+  %t4 = icmp ne i32 %t3, 0
+  %t5 = select i1 %t4, i32 %a, i32 %b
+  ret i32 %t5
+}
+
+define i32 @ptestz_v32i8_signbits(<32 x i8> %c, i32 %a, i32 %b) {
+; AVX1-LABEL: ptestz_v32i8_signbits:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    movl %edi, %eax
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpcmpgtb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT:    vpcmpgtb %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vptest %ymm0, %ymm0
+; AVX1-NEXT:    cmovnel %esi, %eax
+; AVX1-NEXT:    vzeroupper
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: ptestz_v32i8_signbits:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    movl %edi, %eax
+; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vpcmpgtb %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    vptest %ymm0, %ymm0
+; AVX2-NEXT:    cmovnel %esi, %eax
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+  %t1 = ashr <32 x i8> %c, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
+  %t2 = bitcast <32 x i8> %t1 to <4 x i64>
+  %t3 = call i32 @llvm.x86.avx.ptestz.256(<4 x i64> %t2, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>)
+  %t4 = icmp ne i32 %t3, 0
+  %t5 = select i1 %t4, i32 %a, i32 %b
+  ret i32 %t5
+}
+
 declare i32 @llvm.x86.sse41.ptestz(<2 x i64>, <2 x i64>) nounwind readnone
 declare i32 @llvm.x86.sse41.ptestc(<2 x i64>, <2 x i64>) nounwind readnone
 declare i32 @llvm.x86.sse41.ptestnzc(<2 x i64>, <2 x i64>) nounwind readnone