diff --git a/llvm/test/Transforms/InstCombine/cmp-intrinsic.ll b/llvm/test/Transforms/InstCombine/cmp-intrinsic.ll
index 65a89976cf713..529f107fafffc 100644
--- a/llvm/test/Transforms/InstCombine/cmp-intrinsic.ll
+++ b/llvm/test/Transforms/InstCombine/cmp-intrinsic.ll
@@ -7,6 +7,8 @@ declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>)
 declare i32 @llvm.cttz.i32(i32, i1)
 declare i33 @llvm.cttz.i33(i33, i1)
 declare i32 @llvm.ctlz.i32(i32, i1)
+declare i8 @llvm.umax.i8(i8, i8)
+declare i8 @llvm.uadd.sat.i8(i8, i8)
 declare i33 @llvm.ctlz.i33(i33, i1)
 declare i8 @llvm.ctpop.i8(i8)
 declare i11 @llvm.ctpop.i11(i11)
@@ -16,6 +18,7 @@ declare <2 x i32> @llvm.ctpop.v2i32(<2 x i32>)
 declare i8 @llvm.bitreverse.i8(i8)
 declare <2 x i8> @llvm.bitreverse.v2i8(<2 x i8>)
 declare void @use6(i6)
+declare void @use8(i8)
 
 define i1 @bswap_eq_i16(i16 %x) {
 ; CHECK-LABEL: @bswap_eq_i16(
@@ -786,7 +789,6 @@ define i1 @bitreverse_ult_22_fail_not_equality_pred(i8 %x) {
   ret i1 %z
 }
 
-
 define <2 x i1> @bitreverse_vec_eq_2_2(<2 x i8> %x) {
 ; CHECK-LABEL: @bitreverse_vec_eq_2_2(
 ; CHECK-NEXT:    [[Z:%.*]] = icmp eq <2 x i8> [[X:%.*]], <i8 64, i8 64>
@@ -807,3 +809,75 @@ define <2 x i1> @bitreverse_vec_eq_1_2_todo_no_splat(<2 x i8> %x) {
   %z = icmp eq <2 x i8> %y, <i8 1, i8 2>
   ret <2 x i1> %z
 }
+
+define i1 @umax_eq_zero(i8 %x, i8 %y) {
+; CHECK-LABEL: @umax_eq_zero(
+; CHECK-NEXT:    [[M:%.*]] = call i8 @llvm.umax.i8(i8 [[X:%.*]], i8 [[Y:%.*]])
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[M]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %m = call i8 @llvm.umax.i8(i8 %x, i8 %y)
+  %r = icmp eq i8 %m, 0
+  ret i1 %r
+}
+
+define i1 @umax_eq_1_fail(i8 %x, i8 %y) {
+; CHECK-LABEL: @umax_eq_1_fail(
+; CHECK-NEXT:    [[M:%.*]] = call i8 @llvm.umax.i8(i8 [[X:%.*]], i8 [[Y:%.*]])
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[M]], 1
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %m = call i8 @llvm.umax.i8(i8 %x, i8 %y)
+  %r = icmp eq i8 %m, 1
+  ret i1 %r
+}
+
+define i1 @umax_sle_zero_fail(i8 %x, i8 %y) {
+; CHECK-LABEL: @umax_sle_zero_fail(
+; CHECK-NEXT:    [[M:%.*]] = call i8 @llvm.umax.i8(i8 [[X:%.*]], i8 [[Y:%.*]])
+; CHECK-NEXT:    [[R:%.*]] = icmp slt i8 [[M]], 1
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %m = call i8 @llvm.umax.i8(i8 %x, i8 %y)
+  %r = icmp sle i8 %m, 0
+  ret i1 %r
+}
+
+define i1 @umax_ne_zero(i8 %x, i8 %y) {
+; CHECK-LABEL: @umax_ne_zero(
+; CHECK-NEXT:    [[M:%.*]] = call i8 @llvm.umax.i8(i8 [[X:%.*]], i8 [[Y:%.*]])
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[M]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %m = call i8 @llvm.umax.i8(i8 %x, i8 %y)
+  %r = icmp ne i8 %m, 0
+  ret i1 %r
+}
+
+define i1 @umax_ne_zero_fail_multiuse(i8 %x, i8 %y) {
+; CHECK-LABEL: @umax_ne_zero_fail_multiuse(
+; CHECK-NEXT:    [[M:%.*]] = call i8 @llvm.umax.i8(i8 [[X:%.*]], i8 [[Y:%.*]])
+; CHECK-NEXT:    call void @use8(i8 [[M]])
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[M]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %m = call i8 @llvm.umax.i8(i8 %x, i8 %y)
+  call void @use8(i8 %m)
+  %r = icmp ne i8 %m, 0
+  ret i1 %r
+}
+
+
+define i1 @uadd_sat_ne_zero_fail_multiuse(i8 %x, i8 %y) {
+; CHECK-LABEL: @uadd_sat_ne_zero_fail_multiuse(
+; CHECK-NEXT:    [[M:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[X:%.*]], i8 [[Y:%.*]])
+; CHECK-NEXT:    call void @use8(i8 [[M]])
+; CHECK-NEXT:    [[TMP1:%.*]] = or i8 [[X]], [[Y]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[TMP1]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %m = call i8 @llvm.uadd.sat.i8(i8 %x, i8 %y)
+  call void @use8(i8 %m)
+  %r = icmp ne i8 %m, 0
+  ret i1 %r
+}
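
For readers of the patch: the new umax tests are built around the fact that the unsigned maximum of two values is zero only when both inputs are zero, so an eq/ne comparison of a umax against zero is equivalent to the same comparison of an or of the operands, the folded shape already visible in the uadd_sat CHECK lines above. A minimal Alive2-style sketch of that equivalence (the @src/@tgt function names are illustrative, not part of the patch):

; Illustrative only: umax(x, y) == 0 can hold only when x == 0 and
; y == 0, which is exactly (x | y) == 0. The same holds for ne.
declare i8 @llvm.umax.i8(i8, i8)

define i1 @src(i8 %x, i8 %y) {
  %m = call i8 @llvm.umax.i8(i8 %x, i8 %y)
  %r = icmp eq i8 %m, 0
  ret i1 %r
}

define i1 @tgt(i8 %x, i8 %y) {
  %o = or i8 %x, %y
  %r = icmp eq i8 %o, 0
  ret i1 %r
}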