diff --git a/llvm/test/Transforms/InstCombine/icmp-uadd-sat.ll b/llvm/test/Transforms/InstCombine/icmp-uadd-sat.ll
new file mode 100644
index 0000000000000..e5ce97a6e34c6
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/icmp-uadd-sat.ll
@@ -0,0 +1,274 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+
+; Tests for InstCombineCompares.cpp::foldICmpUSubSatOrUAddSatWithConstant
+; - uadd_sat case
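+;
+; For reference, the fold under test peels the saturating add off the
+; compare when the result provably did not saturate. A sketch of the idea
+; (not the exact implementation):
+;   icmp eq (uadd.sat X, C1), C2  ->  icmp eq X, C2 - C1
+;     when C1 u<= C2 and C2 != UMAX
+; e.g. for @icmp_eq_basic below, uadd.sat(%arg, 2) == 5 iff %arg == 3.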
+
+; ==============================================================================
+; Basic tests with one user
+; ==============================================================================
+define i1 @icmp_eq_basic(i8 %arg) {
+; CHECK-LABEL: define i1 @icmp_eq_basic
+; CHECK-SAME: (i8 [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[ARG]], i8 2)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[ADD]], 5
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %add = call i8 @llvm.uadd.sat.i8(i8 %arg, i8 2)
+  %cmp = icmp eq i8 %add, 5
+  ret i1 %cmp
+}
+
+define i1 @icmp_ne_basic(i16 %arg) {
+; CHECK-LABEL: define i1 @icmp_ne_basic
+; CHECK-SAME: (i16 [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[ARG]], i16 8)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i16 [[ADD]], 9
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %add = call i16 @llvm.uadd.sat.i16(i16 %arg, i16 8)
+  %cmp = icmp ne i16 %add, 9
+  ret i1 %cmp
+}
+
+define i1 @icmp_ule_basic(i32 %arg) {
+; CHECK-LABEL: define i1 @icmp_ule_basic
+; CHECK-SAME: (i32 [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[ARG]], i32 2)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ADD]], 4
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %add = call i32 @llvm.uadd.sat.i32(i32 %arg, i32 2)
+  %cmp = icmp ule i32 %add, 3
+  ret i1 %cmp
+}
+
+define i1 @icmp_ult_basic(i64 %arg) {
+; CHECK-LABEL: define i1 @icmp_ult_basic
+; CHECK-SAME: (i64 [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[ARG]], i64 5)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i64 [[ADD]], 20
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %add = call i64 @llvm.uadd.sat.i64(i64 %arg, i64 5)
+  %cmp = icmp ult i64 %add, 20
+  ret i1 %cmp
+}
+
+define i1 @icmp_uge_basic(i8 %arg) {
+; CHECK-LABEL: define i1 @icmp_uge_basic
+; CHECK-SAME: (i8 [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[ARG]], i8 4)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i8 [[ADD]], 7
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %add = call i8 @llvm.uadd.sat.i8(i8 %arg, i8 4)
+  %cmp = icmp uge i8 %add, 8
+  ret i1 %cmp
+}
+
+define i1 @icmp_ugt_basic(i16 %arg) {
+; CHECK-LABEL: define i1 @icmp_ugt_basic
+; CHECK-SAME: (i16 [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[ARG]], i16 1)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i16 [[ADD]], 3
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %add = call i16 @llvm.uadd.sat.i16(i16 %arg, i16 1)
+  %cmp = icmp ugt i16 %add, 3
+  ret i1 %cmp
+}
+
+define i1 @icmp_sle_basic(i32 %arg) {
+; CHECK-LABEL: define i1 @icmp_sle_basic
+; CHECK-SAME: (i32 [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[ARG]], i32 10)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[ADD]], 9
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %add = call i32 @llvm.uadd.sat.i32(i32 %arg, i32 10)
+  %cmp = icmp sle i32 %add, 8
+  ret i1 %cmp
+}
+
+define i1 @icmp_slt_basic(i64 %arg) {
+; CHECK-LABEL: define i1 @icmp_slt_basic
+; CHECK-SAME: (i64 [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call i64 @llvm.uadd.sat.i64(i64 [[ARG]], i64 24)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i64 [[ADD]], 5
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %add = call i64 @llvm.uadd.sat.i64(i64 %arg, i64 24)
+  %cmp = icmp slt i64 %add, 5
+  ret i1 %cmp
+}
+
+define i1 @icmp_sge_basic(i8 %arg) {
+; CHECK-LABEL: define i1 @icmp_sge_basic
+; CHECK-SAME: (i8 [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[ARG]], i8 1)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i8 [[ADD]], 3
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %add = call i8 @llvm.uadd.sat.i8(i8 %arg, i8 1)
+  %cmp = icmp sge i8 %add, 4
+  ret i1 %cmp
+}
+
+define i1 @icmp_sgt_basic(i16 %arg) {
+; CHECK-LABEL: define i1 @icmp_sgt_basic
+; CHECK-SAME: (i16 [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[ARG]], i16 2)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i16 [[ADD]], 5
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %add = call i16 @llvm.uadd.sat.i16(i16 %arg, i16 2)
+  %cmp = icmp sgt i16 %add, 5
+  ret i1 %cmp
+}
+
+; ==============================================================================
+; Tests with more than one user
+; ==============================================================================
+define i1 @icmp_eq_multiuse(i8 %arg) {
+; CHECK-LABEL: define i1 @icmp_eq_multiuse
+; CHECK-SAME: (i8 [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[ARG]], i8 2)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[ADD]], 5
+; CHECK-NEXT:    call void @use.i8(i8 [[ADD]])
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %add = call i8 @llvm.uadd.sat.i8(i8 %arg, i8 2)
+  %cmp = icmp eq i8 %add, 5
+  call void @use.i8(i8 %add)
+  ret i1 %cmp
+}
+
+; ==============================================================================
+; Tests with vector types
+; ==============================================================================
+define <2 x i1> @icmp_eq_vector_equal(<2 x i8> %arg) {
+; CHECK-LABEL: define <2 x i1> @icmp_eq_vector_equal
+; CHECK-SAME: (<2 x i8> [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[ARG]], <2 x i8> <i8 2, i8 2>)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i8> [[ADD]], <i8 5, i8 5>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %add = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %arg, <2 x i8> <i8 2, i8 2>)
+  %cmp = icmp eq <2 x i8> %add, <i8 5, i8 5>
+  ret <2 x i1> %cmp
+}
+
+define <2 x i1> @icmp_eq_vector_unequal(<2 x i8> %arg) {
+; CHECK-LABEL: define <2 x i1> @icmp_eq_vector_unequal
+; CHECK-SAME: (<2 x i8> [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[ARG]], <2 x i8> <i8 2, i8 3>)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i8> [[ADD]], <i8 5, i8 6>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %add = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %arg, <2 x i8> <i8 2, i8 3>)
+  %cmp = icmp eq <2 x i8> %add, <i8 5, i8 6>
+  ret <2 x i1> %cmp
+}
+
+define <2 x i1> @icmp_ne_vector_equal(<2 x i16> %arg) {
+; CHECK-LABEL: define <2 x i1> @icmp_ne_vector_equal
+; CHECK-SAME: (<2 x i16> [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[ARG]], <2 x i16> <i16 8, i16 8>)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i16> [[ADD]], <i16 9, i16 9>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %add = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %arg, <2 x i16> <i16 8, i16 8>)
+  %cmp = icmp ne <2 x i16> %add, <i16 9, i16 9>
+  ret <2 x i1> %cmp
+}
+
+define <2 x i1> @icmp_ne_vector_unequal(<2 x i16> %arg) {
+; CHECK-LABEL: define <2 x i1> @icmp_ne_vector_unequal
+; CHECK-SAME: (<2 x i16> [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[ARG]], <2 x i16> <i16 8, i16 9>)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i16> [[ADD]], <i16 9, i16 10>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %add = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %arg, <2 x i16> <i16 8, i16 9>)
+  %cmp = icmp ne <2 x i16> %add, <i16 9, i16 10>
+  ret <2 x i1> %cmp
+}
+
+define <2 x i1> @icmp_ule_vector_equal(<2 x i32> %arg) {
+; CHECK-LABEL: define <2 x i1> @icmp_ule_vector_equal
+; CHECK-SAME: (<2 x i32> [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> [[ARG]], <2 x i32> <i32 2, i32 2>)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i32> [[ADD]], <i32 4, i32 4>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %add = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %arg, <2 x i32> <i32 2, i32 2>)
+  %cmp = icmp ult <2 x i32> %add, <i32 4, i32 4>
+  ret <2 x i1> %cmp
+}
+
+define <2 x i1> @icmp_ule_vector_unequal(<2 x i32> %arg) {
+; CHECK-LABEL: define <2 x i1> @icmp_ule_vector_unequal
+; CHECK-SAME: (<2 x i32> [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> [[ARG]], <2 x i32> <i32 2, i32 3>)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i32> [[ADD]], <i32 4, i32 5>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %add = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %arg, <2 x i32> <i32 2, i32 3>)
+  %cmp = icmp ult <2 x i32> %add, <i32 4, i32 5>
+  ret <2 x i1> %cmp
+}
+
+define <2 x i1> @icmp_sgt_vector_equal(<2 x i64> %arg) {
+; CHECK-LABEL: define <2 x i1> @icmp_sgt_vector_equal
+; CHECK-SAME: (<2 x i64> [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> [[ARG]], <2 x i64> <i64 2, i64 2>)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt <2 x i64> [[ADD]], <i64 5, i64 5>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %add = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %arg, <2 x i64> <i64 2, i64 2>)
+  %cmp = icmp sgt <2 x i64> %add, <i64 5, i64 5>
+  ret <2 x i1> %cmp
+}
+
+define <2 x i1> @icmp_sgt_vector_unequal(<2 x i64> %arg) {
+; CHECK-LABEL: define <2 x i1> @icmp_sgt_vector_unequal
+; CHECK-SAME: (<2 x i64> [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> [[ARG]], <2 x i64> <i64 2, i64 3>)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt <2 x i64> [[ADD]], <i64 5, i64 6>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %add = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %arg, <2 x i64> <i64 2, i64 3>)
+  %cmp = icmp sgt <2 x i64> %add, <i64 5, i64 6>
+  ret <2 x i1> %cmp
+}
+
+; ==============================================================================
+; Tests with vector types and multiple uses
+; ==============================================================================
+define <2 x i1> @icmp_eq_vector_multiuse_equal(<2 x i8> %arg) {
+; CHECK-LABEL: define <2 x i1> @icmp_eq_vector_multiuse_equal
+; CHECK-SAME: (<2 x i8> [[ARG:%.*]]) {
+; CHECK-NEXT:    [[ADD:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> [[ARG]], <2 x i8> <i8 2, i8 2>)
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i8> [[ADD]], <i8 5, i8 5>
+; CHECK-NEXT:    call void @use.v2i8(<2 x i8> [[ADD]])
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %add = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %arg, <2 x i8> <i8 2, i8 2>)
+  %cmp = icmp eq <2 x i8> %add, <i8 5, i8 5>
+  call void @use.v2i8(<2 x i8> %add)
+  ret <2 x i1> %cmp
+}
+
+declare i8 @llvm.uadd.sat.i8(i8, i8)
+declare i16 @llvm.uadd.sat.i16(i16, i16)
+declare i32 @llvm.uadd.sat.i32(i32, i32)
+declare i64 @llvm.uadd.sat.i64(i64, i64)
+
+declare <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64>, <2 x i64>)
+declare <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32>, <2 x i32>)
+declare <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16>, <2 x i16>)
+declare <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8>, <2 x i8>)
+
+declare void @use.i8(i8)
+declare void @use.v2i8(<2 x i8>)