diff --git a/llvm/test/Transforms/IndVarSimplify/X86/overflow-intrinsics.ll b/llvm/test/Transforms/IndVarSimplify/X86/overflow-intrinsics.ll
index e678146c80af8..4a59e419369af 100644
--- a/llvm/test/Transforms/IndVarSimplify/X86/overflow-intrinsics.ll
+++ b/llvm/test/Transforms/IndVarSimplify/X86/overflow-intrinsics.ll
@@ -109,6 +109,37 @@ for.body: ; preds = %entry, %cont
   %0 = tail call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %i.04, i32 1)
   %1 = extractvalue { i32, i1 } %0, 1
+; CHECK: for.body:
+; CHECK-NOT: @llvm.usub.with.overflow.i32
+; CHECK: br i1 false, label %trap, label %cont, !nosanitize !0
+  br i1 %1, label %trap, label %cont, !nosanitize !{}
+
+trap: ; preds = %for.body
+  tail call void @llvm.trap(), !nosanitize !{}
+  unreachable, !nosanitize !{}
+
+cont: ; preds = %for.body
+  %2 = extractvalue { i32, i1 } %0, 0
+  %cmp = icmp sgt i32 %2, 0
+  br i1 %cmp, label %for.body, label %for.cond.cleanup
+}
+
+define void @f_usub_overflow(ptr nocapture %a) {
+; CHECK-LABEL: @f_usub_overflow(
+entry:
+  br label %for.body
+
+for.cond.cleanup: ; preds = %cont
+  ret void
+
+for.body: ; preds = %entry, %cont
+  %i.04 = phi i32 [ 15, %entry ], [ %2, %cont ]
+  %idxprom = sext i32 %i.04 to i64
+  %arrayidx = getelementptr inbounds i8, ptr %a, i64 %idxprom
+  store i8 0, ptr %arrayidx, align 1
+  %0 = tail call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %i.04, i32 1)
+  %1 = extractvalue { i32, i1 } %0, 1
+
 ; It is theoretically possible to prove this, but SCEV cannot
 ; represent non-unsigned-wrapping subtraction operations.