diff --git a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
index 24fe08d6c3e4e..bf457194bfd8e 100644
--- a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
@@ -2133,8 +2133,15 @@ Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
   // negative. If Step is known to be positive or negative, only create
   // either 1. or 2.
   auto ComputeEndCheck = [&]() -> Value * {
-    // Checking <u 0 is always false.
-    if (!Signed && Start->isZero() && SE.isKnownPositive(Step))
+    // Checking <u 0 is always false, provided the exit count can be
+    // truncated to the AR type without changing its value and multiplying
+    // the truncated exit count by the step cannot overflow.
+    if (!Signed && Start->isZero() && SE.isKnownPositive(Step) &&
+        DstBits < SrcBits &&
+        ExitCount == SE.getZeroExtendExpr(SE.getTruncateExpr(ExitCount, ARTy),
+                                          ExitCount->getType()) &&
+        SE.willNotOverflow(Instruction::Mul, Signed, Step,
+                           SE.getTruncateExpr(ExitCount, ARTy)))
       return ConstantInt::getFalse(Loc->getContext());
 
     // Get the backedge taken count and truncate or extended to the AR type.
diff --git a/llvm/test/Transforms/LoopDistribute/scev-inserted-runtime-check.ll b/llvm/test/Transforms/LoopDistribute/scev-inserted-runtime-check.ll
index 98b89abfeafda..057a60918ecf3 100644
--- a/llvm/test/Transforms/LoopDistribute/scev-inserted-runtime-check.ll
+++ b/llvm/test/Transforms/LoopDistribute/scev-inserted-runtime-check.ll
@@ -10,8 +10,13 @@ define void @f(ptr noalias %a, ptr noalias %b, ptr noalias %c, ptr noalias %d, p
 ; CHECK-NEXT: br label [[FOR_BODY_LVER_CHECK:%.*]]
 ; CHECK: for.body.lver.check:
 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N:%.*]], -1
+; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP0]] to i32
+; CHECK-NEXT: [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP2]])
+; CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
+; CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
-; CHECK-NEXT: br i1 [[TMP1]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH_LDIST1:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = or i1 [[MUL_OVERFLOW]], [[TMP1]]
+; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH_LDIST1:%.*]]
 ; CHECK: for.body.ph.lver.orig:
 ; CHECK-NEXT: br label [[FOR_BODY_LVER_ORIG:%.*]]
 ; CHECK: for.body.lver.orig:
@@ -75,7 +80,7 @@ define void @f(ptr noalias %a, ptr noalias %b, ptr noalias %c, ptr noalias %d, p
 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT2:%.*]], label [[FOR_BODY]]
 ; CHECK: for.end.loopexit:
 ; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: for.end.loopexit1:
+; CHECK: for.end.loopexit2:
 ; CHECK-NEXT: br label [[FOR_END]]
 ; CHECK: for.end:
 ; CHECK-NEXT: ret void
@@ -135,8 +140,13 @@ define void @f_with_offset(ptr noalias %b, ptr noalias %c, ptr noalias %d, ptr n
 ; CHECK-NEXT: br label [[FOR_BODY_LVER_CHECK:%.*]]
 ; CHECK: for.body.lver.check:
 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N:%.*]], -1
+; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP0]] to i32
+; CHECK-NEXT: [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP2]])
+; CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
+; CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
-; CHECK-NEXT: br i1 [[TMP1]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH_LDIST1:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = or i1 [[MUL_OVERFLOW]], [[TMP1]]
+; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH_LDIST1:%.*]]
 ; CHECK: for.body.ph.lver.orig:
 ; CHECK-NEXT: br label [[FOR_BODY_LVER_ORIG:%.*]]
 ; CHECK: for.body.lver.orig:
@@ -200,7 +210,7 @@ define void @f_with_offset(ptr noalias %b, ptr noalias %c, ptr noalias %d, ptr n
 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT2:%.*]], label [[FOR_BODY]]
 ; CHECK: for.end.loopexit:
 ; CHECK-NEXT: br label [[FOR_END:%.*]]
-; CHECK: for.end.loopexit1:
+; CHECK: for.end.loopexit2:
 ; CHECK-NEXT: br label [[FOR_END]]
 ; CHECK: for.end:
 ; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll b/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll
index 147e949808b54..c1566c4dd32bf 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll
@@ -1045,8 +1045,8 @@ define i64 @live_in_known_1_via_scev() {
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i64> [ , [[VECTOR_PH]] ], [ [[VEC_PHI]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
-; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[INDEX_NEXT]], 8
-; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
+; CHECK-NEXT: [[TMP0:%.*]] = icmp eq i32 [[INDEX_NEXT]], 8
+; CHECK-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
 ; CHECK: middle.block:
 ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vector.reduce.mul.v4i64(<4 x i64> [[VEC_PHI]])
 ; CHECK-NEXT: br label [[EXIT:%.*]]
@@ -1213,6 +1213,7 @@ define i32 @g(i64 %n) {
 ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP1]], 4
 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
 ; CHECK: vector.scevcheck:
+; CHECK-NEXT: [[TMP28:%.*]] = trunc i64 [[N]] to i32
 ; CHECK-NEXT: [[TMP2:%.*]] = icmp ugt i64 [[N]], 4294967295
 ; CHECK-NEXT: br i1 [[TMP2]], label [[VEC_EPILOG_SCALAR_PH]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]]
 ; CHECK: vector.main.loop.iter.check:
diff --git a/llvm/test/Transforms/LoopVectorize/interleaved-accesses-3.ll b/llvm/test/Transforms/LoopVectorize/interleaved-accesses-3.ll
deleted file mode 100644
index 661e8eb666d54..0000000000000
--- a/llvm/test/Transforms/LoopVectorize/interleaved-accesses-3.ll
+++ /dev/null
@@ -1,124 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt -S -passes=loop-vectorize,instcombine -force-vector-width=4 -force-vector-interleave=1 -enable-interleaved-mem-accesses=true < %s | FileCheck %s
-
-target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-
-; Check that the interleaved-mem-access analysis currently does not create an
-; interleave group for access 'a' due to the possible pointer wrap-around.
-;
-; To begin with, in this test the candidate interleave group can be created
-; only when getPtrStride is called with Assume=true. Next, because
-; the interleave-group of the loads is not full (has gaps), we also need to check
-; for possible pointer wrapping. Here we currently use Assume=false and as a
-; result cannot prove the transformation is safe and therefore invalidate the
-; candidate interleave group.
-;
-; void func(unsigned * __restrict a, unsigned * __restrict b, unsigned char x, unsigned char y) {
-;  int i = 0;
-;  for (unsigned char index = x; i < y; index +=2, ++i)
-;    b[i] = aptr 2;
-;
-; }
-
-define void @_Z4funcPjS_hh(ptr noalias nocapture readonly %a, ptr noalias nocapture %b, i8 zeroext %x, i8 zeroext %y) local_unnamed_addr {
-; CHECK-LABEL: define void @_Z4funcPjS_hh(
-; CHECK-SAME: ptr noalias readonly captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i8 zeroext [[X:%.*]], i8 zeroext [[Y:%.*]]) local_unnamed_addr {
-; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i8 [[Y]], 0
-; CHECK-NEXT: br i1 [[CMP9]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY_PREHEADER:.*]]
-; CHECK: [[FOR_BODY_PREHEADER]]:
-; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext i8 [[Y]] to i64
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i8 [[Y]], 5
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
-; CHECK: [[VECTOR_SCEVCHECK]]:
-; CHECK-NEXT: [[TMP0:%.*]] = add nsw i64 [[WIDE_TRIP_COUNT]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i8
-; CHECK-NEXT: [[MUL_RESULT:%.*]] = shl i8 [[TMP1]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = xor i8 [[X]], -1
-; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i8 [[MUL_RESULT]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = icmp ugt i64 [[TMP0]], 127
-; CHECK-NEXT: [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
-; CHECK-NEXT: br i1 [[TMP5]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
-; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = and i64 [[WIDE_TRIP_COUNT]], 3
-; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
-; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[TMP8]], i64 4, i64 [[N_MOD_VF]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[WIDE_TRIP_COUNT]], [[TMP7]]
-; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i8
-; CHECK-NEXT: [[TMP6:%.*]] = shl i8 [[DOTCAST]], 1
-; CHECK-NEXT: [[IND_END:%.*]] = add i8 [[X]], [[TMP6]]
-; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
-; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[DOTCAST3:%.*]] = trunc i64 [[INDEX]] to i8
-; CHECK-NEXT: [[TMP9:%.*]] = shl i8 [[DOTCAST3]], 1
-; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[X]], [[TMP9]]
-; CHECK-NEXT: [[TMP14:%.*]] = zext i8 [[OFFSET_IDX]] to i64
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[TMP14]]
-; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[TMP15]], align 4
-; CHECK-NEXT: [[TMP23:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
-; CHECK-NEXT: [[TMP24:%.*]] = shl <4 x i32> [[TMP23]], splat (i32 1)
-; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
-; CHECK-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP25]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
-; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: br label %[[SCALAR_PH]]
-; CHECK: [[SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[FOR_BODY_PREHEADER]] ], [ 0, %[[VECTOR_SCEVCHECK]] ]
-; CHECK-NEXT: [[BC_RESUME_VAL3:%.*]] = phi i8 [ [[IND_END]], %[[MIDDLE_BLOCK]] ], [ [[X]], %[[FOR_BODY_PREHEADER]] ], [ [[X]], %[[VECTOR_SCEVCHECK]] ]
-; CHECK-NEXT: br label %[[FOR_BODY:.*]]
-; CHECK: [[FOR_COND_CLEANUP_LOOPEXIT:.*]]:
-; CHECK-NEXT: br label %[[FOR_COND_CLEANUP]]
-; CHECK: [[FOR_COND_CLEANUP]]:
-; CHECK-NEXT: ret void
-; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[INDEX_011:%.*]] = phi i8 [ [[ADD:%.*]], %[[FOR_BODY]] ], [ [[BC_RESUME_VAL3]], %[[SCALAR_PH]] ]
-; CHECK-NEXT: [[IDXPROM:%.*]] = zext i8 [[INDEX_011]] to i64
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IDXPROM]]
-; CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[MUL:%.*]] = shl i32 [[TMP27]], 1
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT: [[ADD]] = add i8 [[INDEX_011]], 2
-; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label %[[FOR_COND_CLEANUP_LOOPEXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
-;
-entry:
- %cmp9 = icmp eq i8 %y, 0
- br i1 %cmp9, label %for.cond.cleanup, label %for.body.preheader
-
-for.body.preheader:
- %wide.trip.count = zext i8 %y to i64
- br label %for.body
-
-for.cond.cleanup.loopexit:
- br label %for.cond.cleanup
-
-for.cond.cleanup:
- ret void
-
-for.body:
- %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
- %index.011 = phi i8 [ %add, %for.body ], [ %x, %for.body.preheader ]
- %idxprom = zext i8 %index.011 to i64
- %arrayidx = getelementptr inbounds i32, ptr %a, i64 %idxprom
- %0 = load i32, ptr %arrayidx, align 4
- %mul = shl i32 %0, 1
- %arrayidx2 = getelementptr inbounds i32, ptr %b, i64 %indvars.iv
- store i32 %mul, ptr %arrayidx2, align 4
- %add = add i8 %index.011, 2
- %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count
- br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
-}
-;.
-; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
-; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
-;.
diff --git a/llvm/test/Transforms/LoopVectorize/interleaved-accesses-requiring-scev-predicates.ll b/llvm/test/Transforms/LoopVectorize/interleaved-accesses-requiring-scev-predicates.ll
new file mode 100644
index 0000000000000..38438941eb357
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/interleaved-accesses-requiring-scev-predicates.ll
@@ -0,0 +1,258 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5
+; RUN: opt -S -passes=loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -enable-interleaved-mem-accesses=true %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+; Check that the interleaved-mem-access analysis currently does not create an
+; interleave group for access 'a' due to the possible pointer wrap-around.
+;
+; To begin with, in this test the candidate interleave group can be created
+; only when getPtrStride is called with Assume=true. Next, because
+; the interleave-group of the loads is not full (has gaps), we also need to check
+; for possible pointer wrapping. Here we currently use Assume=false and as a
+; result cannot prove the transformation is safe and therefore invalidate the
+; candidate interleave group.
+;
+
+; void func(unsigned * __restrict a, unsigned * __restrict b, unsigned char x, unsigned char y) {
+;  int i = 0;
+;  for (unsigned char index = x; i < y; index += 2, ++i)
+;    b[i] = a[index] * 2;
+;
+; }
+
+define void @wrap_around_scev_check(ptr noalias %a, ptr noalias %b, i8 %x, i8 %y) {
+; CHECK-LABEL: define void @wrap_around_scev_check(
+; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i8 [[X:%.*]], i8 [[Y:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i8 [[Y]], 0
+; CHECK-NEXT: br i1 [[CMP9]], label %[[EXIT:.*]], label %[[LOOP_PREHEADER:.*]]
+; CHECK: [[LOOP_PREHEADER]]:
+; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext i8 [[Y]] to i64
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[WIDE_TRIP_COUNT]], 4
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
+; CHECK: [[VECTOR_SCEVCHECK]]:
+; CHECK-NEXT: [[TMP0:%.*]] = add nsw i64 [[WIDE_TRIP_COUNT]], -1
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i8
+; CHECK-NEXT: [[MUL1:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 2, i8 [[TMP1]])
+; CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i8, i1 } [[MUL1]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i8, i1 } [[MUL1]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = add i8 [[X]], [[MUL_RESULT]]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp ult i8 [[TMP2]], [[X]]
+; CHECK-NEXT: [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
+; CHECK-NEXT: [[TMP17:%.*]] = icmp ugt i64 [[TMP0]], 255
+; CHECK-NEXT: [[TMP18:%.*]] = or i1 [[TMP5]], [[TMP17]]
+; CHECK-NEXT: br i1 [[TMP18]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 4
+; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[TMP6]], i64 4, i64 [[N_MOD_VF]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[TMP7]]
+; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i8
+; CHECK-NEXT: [[TMP8:%.*]] = mul i8 [[DOTCAST]], 2
+; CHECK-NEXT: [[TMP9:%.*]] = add i8 [[X]], [[TMP8]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[DOTCAST2:%.*]] = trunc i64 [[INDEX]] to i8
+; CHECK-NEXT: [[TMP10:%.*]] = mul i8 [[DOTCAST2]], 2
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[X]], [[TMP10]]
+; CHECK-NEXT: [[TMP11:%.*]] = zext i8 [[OFFSET_IDX]] to i64
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP11]]
+; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[TMP12]], align 4
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+; CHECK-NEXT: [[TMP13:%.*]] = shl <4 x i32> [[STRIDED_VEC]], splat (i32 1)
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP14]], i32 0
+; CHECK-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP19]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_SCEVCHECK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL3:%.*]] = phi i8 [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ [[X]], %[[LOOP_PREHEADER]] ], [ [[X]], %[[VECTOR_SCEVCHECK]] ]
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[INDEX_011:%.*]] = phi i8 [ [[BC_RESUME_VAL3]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IDXPROM:%.*]] = zext i8 [[INDEX_011]] to i64
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IDXPROM]]
+; CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[MUL:%.*]] = shl i32 [[TMP16]], 1
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
+; CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[ADD]] = add i8 [[INDEX_011]], 2
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT_LOOPEXIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: [[EXIT_LOOPEXIT]]:
+; CHECK-NEXT: br label %[[EXIT]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ %cmp9 = icmp eq i8 %y, 0
+ br i1 %cmp9, label %exit, label %loop.preheader
+
+loop.preheader:
+ %wide.trip.count = zext i8 %y to i64
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %loop.preheader ], [ %iv.next, %loop ]
+ %index.011 = phi i8 [ %x, %loop.preheader ], [ %add, %loop ]
+ %idxprom = zext i8 %index.011 to i64
+ %arrayidx = getelementptr inbounds i32, ptr %a, i64 %idxprom
+ %0 = load i32, ptr %arrayidx, align 4
+ %mul = shl i32 %0, 1
+ %arrayidx2 = getelementptr inbounds i32, ptr %b, i64 %iv
+ store i32 %mul, ptr %arrayidx2, align 4
+ %add = add i8 %index.011, 2
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, %wide.trip.count
+ br i1 %exitcond, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+; For %gep, we have the following SCEV: ((4 * (zext i4 {0,+,5}<%loop> to i64)) + %x).
+; Note the 4-bit wide (i4) AddRec {0,+,5}. It is known to wrap in the loop with trip count 16.
+define void @wrap_predicate_for_interleave_group_wraps_for_known_trip_count(ptr noalias %x, ptr noalias %out) {
+; CHECK-LABEL: define void @wrap_predicate_for_interleave_group_wraps_for_known_trip_count(
+; CHECK-SAME: ptr noalias [[X:%.*]], ptr noalias [[OUT:%.*]]) {
+; CHECK-NEXT: [[START:.*]]:
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
+; CHECK: [[VECTOR_SCEVCHECK]]:
+; CHECK-NEXT: [[MUL:%.*]] = call { i4, i1 } @llvm.umul.with.overflow.i4(i4 5, i4 -1)
+; CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i4, i1 } [[MUL]], 0
+; CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i4, i1 } [[MUL]], 1
+; CHECK-NEXT: br i1 [[MUL_OVERFLOW]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = mul nuw nsw i64 [[INDEX]], 5
+; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], 15
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i32, ptr [[X]], i64 [[TMP1]]
+; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <20 x i32>, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <20 x i32> [[WIDE_VEC]], <20 x i32> poison, <4 x i32> <i32 0, i32 5, i32 10, i32 15>
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i32, ptr [[OUT]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP3]], i32 0
+; CHECK-NEXT: store <4 x i32> [[STRIDED_VEC]], ptr [[TMP5]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 12
+; CHECK-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 12, %[[MIDDLE_BLOCK]] ], [ 0, %[[START]] ], [ 0, %[[VECTOR_SCEVCHECK]] ]
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[IV_MUL5:%.*]] = mul nuw nsw i64 [[IV]], 5
+; CHECK-NEXT: [[IV_MUL5_MASKED:%.*]] = and i64 [[IV_MUL5]], 15
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds nuw i32, ptr [[X]], i64 [[IV_MUL5_MASKED]]
+; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[GEP]], align 4
+; CHECK-NEXT: [[OUT_I:%.*]] = getelementptr inbounds nuw i32, ptr [[OUT]], i64 [[IV]]
+; CHECK-NEXT: store i32 [[V]], ptr [[OUT_I]], align 4
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 16
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+start:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %start ], [ %iv.next, %loop ]
+ %iv.next = add nuw nsw i64 %iv, 1
+ %iv.mul5 = mul nuw nsw i64 %iv, 5
+ %iv.mul5.masked = and i64 %iv.mul5, 15
+ %gep = getelementptr inbounds nuw i32, ptr %x, i64 %iv.mul5.masked
+ %v = load i32, ptr %gep, align 4
+ %out.i = getelementptr inbounds nuw i32, ptr %out, i64 %iv
+ store i32 %v, ptr %out.i, align 4
+ %exitcond.not = icmp eq i64 %iv.next, 16
+ br i1 %exitcond.not, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+; For %gep, we have the following SCEV: ((4 * (zext i4 {0,+,3}<%loop> to i64)) + %x).
+; Note the 4-bit wide (i4) AddRec {0,+,3}. It may wrap, depending on the trip count.
+define void @wrap_predicate_for_interleave_group_unknown_trip_count(ptr noalias %x, ptr noalias %out, i64 %n) {
+; CHECK-LABEL: define void @wrap_predicate_for_interleave_group_unknown_trip_count(
+; CHECK-SAME: ptr noalias [[X:%.*]], ptr noalias [[OUT:%.*]], i64 [[N:%.*]]) {
+; CHECK-NEXT: [[START:.*]]:
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[N]], 4
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]]
+; CHECK: [[VECTOR_SCEVCHECK]]:
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1
+; CHECK-NEXT: [[TMP9:%.*]] = trunc i64 [[TMP0]] to i4
+; CHECK-NEXT: [[MUL:%.*]] = call { i4, i1 } @llvm.umul.with.overflow.i4(i4 3, i4 [[TMP9]])
+; CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i4, i1 } [[MUL]], 0
+; CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i4, i1 } [[MUL]], 1
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i64 [[TMP0]], 15
+; CHECK-NEXT: [[TMP10:%.*]] = or i1 [[MUL_OVERFLOW]], [[TMP1]]
+; CHECK-NEXT: br i1 [[TMP10]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 4
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = select i1 [[TMP2]], i64 4, i64 [[N_MOD_VF]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[TMP7]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP8:%.*]] = mul nuw nsw i64 [[INDEX]], 3
+; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[TMP8]], 15
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw i32, ptr [[X]], i64 [[TMP3]]
+; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <12 x i32>, ptr [[TMP4]], align 4
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <12 x i32> [[WIDE_VEC]], <12 x i32> poison, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i32, ptr [[OUT]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[TMP5]], i32 0
+; CHECK-NEXT: store <4 x i32> [[STRIDED_VEC]], ptr [[TMP9]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[START]] ], [ 0, %[[VECTOR_SCEVCHECK]] ]
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[IV_MUL3:%.*]] = mul nuw nsw i64 [[IV]], 3
+; CHECK-NEXT: [[IV_MUL3_MASKED:%.*]] = and i64 [[IV_MUL3]], 15
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds nuw i32, ptr [[X]], i64 [[IV_MUL3_MASKED]]
+; CHECK-NEXT: [[V:%.*]] = load i32, ptr [[GEP]], align 4
+; CHECK-NEXT: [[OUT_I:%.*]] = getelementptr inbounds nuw i32, ptr [[OUT]], i64 [[IV]]
+; CHECK-NEXT: store i32 [[V]], ptr [[OUT_I]], align 4
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+start:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %start ], [ %iv.next, %loop ]
+ %iv.next = add nuw nsw i64 %iv, 1
+ %iv.mul3 = mul nuw nsw i64 %iv, 3
+ %iv.mul3.masked = and i64 %iv.mul3, 15
+ %gep = getelementptr inbounds nuw i32, ptr %x, i64 %iv.mul3.masked
+ %v = load i32, ptr %gep, align 4
+ %out.i = getelementptr inbounds nuw i32, ptr %out, i64 %iv
+ store i32 %v, ptr %out.i, align 4
+ %exitcond.not = icmp eq i64 %iv.next, %n
+ br i1 %exitcond.not, label %exit, label %loop
+
+exit:
+ ret void
+}
diff --git a/llvm/test/Transforms/LoopVectorize/runtime-check-small-clamped-bounds.ll b/llvm/test/Transforms/LoopVectorize/runtime-check-small-clamped-bounds.ll
index a47037c46eedc..7a5cf4576ac9e 100644
--- a/llvm/test/Transforms/LoopVectorize/runtime-check-small-clamped-bounds.ll
+++ b/llvm/test/Transforms/LoopVectorize/runtime-check-small-clamped-bounds.ll
@@ -19,6 +19,7 @@ define void @load_clamped_index(ptr %A, ptr %B, i32 %N) {
 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
 ; CHECK: vector.scevcheck:
 ; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP0]] to i2
 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i32 [[TMP0]], 3
 ; CHECK-NEXT: br i1 [[TMP1]], label [[SCALAR_PH]], label [[VECTOR_MEMCHECK:%.*]]
 ; CHECK: vector.memcheck:
@@ -91,6 +92,7 @@ define void @store_clamped_index(ptr %A, ptr %B, i32 %N) {
 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
 ; CHECK: vector.scevcheck:
 ; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N]], -1
+; CHECK-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP0]] to i2
 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i32 [[TMP0]], 3
 ; CHECK-NEXT: br i1 [[TMP1]], label [[SCALAR_PH]], label [[VECTOR_MEMCHECK:%.*]]
 ; CHECK: vector.memcheck:
@@ -359,6 +361,7 @@ define void @clamped_index_equal_dependence(ptr %A, ptr %B, i32 %N) {
 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
 ; CHECK: vector.scevcheck:
 ; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N]], -1
+; CHECK-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP0]] to i2
 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i32 [[TMP0]], 3
 ; CHECK-NEXT: br i1 [[TMP1]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
 ; CHECK: vector.ph:
diff --git a/llvm/test/Transforms/LoopVersioning/wrapping-pointer-versioning.ll b/llvm/test/Transforms/LoopVersioning/wrapping-pointer-versioning.ll
index 6e5eeaf9f1218..3e25695304854 100644
--- a/llvm/test/Transforms/LoopVersioning/wrapping-pointer-versioning.ll
+++ b/llvm/test/Transforms/LoopVersioning/wrapping-pointer-versioning.ll
@@ -29,7 +29,12 @@ define void @f1(ptr noalias %a,
 ; LV-LABEL: @f1(
 ; LV-NEXT: for.body.lver.check:
 ; LV-NEXT: [[TMP0:%.*]] = add i64 [[N:%.*]], -1
+; LV-NEXT: [[TMP5:%.*]] = trunc i64 [[TMP0]] to i32
+; LV-NEXT: [[MUL2:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP5]])
+; LV-NEXT: [[MUL_RESULT1:%.*]] = extractvalue { i32, i1 } [[MUL2]], 0
+; LV-NEXT: [[MUL_OVERFLOW1:%.*]] = extractvalue { i32, i1 } [[MUL2]], 1
 ; LV-NEXT: [[TMP1:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
+; LV-NEXT: [[TMP8:%.*]] = or i1 [[MUL_OVERFLOW1]], [[TMP1]]
 ; LV-NEXT: [[MUL1:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 4, i64 [[TMP0]])
 ; LV-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i64, i1 } [[MUL1]], 0
 ; LV-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i64, i1 } [[MUL1]], 1
@@ -37,7 +42,7 @@ define void @f1(ptr noalias %a,
 ; LV-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[MUL_RESULT]]
 ; LV-NEXT: [[TMP4:%.*]] = icmp ult ptr [[TMP3]], [[A]]
 ; LV-NEXT: [[TMP6:%.*]] = or i1 [[TMP4]], [[MUL_OVERFLOW]]
-; LV-NEXT: [[TMP7:%.*]] = or i1 [[TMP1]], [[TMP6]]
+; LV-NEXT: [[TMP7:%.*]] = or i1 [[TMP8]], [[TMP6]]
 ; LV-NEXT: br i1 [[TMP7]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
 ; LV: for.body.ph.lver.orig:
 ; LV-NEXT: br label [[FOR_BODY_LVER_ORIG:%.*]]
@@ -75,7 +80,7 @@ define void @f1(ptr noalias %a,
 ; LV-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT2:%.*]], label [[FOR_BODY]]
 ; LV: for.end.loopexit:
 ; LV-NEXT: br label [[FOR_END:%.*]]
-; LV: for.end.loopexit2:
+; LV: for.end.loopexit5:
 ; LV-NEXT: br label [[FOR_END]]
 ; LV: for.end:
 ; LV-NEXT: ret void
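
A minimal sketch of the runtime guard this change emits, distilled from the FileCheck lines above. It assumes an AddRec over i32 under an i64 exit count; the value names (%btc for the backedge-taken count, %step for the AddRec step) and the block labels are illustrative, not taken from any test:

  vector.scevcheck:
    ; Truncate the backedge-taken count to the narrower AddRec type.
    %btc.trunc = trunc i64 %btc to i32
    ; Does step * trunc(btc) overflow in the narrow type?
    %mul = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %step, i32 %btc.trunc)
    %mul.ov = extractvalue { i32, i1 } %mul, 1
    ; Does the backedge-taken count even fit in the narrow type?
    %btc.wide = icmp ugt i64 %btc, 4294967295
    ; Bail out to the unvectorized loop if either check fails.
    %wrap = or i1 %mul.ov, %btc.wide
    br i1 %wrap, label %scalar.ph, label %vector.ph

In the updated tests the step is a small constant (2, 3, or 5), so the same pattern appears with @llvm.umul.with.overflow at widths i4, i8, or i32, and it appears to fold away entirely where the multiply provably cannot overflow (e.g. the trunc-only additions in runtime-check-small-clamped-bounds.ll).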