-
Notifications
You must be signed in to change notification settings - Fork 10.8k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
LoopVectorize: guard marking iv as scalar; fix bug #88730
Conversation
When collecting loop scalars, LoopVectorize over-eagerly marks the induction variable and its update as scalars after vectorization, even if the induction variable update is a fixed-order recurrence. Guard the process with this check, fixing a crash. Fixes llvm#72969.
@llvm/pr-subscribers-llvm-transforms Author: Ramkumar Ramachandra (artagnon) ChangesWhen collecting loop scalars, LoopVectorize over-eagerly marks the induction variable and its update as scalars after vectorization, even if the induction variable update is a fixed-order recurrence. Guard the process with this check, fixing a crash. Fixes #72969. Full diff: https://github.com/llvm/llvm-project/pull/88730.diff 2 Files Affected:
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 5535cc55e93216..fa11127e496888 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -3875,6 +3875,13 @@ void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
if (!ScalarInd)
continue;
+ // If the induction variable update is a fixed-order recurrence, neither the
+ // induction variable nor its update should be marked scalar after
+ // vectorization.
+ auto *IndUpdatePhi = dyn_cast<PHINode>(IndUpdate);
+ if (IndUpdatePhi && Legal->isFixedOrderRecurrence(IndUpdatePhi))
+ continue;
+
// Determine if all users of the induction variable update instruction are
// scalar after vectorization.
auto ScalarIndUpdate =
diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr72969.ll b/llvm/test/Transforms/LoopVectorize/X86/pr72969.ll
index f982695983330f..45af544f0304d4 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/pr72969.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/pr72969.ll
@@ -1,10 +1,111 @@
-; REQUIRES: asserts
-; RUN: not --crash opt -mtriple=x86_64 -mattr=-avx,-avx2,-avx512f,+sse,-sse2,-sse3,-sse4.2 -passes=loop-vectorize -S < %s
-; RUN: not --crash opt -mtriple=x86_64 -mattr=-avx,-avx2,-avx512f,+sse,-sse2,-sse3,-sse4.2 -passes=loop-vectorize -force-vector-width=4 -S < %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -mtriple=x86_64 -mattr=-avx,-avx2,-avx512f,+sse,-sse2,-sse3,-sse4.2 -passes=loop-vectorize -S %s | FileCheck --check-prefix=NOVEC %s
+; RUN: opt -mtriple=x86_64 -mattr=-avx,-avx2,-avx512f,+sse,-sse2,-sse3,-sse4.2 -passes=loop-vectorize -force-vector-width=4 -S %s | FileCheck --check-prefix=VEC %s
@h = global i64 0
define void @test(ptr %p) {
+; NOVEC-LABEL: define void @test(
+; NOVEC-SAME: ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
+; NOVEC-NEXT: entry:
+; NOVEC-NEXT: br label [[FOR_BODY:%.*]]
+; NOVEC: for.body:
+; NOVEC-NEXT: [[IDX_EXT_MERGE:%.*]] = phi i64 [ 1, [[ENTRY:%.*]] ], [ [[IDX:%.*]], [[FOR_BODY]] ]
+; NOVEC-NEXT: [[INC_MERGE:%.*]] = phi i16 [ 1, [[ENTRY]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; NOVEC-NEXT: [[IDX_MERGE:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IDX_EXT_MERGE]], [[FOR_BODY]] ]
+; NOVEC-NEXT: [[ADD:%.*]] = shl i64 [[IDX_MERGE]], 1
+; NOVEC-NEXT: [[ARRAYIDX:%.*]] = getelementptr i64, ptr [[P]], i64 [[ADD]]
+; NOVEC-NEXT: store i64 0, ptr [[ARRAYIDX]], align 8
+; NOVEC-NEXT: [[INC]] = add i16 [[INC_MERGE]], 1
+; NOVEC-NEXT: [[IDX]] = zext i16 [[INC]] to i64
+; NOVEC-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]]
+; NOVEC-NEXT: [[CMP:%.*]] = icmp ugt ptr [[GEP]], @h
+; NOVEC-NEXT: br i1 [[CMP]], label [[EXIT:%.*]], label [[FOR_BODY]]
+; NOVEC: exit:
+; NOVEC-NEXT: ret void
+;
+; VEC-LABEL: define void @test(
+; VEC-SAME: ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
+; VEC-NEXT: entry:
+; VEC-NEXT: [[P1:%.*]] = ptrtoint ptr [[P]] to i64
+; VEC-NEXT: [[TMP0:%.*]] = add i64 [[P1]], 16
+; VEC-NEXT: [[UMAX2:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP0]], i64 add (i64 ptrtoint (ptr @h to i64), i64 1))
+; VEC-NEXT: [[TMP1:%.*]] = add i64 [[UMAX2]], -9
+; VEC-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], [[P1]]
+; VEC-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
+; VEC-NEXT: [[TMP4:%.*]] = add nuw nsw i64 [[TMP3]], 1
+; VEC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP4]], 4
+; VEC-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
+; VEC: vector.scevcheck:
+; VEC-NEXT: [[TMP5:%.*]] = add i64 [[P1]], 16
+; VEC-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP5]], i64 add (i64 ptrtoint (ptr @h to i64), i64 1))
+; VEC-NEXT: [[TMP6:%.*]] = add i64 [[UMAX]], -9
+; VEC-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], [[P1]]
+; VEC-NEXT: [[TMP8:%.*]] = lshr i64 [[TMP7]], 3
+; VEC-NEXT: [[TMP9:%.*]] = icmp ugt i64 [[TMP8]], 65535
+; VEC-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP8]] to i16
+; VEC-NEXT: [[TMP11:%.*]] = add i16 2, [[TMP10]]
+; VEC-NEXT: [[TMP12:%.*]] = icmp ult i16 [[TMP11]], 2
+; VEC-NEXT: [[TMP13:%.*]] = icmp ugt i64 [[TMP8]], 65535
+; VEC-NEXT: [[TMP14:%.*]] = or i1 [[TMP12]], [[TMP13]]
+; VEC-NEXT: br i1 [[TMP14]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
+; VEC: vector.ph:
+; VEC-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP4]], 4
+; VEC-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP4]], [[N_MOD_VF]]
+; VEC-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i16
+; VEC-NEXT: [[IND_END:%.*]] = add i16 1, [[DOTCAST]]
+; VEC-NEXT: br label [[VECTOR_BODY:%.*]]
+; VEC: vector.body:
+; VEC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; VEC-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x i64> [ <i64 poison, i64 poison, i64 poison, i64 1>, [[VECTOR_PH]] ], [ [[TMP28:%.*]], [[VECTOR_BODY]] ]
+; VEC-NEXT: [[VEC_IND:%.*]] = phi <4 x i16> [ <i16 1, i16 2, i16 3, i16 4>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; VEC-NEXT: [[TMP15:%.*]] = add i64 [[INDEX]], 0
+; VEC-NEXT: [[TMP16:%.*]] = add i64 [[INDEX]], 1
+; VEC-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 2
+; VEC-NEXT: [[TMP18:%.*]] = add i64 [[INDEX]], 3
+; VEC-NEXT: [[TMP19:%.*]] = shl i64 [[TMP15]], 1
+; VEC-NEXT: [[TMP20:%.*]] = shl i64 [[TMP16]], 1
+; VEC-NEXT: [[TMP21:%.*]] = shl i64 [[TMP17]], 1
+; VEC-NEXT: [[TMP22:%.*]] = shl i64 [[TMP18]], 1
+; VEC-NEXT: [[TMP23:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP19]]
+; VEC-NEXT: [[TMP24:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP20]]
+; VEC-NEXT: [[TMP25:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP21]]
+; VEC-NEXT: [[TMP26:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP22]]
+; VEC-NEXT: store i64 0, ptr [[TMP23]], align 8
+; VEC-NEXT: store i64 0, ptr [[TMP24]], align 8
+; VEC-NEXT: store i64 0, ptr [[TMP25]], align 8
+; VEC-NEXT: store i64 0, ptr [[TMP26]], align 8
+; VEC-NEXT: [[TMP27:%.*]] = add <4 x i16> [[VEC_IND]], <i16 1, i16 1, i16 1, i16 1>
+; VEC-NEXT: [[TMP28]] = zext <4 x i16> [[TMP27]] to <4 x i64>
+; VEC-NEXT: [[TMP29:%.*]] = shufflevector <4 x i64> [[VECTOR_RECUR]], <4 x i64> [[TMP28]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+; VEC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; VEC-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], <i16 4, i16 4, i16 4, i16 4>
+; VEC-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; VEC-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; VEC: middle.block:
+; VEC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP4]], [[N_VEC]]
+; VEC-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i64> [[TMP28]], i32 3
+; VEC-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; VEC: scalar.ph:
+; VEC-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i64 [ 1, [[VECTOR_SCEVCHECK]] ], [ 1, [[ENTRY:%.*]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
+; VEC-NEXT: [[BC_RESUME_VAL:%.*]] = phi i16 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 1, [[ENTRY]] ], [ 1, [[VECTOR_SCEVCHECK]] ]
+; VEC-NEXT: [[BC_RESUME_VAL3:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
+; VEC-NEXT: br label [[FOR_BODY:%.*]]
+; VEC: for.body:
+; VEC-NEXT: [[SCALAR_RECUR:%.*]] = phi i64 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IDX:%.*]], [[FOR_BODY]] ]
+; VEC-NEXT: [[INC_MERGE:%.*]] = phi i16 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; VEC-NEXT: [[IDX_MERGE:%.*]] = phi i64 [ [[BC_RESUME_VAL3]], [[SCALAR_PH]] ], [ [[SCALAR_RECUR]], [[FOR_BODY]] ]
+; VEC-NEXT: [[ADD:%.*]] = shl i64 [[IDX_MERGE]], 1
+; VEC-NEXT: [[ARRAYIDX:%.*]] = getelementptr i64, ptr [[P]], i64 [[ADD]]
+; VEC-NEXT: store i64 0, ptr [[ARRAYIDX]], align 8
+; VEC-NEXT: [[INC]] = add i16 [[INC_MERGE]], 1
+; VEC-NEXT: [[IDX]] = zext i16 [[INC]] to i64
+; VEC-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]]
+; VEC-NEXT: [[CMP:%.*]] = icmp ugt ptr [[GEP]], @h
+; VEC-NEXT: br i1 [[CMP]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; VEC: exit:
+; VEC-NEXT: ret void
+;
entry:
br label %for.body
@@ -24,3 +125,9 @@ for.body:
exit:
ret void
}
+;.
+; VEC: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; VEC: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; VEC: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+; VEC: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
+;.
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
LGTM!
When collecting loop scalars, LoopVectorize over-eagerly marks the induction variable and its update as scalars after vectorization, even if the induction variable update is a fixed-order recurrence. Guard the process with this check, fixing a crash.
Fixes #72969.