Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

LoopVectorize: fix phi cost when it is scalar after vectorization #74456

Closed
wants to merge 2 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
5 changes: 4 additions & 1 deletion llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -6885,7 +6885,10 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
auto *Phi = cast<PHINode>(I);

// First-order recurrences are replaced by vector shuffles inside the loop.
if (VF.isVector() && Legal->isFixedOrderRecurrence(Phi)) {
// However, if the Phi is a scalar after vectorization, don't get shuffle
// cost.
if (VF.isVector() && Legal->isFixedOrderRecurrence(Phi) &&
!isScalarAfterVectorization(Phi, VF)) {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

IIUC we now don't crash because the phi in the test is considered scalar after vectorization; but this doesn't match the code we generate in the test case, where we have a vector phi for the recurrence and the corresponding shuffle AFAICT.

We only support codegen for vector recurrences, so it would probably be better to avoid marking the phi as scalar after vectorization.

SmallVector<int> Mask(VF.getKnownMinValue());
std::iota(Mask.begin(), Mask.end(), VF.getKnownMinValue() - 1);
return TTI.getShuffleCost(TargetTransformInfo::SK_Splice,
Expand Down
133 changes: 133 additions & 0 deletions llvm/test/Transforms/LoopVectorize/X86/pr72969.ll
Original file line number Diff line number Diff line change
@@ -0,0 +1,133 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -mtriple=x86_64 -mattr=-avx,-avx2,-avx512f,+sse,-sse2,-sse3,-sse4.2 -passes=loop-vectorize -S < %s | FileCheck --check-prefix=NOVEC %s
; RUN: opt -mtriple=x86_64 -mattr=-avx,-avx2,-avx512f,+sse,-sse2,-sse3,-sse4.2 -passes=loop-vectorize -force-vector-width=4 -S < %s | FileCheck --check-prefix=VEC %s

@h = global i64 0

define void @test(ptr %p) {
; NOVEC-LABEL: define void @test(
; NOVEC-SAME: ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
; NOVEC-NEXT: entry:
; NOVEC-NEXT: br label [[FOR_BODY:%.*]]
; NOVEC: for.body:
; NOVEC-NEXT: [[IDX_EXT_MERGE:%.*]] = phi i64 [ 1, [[ENTRY:%.*]] ], [ [[IDX:%.*]], [[FOR_BODY]] ]
; NOVEC-NEXT: [[INC_MERGE:%.*]] = phi i16 [ 1, [[ENTRY]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; NOVEC-NEXT: [[IDX_MERGE:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IDX_EXT_MERGE]], [[FOR_BODY]] ]
; NOVEC-NEXT: [[ADD:%.*]] = shl i64 [[IDX_MERGE]], 1
; NOVEC-NEXT: [[ARRAYIDX:%.*]] = getelementptr i64, ptr [[P]], i64 [[ADD]]
; NOVEC-NEXT: store i64 0, ptr [[ARRAYIDX]], align 8
; NOVEC-NEXT: [[INC]] = add i16 [[INC_MERGE]], 1
; NOVEC-NEXT: [[IDX]] = zext i16 [[INC]] to i64
; NOVEC-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]]
; NOVEC-NEXT: [[CMP:%.*]] = icmp ugt ptr [[GEP]], @h
; NOVEC-NEXT: br i1 [[CMP]], label [[EXIT:%.*]], label [[FOR_BODY]]
; NOVEC: exit:
; NOVEC-NEXT: ret void
;
; VEC-LABEL: define void @test(
; VEC-SAME: ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
; VEC-NEXT: entry:
; VEC-NEXT: [[P1:%.*]] = ptrtoint ptr [[P]] to i64
; VEC-NEXT: [[TMP0:%.*]] = add i64 [[P1]], 16
; VEC-NEXT: [[UMAX2:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP0]], i64 add (i64 ptrtoint (ptr @h to i64), i64 1))
; VEC-NEXT: [[TMP1:%.*]] = add i64 [[UMAX2]], -9
; VEC-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], [[P1]]
; VEC-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; VEC-NEXT: [[TMP4:%.*]] = add nuw nsw i64 [[TMP3]], 1
; VEC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP4]], 4
; VEC-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; VEC: vector.scevcheck:
; VEC-NEXT: [[TMP5:%.*]] = add i64 [[P1]], 16
; VEC-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP5]], i64 add (i64 ptrtoint (ptr @h to i64), i64 1))
; VEC-NEXT: [[TMP6:%.*]] = add i64 [[UMAX]], -9
; VEC-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], [[P1]]
; VEC-NEXT: [[TMP8:%.*]] = lshr i64 [[TMP7]], 3
; VEC-NEXT: [[TMP9:%.*]] = icmp ugt i64 [[TMP8]], 65535
; VEC-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP8]] to i16
; VEC-NEXT: [[TMP11:%.*]] = add i16 2, [[TMP10]]
; VEC-NEXT: [[TMP12:%.*]] = icmp ult i16 [[TMP11]], 2
; VEC-NEXT: [[TMP13:%.*]] = icmp ugt i64 [[TMP8]], 65535
; VEC-NEXT: [[TMP14:%.*]] = or i1 [[TMP12]], [[TMP13]]
; VEC-NEXT: br i1 [[TMP14]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; VEC: vector.ph:
; VEC-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP4]], 4
; VEC-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP4]], [[N_MOD_VF]]
; VEC-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i16
; VEC-NEXT: [[IND_END:%.*]] = add i16 1, [[DOTCAST]]
; VEC-NEXT: br label [[VECTOR_BODY:%.*]]
; VEC: vector.body:
; VEC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x i64> [ <i64 poison, i64 poison, i64 poison, i64 1>, [[VECTOR_PH]] ], [ [[TMP28:%.*]], [[VECTOR_BODY]] ]
; VEC-NEXT: [[VEC_IND:%.*]] = phi <4 x i16> [ <i16 1, i16 2, i16 3, i16 4>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC-NEXT: [[TMP15:%.*]] = add i64 [[INDEX]], 0
; VEC-NEXT: [[TMP16:%.*]] = add i64 [[INDEX]], 1
; VEC-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 2
; VEC-NEXT: [[TMP18:%.*]] = add i64 [[INDEX]], 3
; VEC-NEXT: [[TMP19:%.*]] = shl i64 [[TMP15]], 1
; VEC-NEXT: [[TMP20:%.*]] = shl i64 [[TMP16]], 1
; VEC-NEXT: [[TMP21:%.*]] = shl i64 [[TMP17]], 1
; VEC-NEXT: [[TMP22:%.*]] = shl i64 [[TMP18]], 1
; VEC-NEXT: [[TMP23:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP19]]
; VEC-NEXT: [[TMP24:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP20]]
; VEC-NEXT: [[TMP25:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP21]]
; VEC-NEXT: [[TMP26:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP22]]
; VEC-NEXT: store i64 0, ptr [[TMP23]], align 8
; VEC-NEXT: store i64 0, ptr [[TMP24]], align 8
; VEC-NEXT: store i64 0, ptr [[TMP25]], align 8
; VEC-NEXT: store i64 0, ptr [[TMP26]], align 8
; VEC-NEXT: [[TMP27:%.*]] = add <4 x i16> [[VEC_IND]], <i16 1, i16 1, i16 1, i16 1>
; VEC-NEXT: [[TMP28]] = zext <4 x i16> [[TMP27]] to <4 x i64>
; VEC-NEXT: [[TMP29:%.*]] = shufflevector <4 x i64> [[VECTOR_RECUR]], <4 x i64> [[TMP28]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; VEC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; VEC-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], <i16 4, i16 4, i16 4, i16 4>
; VEC-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; VEC-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; VEC: middle.block:
; VEC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP4]], [[N_VEC]]
; VEC-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i64> [[TMP28]], i32 3
; VEC-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; VEC: scalar.ph:
; VEC-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i64 [ 1, [[VECTOR_SCEVCHECK]] ], [ 1, [[ENTRY:%.*]] ], [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ]
; VEC-NEXT: [[BC_RESUME_VAL:%.*]] = phi i16 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 1, [[ENTRY]] ], [ 1, [[VECTOR_SCEVCHECK]] ]
; VEC-NEXT: [[BC_RESUME_VAL3:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; VEC-NEXT: br label [[FOR_BODY:%.*]]
; VEC: for.body:
; VEC-NEXT: [[SCALAR_RECUR:%.*]] = phi i64 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IDX:%.*]], [[FOR_BODY]] ]
; VEC-NEXT: [[INC_MERGE:%.*]] = phi i16 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; VEC-NEXT: [[IDX_MERGE:%.*]] = phi i64 [ [[BC_RESUME_VAL3]], [[SCALAR_PH]] ], [ [[SCALAR_RECUR]], [[FOR_BODY]] ]
; VEC-NEXT: [[ADD:%.*]] = shl i64 [[IDX_MERGE]], 1
; VEC-NEXT: [[ARRAYIDX:%.*]] = getelementptr i64, ptr [[P]], i64 [[ADD]]
; VEC-NEXT: store i64 0, ptr [[ARRAYIDX]], align 8
; VEC-NEXT: [[INC]] = add i16 [[INC_MERGE]], 1
; VEC-NEXT: [[IDX]] = zext i16 [[INC]] to i64
; VEC-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]]
; VEC-NEXT: [[CMP:%.*]] = icmp ugt ptr [[GEP]], @h
; VEC-NEXT: br i1 [[CMP]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; VEC: exit:
; VEC-NEXT: ret void
;
; Input IR (reproducer for the cost-model issue this PR addresses):
; %idx.ext.merge is a fixed-order recurrence — it merges the PREVIOUS
; iteration's %idx — and %idx.merge in turn consumes the recurrence one
; step later.  The PR changes getInstructionCost to skip the SK_Splice
; shuffle-cost query for such a phi when the cost model marks it scalar
; after vectorization.
; NOTE(review): per the reviewer's comment above, with
; -force-vector-width=4 codegen still produces a vector recurrence phi
; plus shufflevector (see the VEC check lines), so whether this phi
; should be considered scalar-after-vectorization at all is an open
; question — confirm before relying on these checks.
entry:
br label %for.body

for.body:
; Fixed-order recurrence: the phi's loop-carried value is %idx from the
; previous iteration.
%idx.ext.merge = phi i64 [ 1, %entry ], [ %idx, %for.body ]
%inc.merge = phi i16 [ 1, %entry ], [ %inc, %for.body ]
; Second-order consumer of the recurrence (previous %idx.ext.merge).
%idx.merge = phi i64 [ 0, %entry ], [ %idx.ext.merge, %for.body ]
%add = shl i64 %idx.merge, 1
%arrayidx = getelementptr i64, ptr %p, i64 %add
store i64 0, ptr %arrayidx
%inc = add i16 %inc.merge, 1
%idx = zext i16 %inc to i64
%gep = getelementptr i64, ptr %p, i64 %idx
; Loop bound is a pointer compare against the global @h, which is what
; forces the SCEV overflow checks in the VEC output above.
%cmp = icmp ugt ptr %gep, @h
br i1 %cmp, label %exit, label %for.body

exit:
ret void
}
;.
; VEC: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; VEC: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; VEC: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
; VEC: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
;.