From a36d269658dfd8b16dd8bdcb1726b9206b1d97ff Mon Sep 17 00:00:00 2001
From: Malhar Jajoo
Date: Fri, 11 Mar 2022 09:35:31 +0000
Subject: [PATCH] [VPlan] Avoid collecting scalars for SVE

This patch ensures scalars (except for uniforms) are no longer collected
(prior to the LVP planning phase) for scalable vectorization.

This avoids the chance of generating scalarized instructions later (during
the LVP execute phase), as they are not supported for scalable
vectorization.

A relevant test has also been added.

Differential Revision: https://reviews.llvm.org/D121452
---
 .../Transforms/Vectorize/LoopVectorize.cpp    |  8 ++
 .../AArch64/scalable-avoid-scalarization.ll   | 95 +++++++++++++++++++
 2 files changed, 103 insertions(+)
 create mode 100644 llvm/test/Transforms/LoopVectorize/AArch64/scalable-avoid-scalarization.ll

diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 9de6ec40a5d8b..b8754fbddc1b7 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -4455,6 +4455,14 @@ void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
          "This function should not be visited twice for the same VF");
 
+  // This avoids any chances of creating a REPLICATE recipe during planning
+  // since that would result in generation of scalarized code during execution,
+  // which is not supported for scalable vectors.
+  if (VF.isScalable()) {
+    Scalars[VF].insert(Uniforms[VF].begin(), Uniforms[VF].end());
+    return;
+  }
+
   SmallSetVector<Instruction *, 8> Worklist;
 
   // These sets are used to seed the analysis with pointers used by memory
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-avoid-scalarization.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-avoid-scalarization.ll
new file mode 100644
index 0000000000000..6fa67d888fb26
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-avoid-scalarization.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -mtriple=aarch64 -loop-vectorize --force-vector-interleave=1 -S | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; The test checks that scalarized code is not generated for SVE.
+; It creates a scenario where the gep instruction is used outside
+; the loop, preventing the gep (and consequently the loop induction
+; update variable) from being classified as 'uniform'.
+
+define void @test_no_scalarization(i64* %a, i32 %idx, i32 %n) #0 {
+; CHECK-LABEL: @test_no_scalarization(
+; CHECK-NEXT:  L.entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[IDX:%.*]], 1
+; CHECK-NEXT:    [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[N:%.*]], i32 [[TMP0]])
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i32 [[SMAX]], [[IDX]]
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT:    [[TMP3:%.*]] = mul i32 [[TMP2]], 2
+; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK:       vector.ph:
+; CHECK-NEXT:    [[TMP4:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT:    [[TMP5:%.*]] = mul i32 [[TMP4]], 2
+; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i32 [[TMP1]], [[TMP5]]
+; CHECK-NEXT:    [[N_VEC:%.*]] = sub i32 [[TMP1]], [[N_MOD_VF]]
+; CHECK-NEXT:    [[IND_END:%.*]] = add i32 [[IDX]], [[N_VEC]]
+; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[IDX]], i32 0
+; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[DOTSPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 2 x i32> @llvm.experimental.stepvector.nxv2i32()
+; CHECK-NEXT:    [[TMP7:%.*]] = add <vscale x 2 x i32> [[TMP6]], zeroinitializer
+; CHECK-NEXT:    [[TMP8:%.*]] = mul <vscale x 2 x i32> [[TMP7]], shufflevector (<vscale x 2 x i32> insertelement (<vscale x 2 x i32> poison, i32 1, i32 0), <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer)
+; CHECK-NEXT:    [[INDUCTION:%.*]] = add <vscale x 2 x i32> [[DOTSPLAT]], [[TMP8]]
+; CHECK-NEXT:    [[TMP9:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT:    [[TMP10:%.*]] = mul i32 [[TMP9]], 2
+; CHECK-NEXT:    [[TMP11:%.*]] = mul i32 1, [[TMP10]]
+; CHECK-NEXT:    [[DOTSPLATINSERT1:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[TMP11]], i32 0
+; CHECK-NEXT:    [[DOTSPLAT2:%.*]] = shufflevector <vscale x 2 x i32> [[DOTSPLATINSERT1]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
+; CHECK:       vector.body:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <vscale x 2 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr i64, i64* [[A:%.*]], <vscale x 2 x i32> [[VEC_IND]]
+; CHECK-NEXT:    [[TMP13:%.*]] = extractelement <vscale x 2 x i64*> [[TMP12]], i32 0
+; CHECK-NEXT:    [[TMP14:%.*]] = bitcast i64* [[TMP13]] to double*
+; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr double, double* [[TMP14]], i32 0
+; CHECK-NEXT:    [[TMP16:%.*]] = bitcast double* [[TMP15]] to <vscale x 2 x double>*
+; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP16]], align 8
+; CHECK-NEXT:    [[TMP17:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT:    [[TMP18:%.*]] = mul i32 [[TMP17]], 2
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP18]]
+; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <vscale x 2 x i32> [[VEC_IND]], [[DOTSPLAT2]]
+; CHECK-NEXT:    [[TMP19:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK:       middle.block:
+; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i32 [[TMP1]], [[N_VEC]]
+; CHECK-NEXT:    [[TMP20:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT:    [[TMP21:%.*]] = mul i32 [[TMP20]], 2
+; CHECK-NEXT:    [[TMP22:%.*]] = sub i32 [[TMP21]], 1
+; CHECK-NEXT:    [[TMP23:%.*]] = extractelement <vscale x 2 x i64*> [[TMP12]], i32 [[TMP22]]
+; CHECK-NEXT:    br i1 [[CMP_N]], label [[L_EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK:       scalar.ph:
+; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[IDX]], [[L_ENTRY:%.*]] ]
+; CHECK-NEXT:    br label [[L_LOOPBODY:%.*]]
+; CHECK:       L.LoopBody:
+; CHECK-NEXT:    [[INDVAR:%.*]] = phi i32 [ [[INDVAR_NEXT:%.*]], [[L_LOOPBODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT:    [[INDVAR_NEXT]] = add nsw i32 [[INDVAR]], 1
+; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr i64, i64* [[A]], i32 [[INDVAR]]
+; CHECK-NEXT:    [[TMP25:%.*]] = bitcast i64* [[TMP24]] to double*
+; CHECK-NEXT:    [[TMP26:%.*]] = load double, double* [[TMP25]], align 8
+; CHECK-NEXT:    [[TMP27:%.*]] = icmp slt i32 [[INDVAR_NEXT]], [[N]]
+; CHECK-NEXT:    br i1 [[TMP27]], label [[L_LOOPBODY]], label [[L_EXIT]], !llvm.loop [[LOOP2:![0-9]+]]
+; CHECK:       L.exit:
+; CHECK-NEXT:    [[DOTLCSSA:%.*]] = phi i64* [ [[TMP24]], [[L_LOOPBODY]] ], [ [[TMP23]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT:    store i64 1, i64* [[DOTLCSSA]], align 8
+; CHECK-NEXT:    ret void
+;
+L.entry:
+  br label %L.LoopBody
+
+L.LoopBody:                                       ; preds = %L.LoopBody, %L.entry
+  %indvar = phi i32 [ %indvar.next, %L.LoopBody ], [ %idx, %L.entry ]
+  %indvar.next = add nsw i32 %indvar, 1
+  %0 = getelementptr i64, i64* %a, i32 %indvar
+  %1 = bitcast i64* %0 to double*
+  %2 = load double, double* %1, align 8
+  %3 = icmp slt i32 %indvar.next, %n
+  br i1 %3, label %L.LoopBody, label %L.exit
+
+L.exit:                                           ; preds = %L.LoopBody
+  store i64 1, i64* %0, align 8
+  ret void
+}
+
+attributes #0 = { nofree norecurse noreturn nosync nounwind "target-features"="+sve" }