From df019e78060cf688eba7ce34e7c49764ae192b62 Mon Sep 17 00:00:00 2001 From: Elvis Wang Date: Sun, 2 Nov 2025 16:29:40 -0800 Subject: [PATCH] [LV] Convert uniform-address scatters to scalar store when unmasked or header-masked This patch optimizes vector scatters that have a uniform (single-scalar) address by replacing them with "extract-last-element + scalar store" when the scatter is unmasked, or masked by the header mask. In all of these cases, at least one lane is guaranteed to execute in each vector iteration, so storing the last active element is sufficient. Implementation: - Add optimizeScatterWithUniformAddr(VPlan &), and invoke it from VPlanTransforms::optimize(). - Identify non-consecutive VPWidenStoreRecipe/VPWidenStoreEVLRecipe with uniform addresses. - Require either no mask, an all-true mask, or the loop header mask. - Replace the scatter with VPInstruction::ExtractLastElement of the stored value and a VPReplicate (scalar) store. Notes: - The legacy cost model can scalarize a store if both the address and the value are uniform. In VPlan we materialize the stored value via ExtractLastElement, so only the address must be uniform. - Some of the loops won't be vectorized anymore since no vector instructions will be generated. 
--- .../Transforms/Vectorize/VPlanTransforms.cpp | 45 +++++++++++++++ .../RISCV/gather-scatter-cost.ll | 27 +++++---- .../LoopVectorize/RISCV/induction-costs.ll | 8 ++- .../LoopVectorize/RISCV/pr154103.ll | 57 ++++++++++++------- .../Transforms/LoopVectorize/RISCV/pr88802.ll | 48 ++++++++-------- .../truncate-to-minimal-bitwidth-evl-crash.ll | 35 ++++++------ .../RISCV/type-info-cache-evl-crash.ll | 10 ++-- .../LoopVectorize/RISCV/uniform-load-store.ll | 16 ++++-- .../vf-will-not-generate-any-vector-insts.ll | 43 +------------- 9 files changed, 163 insertions(+), 126 deletions(-) diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index 3757a590c230e..981f6b1dfe794 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -1372,6 +1372,50 @@ void VPlanTransforms::simplifyRecipes(VPlan &Plan) { } } +static VPSingleDefRecipe *findHeaderMask(VPlan &Plan); + +/// Convert scatters with a uniform address that are either unmasked or +/// masked by the header mask into an extract-last-element + scalar store. +// TODO: Add a profitability check comparing the cost of a scatter vs. +// extract + scalar store. +static void optimizeScatterWithUniformAddr(VPlan &Plan) { + VPValue *HeaderMask = findHeaderMask(Plan); + for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly( + vp_depth_first_deep(Plan.getEntry()))) { + for (VPRecipeBase &R : make_early_inc_range(*VPBB)) { + + // Only transform store recipes. + if (!isa(&R)) + continue; + + auto StoreR = cast(&R); + if (StoreR->isConsecutive() || + !vputils::isSingleScalar(StoreR->getAddr())) + continue; + + assert(!StoreR->isReverse() && + "Not consecutive memory recipes shouldn't be reversed"); + VPValue *Mask = StoreR->getMask(); + + // Only convert the scatter to a scalar store if it is unmasked or masked + // by the header mask, which guarantees that at least one active lane. 
+ if (Mask && Mask != HeaderMask) + continue; + + auto *Extract = new VPInstruction(VPInstruction::ExtractLastElement, + {StoreR->getOperand(1)}); + Extract->insertBefore(StoreR); + + // TODO: Sink the scalar store recipe to middle block if possible. + auto *ScalarStore = new VPReplicateRecipe( + &StoreR->getIngredient(), {Extract, StoreR->getAddr()}, + true /*IsSingleScalar*/, nullptr /*Mask*/, *StoreR /*Metadata*/); + ScalarStore->insertBefore(StoreR); + StoreR->eraseFromParent(); + } + } +} + static void narrowToSingleScalarRecipes(VPlan &Plan) { if (Plan.hasScalarVFOnly()) return; @@ -2320,6 +2364,7 @@ void VPlanTransforms::optimize(VPlan &Plan) { runPass(removeDeadRecipes, Plan); runPass(simplifyBlends, Plan); runPass(legalizeAndOptimizeInductions, Plan); + runPass(optimizeScatterWithUniformAddr, Plan); runPass(narrowToSingleScalarRecipes, Plan); runPass(removeRedundantExpandSCEVRecipes, Plan); runPass(simplifyRecipes, Plan); diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll index 1c6954c187e5f..7ab773d48e33d 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/gather-scatter-cost.ll @@ -31,17 +31,19 @@ define void @predicated_uniform_load(ptr %src, i32 %n, ptr %dst, i1 %cond) { ; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, i1 [[COND:%.*]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT1:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer ; CHECK-NEXT: [[TMP13:%.*]] = xor [[BROADCAST_SPLAT1]], splat (i1 true) -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, ptr [[BOXES]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer -; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement poison, ptr [[NBRBOXES]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement poison, 
ptr [[BOXES]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector [[BROADCAST_SPLATINSERT3]], poison, zeroinitializer ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[AVL:%.*]] = phi i32 [ [[TMP3]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 4, i1 true) -; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call @llvm.vp.gather.nxv4i32.nxv4p0( align 4 [[BROADCAST_SPLAT]], [[TMP13]], i32 [[TMP10]]), !alias.scope [[META0:![0-9]+]] +; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call @llvm.vp.gather.nxv4i32.nxv4p0( align 4 [[BROADCAST_SPLAT4]], [[TMP13]], i32 [[TMP10]]), !alias.scope [[META0:![0-9]+]] ; CHECK-NEXT: [[PREDPHI:%.*]] = select [[BROADCAST_SPLAT1]], zeroinitializer, [[WIDE_MASKED_GATHER]] -; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0( [[PREDPHI]], align 4 [[BROADCAST_SPLAT4]], splat (i1 true), i32 [[TMP10]]), !alias.scope [[META3:![0-9]+]], !noalias [[META0]] +; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-NEXT: [[TMP18:%.*]] = mul nuw i32 [[TMP12]], 4 +; CHECK-NEXT: [[TMP14:%.*]] = sub i32 [[TMP18]], 1 +; CHECK-NEXT: [[TMP15:%.*]] = extractelement [[PREDPHI]], i32 [[TMP14]] +; CHECK-NEXT: store i32 [[TMP15]], ptr [[NBRBOXES]], align 4, !alias.scope [[META3:![0-9]+]], !noalias [[META0]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP10]] ; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i32 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] @@ -63,7 +65,7 @@ define void @predicated_uniform_load(ptr %src, i32 %n, ptr %dst, i1 %cond) { ; CHECK-NEXT: store i32 [[STORE]], ptr [[NBRBOXES]], align 4 ; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp sgt i32 [[IV]], [[IBOX]] -; CHECK-NEXT: br i1 [[EXITCOND]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP9:![0-9]+]] +; CHECK-NEXT: br i1 
[[EXITCOND]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: exit: ; CHECK-NEXT: ret void ; @@ -114,7 +116,7 @@ define void @predicated_strided_store(ptr %start) { ; RVA23-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP3]] ; RVA23-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; RVA23-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; RVA23-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; RVA23-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; RVA23: middle.block: ; RVA23-NEXT: br label [[LOOP:%.*]] ; RVA23: exit: @@ -141,7 +143,7 @@ define void @predicated_strided_store(ptr %start) { ; RVA23ZVL1024B-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP3]] ; RVA23ZVL1024B-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; RVA23ZVL1024B-NEXT: [[TMP7:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; RVA23ZVL1024B-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; RVA23ZVL1024B-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; RVA23ZVL1024B: middle.block: ; RVA23ZVL1024B-NEXT: br label [[LOOP:%.*]] ; RVA23ZVL1024B: exit: @@ -188,13 +190,14 @@ define void @store_to_addr_generated_from_invariant_addr(ptr noalias %p0, ptr no ; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, i64 [[TMP6]], i64 0 ; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P3:%.*]], [[BROADCAST_SPLAT2]] -; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i32.nxv2p0( zeroinitializer, align 4 [[TMP7]], splat (i1 true), i32 [[TMP3]]) -; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i32.nxv2p0( zeroinitializer, align 4 [[TMP7]], splat (i1 true), i32 [[TMP3]]) -; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i8.nxv2p0( zeroinitializer, align 
1 [[TMP7]], splat (i1 true), i32 [[TMP3]]) +; CHECK-NEXT: [[TMP8:%.*]] = extractelement [[TMP7]], i32 0 +; CHECK-NEXT: store i32 0, ptr [[TMP8]], align 4 +; CHECK-NEXT: store i32 0, ptr [[TMP8]], align 4 +; CHECK-NEXT: store i8 0, ptr [[TMP8]], align 1 ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP4]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT]] ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: exit: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll b/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll index 4ccec2ca61778..d38a286922c24 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/induction-costs.ll @@ -126,8 +126,6 @@ define void @test_3_inductions(ptr noalias %dst, ptr noalias %src, i64 %n) #1 { ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], 1 ; CHECK-NEXT: br label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, ptr [[DST]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; CHECK-NEXT: [[TMP1:%.*]] = call @llvm.stepvector.nxv2i32() ; CHECK-NEXT: [[TMP2:%.*]] = mul [[TMP1]], splat (i32 2) ; CHECK-NEXT: [[INDUCTION:%.*]] = add splat (i32 1), [[TMP2]] @@ -144,7 +142,11 @@ define void @test_3_inductions(ptr noalias %dst, ptr noalias %src, i64 %n) #1 { ; CHECK-NEXT: [[TMP5:%.*]] = or [[VEC_IND2]], [[VEC_IND]] ; CHECK-NEXT: [[TMP6:%.*]] = sext [[TMP5]] to ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[SRC]], [[TMP6]] -; CHECK-NEXT: call void @llvm.vp.scatter.nxv2p0.nxv2p0( [[TMP7]], align 8 
[[BROADCAST_SPLAT]], splat (i1 true), i32 [[TMP3]]) +; CHECK-NEXT: [[TMP12:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-NEXT: [[TMP13:%.*]] = mul nuw i32 [[TMP12]], 2 +; CHECK-NEXT: [[TMP10:%.*]] = sub i32 [[TMP13]], 1 +; CHECK-NEXT: [[TMP11:%.*]] = extractelement [[TMP7]], i32 [[TMP10]] +; CHECK-NEXT: store ptr [[TMP11]], ptr [[DST]], align 8 ; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP3]] to i64 ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP8]] ; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT4]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/pr154103.ll b/llvm/test/Transforms/LoopVectorize/RISCV/pr154103.ll index c35a3d7b9269f..6d94499a68782 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/pr154103.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/pr154103.ll @@ -6,29 +6,46 @@ define void @pr154103(ptr noalias %a, ptr noalias %b, ptr noalias %c, ptr noalias %d) { ; CHECK-LABEL: define void @pr154103( ; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], ptr noalias [[D:%.*]]) #[[ATTR0:[0-9]+]] { -; CHECK-NEXT: [[ENTRY:.*]]: -; CHECK-NEXT: br label %[[LOOP:.*]] -; CHECK: [[LOOP]]: -; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 1, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ] -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[IV]] -; CHECK-NEXT: [[X:%.*]] = load i8, ptr [[GEP]], align 1 -; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[X]] to i64 -; CHECK-NEXT: [[DIV:%.*]] = sdiv i64 0, [[CONV]] -; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[DIV]], 0 -; CHECK-NEXT: br i1 [[CMP]], label %[[THEN:.*]], label %[[LATCH]] -; CHECK: [[THEN]]: -; CHECK-NEXT: [[Y:%.*]] = load i8, ptr [[B]], align 1 -; CHECK-NEXT: [[ZEXT:%.*]] = zext i8 [[Y]] to i64 -; CHECK-NEXT: [[NOT:%.*]] = xor i64 [[ZEXT]], 0 -; CHECK-NEXT: br label %[[LATCH]] -; CHECK: [[LATCH]]: -; CHECK-NEXT: [[COND:%.*]] = phi i64 [ [[NOT]], %[[THEN]] ], [ 0, %[[LOOP]] ] -; CHECK-NEXT: [[TRUNC:%.*]] = trunc i64 [[COND]] to i16 +; CHECK-NEXT: [[ENTRY:.*:]] 
+; CHECK-NEXT: br label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, ptr [[B]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; CHECK-NEXT: [[TMP0:%.*]] = call @llvm.stepvector.nxv4i64() +; CHECK-NEXT: [[TMP1:%.*]] = mul [[TMP0]], splat (i64 7) +; CHECK-NEXT: [[INDUCTION:%.*]] = add splat (i64 1), [[TMP1]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ -7905747460161236406, %[[VECTOR_PH]] ], [ [[IV:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) +; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 +; CHECK-NEXT: [[TMP4:%.*]] = mul i64 7, [[TMP3]] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, i64 [[TMP4]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[A]], [[VEC_IND]] +; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call @llvm.vp.gather.nxv4i8.nxv4p0( align 1 [[TMP5]], splat (i1 true), i32 [[TMP2]]) +; CHECK-NEXT: [[TMP6:%.*]] = zext [[WIDE_MASKED_GATHER]] to +; CHECK-NEXT: [[TMP7:%.*]] = call @llvm.vp.merge.nxv4i64( splat (i1 true), [[TMP6]], splat (i64 1), i32 [[TMP2]]) +; CHECK-NEXT: [[TMP8:%.*]] = sdiv zeroinitializer, [[TMP7]] +; CHECK-NEXT: [[TMP9:%.*]] = icmp sgt [[TMP8]], zeroinitializer +; CHECK-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call @llvm.vp.gather.nxv4i8.nxv4p0( align 1 [[BROADCAST_SPLAT]], [[TMP9]], i32 [[TMP2]]) +; CHECK-NEXT: [[TMP10:%.*]] = zext [[WIDE_MASKED_GATHER3]] to +; CHECK-NEXT: [[TMP11:%.*]] = xor [[TMP10]], zeroinitializer +; CHECK-NEXT: [[PREDPHI:%.*]] = select [[TMP9]], [[TMP11]], zeroinitializer +; 
CHECK-NEXT: [[TMP12:%.*]] = trunc [[PREDPHI]] to +; CHECK-NEXT: [[TMP13:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-NEXT: [[TMP14:%.*]] = mul nuw i32 [[TMP13]], 4 +; CHECK-NEXT: [[TMP15:%.*]] = sub i32 [[TMP14]], 1 +; CHECK-NEXT: [[TRUNC:%.*]] = extractelement [[TMP12]], i32 [[TMP15]] ; CHECK-NEXT: store i16 [[TRUNC]], ptr [[C]], align 2 ; CHECK-NEXT: store i32 0, ptr [[D]], align 4 -; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 7 +; CHECK-NEXT: [[IV]] = sub nuw i64 [[AVL]], [[TMP3]] +; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT2]] ; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV]], 0 -; CHECK-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[LOOP]] +; CHECK-NEXT: br i1 [[DONE]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: br label %[[EXIT:.*]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll index 8d4d282a5236d..9e9207a7609df 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll @@ -7,33 +7,35 @@ define void @test(ptr %p, i64 %a, i8 %b) { ; CHECK-NEXT: entry: ; CHECK-NEXT: br label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i8 [[B]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer -; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, i64 [[A]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer -; CHECK-NEXT: [[TMP5:%.*]] = shl [[BROADCAST_SPLAT2]], splat (i64 48) -; CHECK-NEXT: [[TMP6:%.*]] = ashr [[TMP5]], splat (i64 52) -; CHECK-NEXT: [[TMP7:%.*]] = trunc [[TMP6]] to -; CHECK-NEXT: [[TMP8:%.*]] = zext [[BROADCAST_SPLAT]] to -; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = 
insertelement poison, ptr [[P]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector [[BROADCAST_SPLATINSERT3]], poison, zeroinitializer -; CHECK-NEXT: [[TMP9:%.*]] = call @llvm.stepvector.nxv2i32() -; CHECK-NEXT: [[TMP10:%.*]] = mul [[TMP9]], splat (i32 1) -; CHECK-NEXT: [[INDUCTION:%.*]] = add zeroinitializer, [[TMP10]] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i8 [[B]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, i64 [[A]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer +; CHECK-NEXT: [[TMP0:%.*]] = shl [[BROADCAST_SPLAT2]], splat (i64 48) +; CHECK-NEXT: [[TMP1:%.*]] = ashr [[TMP0]], splat (i64 52) +; CHECK-NEXT: [[TMP2:%.*]] = trunc [[TMP1]] to +; CHECK-NEXT: [[TMP3:%.*]] = zext [[BROADCAST_SPLAT]] to +; CHECK-NEXT: [[TMP4:%.*]] = call @llvm.stepvector.nxv8i32() +; CHECK-NEXT: [[TMP5:%.*]] = mul [[TMP4]], splat (i32 1) +; CHECK-NEXT: [[INDUCTION:%.*]] = add zeroinitializer, [[TMP5]] ; CHECK-NEXT: br label [[FOR_COND:%.*]] ; CHECK: vector.body: -; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[FOR_COND]] ] +; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[FOR_COND]] ] ; CHECK-NEXT: [[AVL:%.*]] = phi i32 [ 9, [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[FOR_COND]] ] -; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 2, i1 true) -; CHECK-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement poison, i32 [[TMP11]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector [[BROADCAST_SPLATINSERT7]], poison, zeroinitializer -; CHECK-NEXT: [[TMP12:%.*]] = icmp slt [[VEC_IND]], splat (i32 2) -; CHECK-NEXT: [[PREDPHI:%.*]] = select [[TMP12]], [[TMP8]], [[TMP7]] -; CHECK-NEXT: [[TMP16:%.*]] = shl 
[[PREDPHI]], splat (i32 8) -; CHECK-NEXT: [[TMP17:%.*]] = trunc [[TMP16]] to -; CHECK-NEXT: call void @llvm.vp.scatter.nxv2i8.nxv2p0( [[TMP17]], align 1 [[BROADCAST_SPLAT4]], splat (i1 true), i32 [[TMP11]]) +; CHECK-NEXT: [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 8, i1 true) +; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement poison, i32 [[TMP11]], i64 0 +; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector [[BROADCAST_SPLATINSERT3]], poison, zeroinitializer +; CHECK-NEXT: [[TMP7:%.*]] = icmp slt [[VEC_IND]], splat (i32 2) +; CHECK-NEXT: [[PREDPHI:%.*]] = select [[TMP7]], [[TMP3]], [[TMP2]] +; CHECK-NEXT: [[TMP8:%.*]] = shl [[PREDPHI]], splat (i32 8) +; CHECK-NEXT: [[TMP9:%.*]] = trunc [[TMP8]] to +; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-NEXT: [[TMP14:%.*]] = mul nuw i32 [[TMP10]], 8 +; CHECK-NEXT: [[TMP12:%.*]] = sub i32 [[TMP14]], 1 +; CHECK-NEXT: [[TMP13:%.*]] = extractelement [[TMP9]], i32 [[TMP12]] +; CHECK-NEXT: store i8 [[TMP13]], ptr [[P]], align 1 ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP11]] -; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT8]] +; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[BROADCAST_SPLAT4]] ; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i32 [[AVL_NEXT]], 0 ; CHECK-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_COND]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll index 232c354764e1a..49158dfdd9e9c 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll @@ -8,20 +8,20 @@ define void @truncate_to_minimal_bitwidths_widen_cast_recipe(ptr %src) { ; CHECK-LABEL: define void 
@truncate_to_minimal_bitwidths_widen_cast_recipe( ; CHECK-SAME: ptr [[SRC:%.*]]) #[[ATTR0:[0-9]+]] { -; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: br label %[[VECTOR_PH:.*]] -; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] -; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 9, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 8, i1 true) -; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i8.nxv8p0( zeroinitializer, align 1 zeroinitializer, splat (i1 true), i32 [[TMP7]]) -; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP7]] to i64 -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]] -; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] -; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] +; CHECK-NEXT: [[ENTRY:.*]]: +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV]] +; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[GEP_SRC]], align 1 +; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP0]] to i32 +; CHECK-NEXT: [[MUL16:%.*]] = mul i32 0, [[CONV]] +; CHECK-NEXT: [[SHR35:%.*]] = lshr i32 [[MUL16]], 1 +; CHECK-NEXT: [[CONV36:%.*]] = trunc i32 [[SHR35]] to i8 +; CHECK-NEXT: store i8 [[CONV36]], ptr null, align 1 +; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 +; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 8 +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -75,7 +75,7 @@ define void @truncate_i16_to_i8_cse(ptr noalias %src, ptr noalias %dst) { ; CHECK-NEXT: store i8 [[TMP10]], ptr [[DST]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]] ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 
[[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: [[MIDDLE_BLOCK]]: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 4294967296, [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] @@ -95,7 +95,7 @@ define void @truncate_i16_to_i8_cse(ptr noalias %src, ptr noalias %dst) { ; CHECK-NEXT: [[COUNT_NEXT]] = add i32 [[COUNT]], 1 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[COUNT_NEXT]], 0 ; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 -; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -124,6 +124,5 @@ exit: ; preds = %loop ; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} ; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]} -; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META2]], [[META1]]} +; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]} ;. 
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll b/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll index df848f2db917f..4e3013b887ad3 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll @@ -20,8 +20,6 @@ define void @type_info_cache_clobber(ptr %dstv, ptr %src, i64 %wide.trip.count) ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, ptr [[DSTV]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] ; CHECK: [[VECTOR_BODY]]: ; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -34,8 +32,12 @@ define void @type_info_cache_clobber(ptr %dstv, ptr %src, i64 %wide.trip.count) ; CHECK-NEXT: [[TMP16:%.*]] = icmp ult [[TMP15]], zeroinitializer ; CHECK-NEXT: [[TMP17:%.*]] = select [[TMP16]], [[TMP23]], zeroinitializer ; CHECK-NEXT: [[TMP24:%.*]] = trunc [[TMP17]] to -; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i8.nxv8p0( [[TMP24]], align 1 [[BROADCAST_SPLAT]], splat (i1 true), i32 [[TMP11]]), !alias.scope [[META3:![0-9]+]], !noalias [[META0]] -; CHECK-NEXT: call void @llvm.vp.scatter.nxv8i16.nxv8p0( zeroinitializer, align 2 zeroinitializer, splat (i1 true), i32 [[TMP11]]) +; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i32 [[TMP9]], 8 +; CHECK-NEXT: [[TMP14:%.*]] = sub i32 [[TMP10]], 1 +; CHECK-NEXT: [[TMP12:%.*]] = extractelement [[TMP24]], i32 [[TMP14]] +; CHECK-NEXT: store i8 [[TMP12]], ptr [[DSTV]], align 1, !alias.scope [[META3:![0-9]+]], !noalias [[META0]] +; CHECK-NEXT: store i16 0, ptr null, align 2 
; CHECK-NEXT: [[TMP20:%.*]] = zext i32 [[TMP11]] to i64 ; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP20]], [[EVL_BASED_IV]] ; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP20]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll index 1e21c753840e9..9548820f56ba1 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll @@ -585,8 +585,6 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias ; SCALABLE-NEXT: [[ENTRY:.*:]] ; SCALABLE-NEXT: br label %[[VECTOR_PH:.*]] ; SCALABLE: [[VECTOR_PH]]: -; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, ptr [[B]], i64 0 -; SCALABLE-NEXT: [[BROADCAST_SPLAT1:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer ; SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i64 [[V]], i64 0 ; SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; SCALABLE-NEXT: [[TMP6:%.*]] = call @llvm.stepvector.nxv2i64() @@ -601,7 +599,11 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias ; SCALABLE-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 ; SCALABLE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement poison, i64 [[TMP8]], i64 0 ; SCALABLE-NEXT: [[DOTSPLAT:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer -; SCALABLE-NEXT: call void @llvm.vp.scatter.nxv2i64.nxv2p0( [[VEC_IND]], align 8 [[BROADCAST_SPLAT1]], splat (i1 true), i32 [[TMP7]]) +; SCALABLE-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32() +; SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i32 [[TMP4]], 2 +; SCALABLE-NEXT: [[TMP11:%.*]] = sub i32 [[TMP5]], 1 +; SCALABLE-NEXT: [[TMP12:%.*]] = extractelement [[VEC_IND]], i32 [[TMP11]] +; SCALABLE-NEXT: store i64 [[TMP12]], ptr [[B]], align 8 ; SCALABLE-NEXT: [[TMP16:%.*]] = getelementptr 
inbounds i64, ptr [[A]], i64 [[TMP10]] ; SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[BROADCAST_SPLAT]], ptr align 8 [[TMP16]], splat (i1 true), i32 [[TMP7]]) ; SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[TMP10]] @@ -656,8 +658,6 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias ; TF-SCALABLE-NEXT: [[ENTRY:.*:]] ; TF-SCALABLE-NEXT: br label %[[VECTOR_PH:.*]] ; TF-SCALABLE: [[VECTOR_PH]]: -; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, ptr [[B]], i64 0 -; TF-SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement poison, i64 [[V]], i64 0 ; TF-SCALABLE-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector [[BROADCAST_SPLATINSERT2]], poison, zeroinitializer ; TF-SCALABLE-NEXT: [[TMP5:%.*]] = call @llvm.stepvector.nxv2i64() @@ -672,7 +672,11 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias ; TF-SCALABLE-NEXT: [[TMP13:%.*]] = zext i32 [[TMP9]] to i64 ; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, i64 [[TMP13]], i64 0 ; TF-SCALABLE-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer -; TF-SCALABLE-NEXT: call void @llvm.vp.scatter.nxv2i64.nxv2p0( [[VEC_IND]], align 8 [[BROADCAST_SPLAT]], splat (i1 true), i32 [[TMP9]]) +; TF-SCALABLE-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32() +; TF-SCALABLE-NEXT: [[TMP8:%.*]] = mul nuw i32 [[TMP4]], 2 +; TF-SCALABLE-NEXT: [[TMP6:%.*]] = sub i32 [[TMP8]], 1 +; TF-SCALABLE-NEXT: [[TMP11:%.*]] = extractelement [[VEC_IND]], i32 [[TMP6]] +; TF-SCALABLE-NEXT: store i64 [[TMP11]], ptr [[B]], align 8 ; TF-SCALABLE-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]] ; TF-SCALABLE-NEXT: call void @llvm.vp.store.nxv2i64.p0( [[BROADCAST_SPLAT3]], ptr align 8 [[TMP10]], splat (i1 true), i32 [[TMP9]]) ; TF-SCALABLE-NEXT: 
[[INDEX_EVL_NEXT]] = add nuw i64 [[TMP13]], [[INDEX]] diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vf-will-not-generate-any-vector-insts.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vf-will-not-generate-any-vector-insts.ll index ed797fcd6c026..7737214003080 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vf-will-not-generate-any-vector-insts.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vf-will-not-generate-any-vector-insts.ll @@ -7,41 +7,15 @@ target triple = "riscv64-unknown-unknown-elf" define void @vf_will_not_generate_any_vector_insts(ptr %src, ptr %dst) { ; CHECK-LABEL: define void @vf_will_not_generate_any_vector_insts( ; CHECK-SAME: ptr [[SRC:%.*]], ptr [[DST:%.*]]) #[[ATTR0:[0-9]+]] { -; CHECK-NEXT: [[ENTRY:.*:]] -; CHECK-NEXT: br label %[[VECTOR_MEMCHECK:.*]] -; CHECK: [[VECTOR_MEMCHECK]]: -; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DST]], i64 4 -; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[SRC]], i64 4 -; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[DST]], [[SCEVGEP1]] -; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP]] -; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] -; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] -; CHECK: [[VECTOR_PH]]: -; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, ptr [[DST]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer -; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] -; CHECK: [[VECTOR_BODY]]: -; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ 100, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 4, i1 true) -; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[SRC]], align 4, !alias.scope [[META0:![0-9]+]] -; CHECK-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement poison, i32 [[TMP6]], i64 0 -; CHECK-NEXT: [[BROADCAST_SPLAT3:%.*]] = 
shufflevector [[BROADCAST_SPLATINSERT2]], poison, zeroinitializer -; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0( [[BROADCAST_SPLAT3]], align 4 [[BROADCAST_SPLAT]], splat (i1 true), i32 [[TMP5]]), !alias.scope [[META3:![0-9]+]], !noalias [[META0]] -; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP5]] to i64 -; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP7]] -; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[AVL_NEXT]], 0 -; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] -; CHECK: [[MIDDLE_BLOCK]]: -; CHECK-NEXT: br label %[[EXIT:.*]] -; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[ENTRY:.*]]: ; CHECK-NEXT: br label %[[LOOP:.*]] ; CHECK: [[LOOP]]: -; CHECK-NEXT: [[TMP2:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[TMP3:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[TMP2:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[TMP3:%.*]], %[[LOOP]] ] ; CHECK-NEXT: [[DOTPRE:%.*]] = load i32, ptr [[SRC]], align 4 ; CHECK-NEXT: store i32 [[DOTPRE]], ptr [[DST]], align 4 ; CHECK-NEXT: [[TMP3]] = add nuw i64 [[TMP2]], 1 ; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[TMP3]], 100 -; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP8:![0-9]+]] +; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]] ; CHECK: [[EXIT]]: ; CHECK-NEXT: ret void ; @@ -59,14 +33,3 @@ loop: exit: ret void } -;. -; CHECK: [[META0]] = !{[[META1:![0-9]+]]} -; CHECK: [[META1]] = distinct !{[[META1]], [[META2:![0-9]+]]} -; CHECK: [[META2]] = distinct !{[[META2]], !"LVerDomain"} -; CHECK: [[META3]] = !{[[META4:![0-9]+]]} -; CHECK: [[META4]] = distinct !{[[META4]], [[META2]]} -; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META6:![0-9]+]], [[META7:![0-9]+]]} -; CHECK: [[META6]] = !{!"llvm.loop.isvectorized", i32 1} -; CHECK: [[META7]] = !{!"llvm.loop.unroll.runtime.disable"} -; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META6]]} -;.