diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 50136a8ecae57..ed6c35a17ffad 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -8240,14 +8240,14 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(
   // the vector loop or when not folding the tail. In the later case, we know
   // that the canonical induction increment will not overflow as the vector trip
   // count is >= increment and a multiple of the increment.
+  VPRegionBlock *LoopRegion = Plan->getVectorLoopRegion();
   bool HasNUW = !IVUpdateMayOverflow || Style == TailFoldingStyle::None;
   if (!HasNUW) {
-    auto *IVInc = Plan->getVectorLoopRegion()
-                      ->getExitingBasicBlock()
-                      ->getTerminator()
-                      ->getOperand(0);
-    assert(match(IVInc, m_VPInstruction<Instruction::Add>(
-                            m_Specific(Plan->getCanonicalIV()), m_VPValue())) &&
+    auto *IVInc =
+        LoopRegion->getExitingBasicBlock()->getTerminator()->getOperand(0);
+    assert(match(IVInc,
+                 m_VPInstruction<Instruction::Add>(
+                     m_Specific(LoopRegion->getCanonicalIV()), m_VPValue())) &&
            "Did not find the canonical IV increment");
     cast<VPRecipeWithIRFlags>(IVInc)->dropPoisonGeneratingFlags();
   }
@@ -8293,7 +8293,6 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(
 
   // Scan the body of the loop in a topological order to visit each basic block
   // after having visited its predecessor basic blocks.
-  VPRegionBlock *LoopRegion = Plan->getVectorLoopRegion();
   VPBasicBlock *HeaderVPBB = LoopRegion->getEntryBasicBlock();
   ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>> RPOT(
       HeaderVPBB);
@@ -8377,8 +8376,8 @@ VPlanPtr LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(
   for (VPValue *Old : Old2New.keys())
     Old->getDefiningRecipe()->eraseFromParent();
 
-  assert(isa<VPRegionBlock>(Plan->getVectorLoopRegion()) &&
-         !Plan->getVectorLoopRegion()->getEntryBasicBlock()->empty() &&
+  assert(isa<VPRegionBlock>(LoopRegion) &&
+         !LoopRegion->getEntryBasicBlock()->empty() &&
          "entry block must be set to a VPRegionBlock having a non-empty entry "
         "VPBasicBlock");
 
@@ -9320,8 +9319,9 @@ static void preparePlanForMainVectorLoop(VPlan &MainPlan, VPlan &EpiPlan) {
   if (ResumePhiIter == MainScalarPH->phis().end()) {
     VPBuilder ScalarPHBuilder(MainScalarPH, MainScalarPH->begin());
     ResumePhi = ScalarPHBuilder.createScalarPhi(
-        {VectorTC, MainPlan.getCanonicalIV()->getStartValue()}, {},
-        "vec.epilog.resume.val");
+        {VectorTC,
+         MainPlan.getVectorLoopRegion()->getCanonicalIV()->getStartValue()},
+        {}, "vec.epilog.resume.val");
   } else {
     ResumePhi = cast<VPPhi>(&*ResumePhiIter);
     if (MainScalarPH->begin() == MainScalarPH->end())
@@ -9348,7 +9348,7 @@ static SmallVector preparePlanForEpilogueVectorLoop(
   VPBasicBlock *Header = VectorLoop->getEntryBasicBlock();
   Header->setName("vec.epilog.vector.body");
 
-  VPCanonicalIVPHIRecipe *IV = Plan.getCanonicalIV();
+  VPCanonicalIVPHIRecipe *IV = VectorLoop->getCanonicalIV();
   // When vectorizing the epilogue loop, the canonical induction needs to be
   // adjusted by the value after the main vector loop. Find the resume value
   // created during execution of the main VPlan. It must be the first phi in the
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index fb696bea671af..e8b9aa0432f22 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -4066,6 +4066,19 @@ class LLVM_ABI_FOR_TEST VPRegionBlock : public VPBlockBase {
   /// Remove the current region from its VPlan, connecting its predecessor to
   /// its entry, and its exiting block to its successor.
   void dissolveToCFGLoop();
+
+  /// Returns the canonical induction recipe of the region.
+  VPCanonicalIVPHIRecipe *getCanonicalIV() {
+    VPBasicBlock *EntryVPBB = getEntryBasicBlock();
+    if (EntryVPBB->empty()) {
+      // VPlan native path. TODO: Unify both code paths.
+      EntryVPBB = cast<VPBasicBlock>(EntryVPBB->getSingleSuccessor());
+    }
+    return cast<VPCanonicalIVPHIRecipe>(&*EntryVPBB->begin());
+  }
+  const VPCanonicalIVPHIRecipe *getCanonicalIV() const {
+    return const_cast<VPRegionBlock *>(this)->getCanonicalIV();
+  }
 };
 
 /// VPlan models a candidate for vectorization, encoding various decisions take
@@ -4377,16 +4390,6 @@ class VPlan {
   LLVM_DUMP_METHOD void dump() const;
 #endif
 
-  /// Returns the canonical induction recipe of the vector loop.
-  VPCanonicalIVPHIRecipe *getCanonicalIV() {
-    VPBasicBlock *EntryVPBB = getVectorLoopRegion()->getEntryBasicBlock();
-    if (EntryVPBB->empty()) {
-      // VPlan native path.
-      EntryVPBB = cast<VPBasicBlock>(EntryVPBB->getSingleSuccessor());
-    }
-    return cast<VPCanonicalIVPHIRecipe>(&*EntryVPBB->begin());
-  }
-
   VPValue *getSCEVExpansion(const SCEV *S) const {
     return SCEVToExpansion.lookup(S);
   }
diff --git a/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp b/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp
index 81deba2932ef8..55ec19281c876 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanConstruction.cpp
@@ -661,9 +661,11 @@ void VPlanTransforms::attachCheckBlock(VPlan &Plan, Value *Cond,
   }
 
   VPIRMetadata VPBranchWeights;
-  auto *Term = VPBuilder(CheckBlockVPBB)
-                   .createNaryOp(VPInstruction::BranchOnCond, {CondVPV},
-                                 Plan.getCanonicalIV()->getDebugLoc());
+  auto *Term =
+      VPBuilder(CheckBlockVPBB)
+          .createNaryOp(
+              VPInstruction::BranchOnCond, {CondVPV},
+              Plan.getVectorLoopRegion()->getCanonicalIV()->getDebugLoc());
   if (AddBranchWeights) {
     MDBuilder MDB(Plan.getContext());
     MDNode *BranchWeights =
@@ -925,8 +927,8 @@ bool VPlanTransforms::handleMaxMinNumReductions(VPlan &Plan) {
     if (auto *DerivedIV = dyn_cast<VPDerivedIVRecipe>(VecV)) {
       if (DerivedIV->getNumUsers() == 1 &&
          DerivedIV->getOperand(1) == &Plan.getVectorTripCount()) {
-        auto *NewSel = Builder.createSelect(AnyNaN, Plan.getCanonicalIV(),
-                                            &Plan.getVectorTripCount());
+        auto *NewSel = Builder.createSelect(
+            AnyNaN, LoopRegion->getCanonicalIV(), &Plan.getVectorTripCount());
         DerivedIV->moveAfter(&*Builder.getInsertPoint());
         DerivedIV->setOperand(1, NewSel);
         continue;
@@ -939,7 +941,8 @@ bool VPlanTransforms::handleMaxMinNumReductions(VPlan &Plan) {
                         "FMaxNum/FMinNum reduction.\n");
       return false;
     }
-    auto *NewSel = Builder.createSelect(AnyNaN, Plan.getCanonicalIV(), VecV);
+    auto *NewSel =
+        Builder.createSelect(AnyNaN, LoopRegion->getCanonicalIV(), VecV);
     ResumeR->setOperand(0, NewSel);
   }
 
diff --git a/llvm/lib/Transforms/Vectorize/VPlanPredicator.cpp b/llvm/lib/Transforms/Vectorize/VPlanPredicator.cpp
index 0c27d535b680e..fb17d5dd62b9d 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanPredicator.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanPredicator.cpp
@@ -168,7 +168,8 @@ void VPPredicator::createHeaderMask(VPBasicBlock *HeaderVPBB, bool FoldTail) {
   // non-phi instructions.
   auto &Plan = *HeaderVPBB->getPlan();
-  auto *IV = new VPWidenCanonicalIVRecipe(Plan.getCanonicalIV());
+  auto *IV =
+      new VPWidenCanonicalIVRecipe(HeaderVPBB->getParent()->getCanonicalIV());
   Builder.setInsertPoint(HeaderVPBB, HeaderVPBB->getFirstNonPhi());
   Builder.insert(IV);
 
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index 8e91677292788..f367941372b23 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -2344,7 +2344,7 @@ bool VPWidenIntOrFpInductionRecipe::isCanonical() const {
     return false;
   auto *StepC = dyn_cast<ConstantInt>(getStepValue()->getLiveInIRValue());
   auto *StartC = dyn_cast<ConstantInt>(getStartValue()->getLiveInIRValue());
-  auto *CanIV = cast<VPCanonicalIVPHIRecipe>(&*getParent()->begin());
+  auto *CanIV = getParent()->getParent()->getCanonicalIV();
   return StartC && StartC->isZero() && StepC && StepC->isOne() &&
          getScalarType() == CanIV->getScalarType();
 }
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 7563cd719b19c..6723507f784ec 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -501,7 +501,8 @@ static void removeRedundantInductionCasts(VPlan &Plan) {
 /// Try to replace VPWidenCanonicalIVRecipes with a widened canonical IV
 /// recipe, if it exists.
 static void removeRedundantCanonicalIVs(VPlan &Plan) {
-  VPCanonicalIVPHIRecipe *CanonicalIV = Plan.getCanonicalIV();
+  VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
+  VPCanonicalIVPHIRecipe *CanonicalIV = LoopRegion->getCanonicalIV();
   VPWidenCanonicalIVRecipe *WidenNewIV = nullptr;
   for (VPUser *U : CanonicalIV->users()) {
     WidenNewIV = dyn_cast<VPWidenCanonicalIVRecipe>(U);
@@ -512,7 +513,7 @@ static void removeRedundantCanonicalIVs(VPlan &Plan) {
   if (!WidenNewIV)
     return;
 
-  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
+  VPBasicBlock *HeaderVPBB = LoopRegion->getEntryBasicBlock();
   for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
     auto *WidenOriginalIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
 
@@ -582,8 +583,9 @@ createScalarIVSteps(VPlan &Plan, InductionDescriptor::InductionKind Kind,
                     FPMathOperator *FPBinOp, Instruction *TruncI,
                     VPValue *StartV, VPValue *Step, DebugLoc DL,
                     VPBuilder &Builder) {
-  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
-  VPCanonicalIVPHIRecipe *CanonicalIV = Plan.getCanonicalIV();
+  VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
+  VPBasicBlock *HeaderVPBB = LoopRegion->getEntryBasicBlock();
+  VPCanonicalIVPHIRecipe *CanonicalIV = LoopRegion->getCanonicalIV();
   VPSingleDefRecipe *BaseIV = Builder.createDerivedIV(
       Kind, FPBinOp, StartV, CanonicalIV, Step, "offset.idx");
 
@@ -800,8 +802,9 @@ static VPValue *optimizeEarlyExitInductionUser(VPlan &Plan,
     return nullptr;
 
   // Calculate the final index.
-  VPValue *EndValue = Plan.getCanonicalIV();
-  auto CanonicalIVType = Plan.getCanonicalIV()->getScalarType();
+  VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
+  auto *CanonicalIV = LoopRegion->getCanonicalIV();
+  Type *CanonicalIVType = CanonicalIV->getScalarType();
   VPBuilder B(cast<VPBasicBlock>(PredVPBB));
 
   DebugLoc DL = cast<VPInstruction>(Op)->getDebugLoc();
@@ -810,7 +813,8 @@ static VPValue *optimizeEarlyExitInductionUser(VPlan &Plan,
   Type *FirstActiveLaneType = TypeInfo.inferScalarType(FirstActiveLane);
   FirstActiveLane = B.createScalarZExtOrTrunc(FirstActiveLane, CanonicalIVType,
                                               FirstActiveLaneType, DL);
-  EndValue = B.createNaryOp(Instruction::Add, {EndValue, FirstActiveLane}, DL);
+  VPValue *EndValue =
+      B.createNaryOp(Instruction::Add, {CanonicalIV, FirstActiveLane}, DL);
 
   // `getOptimizableIVOf()` always returns the pre-incremented IV, so if it
   // changed it means the exit is using the incremented value, so we need to
@@ -1532,7 +1536,7 @@ static bool isConditionTrueViaVFAndUF(VPValue *Cond, VPlan &Plan,
       return isConditionTrueViaVFAndUF(C, Plan, BestVF, BestUF, SE);
     });
 
-  auto *CanIV = Plan.getCanonicalIV();
+  auto *CanIV = Plan.getVectorLoopRegion()->getCanonicalIV();
   if (!match(Cond, m_SpecificICmp(CmpInst::ICMP_EQ,
                                   m_Specific(CanIV->getBackedgeValue()),
                                   m_Specific(&Plan.getVectorTripCount()))))
@@ -2316,7 +2320,7 @@ static VPActiveLaneMaskPHIRecipe *addVPLaneMaskPhiAndUpdateExitBranch(
     VPlan &Plan, bool DataAndControlFlowWithoutRuntimeCheck) {
   VPRegionBlock *TopRegion = Plan.getVectorLoopRegion();
   VPBasicBlock *EB = TopRegion->getExitingBasicBlock();
-  auto *CanonicalIVPHI = Plan.getCanonicalIV();
+  auto *CanonicalIVPHI = TopRegion->getCanonicalIV();
   VPValue *StartV = CanonicalIVPHI->getStartValue();
 
   auto *CanonicalIVIncrement =
@@ -2355,7 +2359,7 @@ static VPActiveLaneMaskPHIRecipe *addVPLaneMaskPhiAndUpdateExitBranch(
 
   // Create the active lane mask instruction in the VPlan preheader.
   VPValue *ALMMultiplier = Plan.getOrAddLiveIn(
-      ConstantInt::get(Plan.getCanonicalIV()->getScalarType(), 1));
+      ConstantInt::get(TopRegion->getCanonicalIV()->getScalarType(), 1));
   auto *EntryALM = Builder.createNaryOp(VPInstruction::ActiveLaneMask,
                                         {EntryIncrement, TC, ALMMultiplier}, DL,
                                         "active.lane.mask.entry");
@@ -2391,13 +2395,15 @@ static VPActiveLaneMaskPHIRecipe *addVPLaneMaskPhiAndUpdateExitBranch(
 /// TODO: Introduce explicit recipe for header-mask instead of searching
 /// for the header-mask pattern manually.
 static VPSingleDefRecipe *findHeaderMask(VPlan &Plan) {
+  VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
   SmallVector<VPValue *> WideCanonicalIVs;
-  auto *FoundWidenCanonicalIVUser = find_if(Plan.getCanonicalIV()->users(),
-                                            IsaPred<VPWidenCanonicalIVRecipe>);
-  assert(count_if(Plan.getCanonicalIV()->users(),
+  auto *FoundWidenCanonicalIVUser = find_if(
+      LoopRegion->getCanonicalIV()->users(), IsaPred<VPWidenCanonicalIVRecipe>);
+  assert(count_if(LoopRegion->getCanonicalIV()->users(),
                   IsaPred<VPWidenCanonicalIVRecipe>) <= 1 &&
          "Must have at most one VPWideCanonicalIVRecipe");
-  if (FoundWidenCanonicalIVUser != Plan.getCanonicalIV()->users().end()) {
+  if (FoundWidenCanonicalIVUser !=
+      LoopRegion->getCanonicalIV()->users().end()) {
     auto *WideCanonicalIV =
         cast<VPWidenCanonicalIVRecipe>(*FoundWidenCanonicalIVUser);
     WideCanonicalIVs.push_back(WideCanonicalIV);
@@ -2405,7 +2411,7 @@ static VPSingleDefRecipe *findHeaderMask(VPlan &Plan) {
 
   // Also include VPWidenIntOrFpInductionRecipes that represent a widened
   // version of the canonical induction.
-  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
+  VPBasicBlock *HeaderVPBB = LoopRegion->getEntryBasicBlock();
   for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
     auto *WidenOriginalIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
     if (WidenOriginalIV && WidenOriginalIV->isCanonical())
@@ -2438,8 +2444,9 @@ void VPlanTransforms::addActiveLaneMask(
          "DataAndControlFlowWithoutRuntimeCheck implies "
          "UseActiveLaneMaskForControlFlow");
 
-  auto *FoundWidenCanonicalIVUser = find_if(Plan.getCanonicalIV()->users(),
-                                            IsaPred<VPWidenCanonicalIVRecipe>);
+  VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
+  auto *FoundWidenCanonicalIVUser = find_if(
+      LoopRegion->getCanonicalIV()->users(), IsaPred<VPWidenCanonicalIVRecipe>);
   assert(FoundWidenCanonicalIVUser &&
          "Must have widened canonical IV when tail folding!");
   VPSingleDefRecipe *HeaderMask = findHeaderMask(Plan);
@@ -2452,7 +2459,7 @@ void VPlanTransforms::addActiveLaneMask(
   } else {
     VPBuilder B = VPBuilder::getToInsertAfter(WideCanonicalIV);
     VPValue *ALMMultiplier = Plan.getOrAddLiveIn(
-        ConstantInt::get(Plan.getCanonicalIV()->getScalarType(), 1));
+        ConstantInt::get(LoopRegion->getCanonicalIV()->getScalarType(), 1));
     LaneMask = B.createNaryOp(VPInstruction::ActiveLaneMask,
                               {WideCanonicalIV, Plan.getTripCount(), ALMMultiplier},
                               DL, "active.lane.mask");
@@ -2562,9 +2569,10 @@ static void transformRecipestoEVLRecipes(VPlan &Plan, VPValue &EVL) {
   });
 
   assert(all_of(Plan.getVFxUF().users(),
-                [&Plan](VPUser *U) {
-                  return match(U, m_c_Add(m_Specific(Plan.getCanonicalIV()),
-                                          m_Specific(&Plan.getVFxUF()))) ||
+                [&LoopRegion, &Plan](VPUser *U) {
+                  return match(U,
+                               m_c_Add(m_Specific(LoopRegion->getCanonicalIV()),
+                                       m_Specific(&Plan.getVFxUF()))) ||
                          isa<VPWidenPointerInductionRecipe>(U);
                 }) &&
         "Only users of VFxUF should be VPWidenPointerInductionRecipe and the "
@@ -2719,9 +2727,10 @@ void VPlanTransforms::addExplicitVectorLength(
     VPlan &Plan, const std::optional<unsigned> &MaxSafeElements) {
   if (Plan.hasScalarVFOnly())
     return;
-  VPBasicBlock *Header = Plan.getVectorLoopRegion()->getEntryBasicBlock();
+  VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
+  VPBasicBlock *Header = LoopRegion->getEntryBasicBlock();
 
-  auto *CanonicalIVPHI = Plan.getCanonicalIV();
+  auto *CanonicalIVPHI = LoopRegion->getCanonicalIV();
   auto *CanIVTy = CanonicalIVPHI->getScalarType();
   VPValue *StartV = CanonicalIVPHI->getStartValue();
 
@@ -4165,7 +4174,7 @@ void VPlanTransforms::narrowInterleaveGroups(VPlan &Plan, ElementCount VF,
 
   // Adjust induction to reflect that the transformed plan only processes one
   // original iteration.
-  auto *CanIV = Plan.getCanonicalIV();
+  auto *CanIV = VectorLoop->getCanonicalIV();
   auto *Inc = cast<VPInstruction>(CanIV->getBackedgeValue());
   VPBuilder PHBuilder(Plan.getVectorPreheader());
 
diff --git a/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp b/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp
index 5e7f19faebb56..14682d851e765 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanUnroll.cpp
@@ -69,7 +69,8 @@ class UnrollState {
                            VPBasicBlock::iterator InsertPtForPhi);
 
   VPValue *getConstantVPV(unsigned Part) {
-    Type *CanIVIntTy = Plan.getCanonicalIV()->getScalarType();
+    Type *CanIVIntTy =
+        Plan.getVectorLoopRegion()->getCanonicalIV()->getScalarType();
     return Plan.getOrAddLiveIn(ConstantInt::get(CanIVIntTy, Part));
   }
 
diff --git a/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp b/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp
index 66748c534f10b..72401889a8b48 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp
@@ -67,8 +67,10 @@ bool vputils::isHeaderMask(const VPValue *V, VPlan &Plan) {
 
   if (match(V, m_ActiveLaneMask(m_VPValue(A), m_VPValue(B), m_One())))
     return B == Plan.getTripCount() &&
-           (match(A, m_ScalarIVSteps(m_Specific(Plan.getCanonicalIV()), m_One(),
-                                     m_Specific(&Plan.getVF()))) ||
+           (match(A,
+                  m_ScalarIVSteps(
+                      m_Specific(Plan.getVectorLoopRegion()->getCanonicalIV()),
+                      m_One(), m_Specific(&Plan.getVF()))) ||
             IsWideCanonicalIV(A));
 
   return match(V, m_ICmp(m_VPValue(A), m_VPValue(B))) && IsWideCanonicalIV(A) &&
@@ -102,7 +104,8 @@ bool vputils::isUniformAcrossVFsAndUFs(VPValue *V) {
      return all_of(R->operands(), isUniformAcrossVFsAndUFs);
    }
 
-  auto *CanonicalIV = R->getParent()->getPlan()->getCanonicalIV();
+  auto *CanonicalIV =
+      R->getParent()->getEnclosingLoopRegion()->getCanonicalIV();
   // Canonical IV chain is uniform.
   if (V == CanonicalIV || V == CanonicalIV->getBackedgeValue())
     return true;