diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 25557f1d5d651..e95910b78dcac 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -689,31 +689,6 @@ static void legalizeAndOptimizeInductions(VPlan &Plan) {
     if (!PhiR)
       continue;
 
-    // Try to narrow wide and replicating recipes to uniform recipes, based on
-    // VPlan analysis.
-    // TODO: Apply to all recipes in the future, to replace legacy uniformity
-    // analysis.
-    auto Users = collectUsersRecursively(PhiR);
-    for (VPUser *U : reverse(Users)) {
-      auto *Def = dyn_cast<VPSingleDefRecipe>(U);
-      auto *RepR = dyn_cast<VPReplicateRecipe>(U);
-      // Skip recipes that shouldn't be narrowed.
-      if (!Def || !isa<VPReplicateRecipe, VPWidenRecipe>(Def) ||
-          Def->getNumUsers() == 0 || !Def->getUnderlyingValue() ||
-          (RepR && (RepR->isSingleScalar() || RepR->isPredicated())))
-        continue;
-
-      // Skip recipes that may have other lanes than their first used.
-      if (!vputils::isSingleScalar(Def) && !vputils::onlyFirstLaneUsed(Def))
-        continue;
-
-      auto *Clone = new VPReplicateRecipe(Def->getUnderlyingInstr(),
-                                          Def->operands(), /*IsUniform*/ true,
-                                          /*Mask*/ nullptr, /*Flags*/ *Def);
-      Clone->insertAfter(Def);
-      Def->replaceAllUsesWith(Clone);
-    }
-
     // Replace wide pointer inductions which have only their scalars used by
     // PtrAdd(IndStart, ScalarIVSteps (0, Step)).
     if (auto *PtrIV = dyn_cast<VPWidenPointerInductionRecipe>(&Phi)) {
@@ -1450,7 +1425,9 @@ static void narrowToSingleScalarRecipes(VPlan &Plan) {
       // Skip recipes that aren't single scalars or don't have only their
       // scalar results used. In the latter case, we would introduce extra
       // broadcasts.
-      if (!vputils::isSingleScalar(RepOrWidenR) ||
+      if ((!vputils::isSingleScalar(RepOrWidenR) &&
+           !vputils::onlyFirstLaneUsed(RepOrWidenR)) ||
+          RepOrWidenR->getNumUsers() == 0 ||
           !all_of(RepOrWidenR->users(), [RepOrWidenR](const VPUser *U) {
             if (auto *Store = dyn_cast<VPWidenStoreRecipe>(U)) {
               // VPWidenStore doesn't have users, and stores are always