diff --git a/llvm/lib/Transforms/Scalar/JumpThreading.cpp b/llvm/lib/Transforms/Scalar/JumpThreading.cpp
index f9cd4cdf3d17c8..75bc4bb7626046 100644
--- a/llvm/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/llvm/lib/Transforms/Scalar/JumpThreading.cpp
@@ -100,6 +100,11 @@ ImplicationSearchThreshold(
              "condition to use to thread over a weaker condition"),
     cl::init(3), cl::Hidden);
 
+static cl::opt<unsigned> PhiDuplicateThreshold(
+    "jump-threading-phi-threshold",
+    cl::desc("Max PHIs in BB to duplicate for jump threading"), cl::init(76),
+    cl::Hidden);
+
 static cl::opt<bool> PrintLVIAfterJumpThreading(
     "print-lvi-after-jump-threading",
     cl::desc("Print the LazyValueInfo cache after JumpThreading"), cl::init(false),
@@ -518,8 +523,23 @@ static unsigned getJumpThreadDuplicationCost(const TargetTransformInfo *TTI,
                                              Instruction *StopAt,
                                              unsigned Threshold) {
   assert(StopAt->getParent() == BB && "Not an instruction from proper BB?");
+
+  // Do not duplicate the BB if it has a lot of PHI nodes.
+  // If a threadable chain is too long then the number of PHI nodes can add up,
+  // leading to a substantial increase in compile time when rewriting the SSA.
+  unsigned PhiCount = 0;
+  Instruction *FirstNonPHI = nullptr;
+  for (Instruction &I : *BB) {
+    if (!isa<PHINode>(&I)) {
+      FirstNonPHI = &I;
+      break;
+    }
+    if (++PhiCount > PhiDuplicateThreshold)
+      return ~0U;
+  }
+
   /// Ignore PHI nodes, these will be flattened when duplication happens.
-  BasicBlock::const_iterator I(BB->getFirstNonPHI());
+  BasicBlock::const_iterator I(FirstNonPHI);
   // FIXME: THREADING will delete values that are just used to compute the
   // branch, so they shouldn't count against the duplication cost.
 
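
Note: the new jump-threading-phi-threshold option is declared cl::Hidden, so it is listed only under --help-hidden, but it can still be set on the opt command line. A minimal sketch of exercising the cap on a standalone IR module follows; the file names and the value 32 are illustrative and not part of the patch:

    opt -passes=jump-threading -jump-threading-phi-threshold=32 -S input.ll -o output.ll

When a block contains more than PhiDuplicateThreshold PHI nodes, getJumpThreadDuplicationCost returns ~0U, an effectively infinite cost, so jump threading refuses to duplicate that block regardless of the caller's duplication threshold.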