diff --git a/llvm/lib/Analysis/IVDescriptors.cpp b/llvm/lib/Analysis/IVDescriptors.cpp
index 6fb600114bc61d..bdebd71d7f68b0 100644
--- a/llvm/lib/Analysis/IVDescriptors.cpp
+++ b/llvm/lib/Analysis/IVDescriptors.cpp
@@ -699,25 +699,37 @@ bool RecurrenceDescriptor::isFirstOrderRecurrence(
   // Ensure every user of the phi node is dominated by the previous value.
   // The dominance requirement ensures the loop vectorizer will not need to
   // vectorize the initial value prior to the first iteration of the loop.
-  // TODO: Consider extending this sinking to handle other kinds of instructions
-  // and expressions, beyond sinking a single cast past Previous.
+  // TODO: Consider extending this sinking to handle memory instructions and
+  // phis with multiple users.
+
+  // Returns true if all uses of I are dominated by DominatedBy.
+  auto allUsesDominatedBy = [DT](Instruction *I, Instruction *DominatedBy) {
+    return all_of(I->uses(), [DT, DominatedBy](Use &U) {
+      return DT->dominates(DominatedBy, U);
+    });
+  };
+
   if (Phi->hasOneUse()) {
-    auto *I = Phi->user_back();
-    if (I->isCast() && (I->getParent() == Phi->getParent()) && I->hasOneUse() &&
-        DT->dominates(Previous, I->user_back())) {
-      if (!DT->dominates(Previous, I)) // Otherwise we're good w/o sinking.
-        SinkAfter[I] = Previous;
+    Instruction *I = Phi->user_back();
+
+    // If the user of the PHI is also the incoming value, we potentially have a
+    // reduction, which cannot be handled by sinking.
+    if (Previous == I)
+      return false;
+
+    if (DT->dominates(Previous, I)) // We're already good without sinking.
       return true;
-    }
-  }
 
-  for (User *U : Phi->users())
-    if (auto *I = dyn_cast<Instruction>(U)) {
-      if (!DT->dominates(Previous, I))
-        return false;
+    // We can sink any instruction without side effects, as long as all users
+    // are dominated by the instruction we are sinking after.
+    if (I->getParent() == Phi->getParent() && !I->mayHaveSideEffects() &&
+        allUsesDominatedBy(I, Previous)) {
+      SinkAfter[I] = Previous;
+      return true;
     }
+  }
 
-  return true;
+  return allUsesDominatedBy(Phi, Previous);
 }
 
 /// This function returns the identity element (or neutral element) for
diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-complex.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-complex.ll
new file mode 100644
index 00000000000000..e09804276ec83a
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-complex.ll
@@ -0,0 +1,245 @@
; RUN: opt -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -S %s | FileCheck %s

@p = external local_unnamed_addr global [257 x i32], align 16
@q = external local_unnamed_addr global [257 x i32], align 16

; Test case for PR43398.
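For context before the tests: the SinkAfter entries recorded above are applied by the loop vectorizer, which moves each mapped instruction directly after its sink point before vectorizing the loop. A minimal sketch of that step, assuming the DenseMap<Instruction *, Instruction *> used for SinkAfter (the exact consuming code in LoopVectorize may differ in detail):

  // Hypothetical illustration of applying a SinkAfter map.
  for (auto &Entry : SinkAfter) {
    Instruction *ToSink = Entry.first;
    Instruction *Target = Entry.second;
    // After the move, Target dominates every use of ToSink, which is
    // exactly what allUsesDominatedBy established when recording the entry.
    ToSink->moveAfter(Target);
  }

The first test exercises this on the PR43398 loop, where %add.1 has to be sunk past the load %pre.next: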
define void @can_sink_after_store(i32 %x, i32* %ptr, i64 %tc) local_unnamed_addr #0 {
; CHECK-LABEL: vector.ph:
; CHECK: %broadcast.splatinsert1 = insertelement <4 x i32> undef, i32 %x, i32 0
; CHECK-NEXT: %broadcast.splat2 = shufflevector <4 x i32> %broadcast.splatinsert1, <4 x i32> undef, <4 x i32> zeroinitializer
; CHECK-NEXT: %vector.recur.init = insertelement <4 x i32> undef, i32 %.pre, i32 3
; CHECK-NEXT: br label %vector.body

; CHECK-LABEL: vector.body:
; CHECK-NEXT: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; CHECK-NEXT: %vector.recur = phi <4 x i32> [ %vector.recur.init, %vector.ph ], [ %wide.load, %vector.body ]
; CHECK-NEXT: %offset.idx = add i64 1, %index
; CHECK-NEXT: %broadcast.splatinsert = insertelement <4 x i64> undef, i64 %offset.idx, i32 0
; CHECK-NEXT: %broadcast.splat = shufflevector <4 x i64> %broadcast.splatinsert, <4 x i64> undef, <4 x i32> zeroinitializer
; CHECK-NEXT: %induction = add <4 x i64> %broadcast.splat, <i64 0, i64 1, i64 2, i64 3>
; CHECK-NEXT: %0 = add i64 %offset.idx, 0
; CHECK-NEXT: %1 = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 %0
; CHECK-NEXT: %2 = getelementptr inbounds i32, i32* %1, i32 0
; CHECK-NEXT: %3 = bitcast i32* %2 to <4 x i32>*
; CHECK-NEXT: %wide.load = load <4 x i32>, <4 x i32>* %3, align 4
; CHECK-NEXT: %4 = shufflevector <4 x i32> %vector.recur, <4 x i32> %wide.load, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT: %5 = add <4 x i32> %4, %broadcast.splat2
; CHECK-NEXT: %6 = add <4 x i32> %5, %wide.load
; CHECK-NEXT: %7 = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 %0
; CHECK-NEXT: %8 = getelementptr inbounds i32, i32* %7, i32 0
; CHECK-NEXT: %9 = bitcast i32* %8 to <4 x i32>*
; CHECK-NEXT: store <4 x i32> %6, <4 x i32>* %9, align 4
; CHECK-NEXT: %index.next = add i64 %index, 4
; CHECK-NEXT: %10 = icmp eq i64 %index.next, 1996
; CHECK-NEXT: br i1 %10, label %middle.block, label %vector.body
;
entry:
  br label %preheader

preheader:
  %idx.phi.trans = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
  %.pre = load i32, i32* %idx.phi.trans, align 4
  br label %for

for:
  %pre.phi = phi i32 [ %.pre, %preheader ], [ %pre.next, %for ]
  %iv = phi i64 [ 1, %preheader ], [ %iv.next, %for ]
  %add.1 = add i32 %pre.phi, %x
  %idx.1 = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 %iv
  %pre.next = load i32, i32* %idx.1, align 4
  %add.2 = add i32 %add.1, %pre.next
  %idx.2 = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 %iv
  store i32 %add.2, i32* %idx.2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 2000
  br i1 %exitcond, label %exit, label %for

exit:
  ret void
}

; We can sink potentially trapping instructions, as this will only delay the
; trap and not introduce traps on additional paths.
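A note on why this is legal: the patch gates sinking on mayHaveSideEffects(), which boils down to mayWriteToMemory() || mayThrow() and is therefore false for an sdiv, even though the division may trap. Hoisting would need the stronger isSafeToSpeculativelyExecute(), because it can introduce a trap on a path that never executed the division; sinking merely delays it on the same path. A hedged sketch of the legality check, mirroring (not copying) the code above; canSinkAfter is an invented name:

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/IR/Dominators.h"

  using namespace llvm;

  static bool canSinkAfter(Instruction *I, Instruction *Previous,
                           const DominatorTree &DT) {
    if (I->mayHaveSideEffects()) // stores, calls, fences must stay put
      return false;
    // The Use-based dominates() overload treats a use in a phi as occurring
    // at the end of the corresponding incoming block.
    return all_of(I->uses(),
                  [&](Use &U) { return DT.dominates(Previous, U); });
  }

The next test checks that the sdiv is indeed sunk past the load: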
define void @sink_sdiv(i32 %x, i32* %ptr, i64 %tc) local_unnamed_addr #0 {
; CHECK-LABEL: vector.ph:
; CHECK: %broadcast.splatinsert1 = insertelement <4 x i32> undef, i32 %x, i32 0
; CHECK-NEXT: %broadcast.splat2 = shufflevector <4 x i32> %broadcast.splatinsert1, <4 x i32> undef, <4 x i32> zeroinitializer
; CHECK-NEXT: %vector.recur.init = insertelement <4 x i32> undef, i32 %.pre, i32 3
; CHECK-NEXT: br label %vector.body

; CHECK-LABEL: vector.body:
; CHECK-NEXT: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
; CHECK-NEXT: %vector.recur = phi <4 x i32> [ %vector.recur.init, %vector.ph ], [ %wide.load, %vector.body ]
; CHECK-NEXT: %offset.idx = add i64 1, %index
; CHECK-NEXT: %broadcast.splatinsert = insertelement <4 x i64> undef, i64 %offset.idx, i32 0
; CHECK-NEXT: %broadcast.splat = shufflevector <4 x i64> %broadcast.splatinsert, <4 x i64> undef, <4 x i32> zeroinitializer
; CHECK-NEXT: %induction = add <4 x i64> %broadcast.splat, <i64 0, i64 1, i64 2, i64 3>
; CHECK-NEXT: %0 = add i64 %offset.idx, 0
; CHECK-NEXT: %1 = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 %0
; CHECK-NEXT: %2 = getelementptr inbounds i32, i32* %1, i32 0
; CHECK-NEXT: %3 = bitcast i32* %2 to <4 x i32>*
; CHECK-NEXT: %wide.load = load <4 x i32>, <4 x i32>* %3, align 4
; CHECK-NEXT: %4 = shufflevector <4 x i32> %vector.recur, <4 x i32> %wide.load, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
; CHECK-NEXT: %5 = sdiv <4 x i32> %4, %broadcast.splat2
; CHECK-NEXT: %6 = add <4 x i32> %5, %wide.load
; CHECK-NEXT: %7 = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 %0
; CHECK-NEXT: %8 = getelementptr inbounds i32, i32* %7, i32 0
; CHECK-NEXT: %9 = bitcast i32* %8 to <4 x i32>*
; CHECK-NEXT: store <4 x i32> %6, <4 x i32>* %9, align 4
; CHECK-NEXT: %index.next = add i64 %index, 4
; CHECK-NEXT: %10 = icmp eq i64 %index.next, 1996
; CHECK-NEXT: br i1 %10, label %middle.block, label %vector.body
;
entry:
  br label %preheader

preheader:
  %idx.phi.trans = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
  %.pre = load i32, i32* %idx.phi.trans, align 4
  br label %for

for:
  %pre.phi = phi i32 [ %.pre, %preheader ], [ %pre.next, %for ]
  %iv = phi i64 [ 1, %preheader ], [ %iv.next, %for ]
  %div.1 = sdiv i32 %pre.phi, %x
  %idx.1 = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 %iv
  %pre.next = load i32, i32* %idx.1, align 4
  %add.2 = add i32 %div.1, %pre.next
  %idx.2 = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 %iv
  store i32 %add.2, i32* %idx.2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 2000
  br i1 %exitcond, label %exit, label %for

exit:
  ret void
}

; FIXME: Currently we can only sink a single instruction. For the example below,
; we would also have to sink its users.
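One possible shape for that extension: collect the transitive users that would also have to move, and only commit if the whole chain is sinkable. This is a hypothetical sketch, not part of the patch; trySinkChain and its traversal details are invented for illustration (a real version would also have to move the chain in a dominance-preserving order):

  #include "llvm/ADT/SmallPtrSet.h"
  #include "llvm/ADT/SmallVector.h"
  #include "llvm/IR/Dominators.h"

  using namespace llvm;

  // Gather Root plus every user that would also need to be sunk past
  // Previous; bail out on side effects or anything outside Root's block.
  static bool trySinkChain(Instruction *Root, Instruction *Previous,
                           const DominatorTree &DT,
                           SmallVectorImpl<Instruction *> &Chain) {
    SmallPtrSet<Instruction *, 8> Visited;
    SmallVector<Instruction *, 8> Worklist = {Root};
    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();
      if (!Visited.insert(I).second)
        continue;
      if (I->mayHaveSideEffects() || I->getParent() != Root->getParent())
        return false;
      Chain.push_back(I);
      for (Use &U : I->uses())
        if (!DT.dominates(Previous, U)) // this user must move as well
          Worklist.push_back(cast<Instruction>(U.getUser()));
    }
    return true;
  }

Until something like this lands, the test below documents the limitation: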
define void @cannot_sink_with_additional_user(i32 %x, i32* %ptr, i64 %tc) {
; CHECK-LABEL: define void @cannot_sink_with_additional_user(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label %preheader

; CHECK-LABEL: preheader: ; preds = %entry
; CHECK: br label %for

; CHECK-LABEL: for: ; preds = %for, %preheader
; CHECK: br i1 %exitcond, label %exit, label %for

; CHECK-LABEL: exit:
; CHECK-NEXT: ret void
;
entry:
  br label %preheader

preheader:
  %idx.phi.trans = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
  %.pre = load i32, i32* %idx.phi.trans, align 4
  br label %for

for:
  %pre.phi = phi i32 [ %.pre, %preheader ], [ %pre.next, %for ]
  %iv = phi i64 [ 1, %preheader ], [ %iv.next, %for ]
  %add.1 = add i32 %pre.phi, %x
  %add.2 = add i32 %add.1, %x
  %idx.1 = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 %iv
  %pre.next = load i32, i32* %idx.1, align 4
  %add.3 = add i32 %add.1, %pre.next
  %add.4 = add i32 %add.2, %add.3
  %idx.2 = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 %iv
  store i32 %add.4, i32* %idx.2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 2000
  br i1 %exitcond, label %exit, label %for

exit:
  ret void
}

; FIXME: We can sink a store if we can guarantee that it does not alias any
; loads/stores in between.
define void @cannot_sink_store(i32 %x, i32* %ptr, i64 %tc) {
; CHECK-LABEL: define void @cannot_sink_store(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label %preheader

; CHECK-LABEL: preheader: ; preds = %entry
; CHECK: br label %for

; CHECK-LABEL: for: ; preds = %for, %preheader
; CHECK: br i1 %exitcond, label %exit, label %for

; CHECK-LABEL: exit:
; CHECK-NEXT: ret void
;
entry:
  br label %preheader

preheader:
  %idx.phi.trans = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
  %.pre = load i32, i32* %idx.phi.trans, align 4
  br label %for

for:
  %pre.phi = phi i32 [ %.pre, %preheader ], [ %pre.next, %for ]
  %iv = phi i64 [ 1, %preheader ], [ %iv.next, %for ]
  %add.1 = add i32 %pre.phi, %x
  store i32 %add.1, i32* %ptr
  %idx.1 = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 %iv
  %pre.next = load i32, i32* %idx.1, align 4
  %add.2 = add i32 %add.1, %pre.next
  %idx.2 = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 %iv
  store i32 %add.2, i32* %idx.2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 2000
  br i1 %exitcond, label %exit, label %for

exit:
  ret void
}

; Some kinds of reductions are not detected by IVDescriptors. If we have a
; cycle, we cannot sink it.
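The cycle case is exactly what the new Previous == I bail-out above catches: when the phi's only user is the instruction that feeds back into the phi, there is nothing to sink the user after. A distilled, purely illustrative form of that guard, with the mapping onto the test that follows spelled out in the comment:

  #include "llvm/IR/Instructions.h"

  using namespace llvm;

  // In cannot_sink_reduction below, Phi is %pre.phi and Previous is %d,
  // which is also the phi's single user; sinking %d after itself is
  // impossible, so isFirstOrderRecurrence() rejects the candidate.
  static bool isSelfFeedingCycle(PHINode *Phi, Instruction *Previous) {
    return Phi->hasOneUse() && Phi->user_back() == Previous;
  }

The test below sets up exactly this cycle: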
define void @cannot_sink_reduction(i32 %x, i32* %ptr, i64 %tc) {
; CHECK-LABEL: define void @cannot_sink_reduction(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label %preheader

; CHECK-LABEL: preheader: ; preds = %entry
; CHECK: br label %for

; CHECK-LABEL: for: ; preds = %for, %preheader
; CHECK: br i1 %exitcond, label %exit, label %for

; CHECK-LABEL: exit: ; preds = %for
; CHECK-NEXT: ret void
;
entry:
  br label %preheader

preheader:
  %idx.phi.trans = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 1
  %.pre = load i32, i32* %idx.phi.trans, align 4
  br label %for

for:
  %pre.phi = phi i32 [ %.pre, %preheader ], [ %d, %for ]
  %iv = phi i64 [ 1, %preheader ], [ %iv.next, %for ]
  %d = sdiv i32 %pre.phi, %x
  %idx.1 = getelementptr inbounds [257 x i32], [257 x i32]* @p, i64 0, i64 %iv
  %pre.next = load i32, i32* %idx.1, align 4
  %add.2 = add i32 %x, %pre.next
  %idx.2 = getelementptr inbounds [257 x i32], [257 x i32]* @q, i64 0, i64 %iv
  store i32 %add.2, i32* %idx.2, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 2000
  br i1 %exitcond, label %exit, label %for

exit:
  ret void
}
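Finally, for the cannot_sink_store FIXME above: sinking a store would additionally require a memory-dependence check over every instruction the store moves across. A hypothetical sketch using alias analysis; canSinkStoreAfter is an invented helper and assumes SI and Previous sit in the same basic block with SI before Previous:

  #include "llvm/Analysis/AliasAnalysis.h"
  #include "llvm/Analysis/MemoryLocation.h"
  #include "llvm/IR/Instructions.h"

  using namespace llvm;

  static bool canSinkStoreAfter(StoreInst *SI, Instruction *Previous,
                                AAResults &AA) {
    MemoryLocation Loc = MemoryLocation::get(SI);
    // Every instruction the store would move across, including Previous
    // itself, must neither read nor write the store's location.
    for (auto It = std::next(SI->getIterator()),
              End = std::next(Previous->getIterator());
         It != End; ++It)
      if (isModOrRefSet(AA.getModRefInfo(&*It, Loc)))
        return false;
    return true;
  }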