diff --git a/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll b/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll
index 7fc8d518e560d..6f8d28fee0504 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll
@@ -1,28 +1,59 @@
-; RUN: opt < %s -loop-vectorize -S | FileCheck %s
-; RUN: opt < %s -loop-vectorize -prefer-predicate-over-epilog -S | FileCheck -check-prefix=PREDFLAG %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -loop-vectorize -S | FileCheck %s --check-prefixes=CHECK,DEFAULT
+; RUN: opt < %s -loop-vectorize -prefer-predicate-over-epilog -S | FileCheck %s --check-prefixes=CHECK,PREDFLAG
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
 define dso_local void @tail_folding_enabled(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C) local_unnamed_addr #0 {
-; CHECK-LABEL: tail_folding_enabled(
-; CHECK: vector.body:
-; CHECK: %wide.masked.load = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(
-; CHECK: %wide.masked.load1 = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(
-; CHECK: %8 = add nsw <8 x i32> %wide.masked.load1, %wide.masked.load
-; CHECK: call void @llvm.masked.store.v8i32.p0v8i32(
-; CHECK: %index.next = add i64 %index, 8
-; CHECK: %12 = icmp eq i64 %index.next, 432
-; CHECK: br i1 %12, label %middle.block, label %vector.body, !llvm.loop !0
-; PREDFLAG-LABEL: tail_folding_enabled(
-; PREDFLAG: vector.body:
-; PREDFLAG: %wide.masked.load = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(
-; PREDFLAG: %wide.masked.load1 = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(
-; PREDFLAG: %8 = add nsw <8 x i32> %wide.masked.load1, %wide.masked.load
-; PREDFLAG: call void @llvm.masked.store.v8i32.p0v8i32(
-; PREDFLAG: %index.next = add i64 %index, 8
-; PREDFLAG: %12 = icmp eq i64 %index.next, 432
-; PREDFLAG: br i1 %12, label %middle.block, label %vector.body, !llvm.loop !0
+; CHECK-LABEL: @tail_folding_enabled(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK:       vector.ph:
+; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
+; CHECK:       vector.body:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x i64> undef, i64 [[INDEX]], i32 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT]], <8 x i64> undef, <8 x i32> zeroinitializer
+; CHECK-NEXT:    [[INDUCTION:%.*]] = add <8 x i64> [[BROADCAST_SPLAT]], <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>
+; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[TMP0]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ule <8 x i64> [[INDUCTION]], <i64 429, i64 429, i64 429, i64 429, i64 429, i64 429, i64 429, i64 429>
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i32* [[TMP3]] to <8 x i32>*
+; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* [[TMP4]], i32 4, <8 x i1> [[TMP2]], <8 x i32> undef)
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[TMP0]]
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[TMP5]], i32 0
+; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i32* [[TMP6]] to <8 x i32>*
+; CHECK-NEXT:    [[WIDE_MASKED_LOAD1:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* [[TMP7]], i32 4, <8 x i1> [[TMP2]], <8 x i32> undef)
+; CHECK-NEXT:    [[TMP8:%.*]] = add nsw <8 x i32> [[WIDE_MASKED_LOAD1]], [[WIDE_MASKED_LOAD]]
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[TMP0]]
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[TMP9]], i32 0
+; CHECK-NEXT:    [[TMP11:%.*]] = bitcast i32* [[TMP10]] to <8 x i32>*
+; CHECK-NEXT:    call void @llvm.masked.store.v8i32.p0v8i32(<8 x i32> [[TMP8]], <8 x i32>* [[TMP11]], i32 4, <8 x i1> [[TMP2]])
+; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 8
+; CHECK-NEXT:    [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 432
+; CHECK-NEXT:    br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !0
+; CHECK:       middle.block:
+; CHECK-NEXT:    br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; CHECK:       scalar.ph:
+; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 432, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.cond.cleanup:
+; CHECK-NEXT:    ret void
+; CHECK:       for.body:
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[TMP13:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[TMP14:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP13]]
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    store i32 [[ADD]], i32* [[ARRAYIDX4]], align 4
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 430
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop !2
+;
 entry:
   br label %for.body
 
@@ -44,20 +75,141 @@ for.body:
 }
 
 define dso_local void @tail_folding_disabled(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C) local_unnamed_addr #0 {
-; CHECK-LABEL: tail_folding_disabled(
-; CHECK: vector.body:
-; CHECK-NOT: @llvm.masked.load.v8i32.p0v8i32(
-; CHECK-NOT: @llvm.masked.store.v8i32.p0v8i32(
-; CHECK: br i1 %44, label {{.*}}, label %vector.body
-; PREDFLAG-LABEL: tail_folding_disabled(
-; PREDFLAG: vector.body:
-; PREDFLAG: %wide.masked.load = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(
-; PREDFLAG: %wide.masked.load1 = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(
-; PREDFLAG: %8 = add nsw <8 x i32> %wide.masked.load1, %wide.masked.load
-; PREDFLAG: call void @llvm.masked.store.v8i32.p0v8i32(
-; PREDFLAG: %index.next = add i64 %index, 8
-; PREDFLAG: %12 = icmp eq i64 %index.next, 432
-; PREDFLAG: br i1 %12, label %middle.block, label %vector.body, !llvm.loop !4
+; DEFAULT-LABEL: @tail_folding_disabled(
+; DEFAULT-NEXT:  entry:
+; DEFAULT-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; DEFAULT:       vector.ph:
+; DEFAULT-NEXT:    br label [[VECTOR_BODY:%.*]]
+; DEFAULT:       vector.body:
+; DEFAULT-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; DEFAULT-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; DEFAULT-NEXT:    [[TMP1:%.*]] = add i64 [[INDEX]], 8
+; DEFAULT-NEXT:    [[TMP2:%.*]] = add i64 [[INDEX]], 16
+; DEFAULT-NEXT:    [[TMP3:%.*]] = add i64 [[INDEX]], 24
+; DEFAULT-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[TMP0]]
+; DEFAULT-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[TMP1]]
+; DEFAULT-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[TMP2]]
+; DEFAULT-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[TMP3]]
+; DEFAULT-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i32 0
+; DEFAULT-NEXT:    [[TMP9:%.*]] = bitcast i32* [[TMP8]] to <8 x i32>*
+; DEFAULT-NEXT:    [[WIDE_LOAD:%.*]] = load <8 x i32>, <8 x i32>* [[TMP9]], align 4
+; DEFAULT-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i32 8
+; DEFAULT-NEXT:    [[TMP11:%.*]] = bitcast i32* [[TMP10]] to <8 x i32>*
+; DEFAULT-NEXT:    [[WIDE_LOAD1:%.*]] = load <8 x i32>, <8 x i32>* [[TMP11]], align 4
+; DEFAULT-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i32 16
+; DEFAULT-NEXT:    [[TMP13:%.*]] = bitcast i32* [[TMP12]] to <8 x i32>*
+; DEFAULT-NEXT:    [[WIDE_LOAD2:%.*]] = load <8 x i32>, <8 x i32>* [[TMP13]], align 4
+; DEFAULT-NEXT:    [[TMP14:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i32 24
+; DEFAULT-NEXT:    [[TMP15:%.*]] = bitcast i32* [[TMP14]] to <8 x i32>*
+; DEFAULT-NEXT:    [[WIDE_LOAD3:%.*]] = load <8 x i32>, <8 x i32>* [[TMP15]], align 4
+; DEFAULT-NEXT:    [[TMP16:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[TMP0]]
+; DEFAULT-NEXT:    [[TMP17:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 [[TMP1]]
+; DEFAULT-NEXT:    [[TMP18:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 [[TMP2]]
+; DEFAULT-NEXT:    [[TMP19:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 [[TMP3]]
+; DEFAULT-NEXT:    [[TMP20:%.*]] = getelementptr inbounds i32, i32* [[TMP16]], i32 0
+; DEFAULT-NEXT:    [[TMP21:%.*]] = bitcast i32* [[TMP20]] to <8 x i32>*
+; DEFAULT-NEXT:    [[WIDE_LOAD4:%.*]] = load <8 x i32>, <8 x i32>* [[TMP21]], align 4
+; DEFAULT-NEXT:    [[TMP22:%.*]] = getelementptr inbounds i32, i32* [[TMP16]], i32 8
+; DEFAULT-NEXT:    [[TMP23:%.*]] = bitcast i32* [[TMP22]] to <8 x i32>*
+; DEFAULT-NEXT:    [[WIDE_LOAD5:%.*]] = load <8 x i32>, <8 x i32>* [[TMP23]], align 4
+; DEFAULT-NEXT:    [[TMP24:%.*]] = getelementptr inbounds i32, i32* [[TMP16]], i32 16
+; DEFAULT-NEXT:    [[TMP25:%.*]] = bitcast i32* [[TMP24]] to <8 x i32>*
+; DEFAULT-NEXT:    [[WIDE_LOAD6:%.*]] = load <8 x i32>, <8 x i32>* [[TMP25]], align 4
+; DEFAULT-NEXT:    [[TMP26:%.*]] = getelementptr inbounds i32, i32* [[TMP16]], i32 24
+; DEFAULT-NEXT:    [[TMP27:%.*]] = bitcast i32* [[TMP26]] to <8 x i32>*
+; DEFAULT-NEXT:    [[WIDE_LOAD7:%.*]] = load <8 x i32>, <8 x i32>* [[TMP27]], align 4
+; DEFAULT-NEXT:    [[TMP28:%.*]] = add nsw <8 x i32> [[WIDE_LOAD4]], [[WIDE_LOAD]]
+; DEFAULT-NEXT:    [[TMP29:%.*]] = add nsw <8 x i32> [[WIDE_LOAD5]], [[WIDE_LOAD1]]
+; DEFAULT-NEXT:    [[TMP30:%.*]] = add nsw <8 x i32> [[WIDE_LOAD6]], [[WIDE_LOAD2]]
+; DEFAULT-NEXT:    [[TMP31:%.*]] = add nsw <8 x i32> [[WIDE_LOAD7]], [[WIDE_LOAD3]]
+; DEFAULT-NEXT:    [[TMP32:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[TMP0]]
+; DEFAULT-NEXT:    [[TMP33:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP1]]
+; DEFAULT-NEXT:    [[TMP34:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP2]]
+; DEFAULT-NEXT:    [[TMP35:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP3]]
+; DEFAULT-NEXT:    [[TMP36:%.*]] = getelementptr inbounds i32, i32* [[TMP32]], i32 0
+; DEFAULT-NEXT:    [[TMP37:%.*]] = bitcast i32* [[TMP36]] to <8 x i32>*
+; DEFAULT-NEXT:    store <8 x i32> [[TMP28]], <8 x i32>* [[TMP37]], align 4
+; DEFAULT-NEXT:    [[TMP38:%.*]] = getelementptr inbounds i32, i32* [[TMP32]], i32 8
+; DEFAULT-NEXT:    [[TMP39:%.*]] = bitcast i32* [[TMP38]] to <8 x i32>*
+; DEFAULT-NEXT:    store <8 x i32> [[TMP29]], <8 x i32>* [[TMP39]], align 4
+; DEFAULT-NEXT:    [[TMP40:%.*]] = getelementptr inbounds i32, i32* [[TMP32]], i32 16
+; DEFAULT-NEXT:    [[TMP41:%.*]] = bitcast i32* [[TMP40]] to <8 x i32>*
+; DEFAULT-NEXT:    store <8 x i32> [[TMP30]], <8 x i32>* [[TMP41]], align 4
+; DEFAULT-NEXT:    [[TMP42:%.*]] = getelementptr inbounds i32, i32* [[TMP32]], i32 24
+; DEFAULT-NEXT:    [[TMP43:%.*]] = bitcast i32* [[TMP42]] to <8 x i32>*
+; DEFAULT-NEXT:    store <8 x i32> [[TMP31]], <8 x i32>* [[TMP43]], align 4
+; DEFAULT-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 32
+; DEFAULT-NEXT:    [[TMP44:%.*]] = icmp eq i64 [[INDEX_NEXT]], 416
+; DEFAULT-NEXT:    br i1 [[TMP44]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !4
+; DEFAULT:       middle.block:
+; DEFAULT-NEXT:    [[CMP_N:%.*]] = icmp eq i64 430, 416
+; DEFAULT-NEXT:    br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; DEFAULT:       scalar.ph:
+; DEFAULT-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 416, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; DEFAULT-NEXT:    br label [[FOR_BODY:%.*]]
+; DEFAULT:       for.cond.cleanup:
+; DEFAULT-NEXT:    ret void
+; DEFAULT:       for.body:
+; DEFAULT-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; DEFAULT-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDVARS_IV]]
+; DEFAULT-NEXT:    [[TMP45:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; DEFAULT-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 [[INDVARS_IV]]
+; DEFAULT-NEXT:    [[TMP46:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
+; DEFAULT-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP46]], [[TMP45]]
+; DEFAULT-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
+; DEFAULT-NEXT:    store i32 [[ADD]], i32* [[ARRAYIDX4]], align 4
+; DEFAULT-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; DEFAULT-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 430
+; DEFAULT-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop !5
+;
+; PREDFLAG-LABEL: @tail_folding_disabled(
+; PREDFLAG-NEXT:  entry:
+; PREDFLAG-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; PREDFLAG:       vector.ph:
+; PREDFLAG-NEXT:    br label [[VECTOR_BODY:%.*]]
+; PREDFLAG:       vector.body:
+; PREDFLAG-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; PREDFLAG-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x i64> undef, i64 [[INDEX]], i32 0
+; PREDFLAG-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT]], <8 x i64> undef, <8 x i32> zeroinitializer
+; PREDFLAG-NEXT:    [[INDUCTION:%.*]] = add <8 x i64> [[BROADCAST_SPLAT]], <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>
+; PREDFLAG-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; PREDFLAG-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[TMP0]]
+; PREDFLAG-NEXT:    [[TMP2:%.*]] = icmp ule <8 x i64> [[INDUCTION]], <i64 429, i64 429, i64 429, i64 429, i64 429, i64 429, i64 429, i64 429>
+; PREDFLAG-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 0
+; PREDFLAG-NEXT:    [[TMP4:%.*]] = bitcast i32* [[TMP3]] to <8 x i32>*
+; PREDFLAG-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* [[TMP4]], i32 4, <8 x i1> [[TMP2]], <8 x i32> undef)
+; PREDFLAG-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[TMP0]]
+; PREDFLAG-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[TMP5]], i32 0
+; PREDFLAG-NEXT:    [[TMP7:%.*]] = bitcast i32* [[TMP6]] to <8 x i32>*
+; PREDFLAG-NEXT:    [[WIDE_MASKED_LOAD1:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* [[TMP7]], i32 4, <8 x i1> [[TMP2]], <8 x i32> undef)
+; PREDFLAG-NEXT:    [[TMP8:%.*]] = add nsw <8 x i32> [[WIDE_MASKED_LOAD1]], [[WIDE_MASKED_LOAD]]
+; PREDFLAG-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[TMP0]]
+; PREDFLAG-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[TMP9]], i32 0
+; PREDFLAG-NEXT:    [[TMP11:%.*]] = bitcast i32* [[TMP10]] to <8 x i32>*
+; PREDFLAG-NEXT:    call void @llvm.masked.store.v8i32.p0v8i32(<8 x i32> [[TMP8]], <8 x i32>* [[TMP11]], i32 4, <8 x i1> [[TMP2]])
+; PREDFLAG-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 8
+; PREDFLAG-NEXT:    [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 432
+; PREDFLAG-NEXT:    br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !4
+; PREDFLAG:       middle.block:
+; PREDFLAG-NEXT:    br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; PREDFLAG:       scalar.ph:
+; PREDFLAG-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 432, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; PREDFLAG-NEXT:    br label [[FOR_BODY:%.*]]
+; PREDFLAG:       for.cond.cleanup:
+; PREDFLAG-NEXT:    ret void
+; PREDFLAG:       for.body:
+; PREDFLAG-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; PREDFLAG-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDVARS_IV]]
+; PREDFLAG-NEXT:    [[TMP13:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; PREDFLAG-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 [[INDVARS_IV]]
+; PREDFLAG-NEXT:    [[TMP14:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
+; PREDFLAG-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP13]]
+; PREDFLAG-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
+; PREDFLAG-NEXT:    store i32 [[ADD]], i32* [[ARRAYIDX4]], align 4
+; PREDFLAG-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; PREDFLAG-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 430
+; PREDFLAG-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop !5
+;
 entry:
   br label %for.body
 
@@ -87,30 +239,74 @@ for.body:
 ; return sum;
 ; }
 ;
+
 define i32 @reduction_i32(i32* nocapture readonly %A, i32* nocapture readonly %B, i32 %N) #0 {
 ; CHECK-LABEL: @reduction_i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[N:%.*]], -1
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
+; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK:       vector.ph:
+; CHECK-NEXT:    [[N_RND_UP:%.*]] = add i64 [[TMP2]], 7
+; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 8
+; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; CHECK-NEXT:    [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[TMP2]], 1
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x i64> undef, i64 [[TRIP_COUNT_MINUS_1]], i32 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT]], <8 x i64> undef, <8 x i32> zeroinitializer
+; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
 ; CHECK:       vector.body:
-; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
-; CHECK-NEXT:    [[ACCUM_PHI:%.*]] = phi <8 x i32> [ zeroinitializer, %vector.ph ], [ [[ACCUM:%.*]], %vector.body ]
-; CHECK:       [[ICMPULE:%.*]] = icmp ule <8 x i64>
-; CHECK:       [[LOAD1:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* {{.*}}, i32 4, <8 x i1> [[ICMPULE]], <8 x i32> undef)
-; CHECK:       [[LOAD2:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* {{.*}}, i32 4, <8 x i1> [[ICMPULE]], <8 x i32> undef)
-; CHECK-NEXT:    [[ADD:%.*]] = add nsw <8 x i32> [[LOAD2]], [[LOAD1]]
-; CHECK-NEXT:    [[ACCUM]] = add <8 x i32> [[ADD]], [[ACCUM_PHI]]
-; CHECK:       [[LIVEOUT:%.*]] = select <8 x i1> [[ICMPULE]], <8 x i32> [[ACCUM]], <8 x i32> [[ACCUM_PHI]]
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi <8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <8 x i64> undef, i64 [[INDEX]], i32 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT2:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT1]], <8 x i64> undef, <8 x i32> zeroinitializer
+; CHECK-NEXT:    [[INDUCTION:%.*]] = add <8 x i64> [[BROADCAST_SPLAT2]], <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>
+; CHECK-NEXT:    [[TMP3:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT:    [[TMP4:%.*]] = add nuw nsw i64 [[TMP3]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[TMP3]]
+; CHECK-NEXT:    [[TMP6:%.*]] = icmp ule <8 x i64> [[INDUCTION]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i32, i32* [[TMP5]], i32 0
+; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i32* [[TMP7]] to <8 x i32>*
+; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* [[TMP8]], i32 4, <8 x i1> [[TMP6]], <8 x i32> undef)
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[TMP3]]
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[TMP9]], i32 0
+; CHECK-NEXT:    [[TMP11:%.*]] = bitcast i32* [[TMP10]] to <8 x i32>*
+; CHECK-NEXT:    [[WIDE_MASKED_LOAD3:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* [[TMP11]], i32 4, <8 x i1> [[TMP6]], <8 x i32> undef)
+; CHECK-NEXT:    [[TMP12:%.*]] = add nsw <8 x i32> [[WIDE_MASKED_LOAD3]], [[WIDE_MASKED_LOAD]]
+; CHECK-NEXT:    [[TMP13]] = add <8 x i32> [[TMP12]], [[VEC_PHI]]
+; CHECK-NEXT:    [[TMP14:%.*]] = trunc i64 [[TMP4]] to i32
+; CHECK-NEXT:    [[TMP15:%.*]] = select <8 x i1> [[TMP6]], <8 x i32> [[TMP13]], <8 x i32> [[VEC_PHI]]
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 8
+; CHECK-NEXT:    [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !6
 ; CHECK:       middle.block:
-; CHECK-NEXT:    [[RDX_SHUF:%.*]] = shufflevector <8 x i32> [[LIVEOUT]], <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
-; CHECK-NEXT:    [[BIN_RDX:%.*]] = add <8 x i32> [[LIVEOUT]], [[RDX_SHUF]]
+; CHECK-NEXT:    [[RDX_SHUF:%.*]] = shufflevector <8 x i32> [[TMP15]], <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[BIN_RDX:%.*]] = add <8 x i32> [[TMP15]], [[RDX_SHUF]]
 ; CHECK-NEXT:    [[RDX_SHUF4:%.*]] = shufflevector <8 x i32> [[BIN_RDX]], <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
 ; CHECK-NEXT:    [[BIN_RDX5:%.*]] = add <8 x i32> [[BIN_RDX]], [[RDX_SHUF4]]
 ; CHECK-NEXT:    [[RDX_SHUF6:%.*]] = shufflevector <8 x i32> [[BIN_RDX5]], <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
 ; CHECK-NEXT:    [[BIN_RDX7:%.*]] = add <8 x i32> [[BIN_RDX5]], [[RDX_SHUF6]]
 ; CHECK-NEXT:    [[TMP17:%.*]] = extractelement <8 x i32> [[BIN_RDX7]], i32 0
-; CHECK-NEXT:    br i1 true, label %for.cond.cleanup, label %scalar.ph
+; CHECK-NEXT:    br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
 ; CHECK:       scalar.ph:
+; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT:    [[SUM_0:%.*]] = phi i32 [ [[SUM_1:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[TMP18:%.*]] = load i32, i32* [[ARRAYIDXA]], align 4
+; CHECK-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[TMP19:%.*]] = load i32, i32* [[ARRAYIDXB]], align 4
+; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP18]]
+; CHECK-NEXT:    [[SUM_1]] = add nuw nsw i32 [[ADD]], [[SUM_0]]
+; CHECK-NEXT:    [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop !7
 ; CHECK:       for.cond.cleanup:
-; CHECK-NEXT:    [[SUM_1_LCSSA:%.*]] = phi i32 [ {{.*}}, %for.body ], [ [[TMP17]], %middle.block ]
+; CHECK-NEXT:    [[SUM_1_LCSSA:%.*]] = phi i32 [ [[SUM_1]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ]
 ; CHECK-NEXT:    ret i32 [[SUM_1_LCSSA]]
 ;
 entry: