diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll b/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
index bf76b4d08c6e2f..d4906fd0dbbd70 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
@@ -1,18 +1,109 @@
-; RUN: opt < %s -loop-vectorize -force-vector-interleave=1 -instsimplify -S | FileCheck %s --check-prefixes=CHECK,LV
-; RUN: opt < %s -loop-vectorize -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -instsimplify -S | FileCheck %s --check-prefixes=CHECK,TFALWAYS
-; RUN: opt < %s -loop-vectorize -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -instsimplify -S | FileCheck %s --check-prefixes=CHECK,TFFALLBACK
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -loop-vectorize -force-vector-interleave=1 -instsimplify -S | FileCheck %s --check-prefixes=TFNONE
+; RUN: opt < %s -loop-vectorize -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -instsimplify -S | FileCheck %s --check-prefixes=TFALWAYS
+; RUN: opt < %s -loop-vectorize -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -instsimplify -S | FileCheck %s --check-prefixes=TFFALLBACK
 
 target triple = "aarch64-unknown-linux-gnu"
 
 ; A call whose argument must be widened. We check that tail folding uses the
 ; primary mask, and that without tail folding we synthesize an all-true mask.
 define void @test_widen(i64* noalias %a, i64* readnone %b) #4 {
-; CHECK-LABEL: @test_widen(
-; LV-NOT: call @foo_vector
-; TFALWAYS-NOT: vector.body
-; TFALWAYS-NOT: call @foo_vector
-; TFFALLBACK-NOT: call @foo_vector
-; CHECK: ret void
+; TFNONE-LABEL: @test_widen(
+; TFNONE-NEXT: entry:
+; TFNONE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; TFNONE: vector.ph:
+; TFNONE-NEXT: br label [[VECTOR_BODY:%.*]]
+; TFNONE: vector.body:
+; TFNONE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; TFNONE-NEXT: [[TMP0:%.*]] = getelementptr i64, i64* [[B:%.*]], i64 [[INDEX]]
+; TFNONE-NEXT: [[TMP1:%.*]] = bitcast i64* [[TMP0]] to <2 x i64>*
+; TFNONE-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, <2 x i64>* [[TMP1]], align 4
+; TFNONE-NEXT: [[TMP2:%.*]] = extractelement <2 x i64> [[WIDE_LOAD]], i32 0
+; TFNONE-NEXT: [[TMP3:%.*]] = call i64 @foo(i64 [[TMP2]]) #[[ATTR2:[0-9]+]]
+; TFNONE-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[WIDE_LOAD]], i32 1
+; TFNONE-NEXT: [[TMP5:%.*]] = call i64 @foo(i64 [[TMP4]]) #[[ATTR2]]
+; TFNONE-NEXT: [[TMP6:%.*]] = insertelement <2 x i64> poison, i64 [[TMP3]], i32 0
+; TFNONE-NEXT: [[TMP7:%.*]] = insertelement <2 x i64> [[TMP6]], i64 [[TMP5]], i32 1
+; TFNONE-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDEX]]
+; TFNONE-NEXT: [[TMP9:%.*]] = bitcast i64* [[TMP8]] to <2 x i64>*
+; TFNONE-NEXT: store <2 x i64> [[TMP7]], <2 x i64>* [[TMP9]], align 4
+; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; TFNONE-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; TFNONE-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; TFNONE: middle.block:
+; TFNONE-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; TFNONE: scalar.ph:
+; TFNONE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1024, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; TFNONE-NEXT: br label [[FOR_BODY:%.*]]
+; TFNONE: for.body:
+; TFNONE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; TFNONE-NEXT: [[GEP:%.*]] = getelementptr i64, i64* [[B]], i64 [[INDVARS_IV]]
+; TFNONE-NEXT: [[LOAD:%.*]] = load i64, i64* [[GEP]], align 4
+; TFNONE-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR2]]
+; TFNONE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[INDVARS_IV]]
+; TFNONE-NEXT: store i64 [[CALL]], i64* [[ARRAYIDX]], align 4
+; TFNONE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; TFNONE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
+; TFNONE-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
+; TFNONE: for.cond.cleanup:
+; TFNONE-NEXT: ret void
+;
+; TFALWAYS-LABEL: @test_widen(
+; TFALWAYS-NEXT: entry:
+; TFALWAYS-NEXT: br label [[FOR_BODY:%.*]]
+; TFALWAYS: for.body:
+; TFALWAYS-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; TFALWAYS-NEXT: [[GEP:%.*]] = getelementptr i64, i64* [[B:%.*]], i64 [[INDVARS_IV]]
+; TFALWAYS-NEXT: [[LOAD:%.*]] = load i64, i64* [[GEP]], align 4
+; TFALWAYS-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR1:[0-9]+]]
+; TFALWAYS-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDVARS_IV]]
+; TFALWAYS-NEXT: store i64 [[CALL]], i64* [[ARRAYIDX]], align 4
+; TFALWAYS-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; TFALWAYS-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
+; TFALWAYS-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
+; TFALWAYS: for.cond.cleanup:
+; TFALWAYS-NEXT: ret void
+;
+; TFFALLBACK-LABEL: @test_widen(
+; TFFALLBACK-NEXT: entry:
+; TFFALLBACK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; TFFALLBACK: vector.ph:
+; TFFALLBACK-NEXT: br label [[VECTOR_BODY:%.*]]
+; TFFALLBACK: vector.body:
+; TFFALLBACK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; TFFALLBACK-NEXT: [[TMP0:%.*]] = getelementptr i64, i64* [[B:%.*]], i64 [[INDEX]]
+; TFFALLBACK-NEXT: [[TMP1:%.*]] = bitcast i64* [[TMP0]] to <2 x i64>*
+; TFFALLBACK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, <2 x i64>* [[TMP1]], align 4
+; TFFALLBACK-NEXT: [[TMP2:%.*]] = extractelement <2 x i64> [[WIDE_LOAD]], i32 0
+; TFFALLBACK-NEXT: [[TMP3:%.*]] = call i64 @foo(i64 [[TMP2]]) #[[ATTR2:[0-9]+]]
+; TFFALLBACK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[WIDE_LOAD]], i32 1
+; TFFALLBACK-NEXT: [[TMP5:%.*]] = call i64 @foo(i64 [[TMP4]]) #[[ATTR2]]
+; TFFALLBACK-NEXT: [[TMP6:%.*]] = insertelement <2 x i64> poison, i64 [[TMP3]], i32 0
+; TFFALLBACK-NEXT: [[TMP7:%.*]] = insertelement <2 x i64> [[TMP6]], i64 [[TMP5]], i32 1
+; TFFALLBACK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDEX]]
+; TFFALLBACK-NEXT: [[TMP9:%.*]] = bitcast i64* [[TMP8]] to <2 x i64>*
+; TFFALLBACK-NEXT: store <2 x i64> [[TMP7]], <2 x i64>* [[TMP9]], align 4
+; TFFALLBACK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; TFFALLBACK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; TFFALLBACK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; TFFALLBACK: middle.block:
+; TFFALLBACK-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; TFFALLBACK: scalar.ph:
+; TFFALLBACK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1024, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; TFFALLBACK-NEXT: br label [[FOR_BODY:%.*]]
+; TFFALLBACK: for.body:
+; TFFALLBACK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; TFFALLBACK-NEXT: [[GEP:%.*]] = getelementptr i64, i64* [[B]], i64 [[INDVARS_IV]]
+; TFFALLBACK-NEXT: [[LOAD:%.*]] = load i64, i64* [[GEP]], align 4
+; TFFALLBACK-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR2]]
+; TFFALLBACK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[INDVARS_IV]]
+; TFFALLBACK-NEXT: store i64 [[CALL]], i64* [[ARRAYIDX]], align 4
+; TFFALLBACK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; TFFALLBACK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
+; TFFALLBACK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
+; TFFALLBACK: for.cond.cleanup:
+; TFFALLBACK-NEXT: ret void
+;
 entry:
   br label %for.body
 
@@ -33,11 +124,72 @@ for.cond.cleanup:
 
 ; Check that a simple conditional call can be vectorized.
 define void @test_if_then(i64* noalias %a, i64* readnone %b) #4 {
-; CHECK-LABEL: @test_if_then(
-; LV-NOT: call @foo_vector
-; TFALWAYS-NOT: call @foo_vector
-; TFFALLBACK-NOT: call @foo_vector
-; CHECK: ret void
+; TFNONE-LABEL: @test_if_then(
+; TFNONE-NEXT: entry:
+; TFNONE-NEXT: br label [[FOR_BODY:%.*]]
+; TFNONE: for.body:
+; TFNONE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ 0, [[ENTRY:%.*]] ]
+; TFNONE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDVARS_IV]]
+; TFNONE-NEXT: [[TMP0:%.*]] = load i64, i64* [[ARRAYIDX]], align 8
+; TFNONE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP0]], 50
+; TFNONE-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END]]
+; TFNONE: if.then:
+; TFNONE-NEXT: [[TMP1:%.*]] = call i64 @foo(i64 [[TMP0]]) #[[ATTR2]]
+; TFNONE-NEXT: br label [[IF_END]]
+; TFNONE: if.end:
+; TFNONE-NEXT: [[TMP2:%.*]] = phi i64 [ [[TMP1]], [[IF_THEN]] ], [ 0, [[FOR_BODY]] ]
+; TFNONE-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, i64* [[B:%.*]], i64 [[INDVARS_IV]]
+; TFNONE-NEXT: store i64 [[TMP2]], i64* [[ARRAYIDX1]], align 8
+; TFNONE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; TFNONE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
+; TFNONE-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
+; TFNONE: for.cond.cleanup:
+; TFNONE-NEXT: ret void
+;
+; TFALWAYS-LABEL: @test_if_then(
+; TFALWAYS-NEXT: entry:
+; TFALWAYS-NEXT: br label [[FOR_BODY:%.*]]
+; TFALWAYS: for.body:
+; TFALWAYS-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ 0, [[ENTRY:%.*]] ]
+; TFALWAYS-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDVARS_IV]]
+; TFALWAYS-NEXT: [[TMP0:%.*]] = load i64, i64* [[ARRAYIDX]], align 8
+; TFALWAYS-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP0]], 50
+; TFALWAYS-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END]]
+; TFALWAYS: if.then:
+; TFALWAYS-NEXT: [[TMP1:%.*]] = call i64 @foo(i64 [[TMP0]]) #[[ATTR1]]
+; TFALWAYS-NEXT: br label [[IF_END]]
+; TFALWAYS: if.end:
+; TFALWAYS-NEXT: [[TMP2:%.*]] = phi i64 [ [[TMP1]], [[IF_THEN]] ], [ 0, [[FOR_BODY]] ]
+; TFALWAYS-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, i64* [[B:%.*]], i64 [[INDVARS_IV]]
+; TFALWAYS-NEXT: store i64 [[TMP2]], i64* [[ARRAYIDX1]], align 8
+; TFALWAYS-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; TFALWAYS-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
+; TFALWAYS-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
+; TFALWAYS: for.cond.cleanup:
+; TFALWAYS-NEXT: ret void
+;
+; TFFALLBACK-LABEL: @test_if_then(
+; TFFALLBACK-NEXT: entry:
+; TFFALLBACK-NEXT: br label [[FOR_BODY:%.*]]
+; TFFALLBACK: for.body:
+; TFFALLBACK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ 0, [[ENTRY:%.*]] ]
+; TFFALLBACK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDVARS_IV]]
+; TFFALLBACK-NEXT: [[TMP0:%.*]] = load i64, i64* [[ARRAYIDX]], align 8
+; TFFALLBACK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP0]], 50
+; TFFALLBACK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END]]
+; TFFALLBACK: if.then:
+; TFFALLBACK-NEXT: [[TMP1:%.*]] = call i64 @foo(i64 [[TMP0]]) #[[ATTR2]]
+; TFFALLBACK-NEXT: br label [[IF_END]]
+; TFFALLBACK: if.end:
+; TFFALLBACK-NEXT: [[TMP2:%.*]] = phi i64 [ [[TMP1]], [[IF_THEN]] ], [ 0, [[FOR_BODY]] ]
+; TFFALLBACK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, i64* [[B:%.*]], i64 [[INDVARS_IV]]
+; TFFALLBACK-NEXT: store i64 [[TMP2]], i64* [[ARRAYIDX1]], align 8
+; TFFALLBACK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; TFFALLBACK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
+; TFFALLBACK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
+; TFFALLBACK: for.cond.cleanup:
+; TFFALLBACK-NEXT: ret void
+;
 entry:
   br label %for.body
 
@@ -69,14 +221,81 @@ for.cond.cleanup:
 ; uniform parameter and the metadata lists a uniform variant, right now
 ; we just see a splat of the parameter instead. More work needed.
 define void @test_widen_if_then_else(i64* noalias %a, i64* readnone %b) #4 {
-; CHECK-LABEL: @test_widen_if_then_else
-; LV-NOT: call @foo_vector
-; LV-NOT: call @foo_uniform
-; TFALWAYS-NOT: call @foo_vector
-; TFALWAYS-NOT: call @foo_uniform
-; TFFALLBACK-NOT: call @foo_vector
-; TFFALLBACK-NOT: call @foo_uniform
-; CHECK: ret void
+; TFNONE-LABEL: @test_widen_if_then_else(
+; TFNONE-NEXT: entry:
+; TFNONE-NEXT: br label [[FOR_BODY:%.*]]
+; TFNONE: for.body:
+; TFNONE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ 0, [[ENTRY:%.*]] ]
+; TFNONE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDVARS_IV]]
+; TFNONE-NEXT: [[TMP0:%.*]] = load i64, i64* [[ARRAYIDX]], align 8
+; TFNONE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP0]], 50
+; TFNONE-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; TFNONE: if.then:
+; TFNONE-NEXT: [[TMP1:%.*]] = call i64 @foo(i64 [[TMP0]]) #[[ATTR3:[0-9]+]]
+; TFNONE-NEXT: br label [[IF_END]]
+; TFNONE: if.else:
+; TFNONE-NEXT: [[TMP2:%.*]] = call i64 @foo(i64 0) #[[ATTR3]]
+; TFNONE-NEXT: br label [[IF_END]]
+; TFNONE: if.end:
+; TFNONE-NEXT: [[TMP3:%.*]] = phi i64 [ [[TMP1]], [[IF_THEN]] ], [ [[TMP2]], [[IF_ELSE]] ]
+; TFNONE-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, i64* [[B:%.*]], i64 [[INDVARS_IV]]
+; TFNONE-NEXT: store i64 [[TMP3]], i64* [[ARRAYIDX1]], align 8
+; TFNONE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; TFNONE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
+; TFNONE-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
+; TFNONE: for.cond.cleanup:
+; TFNONE-NEXT: ret void
+;
+; TFALWAYS-LABEL: @test_widen_if_then_else(
+; TFALWAYS-NEXT: entry:
+; TFALWAYS-NEXT: br label [[FOR_BODY:%.*]]
+; TFALWAYS: for.body:
+; TFALWAYS-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ 0, [[ENTRY:%.*]] ]
+; TFALWAYS-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDVARS_IV]]
+; TFALWAYS-NEXT: [[TMP0:%.*]] = load i64, i64* [[ARRAYIDX]], align 8
+; TFALWAYS-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP0]], 50
+; TFALWAYS-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; TFALWAYS: if.then:
+; TFALWAYS-NEXT: [[TMP1:%.*]] = call i64 @foo(i64 [[TMP0]]) #[[ATTR2:[0-9]+]]
+; TFALWAYS-NEXT: br label [[IF_END]]
+; TFALWAYS: if.else:
+; TFALWAYS-NEXT: [[TMP2:%.*]] = call i64 @foo(i64 0) #[[ATTR2]]
+; TFALWAYS-NEXT: br label [[IF_END]]
+; TFALWAYS: if.end:
+; TFALWAYS-NEXT: [[TMP3:%.*]] = phi i64 [ [[TMP1]], [[IF_THEN]] ], [ [[TMP2]], [[IF_ELSE]] ]
+; TFALWAYS-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, i64* [[B:%.*]], i64 [[INDVARS_IV]]
+; TFALWAYS-NEXT: store i64 [[TMP3]], i64* [[ARRAYIDX1]], align 8
+; TFALWAYS-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; TFALWAYS-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
+; TFALWAYS-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
+; TFALWAYS: for.cond.cleanup:
+; TFALWAYS-NEXT: ret void
+;
+; TFFALLBACK-LABEL: @test_widen_if_then_else(
+; TFFALLBACK-NEXT: entry:
+; TFFALLBACK-NEXT: br label [[FOR_BODY:%.*]]
+; TFFALLBACK: for.body:
+; TFFALLBACK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ 0, [[ENTRY:%.*]] ]
+; TFFALLBACK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDVARS_IV]]
+; TFFALLBACK-NEXT: [[TMP0:%.*]] = load i64, i64* [[ARRAYIDX]], align 8
+; TFFALLBACK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP0]], 50
+; TFFALLBACK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; TFFALLBACK: if.then:
+; TFFALLBACK-NEXT: [[TMP1:%.*]] = call i64 @foo(i64 [[TMP0]]) #[[ATTR3:[0-9]+]]
+; TFFALLBACK-NEXT: br label [[IF_END]]
+; TFFALLBACK: if.else:
+; TFFALLBACK-NEXT: [[TMP2:%.*]] = call i64 @foo(i64 0) #[[ATTR3]]
+; TFFALLBACK-NEXT: br label [[IF_END]]
+; TFFALLBACK: if.end:
+; TFFALLBACK-NEXT: [[TMP3:%.*]] = phi i64 [ [[TMP1]], [[IF_THEN]] ], [ [[TMP2]], [[IF_ELSE]] ]
+; TFFALLBACK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, i64* [[B:%.*]], i64 [[INDVARS_IV]]
+; TFFALLBACK-NEXT: store i64 [[TMP3]], i64* [[ARRAYIDX1]], align 8
+; TFFALLBACK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; TFFALLBACK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
+; TFFALLBACK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
+; TFFALLBACK: for.cond.cleanup:
+; TFFALLBACK-NEXT: ret void
+;
 entry:
   br label %for.body
 
@@ -111,12 +330,112 @@ for.cond.cleanup:
 ; a mask. Forcing tail folding results in no vectorized call, whereas an
 ; unpredicated body with scalar tail can use the unmasked variant.
 define void @test_widen_nomask(i64* noalias %a, i64* readnone %b) #4 {
-; CHECK-LABEL: @test_widen_nomask(
-; LV: call @foo_vector_nomask
-; TFALWAYS-NOT: vector.body
-; TFALWAYS-NOT: call @foo_vector_nomask
-; TFFALLBACK: call @foo_vector_nomask
-; CHECK: ret void
+; TFNONE-LABEL: @test_widen_nomask(
+; TFNONE-NEXT: entry:
+; TFNONE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; TFNONE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2
+; TFNONE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
+; TFNONE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; TFNONE: vector.ph:
+; TFNONE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; TFNONE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2
+; TFNONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
+; TFNONE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; TFNONE-NEXT: br label [[VECTOR_BODY:%.*]]
+; TFNONE: vector.body:
+; TFNONE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; TFNONE-NEXT: [[TMP4:%.*]] = getelementptr i64, i64* [[B:%.*]], i64 [[INDEX]]
+; TFNONE-NEXT: [[TMP5:%.*]] = bitcast i64* [[TMP4]] to <vscale x 2 x i64>*
+; TFNONE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP5]], align 4
+; TFNONE-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @foo_vector_nomask(<vscale x 2 x i64> [[WIDE_LOAD]])
+; TFNONE-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDEX]]
+; TFNONE-NEXT: [[TMP8:%.*]] = bitcast i64* [[TMP7]] to <vscale x 2 x i64>*
+; TFNONE-NEXT: store <vscale x 2 x i64> [[TMP6]], <vscale x 2 x i64>* [[TMP8]], align 4
+; TFNONE-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; TFNONE-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 2
+; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; TFNONE-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; TFNONE-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; TFNONE: middle.block:
+; TFNONE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
+; TFNONE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; TFNONE: scalar.ph:
+; TFNONE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; TFNONE-NEXT: br label [[FOR_BODY:%.*]]
+; TFNONE: for.body:
+; TFNONE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; TFNONE-NEXT: [[GEP:%.*]] = getelementptr i64, i64* [[B]], i64 [[INDVARS_IV]]
+; TFNONE-NEXT: [[LOAD:%.*]] = load i64, i64* [[GEP]], align 4
+; TFNONE-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR4:[0-9]+]]
+; TFNONE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[INDVARS_IV]]
+; TFNONE-NEXT: store i64 [[CALL]], i64* [[ARRAYIDX]], align 4
+; TFNONE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; TFNONE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
+; TFNONE-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; TFNONE: for.cond.cleanup:
+; TFNONE-NEXT: ret void
+;
+; TFALWAYS-LABEL: @test_widen_nomask(
+; TFALWAYS-NEXT: entry:
+; TFALWAYS-NEXT: br label [[FOR_BODY:%.*]]
+; TFALWAYS: for.body:
+; TFALWAYS-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; TFALWAYS-NEXT: [[GEP:%.*]] = getelementptr i64, i64* [[B:%.*]], i64 [[INDVARS_IV]]
+; TFALWAYS-NEXT: [[LOAD:%.*]] = load i64, i64* [[GEP]], align 4
+; TFALWAYS-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR3:[0-9]+]]
+; TFALWAYS-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDVARS_IV]]
+; TFALWAYS-NEXT: store i64 [[CALL]], i64* [[ARRAYIDX]], align 4
+; TFALWAYS-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; TFALWAYS-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
+; TFALWAYS-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
+; TFALWAYS: for.cond.cleanup:
+; TFALWAYS-NEXT: ret void
+;
+; TFFALLBACK-LABEL: @test_widen_nomask(
+; TFFALLBACK-NEXT: entry:
+; TFFALLBACK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; TFFALLBACK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2
+; TFFALLBACK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
+; TFFALLBACK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; TFFALLBACK: vector.ph:
+; TFFALLBACK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; TFFALLBACK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2
+; TFFALLBACK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
+; TFFALLBACK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; TFFALLBACK-NEXT: br label [[VECTOR_BODY:%.*]]
+; TFFALLBACK: vector.body:
+; TFFALLBACK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; TFFALLBACK-NEXT: [[TMP4:%.*]] = getelementptr i64, i64* [[B:%.*]], i64 [[INDEX]]
+; TFFALLBACK-NEXT: [[TMP5:%.*]] = bitcast i64* [[TMP4]] to <vscale x 2 x i64>*
+; TFFALLBACK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP5]], align 4
+; TFFALLBACK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @foo_vector_nomask(<vscale x 2 x i64> [[WIDE_LOAD]])
+; TFFALLBACK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDEX]]
+; TFFALLBACK-NEXT: [[TMP8:%.*]] = bitcast i64* [[TMP7]] to <vscale x 2 x i64>*
+; TFFALLBACK-NEXT: store <vscale x 2 x i64> [[TMP6]], <vscale x 2 x i64>* [[TMP8]], align 4
+; TFFALLBACK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; TFFALLBACK-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 2
+; TFFALLBACK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; TFFALLBACK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; TFFALLBACK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; TFFALLBACK: middle.block:
+; TFFALLBACK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
+; TFFALLBACK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; TFFALLBACK: scalar.ph:
+; TFFALLBACK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; TFFALLBACK-NEXT: br label [[FOR_BODY:%.*]]
+; TFFALLBACK: for.body:
+; TFFALLBACK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; TFFALLBACK-NEXT: [[GEP:%.*]] = getelementptr i64, i64* [[B]], i64 [[INDVARS_IV]]
+; TFFALLBACK-NEXT: [[LOAD:%.*]] = load i64, i64* [[GEP]], align 4
+; TFFALLBACK-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR4:[0-9]+]]
+; TFFALLBACK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[INDVARS_IV]]
+; TFFALLBACK-NEXT: store i64 [[CALL]], i64* [[ARRAYIDX]], align 4
+; TFFALLBACK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; TFFALLBACK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
+; TFFALLBACK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; TFFALLBACK: for.cond.cleanup:
+; TFFALLBACK-NEXT: ret void
+;
 entry:
   br label %for.body
 
@@ -139,12 +458,112 @@ for.cond.cleanup:
 ; use the masked version and unpredicated body with scalar tail use the unmasked
 ; version.
 define void @test_widen_optmask(i64* noalias %a, i64* readnone %b) #4 {
-; CHECK-LABEL: @test_widen_optmask(
-; LV: call @foo_vector_nomask
-; TFALWAYS-NOT: vector.body
-; TFALWAYS-NOT: call @foo_vector
-; TFFALLBACK: call @foo_vector_nomask
-; CHECK: ret void
+; TFNONE-LABEL: @test_widen_optmask(
+; TFNONE-NEXT: entry:
+; TFNONE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; TFNONE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2
+; TFNONE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
+; TFNONE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; TFNONE: vector.ph:
+; TFNONE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; TFNONE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2
+; TFNONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
+; TFNONE-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; TFNONE-NEXT: br label [[VECTOR_BODY:%.*]]
+; TFNONE: vector.body:
+; TFNONE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; TFNONE-NEXT: [[TMP4:%.*]] = getelementptr i64, i64* [[B:%.*]], i64 [[INDEX]]
+; TFNONE-NEXT: [[TMP5:%.*]] = bitcast i64* [[TMP4]] to <vscale x 2 x i64>*
+; TFNONE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP5]], align 4
+; TFNONE-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @foo_vector_nomask(<vscale x 2 x i64> [[WIDE_LOAD]])
+; TFNONE-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDEX]]
+; TFNONE-NEXT: [[TMP8:%.*]] = bitcast i64* [[TMP7]] to <vscale x 2 x i64>*
+; TFNONE-NEXT: store <vscale x 2 x i64> [[TMP6]], <vscale x 2 x i64>* [[TMP8]], align 4
+; TFNONE-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; TFNONE-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 2
+; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; TFNONE-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; TFNONE-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; TFNONE: middle.block:
+; TFNONE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
+; TFNONE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; TFNONE: scalar.ph:
+; TFNONE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; TFNONE-NEXT: br label [[FOR_BODY:%.*]]
+; TFNONE: for.body:
+; TFNONE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; TFNONE-NEXT: [[GEP:%.*]] = getelementptr i64, i64* [[B]], i64 [[INDVARS_IV]]
+; TFNONE-NEXT: [[LOAD:%.*]] = load i64, i64* [[GEP]], align 4
+; TFNONE-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR5:[0-9]+]]
+; TFNONE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[INDVARS_IV]]
+; TFNONE-NEXT: store i64 [[CALL]], i64* [[ARRAYIDX]], align 4
+; TFNONE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; TFNONE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
+; TFNONE-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; TFNONE: for.cond.cleanup:
+; TFNONE-NEXT: ret void
+;
+; TFALWAYS-LABEL: @test_widen_optmask(
+; TFALWAYS-NEXT: entry:
+; TFALWAYS-NEXT: br label [[FOR_BODY:%.*]]
+; TFALWAYS: for.body:
+; TFALWAYS-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; TFALWAYS-NEXT: [[GEP:%.*]] = getelementptr i64, i64* [[B:%.*]], i64 [[INDVARS_IV]]
+; TFALWAYS-NEXT: [[LOAD:%.*]] = load i64, i64* [[GEP]], align 4
+; TFALWAYS-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR4:[0-9]+]]
+; TFALWAYS-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDVARS_IV]]
+; TFALWAYS-NEXT: store i64 [[CALL]], i64* [[ARRAYIDX]], align 4
+; TFALWAYS-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; TFALWAYS-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
+; TFALWAYS-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
+; TFALWAYS: for.cond.cleanup:
+; TFALWAYS-NEXT: ret void
+;
+; TFFALLBACK-LABEL: @test_widen_optmask(
+; TFFALLBACK-NEXT: entry:
+; TFFALLBACK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; TFFALLBACK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2
+; TFFALLBACK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
+; TFFALLBACK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; TFFALLBACK: vector.ph:
+; TFFALLBACK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; TFFALLBACK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2
+; TFFALLBACK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
+; TFFALLBACK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; TFFALLBACK-NEXT: br label [[VECTOR_BODY:%.*]]
+; TFFALLBACK: vector.body:
+; TFFALLBACK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; TFFALLBACK-NEXT: [[TMP4:%.*]] = getelementptr i64, i64* [[B:%.*]], i64 [[INDEX]]
+; TFFALLBACK-NEXT: [[TMP5:%.*]] = bitcast i64* [[TMP4]] to <vscale x 2 x i64>*
+; TFFALLBACK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP5]], align 4
+; TFFALLBACK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @foo_vector_nomask(<vscale x 2 x i64> [[WIDE_LOAD]])
+; TFFALLBACK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, i64* [[A:%.*]], i64 [[INDEX]]
+; TFFALLBACK-NEXT: [[TMP8:%.*]] = bitcast i64* [[TMP7]] to <vscale x 2 x i64>*
+; TFFALLBACK-NEXT: store <vscale x 2 x i64> [[TMP6]], <vscale x 2 x i64>* [[TMP8]], align 4
+; TFFALLBACK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; TFFALLBACK-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 2
+; TFFALLBACK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; TFFALLBACK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; TFFALLBACK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; TFFALLBACK: middle.block:
+; TFFALLBACK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
+; TFFALLBACK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; TFFALLBACK: scalar.ph:
+; TFFALLBACK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; TFFALLBACK-NEXT: br label [[FOR_BODY:%.*]]
+; TFFALLBACK: for.body:
+; TFFALLBACK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; TFFALLBACK-NEXT: [[GEP:%.*]] = getelementptr i64, i64* [[B]], i64 [[INDVARS_IV]]
+; TFFALLBACK-NEXT: [[LOAD:%.*]] = load i64, i64* [[GEP]], align 4
+; TFFALLBACK-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR5:[0-9]+]]
+; TFFALLBACK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 [[INDVARS_IV]]
+; TFFALLBACK-NEXT: store i64 [[CALL]], i64* [[ARRAYIDX]], align 4
+; TFFALLBACK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; TFFALLBACK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1024
+; TFFALLBACK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; TFFALLBACK: for.cond.cleanup:
+; TFFALLBACK-NEXT: ret void
+;
 entry:
   br label %for.body