Skip to content

Commit 1d65d9c

Browse files
committed
[VPlan] Match legacy CM in ::computeCost if load is used by load/store.
If a load is scalarized because it is used by a load/store address, the legacy cost model does not pass ScalarEvolution to getAddressComputationCost. Match that behavior in VPReplicateRecipe::computeCost (by passing nullptr instead of &Ctx.SE in that case) so the VPlan-based and legacy cost estimates agree.
1 parent 2a05904 commit 1d65d9c

File tree

2 files changed

+130
-3
lines changed

2 files changed

+130
-3
lines changed

llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3298,10 +3298,11 @@ InstructionCost VPReplicateRecipe::computeCost(ElementCount VF,
32983298
UI->getOpcode(), ValTy, Alignment, AS, Ctx.CostKind, OpInfo);
32993299

33003300
Type *PtrTy = isSingleScalar() ? ScalarPtrTy : toVectorTy(ScalarPtrTy, VF);
3301-
3301+
bool UsedByLoadStoreAddress = isUsedByLoadStoreAddress(this);
33023302
InstructionCost ScalarCost =
33033303
ScalarMemOpCost + Ctx.TTI.getAddressComputationCost(
3304-
PtrTy, &Ctx.SE, nullptr, Ctx.CostKind);
3304+
PtrTy, UsedByLoadStoreAddress ? nullptr : &Ctx.SE,
3305+
nullptr, Ctx.CostKind);
33053306
if (isSingleScalar())
33063307
return ScalarCost;
33073308

@@ -3312,7 +3313,7 @@ InstructionCost VPReplicateRecipe::computeCost(ElementCount VF,
33123313
// vectorized addressing or the loaded value is used as part of an address
33133314
// of another load or store.
33143315
bool PreferVectorizedAddressing = Ctx.TTI.prefersVectorizedAddressing();
3315-
if (PreferVectorizedAddressing || !isUsedByLoadStoreAddress(this)) {
3316+
if (PreferVectorizedAddressing || !UsedByLoadStoreAddress) {
33163317
bool EfficientVectorLoadStore =
33173318
Ctx.TTI.supportsEfficientVectorElementLoadStore();
33183319
if (!(IsLoad && !PreferVectorizedAddressing) &&

llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll

Lines changed: 126 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -454,6 +454,132 @@ exit:
454454
ret void
455455
}
456456

457+
declare i1 @cond()
458+
459+
define double @test_load_used_by_other_load_scev(ptr %ptr.a, ptr %ptr.b, ptr %ptr.c) {
460+
; I64-LABEL: define double @test_load_used_by_other_load_scev(
461+
; I64-SAME: ptr [[PTR_A:%.*]], ptr [[PTR_B:%.*]], ptr [[PTR_C:%.*]]) {
462+
; I64-NEXT: [[ENTRY:.*]]:
463+
; I64-NEXT: br label %[[OUTER_LOOP:.*]]
464+
; I64: [[OUTER_LOOP_LOOPEXIT:.*]]:
465+
; I64-NEXT: br label %[[OUTER_LOOP]]
466+
; I64: [[OUTER_LOOP]]:
467+
; I64-NEXT: [[ACCUM:%.*]] = phi double [ 0.000000e+00, %[[ENTRY]] ], [ [[TMP29:%.*]], %[[OUTER_LOOP_LOOPEXIT]] ]
468+
; I64-NEXT: [[COND:%.*]] = call i1 @cond()
469+
; I64-NEXT: br i1 [[COND]], label %[[INNER_LOOP_PREHEADER:.*]], label %[[EXIT:.*]]
470+
; I64: [[INNER_LOOP_PREHEADER]]:
471+
; I64-NEXT: br label %[[VECTOR_PH:.*]]
472+
; I64: [[VECTOR_PH]]:
473+
; I64-NEXT: br label %[[VECTOR_BODY:.*]]
474+
; I64: [[VECTOR_BODY]]:
475+
; I64-NEXT: [[TMP0:%.*]] = add i64 0, 1
476+
; I64-NEXT: [[TMP1:%.*]] = add i64 1, 1
477+
; I64-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[PTR_C]], i64 [[TMP0]]
478+
; I64-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[PTR_C]], i64 [[TMP1]]
479+
; I64-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[PTR_A]], i64 [[TMP0]]
480+
; I64-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[PTR_A]], i64 [[TMP1]]
481+
; I64-NEXT: [[TMP6:%.*]] = load i64, ptr [[TMP4]], align 8
482+
; I64-NEXT: [[TMP7:%.*]] = load i64, ptr [[TMP5]], align 8
483+
; I64-NEXT: [[TMP8:%.*]] = getelementptr double, ptr [[PTR_B]], i64 [[TMP6]]
484+
; I64-NEXT: [[TMP9:%.*]] = getelementptr double, ptr [[PTR_B]], i64 [[TMP7]]
485+
; I64-NEXT: [[TMP10:%.*]] = load double, ptr [[PTR_A]], align 8
486+
; I64-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x double> poison, double [[TMP10]], i64 0
487+
; I64-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x double> [[BROADCAST_SPLATINSERT]], <2 x double> poison, <2 x i32> zeroinitializer
488+
; I64-NEXT: [[TMP11:%.*]] = fadd <2 x double> [[BROADCAST_SPLAT]], zeroinitializer
489+
; I64-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[TMP2]], i64 8
490+
; I64-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[TMP3]], i64 8
491+
; I64-NEXT: [[TMP14:%.*]] = load double, ptr [[TMP12]], align 8
492+
; I64-NEXT: [[TMP15:%.*]] = load double, ptr [[TMP13]], align 8
493+
; I64-NEXT: [[TMP16:%.*]] = insertelement <2 x double> poison, double [[TMP14]], i32 0
494+
; I64-NEXT: [[TMP17:%.*]] = insertelement <2 x double> [[TMP16]], double [[TMP15]], i32 1
495+
; I64-NEXT: [[TMP18:%.*]] = fmul <2 x double> [[TMP11]], zeroinitializer
496+
; I64-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <2 x double> poison, double [[ACCUM]], i64 0
497+
; I64-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <2 x double> [[BROADCAST_SPLATINSERT1]], <2 x double> poison, <2 x i32> zeroinitializer
498+
; I64-NEXT: [[TMP19:%.*]] = shufflevector <2 x double> [[BROADCAST_SPLAT2]], <2 x double> [[TMP18]], <2 x i32> <i32 1, i32 2>
499+
; I64-NEXT: [[TMP20:%.*]] = fmul <2 x double> [[TMP17]], zeroinitializer
500+
; I64-NEXT: [[TMP21:%.*]] = fadd <2 x double> [[TMP20]], zeroinitializer
501+
; I64-NEXT: [[TMP22:%.*]] = fadd <2 x double> [[TMP21]], splat (double 1.000000e+00)
502+
; I64-NEXT: [[TMP23:%.*]] = load double, ptr [[TMP8]], align 8
503+
; I64-NEXT: [[TMP24:%.*]] = load double, ptr [[TMP9]], align 8
504+
; I64-NEXT: [[TMP25:%.*]] = insertelement <2 x double> poison, double [[TMP23]], i32 0
505+
; I64-NEXT: [[TMP26:%.*]] = insertelement <2 x double> [[TMP25]], double [[TMP24]], i32 1
506+
; I64-NEXT: [[TMP27:%.*]] = fdiv <2 x double> [[TMP26]], [[TMP22]]
507+
; I64-NEXT: [[TMP28:%.*]] = fsub <2 x double> [[TMP19]], [[TMP27]]
508+
; I64-NEXT: br label %[[MIDDLE_BLOCK:.*]]
509+
; I64: [[MIDDLE_BLOCK]]:
510+
; I64-NEXT: [[TMP29]] = extractelement <2 x double> [[TMP28]], i32 1
511+
; I64-NEXT: br label %[[OUTER_LOOP_LOOPEXIT]]
512+
; I64: [[EXIT]]:
513+
; I64-NEXT: ret double [[ACCUM]]
514+
;
515+
; I32-LABEL: define double @test_load_used_by_other_load_scev(
516+
; I32-SAME: ptr [[PTR_A:%.*]], ptr [[PTR_B:%.*]], ptr [[PTR_C:%.*]]) {
517+
; I32-NEXT: [[ENTRY:.*]]:
518+
; I32-NEXT: br label %[[OUTER_LOOP:.*]]
519+
; I32: [[OUTER_LOOP]]:
520+
; I32-NEXT: [[ACCUM:%.*]] = phi double [ 0.000000e+00, %[[ENTRY]] ], [ [[RESULT:%.*]], %[[INNER_LOOP:.*]] ]
521+
; I32-NEXT: [[COND:%.*]] = call i1 @cond()
522+
; I32-NEXT: br i1 [[COND]], label %[[INNER_LOOP]], label %[[EXIT:.*]]
523+
; I32: [[INNER_LOOP]]:
524+
; I32-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[OUTER_LOOP]] ], [ [[IV_NEXT:%.*]], %[[INNER_LOOP]] ]
525+
; I32-NEXT: [[ACCUM_INNER:%.*]] = phi double [ [[ACCUM]], %[[OUTER_LOOP]] ], [ [[MUL1:%.*]], %[[INNER_LOOP]] ]
526+
; I32-NEXT: [[IDX_PLUS1:%.*]] = add i64 [[IV]], 1
527+
; I32-NEXT: [[GEP_C:%.*]] = getelementptr i8, ptr [[PTR_C]], i64 [[IDX_PLUS1]]
528+
; I32-NEXT: [[GEP_A_I64:%.*]] = getelementptr i64, ptr [[PTR_A]], i64 [[IDX_PLUS1]]
529+
; I32-NEXT: [[LOAD_IDX:%.*]] = load i64, ptr [[GEP_A_I64]], align 8
530+
; I32-NEXT: [[GEP_B:%.*]] = getelementptr double, ptr [[PTR_B]], i64 [[LOAD_IDX]]
531+
; I32-NEXT: [[LOAD_A:%.*]] = load double, ptr [[PTR_A]], align 8
532+
; I32-NEXT: [[ADD1:%.*]] = fadd double [[LOAD_A]], 0.000000e+00
533+
; I32-NEXT: [[GEP_C_OFFSET:%.*]] = getelementptr i8, ptr [[GEP_C]], i64 8
534+
; I32-NEXT: [[LOAD_C:%.*]] = load double, ptr [[GEP_C_OFFSET]], align 8
535+
; I32-NEXT: [[MUL1]] = fmul double [[ADD1]], 0.000000e+00
536+
; I32-NEXT: [[MUL2:%.*]] = fmul double [[LOAD_C]], 0.000000e+00
537+
; I32-NEXT: [[ADD2:%.*]] = fadd double [[MUL2]], 0.000000e+00
538+
; I32-NEXT: [[ADD3:%.*]] = fadd double [[ADD2]], 1.000000e+00
539+
; I32-NEXT: [[LOAD_B:%.*]] = load double, ptr [[GEP_B]], align 8
540+
; I32-NEXT: [[DIV:%.*]] = fdiv double [[LOAD_B]], [[ADD3]]
541+
; I32-NEXT: [[RESULT]] = fsub double [[ACCUM_INNER]], [[DIV]]
542+
; I32-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
543+
; I32-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV]], 1
544+
; I32-NEXT: br i1 [[EXITCOND]], label %[[OUTER_LOOP]], label %[[INNER_LOOP]]
545+
; I32: [[EXIT]]:
546+
; I32-NEXT: ret double [[ACCUM]]
547+
;
548+
entry:
549+
br label %outer.loop
550+
551+
outer.loop:
552+
%accum = phi double [ 0.0, %entry ], [ %result, %inner.loop ]
553+
%cond = call i1 @cond()
554+
br i1 %cond, label %inner.loop, label %exit
555+
556+
inner.loop:
557+
%iv = phi i64 [ 0, %outer.loop ], [ %iv.next, %inner.loop ]
558+
%accum.inner = phi double [ %accum, %outer.loop ], [ %mul1, %inner.loop ]
559+
%idx.plus1 = add i64 %iv, 1
560+
%gep.c = getelementptr i8, ptr %ptr.c, i64 %idx.plus1
561+
%gep.a.i64 = getelementptr i64, ptr %ptr.a, i64 %idx.plus1
562+
%load.idx = load i64, ptr %gep.a.i64, align 8
563+
%gep.b = getelementptr double, ptr %ptr.b, i64 %load.idx
564+
%load.a = load double, ptr %ptr.a, align 8
565+
%add1 = fadd double %load.a, 0.000000e+00
566+
%gep.c.offset = getelementptr i8, ptr %gep.c, i64 8
567+
%load.c = load double, ptr %gep.c.offset, align 8
568+
%mul1 = fmul double %add1, 0.000000e+00
569+
%mul2 = fmul double %load.c, 0.000000e+00
570+
%add2 = fadd double %mul2, 0.000000e+00
571+
%add3 = fadd double %add2, 1.000000e+00
572+
%load.b = load double, ptr %gep.b, align 8
573+
%div = fdiv double %load.b, %add3
574+
%result = fsub double %accum.inner, %div
575+
%iv.next = add i64 %iv, 1
576+
%exitcond = icmp eq i64 %iv, 1
577+
br i1 %exitcond, label %outer.loop, label %inner.loop
578+
579+
exit:
580+
ret double %accum
581+
}
582+
457583
attributes #0 = { "target-cpu"="znver2" }
458584

459585
!0 = distinct !{!0, !1}

0 commit comments

Comments (0)