[AMDGPU] Fix regression with vectorization limiting
D67148 removed TTI::getNumberOfRegisters(bool Vector) and
started to call TTI::getNumberOfRegisters(unsigned ClassID) from
LoopVectorize. This resulted in unrestricted vectorization on
AMDGPU, blowing up register pressure.

Differential Revision: https://reviews.llvm.org/D122850
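
For context, the number this hook reports feeds directly into the vectorizer's interleave/unroll budget. Below is a minimal standalone sketch of that relationship, not LLVM source: the 256-VGPR figure, the function names, and the per-iteration register estimate are all illustrative assumptions.

// Minimal sketch, not LLVM source: models how a LoopVectorize-style cost
// model turns a TTI-reported register count into an interleave count.
#include <algorithm>
#include <cstdio>

// What the target reports. Before this fix, the answer reaching
// LoopVectorize was effectively the full hardware VGPR budget; after it,
// a small fixed budget.
unsigned numberOfRegisters(bool Limited) { return Limited ? 4 : 256; }

// Roughly: how many concurrent copies of the loop body fit in the budget.
unsigned interleaveCount(unsigned LiveRegsPerIter, bool Limited) {
  return std::max(1u, numberOfRegisters(Limited) / LiveRegsPerIter);
}

int main() {
  std::printf("unrestricted: IC = %u\n", interleaveCount(2, false)); // 128
  std::printf("restricted:   IC = %u\n", interleaveCount(2, true));  // 2
}

With the full hardware budget reported, the interleave count is effectively unbounded; pinning the answer to a small constant restores the throttling that existed before D67148.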
rampitec committed Apr 9, 2022
1 parent 833882b commit fced87d
Showing 4 changed files with 53 additions and 116 deletions.
26 changes: 7 additions & 19 deletions llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -288,33 +288,21 @@ GCNTTIImpl::GCNTTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
     : BaseT(TM, F.getParent()->getDataLayout()),
       ST(static_cast<const GCNSubtarget *>(TM->getSubtargetImpl(F))),
       TLI(ST->getTargetLowering()), CommonTTI(TM, F),
-      IsGraphics(AMDGPU::isGraphics(F.getCallingConv())),
-      MaxVGPRs(ST->getMaxNumVGPRs(
-          std::max(ST->getWavesPerEU(F).first,
-                   ST->getWavesPerEUForWorkGroup(
-                       ST->getFlatWorkGroupSizes(F).second)))) {
+      IsGraphics(AMDGPU::isGraphics(F.getCallingConv())) {
   AMDGPU::SIModeRegisterDefaults Mode(F);
   HasFP32Denormals = Mode.allFP32Denormals();
   HasFP64FP16Denormals = Mode.allFP64FP16Denormals();
 }
 
-unsigned GCNTTIImpl::getHardwareNumberOfRegisters(bool Vec) const {
-  // The concept of vector registers doesn't really exist. Some packed vector
-  // operations operate on the normal 32-bit registers.
-  return MaxVGPRs;
-}
+unsigned GCNTTIImpl::getNumberOfRegisters(unsigned RCID) const {
+  // NB: RCID is not an RCID. In fact it is 0 or 1 for scalar or vector
+  // registers. See getRegisterClassForType for the implementation.
+  // In this case vector registers are not vector in terms of
+  // VGPRs, but those which can hold multiple values.
 
-unsigned GCNTTIImpl::getNumberOfRegisters(bool Vec) const {
   // This is really the number of registers to fill when vectorizing /
   // interleaving loops, so we lie to avoid trying to use all registers.
-  return getHardwareNumberOfRegisters(Vec) >> 3;
-}
-
-unsigned GCNTTIImpl::getNumberOfRegisters(unsigned RCID) const {
-  const SIRegisterInfo *TRI = ST->getRegisterInfo();
-  const TargetRegisterClass *RC = TRI->getRegClass(RCID);
-  unsigned NumVGPRs = (TRI->getRegSizeInBits(*RC) + 31) / 32;
-  return getHardwareNumberOfRegisters(false) / NumVGPRs;
+  return 4;
 }
 
 TypeSize
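
To put numbers on the hunk above: with MaxVGPRs at its 256 maximum, the deleted bool overload reported 256 >> 3 = 32 registers, while the RCID overload that LoopVectorize actually calls since D67148 reported 256 / 1 = 256 for 32-bit values; the patch pins the answer at 4 for either pseudo class. A hedged sketch of the three answers (plain C++, not the LLVM API; the constants are assumptions taken from the diff):

// Standalone sketch of the three register budgets involved in this patch.
#include <cstdio>

constexpr unsigned MaxVGPRs = 256; // illustrative hardware maximum

// Deleted bool overload: deliberately under-reported to keep pressure down.
unsigned oldBoolOverload() { return MaxVGPRs >> 3; } // 32

// Deleted RCID overload, the one LoopVectorize calls since D67148: the
// full hardware budget divided by VGPRs per value (1 for 32-bit types).
unsigned oldRCIDOverload(unsigned RegSizeInBits) {
  return MaxVGPRs / ((RegSizeInBits + 31) / 32); // 256 for 32-bit values
}

// Post-patch hook: one small fixed budget, whichever pseudo class is asked.
unsigned newHook() { return 4; }

int main() {
  std::printf("old bool overload: %u\n", oldBoolOverload());
  std::printf("old RCID overload: %u\n", oldRCIDOverload(32));
  std::printf("new hook:          %u\n", newHook());
}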
3 changes: 0 additions & 3 deletions llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
@@ -68,7 +68,6 @@ class GCNTTIImpl final : public BasicTTIImplBase<GCNTTIImpl> {
   bool IsGraphics;
   bool HasFP32Denormals;
   bool HasFP64FP16Denormals;
-  unsigned MaxVGPRs;
 
   static const FeatureBitset InlineFeatureIgnoreList;
 
@@ -113,8 +112,6 @@ class GCNTTIImpl final : public BasicTTIImplBase<GCNTTIImpl> {
     return TTI::PSK_FastHardware;
   }
 
-  unsigned getHardwareNumberOfRegisters(bool Vector) const;
-  unsigned getNumberOfRegisters(bool Vector) const;
   unsigned getNumberOfRegisters(unsigned RCID) const;
   TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind Vector) const;
   unsigned getMinVectorRegisterBitWidth() const;
24 changes: 24 additions & 0 deletions llvm/test/Transforms/LoopVectorize/AMDGPU/packed-fp32.ll
@@ -0,0 +1,24 @@
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a < %s -loop-vectorize -S | FileCheck -check-prefix=GFX90A %s
+
+; GFX90A-LABEL: @vectorize_v2f32_loop(
+; GFX90A-COUNT-2: load <2 x float>
+; GFX90A-COUNT-2: fadd fast <2 x float>
+
+define float @vectorize_v2f32_loop(float addrspace(1)* noalias %s) {
+entry:
+  br label %for.body
+
+for.body:
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %q.04 = phi float [ 0.0, %entry ], [ %add, %for.body ]
+  %arrayidx = getelementptr inbounds float, float addrspace(1)* %s, i64 %indvars.iv
+  %load = load float, float addrspace(1)* %arrayidx, align 4
+  %add = fadd fast float %q.04, %load
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond = icmp eq i64 %indvars.iv.next, 256
+  br i1 %exitcond, label %for.end, label %for.body
+
+for.end:
+  %add.lcssa = phi float [ %add, %for.body ]
+  ret float %add.lcssa
+}
116 changes: 22 additions & 94 deletions llvm/test/Transforms/LoopVectorize/AMDGPU/packed-math.ll
@@ -11,65 +11,29 @@ define half @vectorize_v2f16_loop(half addrspace(1)* noalias %s) {
 ; GFX9-NEXT: br label [[VECTOR_BODY:%.*]]
 ; GFX9: vector.body:
 ; GFX9-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; GFX9-NEXT: [[VEC_PHI:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
-; GFX9-NEXT: [[VEC_PHI1:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
-; GFX9-NEXT: [[VEC_PHI2:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ]
-; GFX9-NEXT: [[VEC_PHI3:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ]
-; GFX9-NEXT: [[VEC_PHI4:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP20:%.*]], [[VECTOR_BODY]] ]
-; GFX9-NEXT: [[VEC_PHI5:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP21:%.*]], [[VECTOR_BODY]] ]
-; GFX9-NEXT: [[VEC_PHI6:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ]
-; GFX9-NEXT: [[VEC_PHI7:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP23:%.*]], [[VECTOR_BODY]] ]
+; GFX9-NEXT: [[VEC_PHI:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
+; GFX9-NEXT: [[VEC_PHI1:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
 ; GFX9-NEXT: [[TMP0:%.*]] = getelementptr inbounds half, half addrspace(1)* [[S:%.*]], i64 [[INDEX]]
 ; GFX9-NEXT: [[TMP1:%.*]] = bitcast half addrspace(1)* [[TMP0]] to <2 x half> addrspace(1)*
 ; GFX9-NEXT: [[WIDE_LOAD:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP1]], align 2
 ; GFX9-NEXT: [[TMP2:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 2
 ; GFX9-NEXT: [[TMP3:%.*]] = bitcast half addrspace(1)* [[TMP2]] to <2 x half> addrspace(1)*
-; GFX9-NEXT: [[WIDE_LOAD8:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP3]], align 2
-; GFX9-NEXT: [[TMP4:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 4
-; GFX9-NEXT: [[TMP5:%.*]] = bitcast half addrspace(1)* [[TMP4]] to <2 x half> addrspace(1)*
-; GFX9-NEXT: [[WIDE_LOAD9:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP5]], align 2
-; GFX9-NEXT: [[TMP6:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 6
-; GFX9-NEXT: [[TMP7:%.*]] = bitcast half addrspace(1)* [[TMP6]] to <2 x half> addrspace(1)*
-; GFX9-NEXT: [[WIDE_LOAD10:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP7]], align 2
-; GFX9-NEXT: [[TMP8:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 8
-; GFX9-NEXT: [[TMP9:%.*]] = bitcast half addrspace(1)* [[TMP8]] to <2 x half> addrspace(1)*
-; GFX9-NEXT: [[WIDE_LOAD11:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP9]], align 2
-; GFX9-NEXT: [[TMP10:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 10
-; GFX9-NEXT: [[TMP11:%.*]] = bitcast half addrspace(1)* [[TMP10]] to <2 x half> addrspace(1)*
-; GFX9-NEXT: [[WIDE_LOAD12:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP11]], align 2
-; GFX9-NEXT: [[TMP12:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 12
-; GFX9-NEXT: [[TMP13:%.*]] = bitcast half addrspace(1)* [[TMP12]] to <2 x half> addrspace(1)*
-; GFX9-NEXT: [[WIDE_LOAD13:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP13]], align 2
-; GFX9-NEXT: [[TMP14:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 14
-; GFX9-NEXT: [[TMP15:%.*]] = bitcast half addrspace(1)* [[TMP14]] to <2 x half> addrspace(1)*
-; GFX9-NEXT: [[WIDE_LOAD14:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP15]], align 2
-; GFX9-NEXT: [[TMP16]] = fadd fast <2 x half> [[VEC_PHI]], [[WIDE_LOAD]]
-; GFX9-NEXT: [[TMP17]] = fadd fast <2 x half> [[VEC_PHI1]], [[WIDE_LOAD8]]
-; GFX9-NEXT: [[TMP18]] = fadd fast <2 x half> [[VEC_PHI2]], [[WIDE_LOAD9]]
-; GFX9-NEXT: [[TMP19]] = fadd fast <2 x half> [[VEC_PHI3]], [[WIDE_LOAD10]]
-; GFX9-NEXT: [[TMP20]] = fadd fast <2 x half> [[VEC_PHI4]], [[WIDE_LOAD11]]
-; GFX9-NEXT: [[TMP21]] = fadd fast <2 x half> [[VEC_PHI5]], [[WIDE_LOAD12]]
-; GFX9-NEXT: [[TMP22]] = fadd fast <2 x half> [[VEC_PHI6]], [[WIDE_LOAD13]]
-; GFX9-NEXT: [[TMP23]] = fadd fast <2 x half> [[VEC_PHI7]], [[WIDE_LOAD14]]
-; GFX9-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
-; GFX9-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
-; GFX9-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; GFX9-NEXT: [[WIDE_LOAD2:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP3]], align 2
+; GFX9-NEXT: [[TMP4]] = fadd fast <2 x half> [[VEC_PHI]], [[WIDE_LOAD]]
+; GFX9-NEXT: [[TMP5]] = fadd fast <2 x half> [[VEC_PHI1]], [[WIDE_LOAD2]]
+; GFX9-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; GFX9-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
+; GFX9-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; GFX9: middle.block:
-; GFX9-NEXT: [[BIN_RDX:%.*]] = fadd fast <2 x half> [[TMP17]], [[TMP16]]
-; GFX9-NEXT: [[BIN_RDX15:%.*]] = fadd fast <2 x half> [[TMP18]], [[BIN_RDX]]
-; GFX9-NEXT: [[BIN_RDX16:%.*]] = fadd fast <2 x half> [[TMP19]], [[BIN_RDX15]]
-; GFX9-NEXT: [[BIN_RDX17:%.*]] = fadd fast <2 x half> [[TMP20]], [[BIN_RDX16]]
-; GFX9-NEXT: [[BIN_RDX18:%.*]] = fadd fast <2 x half> [[TMP21]], [[BIN_RDX17]]
-; GFX9-NEXT: [[BIN_RDX19:%.*]] = fadd fast <2 x half> [[TMP22]], [[BIN_RDX18]]
-; GFX9-NEXT: [[BIN_RDX20:%.*]] = fadd fast <2 x half> [[TMP23]], [[BIN_RDX19]]
-; GFX9-NEXT: [[TMP25:%.*]] = call fast half @llvm.vector.reduce.fadd.v2f16(half 0xH8000, <2 x half> [[BIN_RDX20]])
+; GFX9-NEXT: [[BIN_RDX:%.*]] = fadd fast <2 x half> [[TMP5]], [[TMP4]]
+; GFX9-NEXT: [[TMP7:%.*]] = call fast half @llvm.vector.reduce.fadd.v2f16(half 0xH8000, <2 x half> [[BIN_RDX]])
 ; GFX9-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
 ; GFX9: scalar.ph:
 ; GFX9-NEXT: br label [[FOR_BODY:%.*]]
 ; GFX9: for.body:
 ; GFX9-NEXT: br i1 undef, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
 ; GFX9: for.end:
-; GFX9-NEXT: [[ADD_LCSSA:%.*]] = phi half [ undef, [[FOR_BODY]] ], [ [[TMP25]], [[MIDDLE_BLOCK]] ]
+; GFX9-NEXT: [[ADD_LCSSA:%.*]] = phi half [ undef, [[FOR_BODY]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
 ; GFX9-NEXT: ret half [[ADD_LCSSA]]
 ;
 ; VI-LABEL: @vectorize_v2f16_loop(
@@ -79,65 +43,29 @@ define half @vectorize_v2f16_loop(half addrspace(1)* noalias %s) {
 ; VI-NEXT: br label [[VECTOR_BODY:%.*]]
 ; VI: vector.body:
 ; VI-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; VI-NEXT: [[VEC_PHI:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
-; VI-NEXT: [[VEC_PHI1:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
-; VI-NEXT: [[VEC_PHI2:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ]
-; VI-NEXT: [[VEC_PHI3:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ]
-; VI-NEXT: [[VEC_PHI4:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP20:%.*]], [[VECTOR_BODY]] ]
-; VI-NEXT: [[VEC_PHI5:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP21:%.*]], [[VECTOR_BODY]] ]
-; VI-NEXT: [[VEC_PHI6:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ]
-; VI-NEXT: [[VEC_PHI7:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP23:%.*]], [[VECTOR_BODY]] ]
+; VI-NEXT: [[VEC_PHI:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
+; VI-NEXT: [[VEC_PHI1:%.*]] = phi <2 x half> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
 ; VI-NEXT: [[TMP0:%.*]] = getelementptr inbounds half, half addrspace(1)* [[S:%.*]], i64 [[INDEX]]
 ; VI-NEXT: [[TMP1:%.*]] = bitcast half addrspace(1)* [[TMP0]] to <2 x half> addrspace(1)*
 ; VI-NEXT: [[WIDE_LOAD:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP1]], align 2
 ; VI-NEXT: [[TMP2:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 2
 ; VI-NEXT: [[TMP3:%.*]] = bitcast half addrspace(1)* [[TMP2]] to <2 x half> addrspace(1)*
-; VI-NEXT: [[WIDE_LOAD8:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP3]], align 2
-; VI-NEXT: [[TMP4:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 4
-; VI-NEXT: [[TMP5:%.*]] = bitcast half addrspace(1)* [[TMP4]] to <2 x half> addrspace(1)*
-; VI-NEXT: [[WIDE_LOAD9:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP5]], align 2
-; VI-NEXT: [[TMP6:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 6
-; VI-NEXT: [[TMP7:%.*]] = bitcast half addrspace(1)* [[TMP6]] to <2 x half> addrspace(1)*
-; VI-NEXT: [[WIDE_LOAD10:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP7]], align 2
-; VI-NEXT: [[TMP8:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 8
-; VI-NEXT: [[TMP9:%.*]] = bitcast half addrspace(1)* [[TMP8]] to <2 x half> addrspace(1)*
-; VI-NEXT: [[WIDE_LOAD11:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP9]], align 2
-; VI-NEXT: [[TMP10:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 10
-; VI-NEXT: [[TMP11:%.*]] = bitcast half addrspace(1)* [[TMP10]] to <2 x half> addrspace(1)*
-; VI-NEXT: [[WIDE_LOAD12:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP11]], align 2
-; VI-NEXT: [[TMP12:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 12
-; VI-NEXT: [[TMP13:%.*]] = bitcast half addrspace(1)* [[TMP12]] to <2 x half> addrspace(1)*
-; VI-NEXT: [[WIDE_LOAD13:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP13]], align 2
-; VI-NEXT: [[TMP14:%.*]] = getelementptr inbounds half, half addrspace(1)* [[TMP0]], i64 14
-; VI-NEXT: [[TMP15:%.*]] = bitcast half addrspace(1)* [[TMP14]] to <2 x half> addrspace(1)*
-; VI-NEXT: [[WIDE_LOAD14:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP15]], align 2
-; VI-NEXT: [[TMP16]] = fadd fast <2 x half> [[VEC_PHI]], [[WIDE_LOAD]]
-; VI-NEXT: [[TMP17]] = fadd fast <2 x half> [[VEC_PHI1]], [[WIDE_LOAD8]]
-; VI-NEXT: [[TMP18]] = fadd fast <2 x half> [[VEC_PHI2]], [[WIDE_LOAD9]]
-; VI-NEXT: [[TMP19]] = fadd fast <2 x half> [[VEC_PHI3]], [[WIDE_LOAD10]]
-; VI-NEXT: [[TMP20]] = fadd fast <2 x half> [[VEC_PHI4]], [[WIDE_LOAD11]]
-; VI-NEXT: [[TMP21]] = fadd fast <2 x half> [[VEC_PHI5]], [[WIDE_LOAD12]]
-; VI-NEXT: [[TMP22]] = fadd fast <2 x half> [[VEC_PHI6]], [[WIDE_LOAD13]]
-; VI-NEXT: [[TMP23]] = fadd fast <2 x half> [[VEC_PHI7]], [[WIDE_LOAD14]]
-; VI-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
-; VI-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
-; VI-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; VI-NEXT: [[WIDE_LOAD2:%.*]] = load <2 x half>, <2 x half> addrspace(1)* [[TMP3]], align 2
+; VI-NEXT: [[TMP4]] = fadd fast <2 x half> [[VEC_PHI]], [[WIDE_LOAD]]
+; VI-NEXT: [[TMP5]] = fadd fast <2 x half> [[VEC_PHI1]], [[WIDE_LOAD2]]
+; VI-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; VI-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
+; VI-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; VI: middle.block:
-; VI-NEXT: [[BIN_RDX:%.*]] = fadd fast <2 x half> [[TMP17]], [[TMP16]]
-; VI-NEXT: [[BIN_RDX15:%.*]] = fadd fast <2 x half> [[TMP18]], [[BIN_RDX]]
-; VI-NEXT: [[BIN_RDX16:%.*]] = fadd fast <2 x half> [[TMP19]], [[BIN_RDX15]]
-; VI-NEXT: [[BIN_RDX17:%.*]] = fadd fast <2 x half> [[TMP20]], [[BIN_RDX16]]
-; VI-NEXT: [[BIN_RDX18:%.*]] = fadd fast <2 x half> [[TMP21]], [[BIN_RDX17]]
-; VI-NEXT: [[BIN_RDX19:%.*]] = fadd fast <2 x half> [[TMP22]], [[BIN_RDX18]]
-; VI-NEXT: [[BIN_RDX20:%.*]] = fadd fast <2 x half> [[TMP23]], [[BIN_RDX19]]
-; VI-NEXT: [[TMP25:%.*]] = call fast half @llvm.vector.reduce.fadd.v2f16(half 0xH8000, <2 x half> [[BIN_RDX20]])
+; VI-NEXT: [[BIN_RDX:%.*]] = fadd fast <2 x half> [[TMP5]], [[TMP4]]
+; VI-NEXT: [[TMP7:%.*]] = call fast half @llvm.vector.reduce.fadd.v2f16(half 0xH8000, <2 x half> [[BIN_RDX]])
 ; VI-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
 ; VI: scalar.ph:
 ; VI-NEXT: br label [[FOR_BODY:%.*]]
 ; VI: for.body:
 ; VI-NEXT: br i1 undef, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
 ; VI: for.end:
-; VI-NEXT: [[ADD_LCSSA:%.*]] = phi half [ undef, [[FOR_BODY]] ], [ [[TMP25]], [[MIDDLE_BLOCK]] ]
+; VI-NEXT: [[ADD_LCSSA:%.*]] = phi half [ undef, [[FOR_BODY]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ]
 ; VI-NEXT: ret half [[ADD_LCSSA]]
 ;
 ; CI-LABEL: @vectorize_v2f16_loop(
