[SVE][CodeGen] Lower scalable floating-point vector reductions
Changes in this patch:
- Minor changes to the LowerVECREDUCE_SEQ_FADD function added by @cameron.mcinally
  so that it also works for scalable types (see the sketch after this list)
- Added TableGen patterns for FP reductions with unpacked types (nxv2f16, nxv4f16 & nxv2f32)
- Report a fatal error from expandFMINNUM_FMAXNUM & expandVecReduceSeq for scalable types,
  since expanding these operations is not defined for scalable vectors
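For context (not part of this patch), here is a minimal, hypothetical sketch of how a pass might create one of these reductions with IRBuilder; the function and its name are invented for illustration, but CreateFAddReduce emits the llvm.vector.reduce.fadd intrinsic that the SVE backend now lowers to FADDA:

// Hypothetical illustration only; not part of this commit.
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Emits: define half @strict_fadd_reduce(half %init, <vscale x 8 x half> %v)
Function *emitScalableFAddReduce(Module &M) {
  LLVMContext &Ctx = M.getContext();
  Type *HalfTy = Type::getHalfTy(Ctx);
  // <vscale x 8 x half> -- the packed SVE half-precision type (nxv8f16).
  Type *VecTy = ScalableVectorType::get(HalfTy, 8);

  FunctionType *FTy =
      FunctionType::get(HalfTy, {HalfTy, VecTy}, /*isVarArg=*/false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage,
                                 "strict_fadd_reduce", M);
  IRBuilder<> B(BasicBlock::Create(Ctx, "entry", F));

  // No fast-math flags, so this is the strictly ordered (sequential) form;
  // with this patch AArch64 lowers it to FADDA instead of failing.
  Value *Res = B.CreateFAddReduce(F->getArg(0), F->getArg(1));
  B.CreateRet(Res);
  return F;
}

Setting reassociation/fast flags on the builder before CreateFAddReduce should instead produce the unordered form, which the faddv_* tests below expect to lower to FADDV followed by a scalar fadd.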

Reviewed By: cameron.mcinally

Differential Revision: https://reviews.llvm.org/D93050
kmclaughlin-arm committed Dec 14, 2020
1 parent 1de3e7f commit c5ced82
Showing 5 changed files with 350 additions and 6 deletions.
10 changes: 10 additions & 0 deletions llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -6593,6 +6593,11 @@ SDValue TargetLowering::expandFMINNUM_FMAXNUM(SDNode *Node,
  unsigned NewOp = Node->getOpcode() == ISD::FMINNUM ?
                   ISD::FMINNUM_IEEE : ISD::FMAXNUM_IEEE;
  EVT VT = Node->getValueType(0);

  if (VT.isScalableVector())
    report_fatal_error(
        "Expanding fminnum/fmaxnum for scalable vectors is undefined.");

  if (isOperationLegalOrCustom(NewOp, VT)) {
    SDValue Quiet0 = Node->getOperand(0);
    SDValue Quiet1 = Node->getOperand(1);
@@ -8142,6 +8147,11 @@ SDValue TargetLowering::expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const

  EVT VT = VecOp.getValueType();
  EVT EltVT = VT.getVectorElementType();

  if (VT.isScalableVector())
    report_fatal_error(
        "Expanding reductions for scalable vectors is undefined.");

  unsigned NumElts = VT.getVectorNumElements();

  SmallVector<SDValue, 8> Ops;
19 changes: 13 additions & 6 deletions llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1147,6 +1147,10 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
      setOperationAction(ISD::FABS, VT, Custom);
      setOperationAction(ISD::FP_EXTEND, VT, Custom);
      setOperationAction(ISD::FP_ROUND, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
    }

    for (auto VT : {MVT::nxv2bf16, MVT::nxv4bf16, MVT::nxv8bf16})
@@ -16765,18 +16769,18 @@ SDValue AArch64TargetLowering::LowerVECREDUCE_SEQ_FADD(SDValue ScalarOp,
  EVT SrcVT = VecOp.getValueType();
  EVT ResVT = SrcVT.getVectorElementType();

  // Only fixed length FADDA handled for now.
  if (!useSVEForFixedLengthVectorVT(SrcVT, /*OverrideNEON=*/true))
    return SDValue();
  EVT ContainerVT = SrcVT;
  if (SrcVT.isFixedLengthVector()) {
    ContainerVT = getContainerForFixedLengthVector(DAG, SrcVT);
    VecOp = convertToScalableVector(DAG, ContainerVT, VecOp);
  }

  SDValue Pg = getPredicateForVector(DAG, DL, SrcVT);
  EVT ContainerVT = getContainerForFixedLengthVector(DAG, SrcVT);
  SDValue Zero = DAG.getConstant(0, DL, MVT::i64);

  // Convert operands to Scalable.
  AccOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ContainerVT,
                      DAG.getUNDEF(ContainerVT), AccOp, Zero);
  VecOp = convertToScalableVector(DAG, ContainerVT, VecOp);

  // Perform reduction.
  SDValue Rdx = DAG.getNode(AArch64ISD::FADDA_PRED, DL, ContainerVT,
@@ -16833,9 +16837,12 @@ SDValue AArch64TargetLowering::LowerReductionToSVE(unsigned Opcode,
  // UADDV always returns an i64 result.
  EVT ResVT = (Opcode == AArch64ISD::UADDV_PRED) ? MVT::i64 :
              SrcVT.getVectorElementType();
  EVT RdxVT = SrcVT;
  if (SrcVT.isFixedLengthVector() || Opcode == AArch64ISD::UADDV_PRED)
    RdxVT = getPackedSVEVectorVT(ResVT);

  SDValue Pg = getPredicateForVector(DAG, DL, SrcVT);
  SDValue Rdx = DAG.getNode(Opcode, DL, getPackedSVEVectorVT(ResVT), Pg, VecOp);
  SDValue Rdx = DAG.getNode(Opcode, DL, RdxVT, Pg, VecOp);
  SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT,
                            Rdx, DAG.getConstant(0, DL, MVT::i64));
6 changes: 6 additions & 0 deletions llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -4551,7 +4551,10 @@ multiclass sve_fp_fast_red<bits<3> opc, string asm, SDPatternOperator op> {
  def _S : sve_fp_fast_red<0b10, opc, asm, ZPR32, FPR32asZPR>;
  def _D : sve_fp_fast_red<0b11, opc, asm, ZPR64, FPR64asZPR>;

  def : SVE_2_Op_Pat<nxv2f16, op, nxv2i1, nxv2f16, !cast<Instruction>(NAME # _H)>;
  def : SVE_2_Op_Pat<nxv4f16, op, nxv4i1, nxv4f16, !cast<Instruction>(NAME # _H)>;
  def : SVE_2_Op_Pat<nxv8f16, op, nxv8i1, nxv8f16, !cast<Instruction>(NAME # _H)>;
  def : SVE_2_Op_Pat<nxv2f32, op, nxv2i1, nxv2f32, !cast<Instruction>(NAME # _S)>;
  def : SVE_2_Op_Pat<nxv4f32, op, nxv4i1, nxv4f32, !cast<Instruction>(NAME # _S)>;
  def : SVE_2_Op_Pat<nxv2f64, op, nxv2i1, nxv2f64, !cast<Instruction>(NAME # _D)>;
}
@@ -4587,7 +4590,10 @@ multiclass sve_fp_2op_p_vd<bits<3> opc, string asm, SDPatternOperator op> {
  def _S : sve_fp_2op_p_vd<0b10, opc, asm, ZPR32, FPR32asZPR>;
  def _D : sve_fp_2op_p_vd<0b11, opc, asm, ZPR64, FPR64asZPR>;

  def : SVE_3_Op_Pat<nxv2f16, op, nxv2i1, nxv2f16, nxv2f16, !cast<Instruction>(NAME # _H)>;
  def : SVE_3_Op_Pat<nxv4f16, op, nxv4i1, nxv4f16, nxv4f16, !cast<Instruction>(NAME # _H)>;
  def : SVE_3_Op_Pat<nxv8f16, op, nxv8i1, nxv8f16, nxv8f16, !cast<Instruction>(NAME # _H)>;
  def : SVE_3_Op_Pat<nxv2f32, op, nxv2i1, nxv2f32, nxv2f32, !cast<Instruction>(NAME # _S)>;
  def : SVE_3_Op_Pat<nxv4f32, op, nxv4i1, nxv4f32, nxv4f32, !cast<Instruction>(NAME # _S)>;
  def : SVE_3_Op_Pat<nxv2f64, op, nxv2i1, nxv2f64, nxv2f64, !cast<Instruction>(NAME # _D)>;
}
256 changes: 256 additions & 0 deletions llvm/test/CodeGen/AArch64/sve-fp-reduce.ll
@@ -0,0 +1,256 @@
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -asm-verbose=0 < %s 2>%t | FileCheck %s
; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t

; If this check fails please read test/CodeGen/AArch64/README for instructions on how to resolve it.
; WARN-NOT: warning

; FADD

define half @fadda_nxv2f16(half %init, <vscale x 2 x half> %a) {
; CHECK-LABEL: fadda_nxv2f16:
; CHECK: ptrue p0.d
; CHECK-NEXT: fadda h0, p0, h0, z1.h
; CHECK-NEXT: ret
%res = call half @llvm.vector.reduce.fadd.nxv2f16(half %init, <vscale x 2 x half> %a)
ret half %res
}

define half @fadda_nxv4f16(half %init, <vscale x 4 x half> %a) {
; CHECK-LABEL: fadda_nxv4f16:
; CHECK: ptrue p0.s
; CHECK-NEXT: fadda h0, p0, h0, z1.h
; CHECK-NEXT: ret
%res = call half @llvm.vector.reduce.fadd.nxv4f16(half %init, <vscale x 4 x half> %a)
ret half %res
}

define half @fadda_nxv8f16(half %init, <vscale x 8 x half> %a) {
; CHECK-LABEL: fadda_nxv8f16:
; CHECK: ptrue p0.h
; CHECK-NEXT: fadda h0, p0, h0, z1.h
; CHECK-NEXT: ret
%res = call half @llvm.vector.reduce.fadd.nxv8f16(half %init, <vscale x 8 x half> %a)
ret half %res
}

define float @fadda_nxv2f32(float %init, <vscale x 2 x float> %a) {
; CHECK-LABEL: fadda_nxv2f32:
; CHECK: ptrue p0.d
; CHECK-NEXT: fadda s0, p0, s0, z1.s
; CHECK-NEXT: ret
%res = call float @llvm.vector.reduce.fadd.nxv2f32(float %init, <vscale x 2 x float> %a)
ret float %res
}

define float @fadda_nxv4f32(float %init, <vscale x 4 x float> %a) {
; CHECK-LABEL: fadda_nxv4f32:
; CHECK: ptrue p0.s
; CHECK-NEXT: fadda s0, p0, s0, z1.s
; CHECK-NEXT: ret
%res = call float @llvm.vector.reduce.fadd.nxv4f32(float %init, <vscale x 4 x float> %a)
ret float %res
}

define double @fadda_nxv2f64(double %init, <vscale x 2 x double> %a) {
; CHECK-LABEL: fadda_nxv2f64:
; CHECK: ptrue p0.d
; CHECK-NEXT: fadda d0, p0, d0, z1.d
; CHECK-NEXT: ret
%res = call double @llvm.vector.reduce.fadd.nxv2f64(double %init, <vscale x 2 x double> %a)
ret double %res
}

; FADDV

define half @faddv_nxv2f16(half %init, <vscale x 2 x half> %a) {
; CHECK-LABEL: faddv_nxv2f16:
; CHECK: ptrue p0.d
; CHECK-NEXT: faddv h1, p0, z1.h
; CHECK-NEXT: fadd h0, h0, h1
; CHECK-NEXT: ret
%res = call fast half @llvm.vector.reduce.fadd.nxv2f16(half %init, <vscale x 2 x half> %a)
ret half %res
}

define half @faddv_nxv4f16(half %init, <vscale x 4 x half> %a) {
; CHECK-LABEL: faddv_nxv4f16:
; CHECK: ptrue p0.s
; CHECK-NEXT: faddv h1, p0, z1.h
; CHECK-NEXT: fadd h0, h0, h1
; CHECK-NEXT: ret
%res = call fast half @llvm.vector.reduce.fadd.nxv4f16(half %init, <vscale x 4 x half> %a)
ret half %res
}

define half @faddv_nxv8f16(half %init, <vscale x 8 x half> %a) {
; CHECK-LABEL: faddv_nxv8f16:
; CHECK: ptrue p0.h
; CHECK-NEXT: faddv h1, p0, z1.h
; CHECK-NEXT: fadd h0, h0, h1
; CHECK-NEXT: ret
%res = call fast half @llvm.vector.reduce.fadd.nxv8f16(half %init, <vscale x 8 x half> %a)
ret half %res
}

define float @faddv_nxv2f32(float %init, <vscale x 2 x float> %a) {
; CHECK-LABEL: faddv_nxv2f32:
; CHECK: ptrue p0.d
; CHECK-NEXT: faddv s1, p0, z1.s
; CHECK-NEXT: fadd s0, s0, s1
; CHECK-NEXT: ret
%res = call fast float @llvm.vector.reduce.fadd.nxv2f32(float %init, <vscale x 2 x float> %a)
ret float %res
}

define float @faddv_nxv4f32(float %init, <vscale x 4 x float> %a) {
; CHECK-LABEL: faddv_nxv4f32:
; CHECK: ptrue p0.s
; CHECK-NEXT: faddv s1, p0, z1.s
; CHECK-NEXT: fadd s0, s0, s1
; CHECK-NEXT: ret
%res = call fast float @llvm.vector.reduce.fadd.nxv4f32(float %init, <vscale x 4 x float> %a)
ret float %res
}

define double @faddv_nxv2f64(double %init, <vscale x 2 x double> %a) {
; CHECK-LABEL: faddv_nxv2f64:
; CHECK: ptrue p0.d
; CHECK-NEXT: faddv d1, p0, z1.d
; CHECK-NEXT: fadd d0, d0, d1
; CHECK-NEXT: ret
%res = call fast double @llvm.vector.reduce.fadd.nxv2f64(double %init, <vscale x 2 x double> %a)
ret double %res
}

; FMAXV

define half @fmaxv_nxv2f16(<vscale x 2 x half> %a) {
; CHECK-LABEL: fmaxv_nxv2f16:
; CHECK: ptrue p0.d
; CHECK-NEXT: fmaxnmv h0, p0, z0.h
; CHECK-NEXT: ret
%res = call half @llvm.vector.reduce.fmax.nxv2f16(<vscale x 2 x half> %a)
ret half %res
}

define half @fmaxv_nxv4f16(<vscale x 4 x half> %a) {
; CHECK-LABEL: fmaxv_nxv4f16:
; CHECK: ptrue p0.s
; CHECK-NEXT: fmaxnmv h0, p0, z0.h
; CHECK-NEXT: ret
%res = call half @llvm.vector.reduce.fmax.nxv4f16(<vscale x 4 x half> %a)
ret half %res
}

define half @fmaxv_nxv8f16(<vscale x 8 x half> %a) {
; CHECK-LABEL: fmaxv_nxv8f16:
; CHECK: ptrue p0.h
; CHECK-NEXT: fmaxnmv h0, p0, z0.h
; CHECK-NEXT: ret
%res = call half @llvm.vector.reduce.fmax.nxv8f16(<vscale x 8 x half> %a)
ret half %res
}

define float @fmaxv_nxv2f32(<vscale x 2 x float> %a) {
; CHECK-LABEL: fmaxv_nxv2f32:
; CHECK: ptrue p0.d
; CHECK-NEXT: fmaxnmv s0, p0, z0.s
; CHECK-NEXT: ret
%res = call float @llvm.vector.reduce.fmax.nxv2f32(<vscale x 2 x float> %a)
ret float %res
}

define float @fmaxv_nxv4f32(<vscale x 4 x float> %a) {
; CHECK-LABEL: fmaxv_nxv4f32:
; CHECK: ptrue p0.s
; CHECK-NEXT: fmaxnmv s0, p0, z0.s
; CHECK-NEXT: ret
%res = call float @llvm.vector.reduce.fmax.nxv4f32(<vscale x 4 x float> %a)
ret float %res
}

define double @fmaxv_nxv2f64(<vscale x 2 x double> %a) {
; CHECK-LABEL: fmaxv_nxv2f64:
; CHECK: ptrue p0.d
; CHECK-NEXT: fmaxnmv d0, p0, z0.d
; CHECK-NEXT: ret
%res = call double @llvm.vector.reduce.fmax.nxv2f64(<vscale x 2 x double> %a)
ret double %res
}

; FMINV

define half @fminv_nxv2f16(<vscale x 2 x half> %a) {
; CHECK-LABEL: fminv_nxv2f16:
; CHECK: ptrue p0.d
; CHECK-NEXT: fminnmv h0, p0, z0.h
; CHECK-NEXT: ret
%res = call half @llvm.vector.reduce.fmin.nxv2f16(<vscale x 2 x half> %a)
ret half %res
}

define half @fminv_nxv4f16(<vscale x 4 x half> %a) {
; CHECK-LABEL: fminv_nxv4f16:
; CHECK: ptrue p0.s
; CHECK-NEXT: fminnmv h0, p0, z0.h
; CHECK-NEXT: ret
%res = call half @llvm.vector.reduce.fmin.nxv4f16(<vscale x 4 x half> %a)
ret half %res
}

define half @fminv_nxv8f16(<vscale x 8 x half> %a) {
; CHECK-LABEL: fminv_nxv8f16:
; CHECK: ptrue p0.h
; CHECK-NEXT: fminnmv h0, p0, z0.h
; CHECK-NEXT: ret
%res = call half @llvm.vector.reduce.fmin.nxv8f16(<vscale x 8 x half> %a)
ret half %res
}

define float @fminv_nxv2f32(<vscale x 2 x float> %a) {
; CHECK-LABEL: fminv_nxv2f32:
; CHECK: ptrue p0.d
; CHECK-NEXT: fminnmv s0, p0, z0.s
; CHECK-NEXT: ret
%res = call float @llvm.vector.reduce.fmin.nxv2f32(<vscale x 2 x float> %a)
ret float %res
}

define float @fminv_nxv4f32(<vscale x 4 x float> %a) {
; CHECK-LABEL: fminv_nxv4f32:
; CHECK: ptrue p0.s
; CHECK-NEXT: fminnmv s0, p0, z0.s
; CHECK-NEXT: ret
%res = call float @llvm.vector.reduce.fmin.nxv4f32(<vscale x 4 x float> %a)
ret float %res
}

define double @fminv_nxv2f64(<vscale x 2 x double> %a) {
; CHECK-LABEL: fminv_nxv2f64:
; CHECK: ptrue p0.d
; CHECK-NEXT: fminnmv d0, p0, z0.d
; CHECK-NEXT: ret
%res = call double @llvm.vector.reduce.fmin.nxv2f64(<vscale x 2 x double> %a)
ret double %res
}

declare half @llvm.vector.reduce.fadd.nxv2f16(half, <vscale x 2 x half>)
declare half @llvm.vector.reduce.fadd.nxv4f16(half, <vscale x 4 x half>)
declare half @llvm.vector.reduce.fadd.nxv8f16(half, <vscale x 8 x half>)
declare float @llvm.vector.reduce.fadd.nxv2f32(float, <vscale x 2 x float>)
declare float @llvm.vector.reduce.fadd.nxv4f32(float, <vscale x 4 x float>)
declare double @llvm.vector.reduce.fadd.nxv2f64(double, <vscale x 2 x double>)

declare half @llvm.vector.reduce.fmax.nxv2f16(<vscale x 2 x half>)
declare half @llvm.vector.reduce.fmax.nxv4f16(<vscale x 4 x half>)
declare half @llvm.vector.reduce.fmax.nxv8f16(<vscale x 8 x half>)
declare float @llvm.vector.reduce.fmax.nxv2f32(<vscale x 2 x float>)
declare float @llvm.vector.reduce.fmax.nxv4f32(<vscale x 4 x float>)
declare double @llvm.vector.reduce.fmax.nxv2f64(<vscale x 2 x double>)

declare half @llvm.vector.reduce.fmin.nxv2f16(<vscale x 2 x half>)
declare half @llvm.vector.reduce.fmin.nxv4f16(<vscale x 4 x half>)
declare half @llvm.vector.reduce.fmin.nxv8f16(<vscale x 8 x half>)
declare float @llvm.vector.reduce.fmin.nxv2f32(<vscale x 2 x float>)
declare float @llvm.vector.reduce.fmin.nxv4f32(<vscale x 4 x float>)
declare double @llvm.vector.reduce.fmin.nxv2f64(<vscale x 2 x double>)
