[SVE][Codegen] Lower legal min & max operations
Summary:
This patch adds AArch64ISD nodes for [S|U]MIN_PRED
and [S|U]MAX_PRED, and lowers both SVE intrinsics and
IR operations for min and max to these nodes.

There are two forms of these instructions for SVE: a predicated
form and an immediate (unpredicated) form. The patterns
which existed for the latter have been updated to match a
predicated node with an immediate and map this
to the immediate instruction.

Reviewers: sdesmalen, efriedma, dancgr, rengolin

Reviewed By: efriedma

Subscribers: huihuiz, tschuett, kristof.beyls, hiraditya, rkruppe, psnobl, cfe-commits, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D79087
kmclaughlin-arm committed May 4, 2020
1 parent e737847 commit 19f5da9
Showing 7 changed files with 484 additions and 30 deletions.
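
The summary describes the approach: unpredicated SMIN/UMIN/SMAX/UMAX (and previously SDIV/UDIV) nodes on scalable vectors are rewritten into AArch64ISD::*_PRED nodes governed by an all-active predicate, which the SVE patterns then select. The diff below shows the call sites and the rename of LowerDIV to LowerToPredicatedOp, but not the helper's body, so here is a minimal sketch of what such a helper could look like. The free-function form, the body, and the exact type-construction calls are illustrative assumptions, not the commit's implementation; only the name LowerToPredicatedOp and its (Op, DAG, NewOp) signature come from the diff.

```cpp
// Sketch only: the real helper is a member of AArch64TargetLowering in
// llvm/lib/Target/AArch64/AArch64ISelLowering.cpp; this is a guess at its shape.
static SDValue lowerToPredicatedOpSketch(SDValue Op, SelectionDAG &DAG,
                                         unsigned NewOp) {
  EVT VT = Op.getValueType(); // e.g. nxv4i32 for an ISD::SMIN on <vscale x 4 x i32>
  SDLoc DL(Op);

  // Predicate type with the same element count as the data type,
  // e.g. nxv4i1 for nxv4i32.
  EVT PredVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                VT.getVectorElementCount());

  // All-active governing predicate: PTRUE with pattern 31 ("all"),
  // the same (AArch64ptrue 31) form the updated immediate patterns expect.
  SDValue Pg = DAG.getNode(AArch64ISD::PTRUE, DL, PredVT,
                           DAG.getTargetConstant(31, DL, MVT::i32));

  // Re-emit as the predicated node, e.g. AArch64ISD::SMIN_PRED, whose
  // operands follow the SDT_AArch64Arith profile: (predicate, lhs, rhs).
  return DAG.getNode(NewOp, DL, VT, Pg, Op.getOperand(0), Op.getOperand(1));
}
```

This shape is also why the immediate patterns change: SVE_1_Op_Imm_Arith_Pred_Pat matches the (ptrue 31, x, dup #imm) form and selects the unpredicated immediate instruction (e.g. SMIN_ZI), while SMAX_ZPmZ and friends cover the fully predicated register form.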
2 changes: 1 addition & 1 deletion llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -3092,7 +3092,7 @@ bool AArch64DAGToDAGISel::SelectSVESignedArithImm(SDValue N, SDValue &Imm) {
if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
int64_t ImmVal = CNode->getSExtValue();
SDLoc DL(N);
if (ImmVal >= -127 && ImmVal < 127) {
if (ImmVal >= -128 && ImmVal < 128) {
Imm = CurDAG->getTargetConstant(ImmVal, DL, MVT::i32);
return true;
}
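
The one-line change above is a range fix for the signed arithmetic immediate: the old bound accepted only [-127, 126] and so rejected the simm8 boundary values -128 and 127. Below is a small standalone check of the boundary behaviour, with hypothetical helper names chosen only for this illustration.

```cpp
// Illustration of the immediate-range fix in SelectSVESignedArithImm:
// the new bound accepts exactly the values an 8-bit signed field (simm8)
// can encode, including the boundaries -128 and 127.
#include <cassert>
#include <cstdint>

static bool oldCheck(int64_t ImmVal) { return ImmVal >= -127 && ImmVal < 127; }
static bool newCheck(int64_t ImmVal) { return ImmVal >= -128 && ImmVal < 128; }

int main() {
  for (int64_t V = -130; V <= 130; ++V) {
    // A value fits in simm8 exactly when it survives a round-trip through int8_t.
    bool FitsInSImm8 = (V == static_cast<int8_t>(V));
    assert(newCheck(V) == FitsInSImm8);
  }
  // The boundary values the old bound rejected:
  assert(!oldCheck(-128) && newCheck(-128));
  assert(!oldCheck(127) && newCheck(127));
  return 0;
}
```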
40 changes: 32 additions & 8 deletions llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -188,10 +188,6 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setOperationAction(ISD::UADDSAT, VT, Legal);
setOperationAction(ISD::SSUBSAT, VT, Legal);
setOperationAction(ISD::USUBSAT, VT, Legal);
setOperationAction(ISD::SMAX, VT, Legal);
setOperationAction(ISD::UMAX, VT, Legal);
setOperationAction(ISD::SMIN, VT, Legal);
setOperationAction(ISD::UMIN, VT, Legal);
}

for (auto VT :
@@ -887,6 +883,10 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
setOperationAction(ISD::SDIV, VT, Custom);
setOperationAction(ISD::UDIV, VT, Custom);
setOperationAction(ISD::SMIN, VT, Custom);
setOperationAction(ISD::UMIN, VT, Custom);
setOperationAction(ISD::SMAX, VT, Custom);
setOperationAction(ISD::UMAX, VT, Custom);
}
}
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
@@ -1285,6 +1285,10 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
case AArch64ISD::TLSDESC_CALLSEQ: return "AArch64ISD::TLSDESC_CALLSEQ";
case AArch64ISD::SDIV_PRED: return "AArch64ISD::SDIV_PRED";
case AArch64ISD::UDIV_PRED: return "AArch64ISD::UDIV_PRED";
case AArch64ISD::SMIN_PRED: return "AArch64ISD::SMIN_PRED";
case AArch64ISD::UMIN_PRED: return "AArch64ISD::UMIN_PRED";
case AArch64ISD::SMAX_PRED: return "AArch64ISD::SMAX_PRED";
case AArch64ISD::UMAX_PRED: return "AArch64ISD::UMAX_PRED";
case AArch64ISD::ADC: return "AArch64ISD::ADC";
case AArch64ISD::SBC: return "AArch64ISD::SBC";
case AArch64ISD::ADDS: return "AArch64ISD::ADDS";
@@ -3354,9 +3358,17 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
case ISD::EXTRACT_SUBVECTOR:
return LowerEXTRACT_SUBVECTOR(Op, DAG);
case ISD::SDIV:
return LowerDIV(Op, DAG, AArch64ISD::SDIV_PRED);
return LowerToPredicatedOp(Op, DAG, AArch64ISD::SDIV_PRED);
case ISD::UDIV:
return LowerDIV(Op, DAG, AArch64ISD::UDIV_PRED);
return LowerToPredicatedOp(Op, DAG, AArch64ISD::UDIV_PRED);
case ISD::SMIN:
return LowerToPredicatedOp(Op, DAG, AArch64ISD::SMIN_PRED);
case ISD::UMIN:
return LowerToPredicatedOp(Op, DAG, AArch64ISD::UMIN_PRED);
case ISD::SMAX:
return LowerToPredicatedOp(Op, DAG, AArch64ISD::SMAX_PRED);
case ISD::UMAX:
return LowerToPredicatedOp(Op, DAG, AArch64ISD::UMAX_PRED);
case ISD::SRA:
case ISD::SRL:
case ISD::SHL:
@@ -7663,7 +7675,7 @@ SDValue AArch64TargetLowering::LowerDUPQLane(SDValue Op,
return DAG.getNode(ISD::BITCAST, DL, VT, TBL);
}

SDValue AArch64TargetLowering::LowerDIV(SDValue Op,
SDValue AArch64TargetLowering::LowerToPredicatedOp(SDValue Op,
SelectionDAG &DAG,
unsigned NewOp) const {
EVT VT = Op.getValueType();
@@ -11435,7 +11447,19 @@ static SDValue performIntrinsicCombine(SDNode *N,
return DAG.getNode(AArch64ISD::SDIV_PRED, SDLoc(N), N->getValueType(0),
N->getOperand(1), N->getOperand(2), N->getOperand(3));
case Intrinsic::aarch64_sve_udiv:
return DAG.getNode(AArch64ISD::UDIV_PRED, SDLoc(N), N->getValueType(0),
return DAG.getNode(AArch64ISD::UDIV_PRED, SDLoc(N), N->getValueType(0),
N->getOperand(1), N->getOperand(2), N->getOperand(3));
case Intrinsic::aarch64_sve_smin:
return DAG.getNode(AArch64ISD::SMIN_PRED, SDLoc(N), N->getValueType(0),
N->getOperand(1), N->getOperand(2), N->getOperand(3));
case Intrinsic::aarch64_sve_umin:
return DAG.getNode(AArch64ISD::UMIN_PRED, SDLoc(N), N->getValueType(0),
N->getOperand(1), N->getOperand(2), N->getOperand(3));
case Intrinsic::aarch64_sve_smax:
return DAG.getNode(AArch64ISD::SMAX_PRED, SDLoc(N), N->getValueType(0),
N->getOperand(1), N->getOperand(2), N->getOperand(3));
case Intrinsic::aarch64_sve_umax:
return DAG.getNode(AArch64ISD::UMAX_PRED, SDLoc(N), N->getValueType(0),
N->getOperand(1), N->getOperand(2), N->getOperand(3));
case Intrinsic::aarch64_sve_fadda:
return combineSVEReductionOrderedFP(N, AArch64ISD::FADDA_PRED, DAG);
8 changes: 6 additions & 2 deletions llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -55,6 +55,10 @@ enum NodeType : unsigned {
// Arithmetic instructions
SDIV_PRED,
UDIV_PRED,
SMIN_PRED,
UMIN_PRED,
SMAX_PRED,
UMAX_PRED,

// Arithmetic instructions which write flags.
ADDS,
@@ -793,8 +797,8 @@ class AArch64TargetLowering : public TargetLowering {
SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerDUPQLane(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerDIV(SDValue Op, SelectionDAG &DAG,
unsigned NewOp) const;
SDValue LowerToPredicatedOp(SDValue Op, SelectionDAG &DAG,
unsigned NewOp) const;
SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
26 changes: 15 additions & 11 deletions llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -149,13 +149,17 @@ def AArch64andv_pred : SDNode<"AArch64ISD::ANDV_PRED", SDT_AArch64Reduce>;
def AArch64lasta : SDNode<"AArch64ISD::LASTA", SDT_AArch64Reduce>;
def AArch64lastb : SDNode<"AArch64ISD::LASTB", SDT_AArch64Reduce>;

def SDT_AArch64DIV : SDTypeProfile<1, 3, [
def SDT_AArch64Arith : SDTypeProfile<1, 3, [
SDTCisVec<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisVec<3>,
SDTCVecEltisVT<1,i1>, SDTCisSameAs<2,3>
]>;

def AArch64sdiv_pred : SDNode<"AArch64ISD::SDIV_PRED", SDT_AArch64DIV>;
def AArch64udiv_pred : SDNode<"AArch64ISD::UDIV_PRED", SDT_AArch64DIV>;
def AArch64sdiv_pred : SDNode<"AArch64ISD::SDIV_PRED", SDT_AArch64Arith>;
def AArch64udiv_pred : SDNode<"AArch64ISD::UDIV_PRED", SDT_AArch64Arith>;
def AArch64smin_pred : SDNode<"AArch64ISD::SMIN_PRED", SDT_AArch64Arith>;
def AArch64umin_pred : SDNode<"AArch64ISD::UMIN_PRED", SDT_AArch64Arith>;
def AArch64smax_pred : SDNode<"AArch64ISD::SMAX_PRED", SDT_AArch64Arith>;
def AArch64umax_pred : SDNode<"AArch64ISD::UMAX_PRED", SDT_AArch64Arith>;

def SDT_AArch64ReduceWithInit : SDTypeProfile<1, 3, [SDTCisVec<1>, SDTCisVec<3>]>;
def AArch64clasta_n : SDNode<"AArch64ISD::CLASTA_N", SDT_AArch64ReduceWithInit>;
@@ -232,10 +236,10 @@ let Predicates = [HasSVE] in {
defm EOR_ZI : sve_int_log_imm<0b01, "eor", "eon", xor>;
defm AND_ZI : sve_int_log_imm<0b10, "and", "bic", and>;

defm SMAX_ZI : sve_int_arith_imm1<0b00, "smax", smax>;
defm SMIN_ZI : sve_int_arith_imm1<0b10, "smin", smin>;
defm UMAX_ZI : sve_int_arith_imm1_unsigned<0b01, "umax", umax>;
defm UMIN_ZI : sve_int_arith_imm1_unsigned<0b11, "umin", umin>;
defm SMAX_ZI : sve_int_arith_imm1<0b00, "smax", AArch64smax_pred>;
defm SMIN_ZI : sve_int_arith_imm1<0b10, "smin", AArch64smin_pred>;
defm UMAX_ZI : sve_int_arith_imm1_unsigned<0b01, "umax", AArch64umax_pred>;
defm UMIN_ZI : sve_int_arith_imm1_unsigned<0b11, "umin", AArch64umin_pred>;

defm MUL_ZI : sve_int_arith_imm2<"mul", mul>;
defm MUL_ZPmZ : sve_int_bin_pred_arit_2<0b000, "mul", int_aarch64_sve_mul>;
@@ -280,10 +284,10 @@ let Predicates = [HasSVE] in {
defm FABS_ZPmZ : sve_int_un_pred_arit_1_fp<0b100, "fabs", int_aarch64_sve_fabs>;
defm FNEG_ZPmZ : sve_int_un_pred_arit_1_fp<0b101, "fneg", int_aarch64_sve_fneg>;

defm SMAX_ZPmZ : sve_int_bin_pred_arit_1<0b000, "smax", int_aarch64_sve_smax>;
defm UMAX_ZPmZ : sve_int_bin_pred_arit_1<0b001, "umax", int_aarch64_sve_umax>;
defm SMIN_ZPmZ : sve_int_bin_pred_arit_1<0b010, "smin", int_aarch64_sve_smin>;
defm UMIN_ZPmZ : sve_int_bin_pred_arit_1<0b011, "umin", int_aarch64_sve_umin>;
defm SMAX_ZPmZ : sve_int_bin_pred_arit_1<0b000, "smax", AArch64smax_pred>;
defm UMAX_ZPmZ : sve_int_bin_pred_arit_1<0b001, "umax", AArch64umax_pred>;
defm SMIN_ZPmZ : sve_int_bin_pred_arit_1<0b010, "smin", AArch64smin_pred>;
defm UMIN_ZPmZ : sve_int_bin_pred_arit_1<0b011, "umin", AArch64umin_pred>;
defm SABD_ZPmZ : sve_int_bin_pred_arit_1<0b100, "sabd", int_aarch64_sve_sabd>;
defm UABD_ZPmZ : sve_int_bin_pred_arit_1<0b101, "uabd", int_aarch64_sve_uabd>;

21 changes: 13 additions & 8 deletions llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -324,6 +324,11 @@ class SVE_1_Op_Imm_Arith_Pat<ValueType vt, SDPatternOperator op, ZPRRegOp zprty,
: Pat<(vt (op (vt zprty:$Op1), (vt (AArch64dup (it (cpx i32:$imm)))))),
(inst $Op1, i32:$imm)>;

class SVE_1_Op_Imm_Arith_Pred_Pat<ValueType vt, ValueType pt, SDPatternOperator op,
ZPRRegOp zprty, ValueType it, ComplexPattern cpx, Instruction inst>
: Pat<(vt (op (pt (AArch64ptrue 31)), (vt zprty:$Op1), (vt (AArch64dup (it (cpx i32:$imm)))))),
(inst $Op1, i32:$imm)>;

class SVE_1_Op_Imm_Log_Pat<ValueType vt, SDPatternOperator op, ZPRRegOp zprty,
ValueType it, ComplexPattern cpx, Instruction inst>
: Pat<(vt (op (vt zprty:$Op1), (vt (AArch64dup (it (cpx i64:$imm)))))),
@@ -3840,10 +3845,10 @@ multiclass sve_int_arith_imm1<bits<2> opc, string asm, SDPatternOperator op> {
def _S : sve_int_arith_imm<0b10, { 0b1010, opc }, asm, ZPR32, simm8>;
def _D : sve_int_arith_imm<0b11, { 0b1010, opc }, asm, ZPR64, simm8>;

def : SVE_1_Op_Imm_Arith_Pat<nxv16i8, op, ZPR8, i32, SVEArithSImmPat, !cast<Instruction>(NAME # _B)>;
def : SVE_1_Op_Imm_Arith_Pat<nxv8i16, op, ZPR16, i32, SVEArithSImmPat, !cast<Instruction>(NAME # _H)>;
def : SVE_1_Op_Imm_Arith_Pat<nxv4i32, op, ZPR32, i32, SVEArithSImmPat, !cast<Instruction>(NAME # _S)>;
def : SVE_1_Op_Imm_Arith_Pat<nxv2i64, op, ZPR64, i64, SVEArithSImmPat, !cast<Instruction>(NAME # _D)>;
def : SVE_1_Op_Imm_Arith_Pred_Pat<nxv16i8, nxv16i1, op, ZPR8, i32, SVEArithSImmPat, !cast<Instruction>(NAME # _B)>;
def : SVE_1_Op_Imm_Arith_Pred_Pat<nxv8i16, nxv8i1, op, ZPR16, i32, SVEArithSImmPat, !cast<Instruction>(NAME # _H)>;
def : SVE_1_Op_Imm_Arith_Pred_Pat<nxv4i32, nxv4i1, op, ZPR32, i32, SVEArithSImmPat, !cast<Instruction>(NAME # _S)>;
def : SVE_1_Op_Imm_Arith_Pred_Pat<nxv2i64, nxv2i1, op, ZPR64, i64, SVEArithSImmPat, !cast<Instruction>(NAME # _D)>;
}

multiclass sve_int_arith_imm1_unsigned<bits<2> opc, string asm, SDPatternOperator op> {
@@ -3852,10 +3857,10 @@ multiclass sve_int_arith_imm1_unsigned<bits<2> opc, string asm, SDPatternOperato
def _S : sve_int_arith_imm<0b10, { 0b1010, opc }, asm, ZPR32, imm0_255>;
def _D : sve_int_arith_imm<0b11, { 0b1010, opc }, asm, ZPR64, imm0_255>;

def : SVE_1_Op_Imm_Arith_Pat<nxv16i8, op, ZPR8, i32, SVEArithUImmPat, !cast<Instruction>(NAME # _B)>;
def : SVE_1_Op_Imm_Arith_Pat<nxv8i16, op, ZPR16, i32, SVEArithUImmPat, !cast<Instruction>(NAME # _H)>;
def : SVE_1_Op_Imm_Arith_Pat<nxv4i32, op, ZPR32, i32, SVEArithUImmPat, !cast<Instruction>(NAME # _S)>;
def : SVE_1_Op_Imm_Arith_Pat<nxv2i64, op, ZPR64, i64, SVEArithUImmPat, !cast<Instruction>(NAME # _D)>;
def : SVE_1_Op_Imm_Arith_Pred_Pat<nxv16i8, nxv16i1, op, ZPR8, i32, SVEArithUImmPat, !cast<Instruction>(NAME # _B)>;
def : SVE_1_Op_Imm_Arith_Pred_Pat<nxv8i16, nxv8i1, op, ZPR16, i32, SVEArithUImmPat, !cast<Instruction>(NAME # _H)>;
def : SVE_1_Op_Imm_Arith_Pred_Pat<nxv4i32, nxv4i1, op, ZPR32, i32, SVEArithUImmPat, !cast<Instruction>(NAME # _S)>;
def : SVE_1_Op_Imm_Arith_Pred_Pat<nxv2i64, nxv2i1, op, ZPR64, i64, SVEArithUImmPat, !cast<Instruction>(NAME # _D)>;
}

multiclass sve_int_arith_imm2<string asm, SDPatternOperator op> {
176 changes: 176 additions & 0 deletions llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll
@@ -43,3 +43,179 @@ define <vscale x 2 x i64> @udiv_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b
%div = udiv <vscale x 2 x i64> %a, %b
ret <vscale x 2 x i64> %div
}

;
; SMIN
;

define <vscale x 16 x i8> @smin_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
; CHECK-LABEL: @smin_i8
; CHECK-DAG: ptrue p0.b
; CHECK-DAG: smin z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
%cmp = icmp slt <vscale x 16 x i8> %a, %b
%min = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b
ret <vscale x 16 x i8> %min
}

define <vscale x 8 x i16> @smin_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
; CHECK-LABEL: @smin_i16
; CHECK-DAG: ptrue p0.h
; CHECK-DAG: smin z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
%cmp = icmp slt <vscale x 8 x i16> %a, %b
%min = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b
ret <vscale x 8 x i16> %min
}

define <vscale x 4 x i32> @smin_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: smin_i32:
; CHECK-DAG: ptrue p0.s
; CHECK-DAG: smin z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
%cmp = icmp slt <vscale x 4 x i32> %a, %b
%min = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b
ret <vscale x 4 x i32> %min
}

define <vscale x 2 x i64> @smin_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
; CHECK-LABEL: smin_i64:
; CHECK-DAG: ptrue p0.d
; CHECK-DAG: smin z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
%cmp = icmp slt <vscale x 2 x i64> %a, %b
%min = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b
ret <vscale x 2 x i64> %min
}

;
; UMIN
;

define <vscale x 16 x i8> @umin_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
; CHECK-LABEL: @umin_i8
; CHECK-DAG: ptrue p0.b
; CHECK-DAG: umin z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
%cmp = icmp ult <vscale x 16 x i8> %a, %b
%min = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b
ret <vscale x 16 x i8> %min
}

define <vscale x 8 x i16> @umin_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
; CHECK-LABEL: @umin_i16
; CHECK-DAG: ptrue p0.h
; CHECK-DAG: umin z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
%cmp = icmp ult <vscale x 8 x i16> %a, %b
%min = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b
ret <vscale x 8 x i16> %min
}

define <vscale x 4 x i32> @umin_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: umin_i32:
; CHECK-DAG: ptrue p0.s
; CHECK-DAG: umin z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
%cmp = icmp ult <vscale x 4 x i32> %a, %b
%min = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b
ret <vscale x 4 x i32> %min
}

define <vscale x 2 x i64> @umin_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
; CHECK-LABEL: umin_i64:
; CHECK-DAG: ptrue p0.d
; CHECK-DAG: umin z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
%cmp = icmp ult <vscale x 2 x i64> %a, %b
%min = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b
ret <vscale x 2 x i64> %min
}

;
; SMAX
;

define <vscale x 16 x i8> @smax_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
; CHECK-LABEL: @smax_i8
; CHECK-DAG: ptrue p0.b
; CHECK-DAG: smax z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
%cmp = icmp sgt <vscale x 16 x i8> %a, %b
%min = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b
ret <vscale x 16 x i8> %min
}

define <vscale x 8 x i16> @smax_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
; CHECK-LABEL: @smax_i16
; CHECK-DAG: ptrue p0.h
; CHECK-DAG: smax z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
%cmp = icmp sgt <vscale x 8 x i16> %a, %b
%min = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b
ret <vscale x 8 x i16> %min
}

define <vscale x 4 x i32> @smax_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: smax_i32:
; CHECK-DAG: ptrue p0.s
; CHECK-DAG: smax z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
%cmp = icmp sgt <vscale x 4 x i32> %a, %b
%min = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b
ret <vscale x 4 x i32> %min
}

define <vscale x 2 x i64> @smax_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
; CHECK-LABEL: smax_i64:
; CHECK-DAG: ptrue p0.d
; CHECK-DAG: smax z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
%cmp = icmp sgt <vscale x 2 x i64> %a, %b
%min = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b
ret <vscale x 2 x i64> %min
}

;
; UMAX
;

define <vscale x 16 x i8> @umax_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
; CHECK-LABEL: @umax_i8
; CHECK-DAG: ptrue p0.b
; CHECK-DAG: umax z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: ret
%cmp = icmp ugt <vscale x 16 x i8> %a, %b
%min = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b
ret <vscale x 16 x i8> %min
}

define <vscale x 8 x i16> @umax_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
; CHECK-LABEL: @umax_i16
; CHECK-DAG: ptrue p0.h
; CHECK-DAG: umax z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: ret
%cmp = icmp ugt <vscale x 8 x i16> %a, %b
%min = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b
ret <vscale x 8 x i16> %min
}

define <vscale x 4 x i32> @umax_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
; CHECK-LABEL: umax_i32:
; CHECK-DAG: ptrue p0.s
; CHECK-DAG: umax z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
%cmp = icmp ugt <vscale x 4 x i32> %a, %b
%min = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b
ret <vscale x 4 x i32> %min
}

define <vscale x 2 x i64> @umax_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
; CHECK-LABEL: umax_i64:
; CHECK-DAG: ptrue p0.d
; CHECK-DAG: umax z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
%cmp = icmp ugt <vscale x 2 x i64> %a, %b
%min = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b
ret <vscale x 2 x i64> %min
}
